query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
sequencelengths
0
101
negative_scores
sequencelengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
ReadResponse reads a server response into the received o.
func (o *IPAMServicesListReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewIPAMServicesListOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *ResourceHandler) ReadResponse(dataOut unsafe.Pointer, bytesToRead int32, bytesRead *int32, callback *Callback) int32 {\n\treturn lookupResourceHandlerProxy(d.Base()).ReadResponse(d, dataOut, bytesToRead, bytesRead, callback)\n}", "func (o *GetServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *InteractionBindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionBindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionBindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionBindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InteractionUnbindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionUnbindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := 
NewInteractionUnbindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionUnbindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *ResponseReader) ReadResponse(req *Request) (res *Response, err error) {\n\tres = CreateEmptyResponse(req)\n\t_, err = readFirstLine(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = readHeaders(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = readBodyContent(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn res, nil\n}", "func (c *Conn) ReadResponse(rmsg *Response) error {\n\tdata, err := c.ReadDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcolor.Printf(\"@{c}<!-- RESPONSE -->\\n%s\\n\\n\", string(data))\n\terr = xml.Unmarshal(data, rmsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// color.Fprintf(os.Stderr, \"@{y}%s\\n\", spew.Sprintf(\"%+v\", msg))\n\tif len(rmsg.Results) != 0 {\n\t\tr := rmsg.Results[0]\n\t\tif r.IsError() {\n\t\t\treturn r\n\t\t}\n\t}\n\treturn nil\n}", "func (o *VerifyConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewVerifyConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetAvailableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() 
{\n\n\tcase 200:\n\t\tresult := NewGetAvailableOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ClosePositionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewClosePositionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewClosePositionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewClosePositionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewClosePositionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewClosePositionMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DescribeServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDescribeServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewDescribeServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewDescribeServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewDescribeServerConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewDescribeServerInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetServerSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetServerSessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewGetServerSessionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetServerSessionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetServerSessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := 
ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /dsmcontroller/namespaces/{namespace}/servers/{podName}/session returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *StartReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewStartOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (resp *PharosResponse) readResponse() {\n\tif !resp.hasBeenRead && resp.Response != nil && resp.Response.Body != nil {\n\t\tresp.data, resp.Error = ioutil.ReadAll(resp.Response.Body)\n\t\tresp.Response.Body.Close()\n\t\tresp.hasBeenRead = true\n\t}\n}", "func (o *HelloWorldReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHelloWorldOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewHelloWorldBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewHelloWorldInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (reader *BasicRpcReader) ReadResponse(r io.Reader, method string, requestID int32, resp proto.Message) error {\n\trrh := &hadoop.RpcResponseHeaderProto{}\n\terr := readRPCPacket(r, rrh, resp)\n\tif err != nil 
{\n\t\treturn err\n\t} else if int32(rrh.GetCallId()) != requestID {\n\t\treturn errors.New(\"unexpected sequence number\")\n\t} else if rrh.GetStatus() != hadoop.RpcResponseHeaderProto_SUCCESS {\n\t\treturn &NamenodeError{\n\t\t\tmethod: method,\n\t\t\tmessage: rrh.GetErrorMsg(),\n\t\t\tcode: int(rrh.GetErrorDetail()),\n\t\t\texception: rrh.GetExceptionClassName(),\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *UpdateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewUpdateAntivirusServerNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewUpdateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HasEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHasEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewHasEventsUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewHasEventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetV2Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch 
response.Code() {\n\tcase 200:\n\t\tresult := NewGetV2OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetV2InternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewSaveNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSaveInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TestWriteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTestWriteOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewTestWriteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() 
{\n\tcase 200:\n\t\tresult := NewAllConnectionsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewAllConnectionsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewAllConnectionsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDataToDeviceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSendDataToDeviceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSendDataToDeviceBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSendDataToDeviceInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *HealthNoopReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHealthNoopOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PutOutOfRotationReader) 
ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewPutOutOfRotationNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *StatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewStatusUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewStatusForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ReplaceServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult 
:= NewReplaceServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewReplaceServerAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewReplaceServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewReplaceServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewReplaceServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func ReadResponse(r *bfe_bufio.Reader, req *Request) (*Response, error) {\n\ttp := textproto.NewReader(r)\n\tresp := &Response{\n\t\tRequest: req,\n\t}\n\n\t// Parse the first line of the response.\n\tline, err := tp.ReadLine()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\treturn nil, err\n\t}\n\tf := strings.SplitN(line, \" \", 3)\n\tif len(f) < 2 {\n\t\treturn nil, &badStringError{\"malformed HTTP response\", line}\n\t}\n\treasonPhrase := \"\"\n\tif len(f) > 2 {\n\t\treasonPhrase = f[2]\n\t}\n\tresp.Status = f[1] + \" \" + reasonPhrase\n\tresp.StatusCode, err = strconv.Atoi(f[1])\n\tif err != nil {\n\t\treturn nil, &badStringError{\"malformed HTTP status code\", f[1]}\n\t}\n\n\tresp.Proto = f[0]\n\tvar ok bool\n\tif resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {\n\t\treturn nil, &badStringError{\"malformed HTTP version\", resp.Proto}\n\t}\n\n\t// Parse the response headers.\n\tmimeHeader, err := 
tp.ReadMIMEHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Header = Header(mimeHeader)\n\n\tfixPragmaCacheControl(resp.Header)\n\n\terr = readTransfer(resp, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (o *PostChatroomsChannelHashReadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostChatroomsChannelHashReadOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewPostChatroomsChannelHashReadForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TogglePacketGeneratorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewTogglePacketGeneratorsCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *FrontPutBinaryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontPutBinaryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SystemPingReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSystemPingOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSystemPingInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDummyAlertReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSendDummyAlertOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewSendDummyAlertBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewSendDummyAlertNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetViewsConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetViewsConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetViewsConnectionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, 
runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *SyncCopyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSyncCopyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewSyncCopyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostPatientsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostPatientsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewPostPatientsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewPostPatientsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (c *Conn) readResponse(res *response_) error {\n\terr := c.readDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = IgnoreEOF(scanResponse.Scan(c.decoder, res))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.Result.IsError() {\n\t\treturn res.Result\n\t}\n\treturn nil\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n switch response.Code() {\n \n case 200:\n result := NewAllConnectionsOK()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return result, nil\n \n case 400:\n result := NewAllConnectionsBadRequest()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n case 404:\n result := NewAllConnectionsNotFound()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n default:\n return nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n }\n}", "func (o *GetMsgVpnReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetMsgVpnOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewGetMsgVpnDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (r *Response) Read(p []byte) (n int, err error) {\n\n\tif r.Error != nil {\n\t\treturn -1, r.Error\n\t}\n\n\treturn r.RawResponse.Body.Read(p)\n}", "func (o *PostPciLinksMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostPciLinksMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostPciLinksMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 
2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *THSRAPIODFare2121Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTHSRAPIODFare2121OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 299:\n\t\tresult := NewTHSRAPIODFare2121Status299()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 304:\n\t\tresult := NewTHSRAPIODFare2121NotModified()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostGatewayConnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayConnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayConnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DNSGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDNSGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, 
nil\n\tdefault:\n\t\tresult := NewDNSGetDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetGreetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetGreetStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostAPIV2EventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostAPIV2EventsNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostAPIV2EventsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPostAPIV2EventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CreateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCreateAntivirusServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn 
result, nil\n\n\tdefault:\n\t\tresult := NewCreateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostCarsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostCarsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 405:\n\t\tresult := NewPostCarsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *LogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewLogOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewLogNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ChatGetConnectedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewChatGetConnectedOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult 
:= NewChatGetConnectedBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewChatGetConnectedUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewChatGetConnectedNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *WebModifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewWebModifyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewWebModifyAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewWebModifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetHyperflexServerModelsMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetHyperflexServerModelsMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewGetHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *KillQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewKillQueryNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewKillQueryBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewKillQueryNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 422:\n\t\tresult := NewKillQueryUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetProgressionViewReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProgressionViewOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetProgressionViewBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses 
defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UtilTestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilTestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetByUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetByUIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetByUIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetMeReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetMeDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *Delete1Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewDelete1NoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDelete1NotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RevokeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRevokeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewRevokeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewRevokeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, 
runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostGatewayDisconnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayDisconnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayDisconnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetProtocolsUsingGETReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProtocolsUsingGETOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *DestroySessionUsingPOSTReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDestroySessionUsingPOSTOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CompleteTransactionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) 
{\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewCompleteTransactionNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMapNameEventsOK(o.writer)\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetMapNameEventsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RecoveryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRecoveryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewRecoveryInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) 
{\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetPeersOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewGetPeersForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InstallEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInstallEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SetMemoRequiredReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSetMemoRequiredOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSetMemoRequiredBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSetMemoRequiredInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := 
NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewUpdateRackTopoNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUpdateRackTopoInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetVoicesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVoicesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PatchHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPatchHyperflexServerModelsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPatchHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, 
nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *BounceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewBounceDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *PostHyperflexHxdpVersionsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetObmsLibraryIdentifierReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetObmsLibraryIdentifierOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetObmsLibraryIdentifierNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewGetObmsLibraryIdentifierDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DeleteApplianceRestoresMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, 
error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteApplianceRestoresMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteApplianceRestoresMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteApplianceRestoresMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *UserQuerySessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUserQuerySessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUserQuerySessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewUserQuerySessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /sessionbrowser/namespaces/{namespace}/gamesession returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *GetDiscoverReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDiscoverOK()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *overwriteConsumerReader) ReadResponse(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tif r.forStatusCode == ForAllStatusCodes || resp.Code() == r.forStatusCode {\n\t\treturn r.requestReader.ReadResponse(resp, r.consumer)\n\t}\n\n\treturn r.requestReader.ReadResponse(resp, consumer)\n}", "func (o *UnclaimTrafficFilterLinkIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUnclaimTrafficFilterLinkIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUnclaimTrafficFilterLinkIDBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUnclaimTrafficFilterLinkIDInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetDebugRequestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDebugRequestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetDebugRequestNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ChangeaspecificSpeedDialReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewChangeaspecificSpeedDialNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostMemoryArraysMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostMemoryArraysMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostMemoryArraysMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (c *Client) readResponse(conn net.Conn) ([]byte, error) {\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetReadDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\tproto := \"udp\"\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tproto = \"tcp\"\n\t}\n\n\tif proto == \"udp\" {\n\t\tbufSize := c.UDPSize\n\t\tif bufSize == 0 {\n\t\t\tbufSize = dns.MinMsgSize\n\t\t}\n\t\tresponse := make([]byte, bufSize)\n\t\tn, err := conn.Read(response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn response[:n], nil\n\t}\n\n\t// If we got here, this is a TCP connection\n\t// so we should read a 2-byte prefix first\n\treturn readPrefixed(conn)\n}", "func (o *PayReader) ReadResponse(response runtime.ClientResponse, consumer 
runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPayOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPayBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewPayNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 409:\n\t\tresult := NewPayConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested POST /platform/public/namespaces/{namespace}/payment/orders/{paymentOrderNo}/pay returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *CountReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCountOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewCountBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostNodesIdentifierObmIdentifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewPostNodesIdentifierObmIdentifyCreated()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewPostNodesIdentifierObmIdentifyNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewPostNodesIdentifierObmIdentifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetInterpreterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetInterpreterOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetInterpreterNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DeleteEventsEventIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewDeleteEventsEventIDNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 401:\n\t\tresult := NewDeleteEventsEventIDUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewDeleteEventsEventIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", 
response, response.Code())\n\t}\n}", "func (o *UtilityServiceReadyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilityServiceReadyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewUtilityServiceReadyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HTTPGetPersistenceItemDataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHTTPGetPersistenceItemDataOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewHTTPGetPersistenceItemDataNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SubscriptionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSubscriptionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostEquipmentIoExpandersMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostEquipmentIoExpandersMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostEquipmentIoExpandersMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *FrontSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (pr *PushedRequest) ReadResponse(ctx context.Context) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\tpr.Cancel()\n\t\tpr.pushedStream.bufPipe.CloseWithError(ctx.Err())\n\t\treturn nil, ctx.Err()\n\tcase <-pr.pushedStream.peerReset:\n\t\treturn nil, pr.pushedStream.resetErr\n\tcase resErr := <-pr.pushedStream.resc:\n\t\tif resErr.err != nil {\n\t\t\tfmt.Println(resErr.err.Error())\n\t\t\tpr.Cancel()\n\t\t\tpr.pushedStream.bufPipe.CloseWithError(resErr.err)\n\t\t\treturn nil, resErr.err\n\t\t}\n\t\tresErr.res.Request = pr.Promise\n\t\tresErr.res.TLS = pr.pushedStream.cc.tlsState\n\t\treturn resErr.res, resErr.err\n\t}\n}", "func (o *GetZippedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewGetZippedDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn 
nil, result\n}", "func (o *DeleteFirmwareUpgradesMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteFirmwareUpgradesMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteFirmwareUpgradesMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteFirmwareUpgradesMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetEtherPhysicalPortsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetEtherPhysicalPortsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetEtherPhysicalPortsDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *ZoneStreamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewZoneStreamOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the 
swagger spec\", response, response.Code())\n\t}\n}", "func (o *ByNamespaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewByNamespaceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewByNamespaceNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetRequestTrackerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetRequestTrackerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 403:\n\t\tresult := NewGetRequestTrackerForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetRequestTrackerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}" ]
[ "0.7640225", "0.7607834", "0.75210214", "0.7509121", "0.74803215", "0.74724656", "0.7433606", "0.74244606", "0.7375357", "0.7367311", "0.73589337", "0.73551553", "0.7350114", "0.7347274", "0.7346054", "0.733966", "0.7336042", "0.73239547", "0.7315819", "0.73155594", "0.7310195", "0.730769", "0.72904205", "0.7287086", "0.72826135", "0.72742283", "0.7274111", "0.72655845", "0.726384", "0.7262403", "0.7255057", "0.72496617", "0.72492456", "0.72479755", "0.72409135", "0.7224629", "0.722366", "0.7219326", "0.7216009", "0.72122216", "0.72110355", "0.72099286", "0.7209348", "0.72004783", "0.71978456", "0.719778", "0.71926844", "0.7177653", "0.71745974", "0.71737057", "0.716626", "0.7155474", "0.71500206", "0.7149536", "0.7148374", "0.7143972", "0.7143686", "0.7141745", "0.71397567", "0.713703", "0.7136677", "0.7136661", "0.7135863", "0.7135147", "0.71337897", "0.71312535", "0.7124361", "0.7123878", "0.71200526", "0.7120036", "0.7119569", "0.71148854", "0.7104891", "0.7100936", "0.70989054", "0.70989", "0.70984536", "0.70977753", "0.709657", "0.70961034", "0.70941985", "0.70932794", "0.70886916", "0.70850074", "0.7083912", "0.7080819", "0.7078785", "0.70775825", "0.70765215", "0.7076268", "0.7070042", "0.70699906", "0.7068155", "0.7068122", "0.7066828", "0.70625323", "0.70621973", "0.70599294", "0.70577264", "0.7054454", "0.70509636" ]
0.0
-1
NewIPAMServicesListOK creates a IPAMServicesListOK with default headers values
func NewIPAMServicesListOK() *IPAMServicesListOK { return &IPAMServicesListOK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewServiceInfo_List(s *capnp.Segment, sz int32) (ServiceInfo_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn ServiceInfo_List{l}, err\n}", "func NewListServicesOK() *ListServicesOK {\n\treturn &ListServicesOK{}\n}", "func NewListServicesOK() *ListServicesOK {\n\treturn &ListServicesOK{}\n}", "func NewServiceInfoRequest_List(s *capnp.Segment, sz int32) (ServiceInfoRequest_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn ServiceInfoRequest_List{l}, err\n}", "func NewServiceList(initialiser Initialiser) ExternalServiceList {\n\treturn ExternalServiceList{\n\t\tInit: initialiser,\n\t}\n}", "func NewListServicesBadRequest() *ListServicesBadRequest {\n\treturn &ListServicesBadRequest{}\n}", "func List(client *occlient.Client, applicationName string) (ServiceList, error) {\n\tlabels := map[string]string{\n\t\tapplabels.ApplicationLabel: applicationName,\n\t}\n\n\t//since, service is associated with application, it consist of application label as well\n\t// which we can give as a selector\n\tapplicationSelector := util.ConvertLabelsToSelector(labels)\n\n\t// get service instance list based on given selector\n\tserviceInstanceList, err := client.GetServiceInstanceList(applicationSelector)\n\tif err != nil {\n\t\treturn ServiceList{}, errors.Wrapf(err, \"unable to list services\")\n\t}\n\n\tvar services []Service\n\t// Iterate through serviceInstanceList and add to service\n\tfor _, elem := range serviceInstanceList {\n\t\tconditions := elem.Status.Conditions\n\t\tvar status string\n\t\tif len(conditions) == 0 {\n\t\t\tklog.Warningf(\"no condition in status for %+v, marking it as Unknown\", elem)\n\t\t\tstatus = \"Unknown\"\n\t\t} else {\n\t\t\tstatus = conditions[0].Reason\n\t\t}\n\n\t\t// Check and make sure that \"name\" exists..\n\t\tif elem.Labels[componentlabels.ComponentLabel] == \"\" {\n\t\t\treturn ServiceList{}, 
errors.New(fmt.Sprintf(\"element %v returned blank name\", elem))\n\t\t}\n\n\t\tservices = append(services,\n\t\t\tService{\n\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\tKind: \"Service\",\n\t\t\t\t\tAPIVersion: apiVersion,\n\t\t\t\t},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: elem.Labels[componentlabels.ComponentLabel],\n\t\t\t\t},\n\t\t\t\tSpec: ServiceSpec{Type: elem.Labels[componentlabels.ComponentTypeLabel], Plan: elem.Spec.ClusterServicePlanExternalName},\n\t\t\t\tStatus: ServiceStatus{Status: status},\n\t\t\t})\n\t}\n\n\treturn ServiceList{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"List\",\n\t\t\tAPIVersion: apiVersion,\n\t\t},\n\t\tItems: services,\n\t}, nil\n}", "func newRpcServices(c *RpccontrollerV1Client, namespace string) *rpcServices {\n\treturn &rpcServices{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func (a *Client) ListServices(params *ListServicesParams, opts ...ClientOption) (*ListServicesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListServicesParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"listServices\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/astrolabe\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListServicesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListServicesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for listServices: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func New(cfg *config.Config) *AuthnReqListsService {\n\n\treturn &AuthnReqListsService{Client: client.New(\n\t\t*cfg,\n\t\tmetadata.ClientInfo{\n\t\t\tServiceName: ServiceName,\n\t\t\tEndpoint: *cfg.Endpoint,\n\t\t\tAPIVersion: pingaccess.SDKVersion,\n\t\t},\n\t)}\n}", "func ExampleServicesClient_NewListPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armwindowsiot.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := clientFactory.NewServicesClient().NewListPager(nil)\n\tfor pager.More() {\n\t\tpage, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range page.Value {\n\t\t\t// You could use page here. We use blank identifier for just demo purposes.\n\t\t\t_ = v\n\t\t}\n\t\t// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t\t// page.DeviceServiceDescriptionListResult = armwindowsiot.DeviceServiceDescriptionListResult{\n\t\t// \tValue: []*armwindowsiot.DeviceService{\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service1125\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/res2627/providers/Microsoft.WindowsIoT/Services/service1125\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service3699\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/testcmk3/providers/Microsoft.WindowsIoT/Services/service3699\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service6637\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/testcmk3/providers/Microsoft.WindowsIoT/Services/service6637\"),\n\t\t// \t\t\tProperties: 
&armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service834\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/res8186/providers/Microsoft.WindowsIoT/Services/service834\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service9174\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/testcmk3/providers/Microsoft.WindowsIoT/Services/service9174\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t}},\n\t\t// }\n\t}\n}", "func listServices(client fastlyClientSerivces) ([]*fastly.Service, error) {\n\tvar i *fastly.ListServicesInput\n\tservices, err := client.ListServices(i)\n\tif 
err != nil {\n\t\tlog.Fatalf(\"Listing services failed: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn services, err\n}", "func listServices(nginx *v1alpha1.Nginx) ([]v1alpha1.NginxService, error) {\n\tserviceList := &corev1.ServiceList{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Service\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t}\n\n\tlabelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name)).String()\n\tlistOps := &metav1.ListOptions{LabelSelector: labelSelector}\n\terr := sdk.List(nginx.Namespace, serviceList, sdk.WithListOptions(listOps))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar services []v1alpha1.NginxService\n\tfor _, s := range serviceList.Items {\n\t\tif s.Spec.ClusterIP == \"\" {\n\t\t\ts.Spec.ClusterIP = \"<pending>\"\n\t\t}\n\t\tservices = append(services, v1alpha1.NginxService{\n\t\t\tName: s.Name,\n\t\t\tType: string(s.Spec.Type),\n\t\t\tServiceIP: s.Spec.ClusterIP,\n\t\t})\n\t}\n\n\tsort.Slice(services, func(i, j int) bool {\n\t\treturn services[i].Name < services[j].Name\n\t})\n\n\treturn services, nil\n}", "func serviceList(m map[string]corev1.Service) []client.Object {\n\tvar l []client.Object\n\tfor _, v := range m {\n\t\tobj := v\n\t\tl = append(l, &obj)\n\t}\n\treturn l\n}", "func (client *CloudServicesClient) NewListPager(resourceGroupName string, options *CloudServicesClientListOptions) *runtime.Pager[CloudServicesClientListResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[CloudServicesClientListResponse]{\n\t\tMore: func(page CloudServicesClientListResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *CloudServicesClientListResponse) (CloudServicesClientListResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listCreateRequest(ctx, resourceGroupName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, 
*page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn CloudServicesClientListResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.pl.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn CloudServicesClientListResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn CloudServicesClientListResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listHandleResponse(resp)\n\t\t},\n\t})\n}", "func (client *CloudServicesClient) listAllCreateRequest(ctx context.Context, options *CloudServicesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (s ServiceInfoRequest) NewServiceTypes(n int32) (ServiceType_List, error) {\n\tl, err := NewServiceType_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn ServiceType_List{}, err\n\t}\n\terr = s.Struct.SetPtr(0, l.List.ToPtr())\n\treturn l, err\n}", "func (a *IamProjectApiService) IamProjectServiceList(ctx context.Context, projectId string) ApiIamProjectServiceListRequest {\n\treturn ApiIamProjectServiceListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tprojectId: projectId,\n\t}\n}", "func New() []di.Def {\n\t// add user services\n\tsvc := append(services, user.Services...)\n\treturn svc\n}", "func ListService(ctx *gin.Context) {\n\tlog := logger.RuntimeLog\n\tzoneName := ctx.Param(\"zone\")\n\tnamespace 
:= ctx.Param(\"ns\")\n\n\t// fetch k8s-client handler by zoneName\n\tkclient, err := GetClientByAzCode(zoneName)\n\tif err != nil {\n\t\tlog.WithError(err)\n\t\tSendResponse(ctx, errno.ErrTokenInvalid, nil)\n\t\treturn\n\t}\n\n\tstartAt := time.Now()\n\tsvcs, err := kclient.CoreV1().Services(namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tSendResponse(ctx, err, \"failed to get Service info.\")\n\t\treturn\n\t}\n\tlogger.MetricsEmit(\n\t\tSVC_CONST.K8S_LOG_Method_ListService,\n\t\tutil.GetReqID(ctx),\n\t\tfloat32(time.Since(startAt)/time.Millisecond),\n\t\terr == err,\n\t)\n\n\tSendResponse(ctx, errno.OK, svcs.Items)\n}", "func (c *rpcServices) List(opts metav1.ListOptions) (result *v1.RpcServiceList, err error) {\n\tresult = &v1.RpcServiceList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"rpcservices\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (ssc *ServicesController) List() (*[]*service.Service, error) {\n\tvar services []*service.Service\n\n\tresponse, e := ssc.c.ClientREST.HTTPMethod(\"GET\", endpointFService)\n\n\tif e != nil {\n\t\tservices = append(services, &service.Service{})\n\t\treturn &services, e\n\t}\n\n\tdocuments := response.BodyMap()[\"DOCUMENT_POOL\"].(map[string]interface{})\n\n\tfor _, v := range documents {\n\t\tservice := NewService(v.(map[string]interface{}))\n\t\tservices = append(services, service)\n\t}\n\n\treturn &services, e\n}", "func NewIFInfoRequest_List(s *capnp.Segment, sz int32) (IFInfoRequest_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn IFInfoRequest_List{l}, err\n}", "func NewListOfDevicesDefault(code int) *ListOfDevicesDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &ListOfDevicesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options 
*CloudServicesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (req *Request) CreateHeaderList(headers ...string) error {\n\tif headers == nil {\n\t\treturn nil\n\t}\n\tif req.Req == nil {\n\t\treturn errors.New(\"request is not initialized, please use the `InitRequest` method before apply the headers\")\n\t}\n\n\tlength := len(headers)\n\n\tif len(headers)%2 != 0 {\n\t\terr := errors.New(`headers have to be a \"key:value\" list, got instead a odd number of elements`)\n\t\tlog.Debug(err)\n\t\treturn err\n\t}\n\n\tcounter := 0\n\n\tfor i := 0; i < length; i += 2 {\n\t\tkey := headers[i]\n\t\tvalue := headers[i+1]\n\t\tlog.Debug(\"createHeaderList | \", counter, \") Key: \", key, \" Value: \", value)\n\t\tcounter++\n\t\tif strings.EqualFold(`Authorization`, key) {\n\t\t\treq.Req.Header.Set(key, value)\n\t\t} else {\n\t\t\treq.Req.Header.Add(key, value)\n\t\t}\n\t\t//log.Debug(\"sendRequest | Adding header: {\", key, \"|\", value, \"}\")\n\t}\n\tlog.Debug(\"createHeaderList | LIST: \", req.Req.Header)\n\treturn nil\n}", "func (s *ServicesClient) List(ctx 
context.Context, orgIDs, projectIDs *[]*identity.ID, names *[]string) ([]ServiceResult, error) {\n\tv := &url.Values{}\n\tif orgIDs != nil {\n\t\tfor _, id := range *orgIDs {\n\t\t\tv.Add(\"org_id\", id.String())\n\t\t}\n\t}\n\tif projectIDs != nil {\n\t\tfor _, id := range *projectIDs {\n\t\t\tv.Add(\"project_id\", id.String())\n\t\t}\n\t}\n\tif names != nil {\n\t\tfor _, n := range *names {\n\t\t\tv.Add(\"name\", n)\n\t\t}\n\t}\n\n\treq, _, err := s.client.NewRequest(\"GET\", \"/services\", v, nil, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservices := []ServiceResult{}\n\t_, err = s.client.Do(ctx, req, &services, nil, nil)\n\treturn services, err\n}", "func NewList(client *secretsapi.Client, p listPrimeable) *List {\n\treturn &List{\n\t\tsecretsClient: client,\n\t\tout: p.Output(),\n\t\tproj: p.Project(),\n\t}\n}", "func GetServiceList(token string, url string, appPort string, appId string, remote common.DCOSCalls) ([]string, error) {\n\tvar urlList []string\n\n\texposedPort := appPort\n\tportLocation := -1\n\ttimeout := time.Duration(5 * time.Second)\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := remote.ServiceDiscovery(token, url, client)\n\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\tfmt.Println(\"Error: Http request timeout...\")\n\t\tfmt.Println(\"------------ Try again in next cycle ------------\")\n\t\treturn urlList, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\t//fmt.Println(resp.StatusCode)\n\tdefer resp.Body.Close()\n\tvar result common.AppResult\n\n\terr = json.NewDecoder(resp.Body).Decode(&result)\n\tif resp.StatusCode == 401 {\n\t\treturn nil, errors.New(\"401\")\n\t} else if resp.StatusCode == 404 {\n\t\tfmt.Println(\"Error: Application [\" + url + \"]does not exsist..\")\n\t\tfmt.Println(\"------------ Try again in next cycle ------------\")\n\t\treturn urlList, nil\n\t} else {\n\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\tfmt.Println(\"Error: Http request 
timeout...\")\n\t\t\tfmt.Println(\"------------ Try again in next cycle ------------\")\n\t\t\treturn urlList, nil\n\t\t}\n\t}\n\tvar containerPortMapping []common.PortMappings\n\n\tif result.App != nil {\n\t\tif result.App.Container.Docker != nil && len(result.App.Container.Docker.Portmappings) > 0 {\n\t\t\tfmt.Println(\"OLD version of DCOS\")\n\t\t\tcontainerPortMapping = result.App.Container.Docker.Portmappings\n\t\t} else if len(result.App.Container.Portmappings) > 0 {\n\t\t\tfmt.Println(\"NEW version of DCOS\")\n\t\t\tcontainerPortMapping = result.App.Container.Portmappings\n\t\t} else {\n\t\t\tif portLocation == -1 {\n\t\t\t\tfmt.Println(\"Error: config file has wrong port OR port is not exposed in the application container OR container is not up yet\" + url)\n\t\t\t\treturn urlList, nil\n\t\t\t}\n\t\t}\n\t\tif len(containerPortMapping) > 0 {\n\t\t\tfor i := 0; i < len(containerPortMapping); i++ {\n\t\t\t\tfmt.Println(containerPortMapping[i].ContainerPort)\n\t\t\t\tif exposedPort == strconv.Itoa(containerPortMapping[i].ContainerPort) {\n\t\t\t\t\tportLocation = i\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvar appMap = make(map[int][]string, 1)\n\t\tfor i := 0; i < len(result.App.Tasks); i++ {\n\t\t\tfmt.Println(result.App.Tasks[i].State)\n\t\t\tif result.App.Tasks[i].State == \"TASK_RUNNING\" {\n\t\t\t\tif portLocation > -1 {\n\t\t\t\t\turlList = append(urlList, result.App.Tasks[i].Host+\":\"+strconv.Itoa(result.App.Tasks[i].Ports[portLocation]))\n\t\t\t\t} else {\n\t\t\t\t\tprintln(\"Error: Port [\" + exposedPort + \"] in configration seems to be wrong! 
Please make sure port is exposed from the container \" + url)\n\t\t\t\t}\n\t\t\t\tif len(containerPortMapping) > 0 {\n\t\t\t\t\tfor j := 0; j < len(containerPortMapping); j++ {\n\t\t\t\t\t\tappMap[containerPortMapping[j].ContainerPort] = append(appMap[containerPortMapping[j].ContainerPort], result.App.Tasks[i].Host+\":\"+strconv.Itoa(result.App.Tasks[i].Ports[j]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tsdMap[appId] = appMap\n\t}\n\n\treturn urlList, nil\n}", "func NewServiceInfoReply_List(s *capnp.Segment, sz int32) (ServiceInfoReply_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz)\n\treturn ServiceInfoReply_List{l}, err\n}", "func NewService() *ServiceList {\n\treturn &ServiceList{}\n}", "func (c *restClient) ListServices(ctx context.Context, req *serviceusagepb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {\n\tit := &ServiceIterator{}\n\treq = proto.Clone(req).(*serviceusagepb.ListServicesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*serviceusagepb.Service, string, error) {\n\t\tresp := &serviceusagepb.ListServicesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/services\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", 
req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetServices(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (client *APIClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *APIClientListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis\"\n\tif resourceGroupName == 
\"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.Tags != nil {\n\t\treqQP.Set(\"tags\", *options.Tags)\n\t}\n\tif options != nil && options.ExpandAPIVersionSet != nil {\n\t\treqQP.Set(\"expandApiVersionSet\", strconv.FormatBool(*options.ExpandAPIVersionSet))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *FileServicesClient) List(ctx context.Context, resourceGroupName string, accountName string, options *FileServicesListOptions) (FileServiceItemsResponse, error) {\n\treq, err := client.listCreateRequest(ctx, resourceGroupName, accountName, options)\n\tif err != nil {\n\t\treturn FileServiceItemsResponse{}, err\n\t}\n\tresp, err := client.con.Pipeline().Do(req)\n\tif err != nil {\n\t\treturn FileServiceItemsResponse{}, 
err\n\t}\n\tif !resp.HasStatusCode(http.StatusOK) {\n\t\treturn FileServiceItemsResponse{}, client.listHandleError(resp)\n\t}\n\treturn client.listHandleResponse(resp)\n}", "func NewListServicesNotFound() *ListServicesNotFound {\n\treturn &ListServicesNotFound{}\n}", "func NewListServicesNotFound() *ListServicesNotFound {\n\treturn &ListServicesNotFound{}\n}", "func NewHealthCheckServiceSpec_List(s *capnp.Segment, sz int32) (HealthCheckServiceSpec_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn HealthCheckServiceSpec_List{l}, err\n}", "func newIPVSService(svc *Service) *ipvsService {\n\tipvsSvc := &ipvsService{\n\t\tAddress: svc.Address,\n\t\tProtocol: svc.Protocol,\n\t\tPort: svc.Port,\n\t\tFirewallMark: svc.FirewallMark,\n\t\tScheduler: svc.Scheduler,\n\t\tFlags: svc.Flags,\n\t\tTimeout: svc.Timeout,\n\t\tPersistenceEngine: svc.PersistenceEngine,\n\t}\n\n\tif ip4 := svc.Address.To4(); ip4 != nil {\n\t\tipvsSvc.AddrFamily = syscall.AF_INET\n\t\tipvsSvc.Netmask = 0xffffffff\n\t} else {\n\t\tipvsSvc.AddrFamily = syscall.AF_INET6\n\t\tipvsSvc.Netmask = 128\n\t}\n\n\treturn ipvsSvc\n}", "func GetServices(nbmaster string, httpClient *http.Client, jwt string, host string, hostUuid string) {\r\n fmt.Printf(\"\\nGet NetBackup services available on %s...\\n\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/admin/hosts/\" + hostUuid + \"/services\"\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Content-Type\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get services\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n resp, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(resp, &obj)\r\n data := 
obj.(map[string]interface{})\r\n var services []interface{} = data[\"data\"].([]interface{})\r\n\r\n fmt.Printf(\"id status\\n\");\r\n fmt.Printf(\"============.=========\\n\");\r\n for _, service := range services {\r\n id := (service.(map[string]interface{}))[\"id\"]\r\n status := (((service.(map[string]interface{}))[\"attributes\"]).(map[string]interface{}))[\"status\"]\r\n\r\n fmt.Printf(\"%-12s %s\\n\", id, status);\r\n }\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n}", "func (client *PublicIPAddressesClient) listCloudServicePublicIPAddressesCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *PublicIPAddressesClientListCloudServicePublicIPAddressesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/publicipaddresses\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *ApiService) ListService(params *ListServiceParams) 
([]VerifyV2Service, error) {\n\tresponse, errors := c.StreamService(params)\n\n\trecords := make([]VerifyV2Service, 0)\n\tfor record := range response {\n\t\trecords = append(records, record)\n\t}\n\n\tif err := <-errors; err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func NewListLoadBalancerServicesOK() *ListLoadBalancerServicesOK {\n\treturn &ListLoadBalancerServicesOK{}\n}", "func NewGetServicesDefault(code int) *GetServicesDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetServicesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func listServices(ctx context.Context, c client.Client, rokku *rokkuv1alpha1.Rokku) ([]rokkuv1alpha1.ServiceStatus, error) {\n\tserviceList := &corev1.ServiceList{}\n\tlabelSelector := labels.SelectorFromSet(k8s.LabelsForRokku(rokku.Name))\n\tlistOps := &client.ListOptions{Namespace: rokku.Namespace, LabelSelector: labelSelector}\n\terr := c.List(ctx, serviceList, listOps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar services []rokkuv1alpha1.ServiceStatus\n\tfor _, s := range serviceList.Items {\n\t\tservices = append(services, rokkuv1alpha1.ServiceStatus{\n\t\t\tName: s.Name,\n\t\t})\n\t}\n\n\tsort.Slice(services, func(i, j int) bool {\n\t\treturn services[i].Name < services[j].Name\n\t})\n\n\treturn services, nil\n}", "func (client *PublicIPAddressesClient) listAllCreateRequest(ctx context.Context, options *PublicIPAddressesClientListAllOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", 
\"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewListServicesUnauthorized() *ListServicesUnauthorized {\n\treturn &ListServicesUnauthorized{}\n}", "func NewListServicesUnauthorized() *ListServicesUnauthorized {\n\treturn &ListServicesUnauthorized{}\n}", "func NewPublicCloudtypesListGetOK() *PublicCloudtypesListGetOK {\n\treturn &PublicCloudtypesListGetOK{\n\t\tAccessControlAllowHeaders: \"Origin, X-Requested-With, Content-Type, Accept\",\n\t\tAccessControlAllowMethods: \"POST, GET, OPTIONS\",\n\t\tAccessControlAllowOrigin: \"*\",\n\t\tCacheControl: \"private, no-cache, no-store, no-transform, must-revalidate, max-age=0\",\n\t\tContentEncoding: \"gzip\",\n\t\tContentLength: \"298\",\n\t\tContentType: \"application/json\",\n\t\tDate: \"Wed, 25 Oct 2017 14:13:39 GMT\",\n\t\tLastModified: \"2017-10-25 10:13:39.237937\",\n\t\tServer: \"waitress\",\n\t\tVary: \"Accept-Encoding\",\n\t}\n}", "func listServices(ctx context.Context, c client.Client, nginx *nginxv1alpha1.Nginx) ([]nginxv1alpha1.ServiceStatus, error) {\n\tserviceList := &corev1.ServiceList{}\n\tlabelSelector := labels.SelectorFromSet(k8s.LabelsForNginx(nginx.Name))\n\tlistOps := &client.ListOptions{Namespace: nginx.Namespace, LabelSelector: labelSelector}\n\terr := c.List(ctx, serviceList, listOps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar services []nginxv1alpha1.ServiceStatus\n\tfor _, s := range serviceList.Items {\n\t\tservices = append(services, nginxv1alpha1.ServiceStatus{\n\t\t\tName: s.Name,\n\t\t})\n\t}\n\n\tsort.Slice(services, func(i, j int) bool {\n\t\treturn services[i].Name < services[j].Name\n\t})\n\n\treturn services, nil\n}", "func NewServiceListPage(getNextPage func(context.Context, ServiceList) (ServiceList, error)) ServiceListPage {\n\treturn ServiceListPage{fn: getNextPage}\n}", "func NewList() *List {\n newObj := &List {\n counters : make(map[string]Counter),\n }\n\n return 
newObj\n}", "func (client *PortalConfigClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *PortalConfigClientListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func Services(opt *options.SearchOptions, keyword string, wide bool) {\n\tvar serviceInfo string\n\n\tserviceList := utils.ServiceList(opt)\n\n\tif len(serviceList.Items) <= 0 {\n\t\tif opt.AllNamespaces {\n\t\t\tfmt.Printf(\"No resources found.\\n\")\n\t\t} else {\n\t\t\tvar ns = opt.Namespace\n\t\t\tif len(opt.Namespace) <= 0 {\n\t\t\t\tns = \"default\"\n\t\t\t}\n\t\t\tfmt.Printf(\"No resources found in %s namespace.\\n\", ns)\n\t\t}\n\t\treturn\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\tw := tabwriter.NewWriter(buf, 0, 0, 3, ' ', 0)\n\n\tif wide {\n\t\tfmt.Fprintln(w, constants.ServicesHeaderWide)\n\t} else 
{\n\t\tfmt.Fprintln(w, constants.ServicesHeader)\n\t}\n\tfor _, s := range serviceList.Items {\n\t\tvar ports []string\n\t\t// return all if no keyword specific\n\t\tif len(keyword) > 0 {\n\t\t\tmatch := strings.Contains(s.Name, keyword)\n\t\t\tif !match {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tage := utils.GetAge(time.Since(s.CreationTimestamp.Time))\n\n\t\tfor _, p := range s.Spec.Ports {\n\t\t\tvar concatenated string\n\t\t\tif p.NodePort != 0 {\n\t\t\t\tconcatenated = fmt.Sprintf(\"%d:%d/%s\", p.Port, p.NodePort, p.Protocol)\n\t\t\t} else {\n\t\t\t\tconcatenated = fmt.Sprintf(\"%d/%s\", p.Port, p.Protocol)\n\t\t\t}\n\t\t\tports = append(ports, concatenated)\n\t\t}\n\n\t\tvar selectors []string\n\t\tvar selector string\n\t\tif s.Spec.Selector != nil {\n\t\t\tfor k, v := range s.Spec.Selector {\n\t\t\t\tselector = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t\t\tselectors = append(selectors, selector)\n\t\t\t}\n\t\t}\n\t\tselectorOutput := \"<none>\"\n\t\tif len(selectors) > 0 {\n\t\t\tselectorOutput = strings.Join(selectors, \",\")\n\t\t}\n\n\t\tvar externalIPs []string\n\t\tif s.Spec.ExternalIPs == nil {\n\t\t\tfor _, i := range s.Status.LoadBalancer.Ingress {\n\t\t\t\texternalIPs = append(externalIPs, i.Hostname)\n\t\t\t}\n\t\t}\n\n\t\tvar externalIPsDisplay string = \"<none>\"\n\t\tif len(externalIPs) > 0 {\n\t\t\texternalIPsDisplay = strings.Join(externalIPs, \",\")\n\t\t}\n\n\t\tif wide {\n\t\t\tserviceInfo = fmt.Sprintf(constants.ServicesRowTemplateWide,\n\t\t\t\ts.Namespace,\n\t\t\t\ts.Name,\n\t\t\t\ts.Spec.Type,\n\t\t\t\ts.Spec.ClusterIP,\n\t\t\t\texternalIPsDisplay,\n\t\t\t\tstrings.Join(ports, \",\"),\n\t\t\t\tage,\n\t\t\t\tselectorOutput,\n\t\t\t)\n\t\t} else {\n\t\t\tserviceInfo = fmt.Sprintf(constants.ServicesRowTemplate,\n\t\t\t\ts.Namespace,\n\t\t\t\ts.Name,\n\t\t\t\ts.Spec.Type,\n\t\t\t\ts.Spec.ClusterIP,\n\t\t\t\texternalIPsDisplay,\n\t\t\t\tstrings.Join(ports, \",\"),\n\t\t\t\tage,\n\t\t\t)\n\t\t}\n\n\t\tfmt.Fprintln(w, 
serviceInfo)\n\t}\n\tw.Flush()\n\n\tfmt.Printf(\"%s\", buf.String())\n}", "func newBackingServices(c *Client, namespace string) *backingservices {\n\treturn &backingservices{\n\t\tr: c,\n\t\tns: namespace,\n\t}\n}", "func (e *EmailHeaderService) List(opts *ListOptions) ([]EmailHeader, *Response, error) {\n\tendpoint := \"/assets/email/headers\"\n\temailHeaders := new([]EmailHeader)\n\tresp, err := e.client.getRequestListDecode(endpoint, emailHeaders, opts)\n\treturn *emailHeaders, resp, err\n}", "func (client *BuildServiceClient) listBuildServicesCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *BuildServiceClientListBuildServicesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *GroupClient) listByServiceCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, 
options *GroupListByServiceOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/groups\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\treqQP.Set(\"api-version\", \"2021-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{4}\n}", "func (*ListServicesRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_manage_grpc_service_proto_rawDescGZIP(), []int{0}\n}", "func (client *FileServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *FileServicesListOptions) (*azcore.Request, error) {\n\turlPath 
:= \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2019-06-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewTodoListService(addr string) *Service {\n\ts := &Service{\n\t\th: http.DefaultServeMux,\n\t}\n\n\t// 使用默认设置初始化httpServer\n\ts.httpServer = &http.Server{\n\t\tAddr: addr,\n\t\tHandler: s.h,\n\t\tReadTimeout: 5 * time.Second,\n\t\tWriteTimeout: 5 * time.Second,\n\t\tMaxHeaderBytes: http.DefaultMaxHeaderBytes,\n\t}\n\n\t// 使用一个模拟的数据持久化服务\n\ts.storageService = NewMemStorageService()\n\n\t// 初始化接口配置\n\ts.initRouter()\n\treturn s\n}", "func listKnownServices(deployableUnitSet goldpushk.DeployableUnitSet) error {\n\tmode := \"production\"\n\tif flagTesting {\n\t\tmode = \"testing\"\n\t}\n\tfmt.Printf(\"Known Gold instances and services (%s):\\n\", mode)\n\n\t// Print out table header.\n\tw := tabwriter.NewWriter(os.Stdout, 10, 0, 2, ' ', 0)\n\tif _, err := fmt.Fprintln(w, \"\\nINSTANCE\\tSERVICE\\tCANONICAL NAME\"); err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\n\t// Print out table body.\n\tfor _, instance := range deployableUnitSet.KnownInstances() {\n\t\tfor _, service := range deployableUnitSet.KnownServices() {\n\t\t\tunit, ok := deployableUnitSet.Get(goldpushk.DeployableUnitID{Instance: instance, Service: service})\n\t\t\tif ok {\n\t\t\t\tif _, err := fmt.Fprintf(w, 
\"%s\\t%s\\t%s\\n\", instance, service, unit.CanonicalName()); err != nil {\n\t\t\t\t\treturn skerr.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Flush output and return.\n\tif err := w.Flush(); err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\treturn nil\n}", "func NewIpAddress_List(s *capnp.Segment, sz int32) (IpAddress_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}, sz)\n\treturn IpAddress_List{l}, err\n}", "func (ipvsc *ipvsControllerController) getServices(cfgMap *apiv1.ConfigMap) []vip {\n\tsvcs := []vip{}\n\n\t// k -> IP to use\n\t// v -> <namespace>/<service name>:<lvs method>\n\tfor externalIP, nsSvcLvs := range cfgMap.Data {\n\t\tif nsSvcLvs == \"\" {\n\t\t\t// if target is empty string we will not forward to any service but\n\t\t\t// instead just configure the IP on the machine and let it up to\n\t\t\t// another Pod or daemon to bind to the IP address\n\t\t\tsvcs = append(svcs, vip{\n\t\t\t\tName: \"\",\n\t\t\t\tIP: externalIP,\n\t\t\t\tPort: 0,\n\t\t\t\tLVSMethod: \"VIP\",\n\t\t\t\tBackends: nil,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t})\n\t\t\tglog.V(2).Infof(\"Adding VIP only service: %v\", externalIP)\n\t\t\tcontinue\n\t\t}\n\n\t\tns, svc, lvsm, err := parseNsSvcLVS(nsSvcLvs)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"%v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tnsSvc := fmt.Sprintf(\"%v/%v\", ns, svc)\n\t\tsvcObj, svcExists, err := ipvsc.svcLister.Store.GetByKey(nsSvc)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"error getting service %v: %v\", nsSvc, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !svcExists {\n\t\t\tglog.Warningf(\"service %v not found\", nsSvc)\n\t\t\tcontinue\n\t\t}\n\n\t\ts := svcObj.(*apiv1.Service)\n\t\tfor _, servicePort := range s.Spec.Ports {\n\t\t\tep := ipvsc.getEndpoints(s, &servicePort)\n\t\t\tif len(ep) == 0 {\n\t\t\t\tglog.Warningf(\"no endpoints found for service %v, port %+v\", s.Name, 
servicePort)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsort.Sort(serviceByIPPort(ep))\n\n\t\t\tsvcs = append(svcs, vip{\n\t\t\t\tName: fmt.Sprintf(\"%v-%v\", s.Namespace, s.Name),\n\t\t\t\tIP: externalIP,\n\t\t\t\tPort: int(servicePort.Port),\n\t\t\t\tLVSMethod: lvsm,\n\t\t\t\tBackends: ep,\n\t\t\t\tProtocol: fmt.Sprintf(\"%v\", servicePort.Protocol),\n\t\t\t})\n\t\t\tglog.V(2).Infof(\"found service: %v:%v\", s.Name, servicePort.Port)\n\t\t}\n\t}\n\n\tsort.Sort(vipByNameIPPort(svcs))\n\n\treturn svcs\n}", "func ExampleServicesClient_NewListByResourceGroupPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armwindowsiot.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := clientFactory.NewServicesClient().NewListByResourceGroupPager(\"res6117\", nil)\n\tfor pager.More() {\n\t\tpage, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range page.Value {\n\t\t\t// You could use page here. We use blank identifier for just demo purposes.\n\t\t\t_ = v\n\t\t}\n\t\t// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t\t// page.DeviceServiceDescriptionListResult = armwindowsiot.DeviceServiceDescriptionListResult{\n\t\t// \tValue: []*armwindowsiot.DeviceService{\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service4036\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/res6117/providers/Microsoft.WindowsIoT/Services/service4036\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"service4452\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.WindowsIoT/Services\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/27de630f-e1ee-42de-8849-90def4986454/resourceGroups/res6117/providers/Microsoft.WindowsIoT/Services/service4452\"),\n\t\t// \t\t\tProperties: &armwindowsiot.DeviceServiceProperties{\n\t\t// \t\t\t\tAdminDomainName: to.Ptr(\"d.e.f\"),\n\t\t// \t\t\t\tBillingDomainName: to.Ptr(\"a.b.c\"),\n\t\t// \t\t\t\tNotes: to.Ptr(\"blah\"),\n\t\t// \t\t\t\tQuantity: to.Ptr[int64](1000000),\n\t\t// \t\t\t\tStartDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-01-01T12:00:00000Z\"); return t}()),\n\t\t// \t\t\t},\n\t\t// \t}},\n\t\t// }\n\t}\n}", "func CreateDescribePortConnsListResponse() (response *DescribePortConnsListResponse) {\n\tresponse = &DescribePortConnsListResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *LocalRulestacksClient) listSecurityServicesCreateRequest(ctx context.Context, resourceGroupName string, 
localRulestackName string, typeParam SecurityServicesTypeEnum, options *LocalRulestacksClientListSecurityServicesOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listSecurityServices\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif localRulestackName == \"\" {\n\t\treturn nil, errors.New(\"parameter localRulestackName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{localRulestackName}\", url.PathEscape(localRulestackName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-29\")\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"skip\", *options.Skip)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\treqQP.Set(\"type\", string(typeParam))\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *awsServiceDiscoveryServices) List(opts meta_v1.ListOptions) (result *v1.AwsServiceDiscoveryServiceList, err error) {\n\tresult = &v1.AwsServiceDiscoveryServiceList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"awsservicediscoveryservices\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (c 
*ApiService) CreateService(params *CreateServiceParams) (*VerifyV2Service, error) {\n\tpath := \"/v2/Services\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.FriendlyName != nil {\n\t\tdata.Set(\"FriendlyName\", *params.FriendlyName)\n\t}\n\tif params != nil && params.CodeLength != nil {\n\t\tdata.Set(\"CodeLength\", fmt.Sprint(*params.CodeLength))\n\t}\n\tif params != nil && params.LookupEnabled != nil {\n\t\tdata.Set(\"LookupEnabled\", fmt.Sprint(*params.LookupEnabled))\n\t}\n\tif params != nil && params.SkipSmsToLandlines != nil {\n\t\tdata.Set(\"SkipSmsToLandlines\", fmt.Sprint(*params.SkipSmsToLandlines))\n\t}\n\tif params != nil && params.DtmfInputRequired != nil {\n\t\tdata.Set(\"DtmfInputRequired\", fmt.Sprint(*params.DtmfInputRequired))\n\t}\n\tif params != nil && params.TtsName != nil {\n\t\tdata.Set(\"TtsName\", *params.TtsName)\n\t}\n\tif params != nil && params.Psd2Enabled != nil {\n\t\tdata.Set(\"Psd2Enabled\", fmt.Sprint(*params.Psd2Enabled))\n\t}\n\tif params != nil && params.DoNotShareWarningEnabled != nil {\n\t\tdata.Set(\"DoNotShareWarningEnabled\", fmt.Sprint(*params.DoNotShareWarningEnabled))\n\t}\n\tif params != nil && params.CustomCodeEnabled != nil {\n\t\tdata.Set(\"CustomCodeEnabled\", fmt.Sprint(*params.CustomCodeEnabled))\n\t}\n\tif params != nil && params.PushIncludeDate != nil {\n\t\tdata.Set(\"Push.IncludeDate\", fmt.Sprint(*params.PushIncludeDate))\n\t}\n\tif params != nil && params.PushApnCredentialSid != nil {\n\t\tdata.Set(\"Push.ApnCredentialSid\", *params.PushApnCredentialSid)\n\t}\n\tif params != nil && params.PushFcmCredentialSid != nil {\n\t\tdata.Set(\"Push.FcmCredentialSid\", *params.PushFcmCredentialSid)\n\t}\n\tif params != nil && params.TotpIssuer != nil {\n\t\tdata.Set(\"Totp.Issuer\", *params.TotpIssuer)\n\t}\n\tif params != nil && params.TotpTimeStep != nil {\n\t\tdata.Set(\"Totp.TimeStep\", fmt.Sprint(*params.TotpTimeStep))\n\t}\n\tif params != nil && 
params.TotpCodeLength != nil {\n\t\tdata.Set(\"Totp.CodeLength\", fmt.Sprint(*params.TotpCodeLength))\n\t}\n\tif params != nil && params.TotpSkew != nil {\n\t\tdata.Set(\"Totp.Skew\", fmt.Sprint(*params.TotpSkew))\n\t}\n\tif params != nil && params.DefaultTemplateSid != nil {\n\t\tdata.Set(\"DefaultTemplateSid\", *params.DefaultTemplateSid)\n\t}\n\n\tresp, err := c.requestHandler.Post(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &VerifyV2Service{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func newServicePorts(m *influxdatav1alpha1.Influxdb) []corev1.ServicePort {\n\tvar ports []corev1.ServicePort\n\n\tports = append(ports, corev1.ServicePort{Port: 8086, Name: \"api\"},\n\t\tcorev1.ServicePort{Port: 2003, Name: \"graphite\"},\n\t\tcorev1.ServicePort{Port: 25826, Name: \"collectd\"},\n\t\tcorev1.ServicePort{Port: 8089, Name: \"udp\"},\n\t\tcorev1.ServicePort{Port: 4242, Name: \"opentsdb\"},\n\t\tcorev1.ServicePort{Port: 8088, Name: \"backup-restore\"},\n\t)\n\treturn ports\n}", "func newListServicesCmd(options *edgeOptions) *cobra.Command {\n\tvar asIdentity string\n\tvar configTypes []string\n\tvar roleFilters []string\n\tvar roleSemantic string\n\n\tcmd := &cobra.Command{\n\t\tUse: \"services <filter>?\",\n\t\tShort: \"lists services managed by the Ziti Edge Controller\",\n\t\tLong: \"lists services managed by the Ziti Edge Controller\",\n\t\tArgs: cobra.MaximumNArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\toptions.Cmd = cmd\n\t\t\toptions.Args = args\n\t\t\terr := runListServices(asIdentity, configTypes, roleFilters, roleSemantic, options)\n\t\t\tcmdhelper.CheckErr(err)\n\t\t},\n\t\tSuggestFor: []string{},\n\t}\n\n\t// allow interspersing positional args and flags\n\tcmd.Flags().SetInterspersed(true)\n\tcmd.Flags().StringVar(&asIdentity, \"as-identity\", \"\", \"Allow admins to see services as they 
would be seen by a different identity\")\n\tcmd.Flags().StringSliceVar(&configTypes, \"config-types\", nil, \"Override which config types to view on services\")\n\tcmd.Flags().StringSliceVar(&roleFilters, \"role-filters\", nil, \"Allow filtering by roles\")\n\tcmd.Flags().StringVar(&roleSemantic, \"role-semantic\", \"\", \"Specify which roles semantic to use \")\n\toptions.AddCommonFlags(cmd)\n\n\treturn cmd\n}", "func ListServices(source DescriptorSource) ([]string, error) {\n\tsvcs, err := source.ListServices()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Strings(svcs)\n\treturn svcs, nil\n}", "func ExampleAPIClient_NewListByServicePager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armapimanagement.NewAPIClient(\"subid\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := client.NewListByServicePager(\"rg1\",\n\t\t\"apimService1\",\n\t\t&armapimanagement.APIClientListByServiceOptions{Filter: nil,\n\t\t\tTop: nil,\n\t\t\tSkip: nil,\n\t\t\tTags: nil,\n\t\t\tExpandAPIVersionSet: nil,\n\t\t})\n\tfor pager.More() {\n\t\tnextResult, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range nextResult.Value {\n\t\t\t// TODO: use page item\n\t\t\t_ = v\n\t\t}\n\t}\n}", "func (client *PublicIPAddressesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *PublicIPAddressesClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (g *Goods) ListNew(c Context) {\n\t// TODO\n\tc.String(http.StatusOK, \"get new goods list\")\n}", "func (t *Transformer) CreateServices(o *object.Service) ([]runtime.Object, error) {\n\tresult := []runtime.Object{}\n\n\tService := func() *api_v1.Service {\n\t\tserviceLabels := map[string]string(o.Labels)\n\t\treturn &api_v1.Service{\n\t\t\tObjectMeta: api_v1.ObjectMeta{\n\t\t\t\tName: o.Name,\n\t\t\t\tLabels: *util.MergeMaps(\n\t\t\t\t\t// The map containing `\"service\": o.Name` should always be\n\t\t\t\t\t// passed later to avoid being overridden by util.MergeMaps()\n\t\t\t\t\t&serviceLabels,\n\t\t\t\t\t&map[string]string{\n\t\t\t\t\t\t\"service\": o.Name,\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\tSpec: api_v1.ServiceSpec{\n\t\t\t\tSelector: map[string]string{\n\t\t\t\t\t\"service\": o.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tis := Service()\n\tis.Spec.Type = api_v1.ServiceTypeClusterIP\n\n\tes := Service()\n\tes.Spec.Type = api_v1.ServiceTypeLoadBalancer\n\n\tfor _, c := range o.Containers {\n\t\t// We don't want to generate service if there are no ports to be mapped\n\t\tif len(c.Ports) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, p := range c.Ports {\n\t\t\tvar s *api_v1.Service\n\t\t\tswitch p.Type {\n\t\t\tcase object.PortType_Internal:\n\t\t\t\ts = is\n\t\t\tcase object.PortType_External:\n\t\t\t\ts = es\n\t\t\tdefault:\n\t\t\t\t// There is a mistake in our code; and in Golang because it 
doesn't have strongly typed enumerations :)\n\t\t\t\treturn result, fmt.Errorf(\"Internal error: unknown PortType %#v\", p.Type)\n\t\t\t}\n\n\t\t\ts.Spec.Ports = append(s.Spec.Ports, api_v1.ServicePort{\n\t\t\t\tName: fmt.Sprintf(\"port-%d\", p.Port.ServicePort),\n\t\t\t\tPort: int32(p.Port.ServicePort),\n\t\t\t\tTargetPort: intstr.FromInt(p.Port.ContainerPort),\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(is.Spec.Ports) > 0 {\n\t\tresult = append(result, is)\n\t}\n\n\tif len(es.Spec.Ports) > 0 {\n\t\tresult = append(result, es)\n\t}\n\n\treturn result, nil\n}", "func (*ListServicesResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{5}\n}", "func NewHostInfo_List(s *capnp.Segment, sz int32) (HostInfo_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz)\n\treturn HostInfo_List{l}, err\n}", "func newKubeListRequest(values url.Values, site, resourceKind string) (*kubeproto.ListKubernetesResourcesRequest, error) {\n\tlimit, err := queryLimitAsInt32(values, \"limit\", defaults.MaxIterationLimit)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tsortBy := types.GetSortByFromString(values.Get(\"sort\"))\n\n\tstartKey := values.Get(\"startKey\")\n\treq := &kubeproto.ListKubernetesResourcesRequest{\n\t\tResourceType: resourceKind,\n\t\tLimit: limit,\n\t\tStartKey: startKey,\n\t\tSortBy: &sortBy,\n\t\tPredicateExpression: values.Get(\"query\"),\n\t\tSearchKeywords: client.ParseSearchKeywords(values.Get(\"search\"), ' '),\n\t\tUseSearchAsRoles: values.Get(\"searchAsRoles\") == \"yes\",\n\t\tTeleportCluster: site,\n\t\tKubernetesCluster: values.Get(\"kubeCluster\"),\n\t\tKubernetesNamespace: values.Get(\"kubeNamespace\"),\n\t}\n\treturn req, nil\n}", "func NewListClient(accessToken string, endMs int64, startMs int64, assetID int64, endMs1 *int64, dataInputID int64, startMs1 *int64, durationMs *int64, endMs2 *int64, driverIDOrExternalID string, groupID *int64, 
timestamp int64, include string, sequenceID string, endingBefore string, limit *float64, startingAfter string, duration *int64, endTime *int64, endTime1 *int64, startTime *int64, endMs3 int64, startMs2 int64, tagID int64, vehicleIDOrExternalID string) ListClient {\n return NewListClientWithBaseURI(DefaultBaseURI, accessToken, endMs, startMs, assetID, endMs1, dataInputID, startMs1, durationMs, endMs2, driverIDOrExternalID, groupID, timestamp, include, sequenceID, endingBefore, limit, startingAfter, duration, endTime, endTime1, startTime, endMs3, startMs2, tagID, vehicleIDOrExternalID)\n}", "func NewHeaders()(*Headers) {\n m := &Headers{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (c *backingservices) List(opts kapi.ListOptions) (result *backingserviceapi.BackingServiceList, err error) {\n\tresult = &backingserviceapi.BackingServiceList{}\n\terr = c.r.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"backingservices\").\n\t\tVersionedParams(&opts, kapi.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func extractServices(param *types.Proto, protos []*descriptor.FileDescriptorProto) {\n\tsvcTmp := make([]types.Service, 0, 0)\n\tvar p *descriptor.FileDescriptorProto\n\tfor _, p = range protos {\n\t\tif generate, _ := inArray(p.GetName(), param.FilesToGenerate); generate {\n\t\t\tfor _, svc := range p.GetService() {\n\t\t\t\ts := types.Service{Name: svc.GetName()}\n\t\t\t\tmethods := make([]types.MethodTemplate, len(svc.Method))\n\t\t\t\tfor j, method := range svc.Method {\n\t\t\t\t\ttipe := methodTypeStandard\n\t\t\t\t\tif method.GetServerStreaming() && !method.GetClientStreaming() {\n\t\t\t\t\t\ttipe = methodTypeServerStream\n\t\t\t\t\t} else if !method.GetServerStreaming() && method.GetClientStreaming() {\n\t\t\t\t\t\ttipe = methodTypeClientStream\n\t\t\t\t\t} else if method.GetServerStreaming() && 
method.GetClientStreaming() {\n\t\t\t\t\t\ttipe = methodTypeBidirectional\n\t\t\t\t\t}\n\t\t\t\t\t_, pkg := getGoPackage(p)\n\t\t\t\t\tmethods[j] = types.MethodTemplate{\n\t\t\t\t\t\tName: strings.Title(*method.Name),\n\t\t\t\t\t\tServiceName: svc.GetName(),\n\t\t\t\t\t\tInput: getMessageType(pkg, protos, p.GetDependency(), method.GetInputType()),\n\t\t\t\t\t\tOutput: getMessageType(pkg, protos, p.GetDependency(), method.GetOutputType()),\n\t\t\t\t\t\tMethodType: tipe,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ts.Methods = methods\n\t\t\t\tsvcTmp = append(svcTmp, s)\n\t\t\t}\n\t\t}\n\n\t}\n\tif len(svcTmp) != 0 {\n\t\t_, pkg := getGoPackage(p)\n\t\tpath, outFile := getOutFile(p, pkg)\n\t\tparam.Services = svcTmp\n\t\tparam.Package = pkg\n\t\tparam.PackagePath = path\n\t\tparam.OutFile = outFile\n\t}\n}", "func (s predefinedServiceNamespaceLister) List(selector labels.Selector) (ret []*v1.PredefinedService, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.PredefinedService))\n\t})\n\treturn ret, err\n}", "func NewListLoadBalancerServicesBadRequest() *ListLoadBalancerServicesBadRequest {\n\treturn &ListLoadBalancerServicesBadRequest{}\n}", "func (client *Client) ListAppInfoWithOptions(request *ListAppInfoRequest, runtime *util.RuntimeOptions) (_result *ListAppInfoResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.PageNo)) {\n\t\tquery[\"PageNo\"] = request.PageNo\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PageSize)) {\n\t\tquery[\"PageSize\"] = request.PageSize\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Status)) {\n\t\tquery[\"Status\"] = request.Status\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"ListAppInfo\"),\n\t\tVersion: 
tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &ListAppInfoResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func newList(ctx TransactionContextInterface) *list {\n\t stateList := new(ledgerapi.StateList)\n\t stateList.Ctx = ctx\n\t stateList.Class = \"Asset\"\n\t stateList.Deserialize = func(bytes []byte, state ledgerapi.StateInterface) error {\n\t\t return Deserialize(bytes, state.(*Asset))\n\t }\n \n\t list := new(list)\n\t list.stateList = stateList\n \n\t return list\n }", "func (a *API) CreateList(list ListCreate) (*CreateListResponse, error) {\n\t// root namespace has id 1\n\tif list.NamespaceID == 0 {\n\t\treturn nil, fmt.Errorf(\"no namespace set\")\n\t}\n\n\t// no need to check UnSubscribtionMode since 0 is a valid option\n\tep, err := url.ParseRequestURI(a.endPoint.String() + \"/api/list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// use the default configured system setting if nothing is set\n\tif list.SendConfiguration == 0 {\n\t\tlist.SendConfiguration = 1\n\t}\n\n\t// we add all data by reflecting the list\n\tdata := createData(list)\n\n\treq, err := http.NewRequest(http.MethodPost, ep.String(), strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(data.Encode())))\n\n\tres, err := a.Request(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp CreateListResponse\n\terr = json.Unmarshal(res, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resp, nil\n}", "func (s 
*AuthnReqListsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewListResultOK(body *ListResponseBody) *inventory.ListResult {\n\tv := &inventory.ListResult{\n\t\tNextCursor: *body.NextCursor,\n\t\tTotal: *body.Total,\n\t}\n\tv.Items = make([]*inventory.Inventory, len(body.Items))\n\tfor i, val := range body.Items {\n\t\tv.Items[i] = unmarshalInventoryResponseBodyToInventoryInventory(val)\n\t}\n\n\treturn v\n}", "func NewHandleFactory_newHandle_Params_List(s *capnp.Segment, sz int32) (HandleFactory_newHandle_Params_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 0}, sz)\n\tif err != nil {\n\t\treturn HandleFactory_newHandle_Params_List{}, err\n\t}\n\treturn HandleFactory_newHandle_Params_List{l}, nil\n}", "func NewBookChapListDefault(code int) *BookChapListDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &BookChapListDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewASInfoReq_List(s *capnp.Segment, sz int32) (ASInfoReq_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz)\n\treturn ASInfoReq_List{l}, err\n}", "func CreateServiceStatusRequest() (request *ServiceStatusRequest) {\nrequest = &ServiceStatusRequest{\nRpcRequest: &requests.RpcRequest{},\n}\nrequest.InitWithApiInfo(\"Yundun\", \"2015-04-16\", \"ServiceStatus\", \"yundun\", \"openAPI\")\nreturn\n}", "func NewList(kubeClient kubernetes.Interface, appConfig config.Config, items ...v1.Ingress) *List {\n\treturn &List{\n\t\tkubeClient: kubeClient,\n\t\tappConfig: appConfig,\n\t\titems: items,\n\t}\n}", "func NewListAWSSizesNoCredentialsDefault(code int) *ListAWSSizesNoCredentialsDefault {\n\treturn &ListAWSSizesNoCredentialsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (client *APIClient) listByTagsCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options 
*APIClientListByTagsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apisByTags\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Filter != nil {\n\t\treqQP.Set(\"$filter\", *options.Filter)\n\t}\n\tif options != nil && options.Top != nil {\n\t\treqQP.Set(\"$top\", strconv.FormatInt(int64(*options.Top), 10))\n\t}\n\tif options != nil && options.Skip != nil {\n\t\treqQP.Set(\"$skip\", strconv.FormatInt(int64(*options.Skip), 10))\n\t}\n\tif options != nil && options.IncludeNotTaggedApis != nil {\n\t\treqQP.Set(\"includeNotTaggedApis\", strconv.FormatBool(*options.IncludeNotTaggedApis))\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewListBadRequest(body *ListBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", 
"func (c *MockPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, error) {\n\tvar l []network.PublicIPAddress\n\tfor _, lb := range c.PubIPs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}", "func (s *predefinedServiceLister) List(selector labels.Selector) (ret []*v1.PredefinedService, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.PredefinedService))\n\t})\n\treturn ret, err\n}" ]
[ "0.6065806", "0.6028659", "0.6028659", "0.5739329", "0.54147124", "0.5413235", "0.53750134", "0.53568983", "0.5351941", "0.5338472", "0.5228231", "0.52080244", "0.52044785", "0.52023166", "0.51656145", "0.51602954", "0.5151704", "0.51462096", "0.5112179", "0.510865", "0.5086538", "0.5086308", "0.50703555", "0.50572175", "0.50529087", "0.50285053", "0.5017403", "0.4944677", "0.49189457", "0.49173626", "0.49170062", "0.48993295", "0.48987123", "0.48917755", "0.48727265", "0.48727265", "0.48584074", "0.48423985", "0.48289922", "0.4826294", "0.48215425", "0.48212114", "0.48028398", "0.48014233", "0.47965717", "0.47939596", "0.47939596", "0.47917783", "0.47823972", "0.47817683", "0.47794515", "0.47771302", "0.47757265", "0.4774093", "0.47687718", "0.47655383", "0.47607705", "0.4744639", "0.47388896", "0.47313258", "0.4729733", "0.4723205", "0.4703725", "0.4702581", "0.4693829", "0.46923107", "0.46818814", "0.4677893", "0.4668465", "0.46654052", "0.46592727", "0.46583915", "0.46476668", "0.4645622", "0.46444914", "0.4643034", "0.46361154", "0.4633984", "0.4633931", "0.4633018", "0.4631738", "0.46277183", "0.4617991", "0.461783", "0.46159533", "0.4613583", "0.45945892", "0.45884773", "0.45837536", "0.45798552", "0.45787108", "0.4578656", "0.4575775", "0.45757413", "0.4572224", "0.4570515", "0.45684037", "0.45682487", "0.4568086", "0.45673627" ]
0.73574454
0
normalizeWorkdir normalizes a user requested working directory in a platform semantically consistent way.
func normalizeWorkdir(_ string, current string, requested string) (string, error) { if requested == "" { return "", errors.New("cannot normalize nothing") } current = filepath.FromSlash(current) requested = filepath.FromSlash(requested) if !filepath.IsAbs(requested) { return filepath.Join(string(os.PathSeparator), current, requested), nil } return requested, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func normalizeDir(dir string) string {\n\tif strings.Count(dir, \"//\") > 0 {\n\t\tdir = normalizeDir(strings.Replace(dir, \"//\", \"/\", -1))\n\t} else {\n\t\tif !strings.HasSuffix(dir, \"/\") {\n\t\t\tdir = dir + \"/\"\n\t\t}\n\t}\n\treturn dir\n}", "func resolveWorkdirFromLSInvocation() (string, error) {\n\t// our default workdir is the process' one\n\tworkdirDefault, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// TODO: read this from Bob's userconfig\n\twrongAndCorrectWds := map[string]string{\n\t\t\"/persist/work\": \"/home/joonas/work\",\n\t}\n\n\tfor wdWrong, wdCorrect := range wrongAndCorrectWds {\n\t\tif strings.HasPrefix(workdirDefault, wdWrong) {\n\t\t\t/*\tgiven:\n\t\t\t\twdWrong=/wrong/work\n\t\t\t\twdCorrect=/correct/work\n\n\t\t\t\ttranslates /wrong/work/projectx/file_y.go -> /correct/work/projectx/file_y.go\n\t\t\t*/\n\t\t\ttranslated := wdCorrect + strings.TrimPrefix(workdirDefault, wdWrong)\n\n\t\t\tlog.Printf(\"translated incorrect prefix %s to %s\", wdWrong, translated)\n\n\t\t\treturn translated, nil\n\t\t}\n\t}\n\n\t// no correction had to be made, so workdir was already correct\n\treturn workdirDefault, nil\n}", "func (m *Meta) normalizePath(path string) string {\n\tm.fixupMissingWorkingDir()\n\treturn m.WorkingDir.NormalizePath(path)\n}", "func normalise(rawPath string, isDir bool) (string, error) {\n\t// Clean up the path.\n\tpath := CleanPath(rawPath)\n\n\t// Nothing to do.\n\tif path == \".\" {\n\t\treturn \".\", nil\n\t}\n\n\tif filepath.IsAbs(path) {\n\t\tpath = strings.TrimPrefix(path, \"/\")\n\t}\n\n\t// Check that the path is \"safe\", meaning that it doesn't resolve outside\n\t// of the tar archive. 
While this might seem paranoid, it is a legitimate\n\t// concern.\n\tif \"/\"+path != filepath.Join(\"/\", path) {\n\t\treturn \"\", errors.Errorf(\"escape warning: generated path is outside tar root: %s\", rawPath)\n\t}\n\n\t// With some other tar formats, you needed to have a '/' at the end of a\n\t// pathname in order to state that it is a directory. While this is no\n\t// longer necessary, some older tooling may assume that.\n\tif isDir {\n\t\tpath += \"/\"\n\t}\n\n\treturn path, nil\n}", "func normalizeMountPath(path string) string {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn normalizeMountPathWin(path)\n\tdefault:\n\t\treturn path\n\t}\n}", "func normalizeMountPathWin(path string) string {\n\tbase := filepath.VolumeName(path)\n\tif len(base) == 2 {\n\t\tpath = strings.TrimPrefix(path, base)\n\t\tbase = strings.ToLower(strings.TrimSuffix(base, \":\"))\n\t\treturn \"/\" + base + filepath.ToSlash(path)\n\t}\n\treturn filepath.ToSlash(path)\n}", "func NormalizeWindowsPath(path string) string {\n\tnormalizedPath := strings.Replace(path, \"/\", \"\\\\\", -1)\n\tif strings.HasPrefix(normalizedPath, \"\\\\\") {\n\t\tnormalizedPath = \"c:\" + normalizedPath\n\t}\n\treturn normalizedPath\n}", "func normalizePath(filepath string) (dir string, name string) {\n\tdir, name = path.Split(path.Clean(filepath))\n\tdir = path.Clean(dir)\n\treturn\n}", "func (w *walker) normalizePath(path string, info fs.FileInfo) (normPath string, err error) {\n\tif runtime.GOOS == \"darwin\" {\n\t\t// Mac OS X file names should always be NFD normalized.\n\t\tnormPath = norm.NFD.String(path)\n\t} else {\n\t\t// Every other OS in the known universe uses NFC or just plain\n\t\t// doesn't bother to define an encoding. 
In our case *we* do care,\n\t\t// so we enforce NFC regardless.\n\t\tnormPath = norm.NFC.String(path)\n\t}\n\n\tif path == normPath {\n\t\t// The file name is already normalized: nothing to do\n\t\treturn path, nil\n\t}\n\n\tif !w.AutoNormalize {\n\t\t// We're not authorized to do anything about it, so complain and skip.\n\n\t\treturn \"\", errUTF8Normalization\n\t}\n\n\t// We will attempt to normalize it.\n\tnormInfo, err := w.Filesystem.Lstat(normPath)\n\tif fs.IsNotExist(err) {\n\t\t// Nothing exists with the normalized filename. Good.\n\t\tif err = w.Filesystem.Rename(path, normPath); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tl.Infof(`Normalized UTF8 encoding of file name \"%s\".`, path)\n\t\treturn normPath, nil\n\t}\n\tif w.Filesystem.SameFile(info, normInfo) {\n\t\t// With some filesystems (ZFS), if there is an un-normalized path and you ask whether the normalized\n\t\t// version exists, it responds with true. Therefore we need to check fs.SameFile as well.\n\t\t// In this case, a call to Rename won't do anything, so we have to rename via a temp file.\n\n\t\t// We don't want to use the standard syncthing prefix here, as that will result in the file being ignored\n\t\t// and eventually deleted by Syncthing if the rename back fails.\n\n\t\ttempPath := fs.TempNameWithPrefix(normPath, \"\")\n\t\tif err = w.Filesystem.Rename(path, tempPath); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif err = w.Filesystem.Rename(tempPath, normPath); err != nil {\n\t\t\t// I don't ever expect this to happen, but if it does, we should probably tell our caller that the normalized\n\t\t\t// path is the temp path: that way at least the user's data still gets synced.\n\t\t\tl.Warnf(`Error renaming \"%s\" to \"%s\" while normalizating UTF8 encoding: %v. 
You will want to rename this file back manually`, tempPath, normPath, err)\n\t\t\treturn tempPath, nil\n\t\t}\n\t\treturn normPath, nil\n\t}\n\t// There is something already in the way at the normalized\n\t// file name.\n\treturn \"\", errUTF8Conflict\n}", "func Unnormalize(path string) string {\n\treturn filepath.FromSlash(path)\n}", "func NormalizeDockerfilePath(context, dockerfile string) (string, error) {\n\t// Expected case: should be found relative to the context directory.\n\t// If it does not exist, check if it's found relative to the current directory in case it's shared.\n\t// Otherwise return the path relative to the context directory, where it should have been.\n\trel := filepath.Join(context, dockerfile)\n\tif _, err := os.Stat(rel); os.IsNotExist(err) {\n\t\tif _, err := os.Stat(dockerfile); err == nil || !os.IsNotExist(err) {\n\t\t\treturn filepath.Abs(dockerfile)\n\t\t}\n\t}\n\tif runtime.GOOS == constants.Windows && (filepath.VolumeName(dockerfile) != \"\" || filepath.IsAbs(dockerfile)) {\n\t\treturn dockerfile, nil\n\t}\n\treturn filepath.Abs(rel)\n}", "func WorkDir() (string, error) {\n\texecPath, err := ExecPath()\n\treturn path.Dir(strings.Replace(execPath, \"\\\\\", \"/\", -1)), err\n}", "func NormalizePath(input string) string {\n\tif !strings.HasPrefix(input, \"/\") {\n\t\tinput = \"/\" + input\n\t}\n\tif strings.LastIndex(input, \".\") == -1 {\n\t\tif !strings.HasSuffix(input, \"/\") {\n\t\t\tinput += \"/\"\n\t\t}\n\t}\n\treturn input\n}", "func (this *Path) WorkDir() string {\n\texecPath := this.ExecPath()\n\treturn path.Dir(strings.Replace(execPath, \"\\\\\", \"/\", -1))\n}", "func WorkDir() string {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn usr.HomeDir\n}", "func normalizePath(s string) string {\n\tif s == \"/\" {\n\t\treturn \"\"\n\t}\n\treturn s\n}", "func (t TemplateDirectory) NormalizePath() (TemplateDirectory, error) {\n\troot, err := io_util.NormalizePath(t.String())\n\tif err != nil 
{\n\t\treturn \"\", err\n\t}\n\treturn TemplateDirectory(root), nil\n}", "func normalizePath(path string) string {\n\tpath = filepath.Clean(path)\n\n\tswitch path {\n\tcase \".\":\n\t\treturn FilePathSeparator\n\tcase \"..\":\n\t\treturn FilePathSeparator\n\tdefault:\n\t\treturn path\n\t}\n}", "func processDirName(dirname string) string {\n\tif dirname[len(dirname)-1:] == \"/\" {\n\t\treturn dirname[:len(dirname)-1]\n\t}\n\treturn dirname\n}", "func Normalize(path string) string {\n\tif filepath.IsAbs(path) {\n\t\trel, err := filepath.Rel(\"/\", path)\n\t\tif err != nil {\n\t\t\tpanic(\"absolute filepath must be relative to /\")\n\t\t}\n\t\treturn rel\n\t}\n\treturn path\n}", "func Normalize(path string) string {\n\tif filepath.IsAbs(path) {\n\t\trel, err := filepath.Rel(\"/\", path)\n\t\tif err != nil {\n\t\t\tpanic(\"absolute filepath must be relative to /\")\n\t\t}\n\t\treturn rel\n\t}\n\treturn path\n}", "func NormalizePathForTesting(path Path) string {\n\tp := path.String()\n\tif w, ok := path.(WritablePath); ok {\n\t\trel, err := filepath.Rel(w.buildDir(), p)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn rel\n\t}\n\treturn p\n}", "func normalize(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tparts := strings.Split(r.URL.Path, \"/\")[1:]\n\t\tswitch parts[0] {\n\t\tcase \"settings\", \"bots\", \"repos\", \"api\", \"login\", \"logout\", \"\", \"authorize\", \"hook\", \"static\", \"gitlab\":\n\t\t\t// no-op\n\t\tdefault:\n\n\t\t\tif len(parts) > 2 && parts[2] != \"settings\" {\n\t\t\t\tparts = append(parts[:2], append([]string{\"builds\"}, parts[2:]...)...)\n\t\t\t}\n\n\t\t\t// prefix the URL with /repo so that it\n\t\t\t// can be effectively routed.\n\t\t\tparts = append([]string{\"\", \"repos\"}, parts...)\n\n\t\t\t// reconstruct the path\n\t\t\tr.URL.Path = strings.Join(parts, \"/\")\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (r *Repo) WorkingDir() (string, error) {\n\tdir, err 
:= gitCMD(\"rev-parse\", \"--show-prefix\")\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tdir = strings.Replace(dir, \"\\n\", \"\", -1)\n\n\treturn dir, nil\n}", "func WorkDir() string { return workDir }", "func WorkDir() string {\n\treturn wd\n}", "func (t TestRepo) Workdir() string {\n\treturn filepath.Clean(t.repo.Workdir())\n}", "func getWorkDirPath(dir string) (string, error) {\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%s%c%s\", path, os.PathSeparator, dir), nil\n}", "func Normalize(path string) string {\n\treturn filepath.Clean(filepath.ToSlash(path))\n}", "func (o BuildSpecRuntimeOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecRuntime) *string { return v.WorkDir }).(pulumi.StringPtrOutput)\n}", "func CleanHostnameDir(hostname string) string {\n\thostname = directoryNameFilter.ReplaceAllString(hostname, \"_\")\n\tif len(hostname) > directoryToHostnameMaxSize {\n\t\treturn hostname[:directoryToHostnameMaxSize]\n\t}\n\treturn hostname\n}", "func normalizePath(s string) string {\n\tseparator := \"/\"\n\tif !strings.HasPrefix(s, separator) {\n\t\ts = separator + s\n\t}\n\n\tif len(s) > 1 && strings.HasSuffix(s, separator) {\n\t\ts = s[:len(s)-1]\n\t}\n\treturn s\n}", "func NormalizePath(path string) (AbsPath, error) {\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn AbsPath(absPath), nil\n}", "func ensureWorkingDir(rootfsPath string, frm def.Formula) {\n\tpth := filepath.Join(rootfsPath, frm.Action.Cwd)\n\tuinfo := UserinfoForPolicy(frm.Action.Policy)\n\tfs.MkdirAllWithAttribs(pth, fs.Metadata{\n\t\tMode: 0755,\n\t\tModTime: fs.Epochwhen,\n\t\tAccessTime: fs.Epochwhen,\n\t\tUid: uinfo.Uid,\n\t\tGid: uinfo.Gid,\n\t})\n}", "func dirNonClean(path string) string {\n\tvol := filepath.VolumeName(path)\n\ti := len(path) - 1\n\tfor i >= len(vol) && !os.IsPathSeparator(path[i]) {\n\t\ti--\n\t}\n\treturn path[len(vol) : 
i+1]\n}", "func sanitizePath(p string) string {\n\tbase := \".\"\n\tif filepath.IsLocal(p) {\n\t\treturn filepath.Clean(p)\n\t}\n\tresult := filepath.Join(base, filepath.Clean(filepath.Base(p)))\n\tif result == \"..\" {\n\t\treturn \".\"\n\t}\n\treturn result\n}", "func (v *VirtualEnvironment) WorkingDirectory() string {\n\twd, err := osutils.Getwd()\n\tif err != nil {\n\t\t// Shouldn't happen unless something is seriously wrong with your system\n\t\tpanic(locale.T(\"panic_couldnt_detect_wd\", map[string]interface{}{\"Error\": err.Error()}))\n\t}\n\n\treturn wd\n}", "func (config *Config) WorkingDir() string {\n\tconfigPath, err := filepath.Abs(config.configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to find config file (%s):\\n%v\\n\", config.configPath, err)\n\t}\n\n\tconfigPath, err = filepath.EvalSymlinks(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to remove symbolic links for filepath (%s):\\n%v\\n\", configPath, err)\n\t}\n\n\treturn filepath.Join(filepath.Dir(configPath), config.Path)\n}", "func normalize(pkg *Package) *Package {\n\tabspaths(pkg.Dir, pkg.GoFiles)\n\tabspaths(pkg.Dir, pkg.CgoFiles)\n\tabspaths(pkg.Dir, pkg.IgnoredGoFiles)\n\tabspaths(pkg.Dir, pkg.TestGoFiles)\n\tabspaths(pkg.Dir, pkg.XTestGoFiles)\n\n\treturn pkg\n}", "func cleanAndExpandPath(path string) string {\n\t// NOTE: The os.ExpandEnv doesn't work with Windows cmd.exe-style\n\t// %VARIABLE%, but the variables can still be expanded via POSIX-style\n\t// $VARIABLE.\n\tpath = os.ExpandEnv(path)\n\n\tif !strings.HasPrefix(path, \"~\") {\n\t\treturn filepath.Clean(path)\n\t}\n\n\t// Expand initial ~ to the current user's home directory, or ~otheruser to\n\t// otheruser's home directory. 
On Windows, both forward and backward\n\t// slashes can be used.\n\tpath = path[1:]\n\n\tvar pathSeparators string\n\tif runtime.GOOS == \"windows\" {\n\t\tpathSeparators = string(os.PathSeparator) + \"/\"\n\t} else {\n\t\tpathSeparators = string(os.PathSeparator)\n\t}\n\n\tuserName := \"\"\n\tif i := strings.IndexAny(path, pathSeparators); i != -1 {\n\t\tuserName = path[:i]\n\t\tpath = path[i:]\n\t}\n\n\thomeDir := \"\"\n\tvar u *user.User\n\tvar err error\n\tif userName == \"\" {\n\t\tu, err = user.Current()\n\t} else {\n\t\tu, err = user.Lookup(userName)\n\t}\n\tif err == nil {\n\t\thomeDir = u.HomeDir\n\t}\n\t// Fallback to CWD if user lookup fails or user has no home directory.\n\tif homeDir == \"\" {\n\t\thomeDir = \".\"\n\t}\n\n\treturn filepath.Join(homeDir, path)\n}", "func cleanAndExpandPath(path string) string {\n\t// NOTE: The os.ExpandEnv doesn't work with Windows cmd.exe-style\n\t// %VARIABLE%, but the variables can still be expanded via POSIX-style\n\t// $VARIABLE.\n\tpath = os.ExpandEnv(path)\n\n\tif !strings.HasPrefix(path, \"~\") {\n\t\treturn filepath.Clean(path)\n\t}\n\n\t// Expand initial ~ to the current user's home directory, or ~otheruser to\n\t// otheruser's home directory. 
On Windows, both forward and backward\n\t// slashes can be used.\n\tpath = path[1:]\n\n\tvar pathSeparators string\n\tif runtime.GOOS == \"windows\" {\n\t\tpathSeparators = string(os.PathSeparator) + \"/\"\n\t} else {\n\t\tpathSeparators = string(os.PathSeparator)\n\t}\n\n\tuserName := \"\"\n\tif i := strings.IndexAny(path, pathSeparators); i != -1 {\n\t\tuserName = path[:i]\n\t\tpath = path[i:]\n\t}\n\n\thomeDir := \"\"\n\tvar u *user.User\n\tvar err error\n\tif userName == \"\" {\n\t\tu, err = user.Current()\n\t} else {\n\t\tu, err = user.Lookup(userName)\n\t}\n\tif err == nil {\n\t\thomeDir = u.HomeDir\n\t}\n\t// Fallback to CWD if user lookup fails or user has no home directory.\n\tif homeDir == \"\" {\n\t\thomeDir = \".\"\n\t}\n\n\treturn filepath.Join(homeDir, path)\n}", "func UserHomeDir() (string, error)", "func cleanAndExpandPath(path string) string {\n\t// Do not try to clean the empty string\n\tif path == \"\" {\n\t\treturn \"\"\n\t}\n\n\t// NOTE: The os.ExpandEnv doesn't work with Windows cmd.exe-style\n\t// %VARIABLE%, but the variables can still be expanded via POSIX-style\n\t// $VARIABLE.\n\tpath = os.ExpandEnv(path)\n\tif !strings.HasPrefix(path, \"~\") {\n\t\treturn filepath.Clean(path)\n\t}\n\n\t// Expand initial ~ to the current user's home directory, or ~otheruser to\n\t// otheruser's home directory. 
On Windows, both forward and backward\n\t// slashes can be used.\n\tpath = path[1:]\n\n\tvar pathSeparators string\n\tif runtime.GOOS == \"windows\" {\n\t\tpathSeparators = string(os.PathSeparator) + \"/\"\n\t} else {\n\t\tpathSeparators = string(os.PathSeparator)\n\t}\n\n\tuserName := \"\"\n\tif i := strings.IndexAny(path, pathSeparators); i != -1 {\n\t\tuserName = path[:i]\n\t\tpath = path[i:]\n\t}\n\n\thomeDir := \"\"\n\tvar u *user.User\n\tvar err error\n\tif userName == \"\" {\n\t\tu, err = user.Current()\n\t} else {\n\t\tu, err = user.Lookup(userName)\n\t}\n\tif err == nil {\n\t\thomeDir = u.HomeDir\n\t}\n\t// Fallback to CWD if user lookup fails or user has no home directory.\n\tif homeDir == \"\" {\n\t\thomeDir = \".\"\n\t}\n\n\treturn filepath.Join(homeDir, path)\n}", "func (o BuildRunStatusBuildSpecRuntimeOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecRuntime) *string { return v.WorkDir }).(pulumi.StringPtrOutput)\n}", "func locateWorkDir() (string, error) {\n\t// 1. Use work directory if explicitly passed in as an env var.\n\tif v, ok := os.LookupEnv(EnvTestgroundWorkDir); ok {\n\t\treturn v, ensureDir(v)\n\t}\n\n\t// 2. Use \"$HOME/.testground\" as the work directory.\n\thome, ok := os.LookupEnv(\"HOME\")\n\tif !ok {\n\t\treturn \"\", errors.New(\"$HOME env variable not declared; cannot calculate work directory\")\n\t}\n\tp := path.Join(home, \".testground\")\n\treturn p, ensureDir(p)\n}", "func CleanAndExpandPath(path string) string {\n\t// Nothing to do when no path is given.\n\tif path == \"\" {\n\t\treturn path\n\t}\n\n\t// NOTE: The os.ExpandEnv doesn't work with Windows cmd.exe-style\n\t// %VARIABLE%, but the variables can still be expanded via POSIX-style\n\t// $VARIABLE.\n\tpath = os.ExpandEnv(path)\n\n\tif !strings.HasPrefix(path, \"~\") {\n\t\treturn filepath.Clean(path)\n\t}\n\n\t// Expand initial ~ to the current user's home directory, or ~otheruser\n\t// to otheruser's home directory. 
On Windows, both forward and backward\n\t// slashes can be used.\n\tpath = path[1:]\n\n\tvar pathSeparators string\n\tif runtime.GOOS == \"windows\" {\n\t\tpathSeparators = string(os.PathSeparator) + \"/\"\n\t} else {\n\t\tpathSeparators = string(os.PathSeparator)\n\t}\n\n\tuserName := \"\"\n\tif i := strings.IndexAny(path, pathSeparators); i != -1 {\n\t\tuserName = path[:i]\n\t\tpath = path[i:]\n\t}\n\n\thomeDir := \"\"\n\tvar u *user.User\n\tvar err error\n\tif userName == \"\" {\n\t\tu, err = user.Current()\n\t} else {\n\t\tu, err = user.Lookup(userName)\n\t}\n\tif err == nil {\n\t\thomeDir = u.HomeDir\n\t}\n\t// Fallback to CWD if user lookup fails or user has no home directory.\n\tif homeDir == \"\" {\n\t\thomeDir = \".\"\n\t}\n\n\treturn filepath.Join(homeDir, path)\n}", "func locateWorkDir() (string, error) {\n\t// 1. Use work directory if explicitly passed in as an env var.\n\tif v, ok := os.LookupEnv(EnvTestgroundWorkDir); ok {\n\t\treturn v, ensureDir(v)\n\t}\n\n\t// 2. Use \"$HOME/.testground\" as the work directory.\n\n\thome, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tp := path.Join(home, \".testground\")\n\treturn p, ensureDir(p)\n}", "func WorkingDir(dir string) types.Option {\n\treturn func(g *types.Cmd) {\n\t\tg.Dir = dir\n\t}\n}", "func WorkingDir() string {\n\tdir, _ := os.Getwd()\n\treturn dir\n}", "func SystemDirectory() (dir string, e error) {\r\n\te = errors.New(\"SystemDirectory: failed\")\r\n\r\n\tif numOfWC := C.GetSystemDirectoryW(nil, 0); numOfWC > 0 {\r\n\t\twideStr := make([]C.wchar_t, numOfWC)\r\n\t\tif rc := C.GetSystemDirectoryW((*C.WCHAR)(&wideStr[0]), numOfWC); rc > 0 {\r\n\t\t\tif utf8Str, err := wideToMB(C.CP_UTF8, wideStr); err == nil {\r\n\t\t\t\tdir, e = utf8Str, nil\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}", "func (p *Process) WorkDir() string {\n\treturn p.Builder.WorkDir\n}", "func normalize(pkg *Package) *Package {\n\tabspaths(pkg.Dir, pkg.GoFiles)\n\tabspaths(pkg.Dir, 
pkg.CgoFiles)\n\t//abspaths(pkg.Dir, pkg.CompiledGoFiles)\n\tabspaths(pkg.Dir, pkg.IgnoredGoFiles)\n\tabspaths(pkg.Dir, pkg.CFiles)\n\tabspaths(pkg.Dir, pkg.CXXFiles)\n\tabspaths(pkg.Dir, pkg.MFiles)\n\tabspaths(pkg.Dir, pkg.HFiles)\n\tabspaths(pkg.Dir, pkg.FFiles)\n\tabspaths(pkg.Dir, pkg.SFiles)\n\tabspaths(pkg.Dir, pkg.SwigFiles)\n\tabspaths(pkg.Dir, pkg.SwigCXXFiles)\n\tabspaths(pkg.Dir, pkg.SysoFiles)\n\tabspaths(pkg.Dir, pkg.TestGoFiles)\n\tabspaths(pkg.Dir, pkg.XTestGoFiles)\n\n\treturn pkg\n}", "func LintWorkflowDir(wfClientset wfclientset.Interface, namespace, dirPath string, strict bool) error {\n\twalkFunc := func(path string, info os.FileInfo, err error) error {\n\t\tif info == nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tfileExt := filepath.Ext(info.Name())\n\t\tswitch fileExt {\n\t\tcase \".yaml\", \".yml\", \".json\":\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t\treturn LintWorkflowFile(wfClientset, namespace, path, strict)\n\t}\n\treturn filepath.Walk(dirPath, walkFunc)\n}", "func enclosingDir(path string) string {\n\tfor {\n\t\tif stat, err := os.Lstat(path); err == nil {\n\t\t\tif stat.IsDir() {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\tif path == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\tpath = filepath.Dir(path)\n\t}\n}", "func unixPath(s string) string {\n\ts = filepath.Clean(s)\n\ts = strings.Replace(s, \"\\\\\", \"/\", -1)\n\n\t// Duplicate clean for trailing slashes that were previously windows ones.\n\treturn filepath.Clean(s)\n}", "func RelativeToCwd(relativePath string) (string, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn filepath.Join(cwd, relativePath), nil\n}", "func GetWorkDirPath(dir string, t *testing.T) string {\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get working directory: %s\", err)\n\t}\n\treturn fmt.Sprintf(\"%s%c%s\", path, os.PathSeparator, dir)\n}", "func WorkDir() string {\n\tworkDirOnce.Do(func() {\n\t\tworkDir = 
os.Getenv(\"GOGS_WORK_DIR\")\n\t\tif workDir != \"\" {\n\t\t\treturn\n\t\t}\n\n\t\tworkDir = filepath.Dir(AppPath())\n\t})\n\n\treturn workDir\n}", "func (o BuildStrategySpecBuildStepsOutput) WorkingDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildSteps) *string { return v.WorkingDir }).(pulumi.StringPtrOutput)\n}", "func CleanDir(o string) string {\n\to = strings.TrimSuffix(o, \"\\n\")\n\to = strings.TrimSuffix(o, \"\\r\")\n\to = strings.TrimSuffix(o, \"\\\"\")\n\to = strings.TrimPrefix(o, \"\\\"\")\n\treturn o\n}", "func UserHomeDir(tc Context) (string, error) {\n\treturn os.UserHomeDir()\n}", "func normPath(bases []string, abspath string) (string, error) {\n\tfor _, base := range bases {\n\t\tabsbase, err := filepath.Abs(base)\n\t\tif isUnder(absbase, abspath) {\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\trelpath, err := filepath.Rel(absbase, abspath)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn filepath.Join(base, relpath), nil\n\t\t}\n\t}\n\treturn abspath, nil\n}", "func ParsePWD(usr_info []string, path string) []string {\n\treturn SplitPath(strings.TrimPrefix(path, GetUserBaseDir(usr_info)))\n}", "func getWorkingDir() (string, error) {\n\texPath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Dir(exPath), nil\n}", "func normalizePathSeparator(path string) string {\n\treturn strings.ReplaceAll(path, string(os.PathSeparator), \"/\")\n}", "func getWorkingDirectory(jobSpecFs afero.Fs, root string) (string, error) {\n\tdirectories, err := afero.ReadDir(jobSpecFs, root)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(directories) == 0 {\n\t\treturn root, nil\n\t}\n\n\tcurrentFolder := \". 
(current directory)\"\n\n\tavailableDirs := []string{currentFolder}\n\tfor _, dir := range directories {\n\t\tif !dir.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// if it contain job or resource, skip it from valid options\n\t\tdirItems, err := afero.ReadDir(jobSpecFs, filepath.Join(root, dir.Name()))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tvar alreadyOccupied bool\n\t\tfor _, dirItem := range dirItems {\n\t\t\tif utils.ContainsString(specFileNames, dirItem.Name()) {\n\t\t\t\talreadyOccupied = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif alreadyOccupied {\n\t\t\tcontinue\n\t\t}\n\t\tavailableDirs = append(availableDirs, dir.Name())\n\t}\n\n\tmessageStr := \"Select directory to save specification?\"\n\tif root != \"\" {\n\t\tmessageStr = fmt.Sprintf(\"%s [%s]\", messageStr, root)\n\t}\n\tvar selectedDir string\n\tif err = survey.AskOne(&survey.Select{\n\t\tMessage: messageStr,\n\t\tDefault: currentFolder,\n\t\tHelp: \"Optimus helps organize specifications in sub-directories.\\nPlease select where you want this new specification to be stored\",\n\t\tOptions: availableDirs,\n\t}, &selectedDir); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// check for sub directories\n\tif selectedDir != currentFolder {\n\t\treturn getWorkingDirectory(jobSpecFs, filepath.Join(root, selectedDir))\n\t}\n\n\treturn root, nil\n}", "func Getwd() (string, error)", "func (o ClusterBuildStrategySpecBuildStepsOutput) WorkingDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildSteps) *string { return v.WorkingDir }).(pulumi.StringPtrOutput)\n}", "func CleanPath(path string) string {\n\tresult := path\n\n\tif IsWindows() {\n\t\tresult = strings.Replace(result, \"/\", string(filepath.Separator), -1)\n\t} else {\n\t\tresult = strings.Replace(result, \"\\\\\", string(filepath.Separator), -1)\n\t}\n\n\tp := strings.Index(result, \"~\")\n\n\tif p != -1 {\n\t\tuserHomeDir := \"\"\n\n\t\tusr, err := user.Current()\n\t\tif !Error(err) {\n\t\t\tuserHomeDir = 
usr.HomeDir\n\t\t}\n\n\t\tresult = strings.Replace(result, \"~\", userHomeDir, -1)\n\t}\n\n\tresult = filepath.Clean(result)\n\n\tif !filepath.IsAbs(result) && !strings.HasPrefix(result, string(filepath.Separator)) {\n\t\tvar dir string\n\t\tvar err error\n\n\t\tif IsRunningInteractive() || strings.HasPrefix(path, \".\"+string(filepath.Separator)) {\n\t\t\tdir, err = os.Getwd()\n\t\t} else {\n\t\t\tdir, err = os.Executable()\n\t\t\tif err == nil {\n\t\t\t\tdir = filepath.Dir(dir)\n\t\t\t}\n\t\t}\n\n\t\tif !Error(err) {\n\t\t\tresult = filepath.Join(dir, result)\n\t\t}\n\t}\n\n\tr := strings.NewReplacer(\"\\\"\", \"\")\n\tresult = r.Replace(result)\n\n\tDebugFunc(\"%s -> %s\", path, result)\n\n\treturn result\n}", "func (app *AppBuilder) WorkingDir() *AppBuilder {\n\tnowPath, _ := os.Getwd()\n\tapp.workingDir = path.Join(nowPath, app.projectName)\n\treturn app\n}", "func FromWindows(path string) (string, error) {\n\tserver.wg.Wait()\n\tout, err := exec.Command(\"winepath\", \"--unix\", \"-0\", path).Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes.TrimSuffix(out, []byte{0})), nil\n}", "func getRootDir() (string, error) {\n\t//TODO: fix this!! 
think it's a tad dodgy!\n\tpwd, _ := os.Getwd()\n\tlog.Printf(\"[DEBUG] getRootDir pwd is: %v\", pwd)\n\n\tb := strings.Contains(pwd, rootDirName)\n\tif !b {\n\t\treturn \"\", fmt.Errorf(\"could not find '%v' root directory in %v\", rootDirName, pwd)\n\t}\n\n\ts := strings.SplitAfter(pwd, rootDirName)\n\tlog.Printf(\"[DEBUG] path(s) after splitting: %v\\n\", s)\n\n\tif len(s) < 1 {\n\t\t//expect at least one result\n\t\treturn \"\", fmt.Errorf(\"could not split out '%v' from directory in %v\", rootDirName, pwd)\n\t}\n\n\tif !strings.HasSuffix(s[0], rootDirName) {\n\t\t//the first path should end with \"probr\"\n\t\treturn \"\", fmt.Errorf(\"first path after split (%v) does not end with '%v'\", s[0], rootDirName)\n\t}\n\n\treturn s[0], nil\n}", "func WorkingDirectory() (string, int) {\n working_directory, err := os.Getwd()\n if err != nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n os.Exit(1)\n }\n directories := strings.Split(working_directory, \"/\")\n return working_directory, len(directories)-1\n}", "func normalizeStr(str string) string {\n\treturn strings.Replace(str, \"/\", \"-\", -1)\n}", "func GetHydrationDir(opts config.SkaffoldOptions, workingDir string, promptIfNeeded bool, isKptRendererOrDeployerUsed bool) (string, error) {\n\tvar hydratedDir string\n\tvar err error\n\n\tif !isKptRendererOrDeployerUsed {\n\t\tlog.Entry(context.TODO()).Info(\"no kpt renderer or deployer found, skipping hydrated-dir creation\")\n\t\treturn \"\", nil\n\t}\n\n\tif opts.HydrationDir == constants.DefaultHydrationDir {\n\t\thydratedDir = filepath.Join(workingDir, constants.DefaultHydrationDir)\n\t\tpromptIfNeeded = false\n\t} else {\n\t\thydratedDir = opts.HydrationDir\n\t}\n\tif hydratedDir, err = filepath.Abs(hydratedDir); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := os.Stat(hydratedDir); os.IsNotExist(err) {\n\t\tlog.Entry(context.TODO()).Infof(\"hydrated-dir does not exist, creating %v\\n\", hydratedDir)\n\t\tif err := os.MkdirAll(hydratedDir, 
os.ModePerm); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else if !isDirEmpty(hydratedDir) {\n\t\tif promptIfNeeded && !opts.AssumeYes {\n\t\t\tfmt.Println(\"you can skip this promp message with flag \\\"--assume-yes=true\\\"\")\n\t\t\tif ok := confirmHydrationDirOverride(os.Stdin); !ok {\n\t\t\t\tcmd.Exit(nil)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Entry(context.TODO()).Infof(\"manifests hydration will take place in %v\\n\", hydratedDir)\n\treturn hydratedDir, nil\n}", "func GetUserSpecialDir(directory UserDirectory) (string, error) {\n\tc := C.g_get_user_special_dir(C.GUserDirectory(directory))\n\tif c == nil {\n\t\treturn \"\", nilPtrErr\n\t}\n\treturn C.GoString((*C.char)(c)), nil\n}", "func computeProgramDirectory() string {\n\tlocation := js.Global().Get(\"location\")\n\turl := location.Get(\"href\").String()\n\turl = filepath.Dir(url)\n\turl = strings.TrimPrefix(url, \"file:/\")\n\tif strings.HasPrefix(url, \"http:/\") {\n\t\turl = strings.TrimPrefix(url, \"http:/\")\n\t\turl = \"http://\" + url\n\t}\n\tif strings.HasPrefix(url, \"https:/\") {\n\t\turl = strings.TrimPrefix(url, \"https:/\")\n\t\turl = \"https://\" + url\n\t}\n\treturn url\n}", "func sanitizeCacheDir(image string) string {\n\tif runtime.GOOS == \"windows\" && hasWindowsDriveLetter(image) {\n\t\t// not sanitize Windows drive letter.\n\t\treturn image[:2] + strings.Replace(image[2:], \":\", \"_\", -1)\n\t}\n\treturn strings.Replace(image, \":\", \"_\", -1)\n}", "func tildeExpand(path string) (string, error) {\n\tpath = filepath.Clean(path)\n\tif path[:2] == \"~/\" {\n\t\tusr, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn path, err\n\t\t}\n\t\thomedir := usr.HomeDir\n\t\treturn strings.Replace(path, \"~\", homedir, 1), nil\n\t} else if path[:1] == \"~\" {\n\t\tslashindex := strings.Index(path, \"/\")\n\t\tvar username string\n\t\tif slashindex == -1 {\n\t\t\tusername = path[1:]\n\t\t} else {\n\t\t\tusername = path[1:slashindex]\n\t\t}\n\t\tusr, err := user.Lookup(username)\n\t\tif err != 
nil {\n\t\t\treturn path, err\n\t\t}\n\t\thomedir := usr.HomeDir\n\t\treturn homedir + path[slashindex:], nil\n\t} else {\n\t\treturn path, nil\n\t}\n}", "func setupWorkingDirectory(ctx context.Context, fs afero.Fs, wd string) (afero.File, error) {\n\tlog.Debug(ctx, \"creating directory %s in Filesystem %s\", wd, fs.Name())\n\tif err := fs.MkdirAll(wd, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\n\tu, err := user.Current()\n\tif err != nil {\n\t\tlog.Error(ctx, \"Error while getting current user %v\", err)\n\t} else if u != nil && u.HomeDir != \"\" {\n\t\tif err := os.Setenv(\"HOME_CDS_PLUGINS\", u.HomeDir); err != nil {\n\t\t\tlog.Error(ctx, \"Error while setting home_plugin %v\", err)\n\t\t}\n\t}\n\n\tvar absWD string\n\tif x, ok := fs.(*afero.BasePathFs); ok {\n\t\tabsWD, _ = x.RealPath(wd)\n\t} else {\n\t\tabsWD = wd\n\t}\n\tif err := os.Setenv(\"HOME\", absWD); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := fs.Open(wd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fi, nil\n}", "func (o BuildSpecRuntimePtrOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecRuntime) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.WorkDir\n\t}).(pulumi.StringPtrOutput)\n}", "func (e *Engine) WorkDir() string {\n\treturn e.dirs.work\n}", "func WithWorkDir(v string) (p Pair) {\n\treturn Pair{Key: \"work_dir\", Value: v}\n}", "func normalizeLocation(location *url.URL) *url.URL {\n\tnormalized, _ := url.Parse(location.String())\n\tif len(normalized.Scheme) == 0 {\n\t\tnormalized.Scheme = \"http\"\n\t}\n\treturn normalized\n}", "func TestNormalizePaths(t *testing.T) {\n\ttype testNormalizePathsTestCases []struct {\n\t\trefPath string\n\t\tbase string\n\t\texpOutput string\n\t}\n\n\ttestCases := func() testNormalizePathsTestCases {\n\t\ttestCases := testNormalizePathsTestCases{\n\t\t\t{\n\t\t\t\t// http basePath, absolute refPath\n\t\t\t\trefPath: 
\"http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet\",\n\t\t\t\tbase: \"http://www.example.com/base/path/swagger.json\",\n\t\t\t\texpOutput: \"http://www.anotherexample.com/another/base/path/swagger.json#/definitions/Pet\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t// http basePath, relative refPath\n\t\t\t\trefPath: \"another/base/path/swagger.json#/definitions/Pet\",\n\t\t\t\tbase: \"http://www.example.com/base/path/swagger.json\",\n\t\t\t\texpOutput: \"http://www.example.com/base/path/another/base/path/swagger.json#/definitions/Pet\",\n\t\t\t},\n\t\t}\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\ttestCases = append(testCases, testNormalizePathsTestCases{\n\t\t\t\t{\n\t\t\t\t\t// file basePath, absolute refPath, no fragment\n\t\t\t\t\trefPath: `C:\\another\\base\\path.json`,\n\t\t\t\t\tbase: `C:\\base\\path.json`,\n\t\t\t\t\texpOutput: `c:\\another\\base\\path.json`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// file basePath, absolute refPath\n\t\t\t\t\trefPath: `C:\\another\\base\\path.json#/definitions/Pet`,\n\t\t\t\t\tbase: `C:\\base\\path.json`,\n\t\t\t\t\texpOutput: `c:\\another\\base\\path.json#/definitions/Pet`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// file basePath, relative refPath\n\t\t\t\t\trefPath: `another\\base\\path.json#/definitions/Pet`,\n\t\t\t\t\tbase: `C:\\base\\path.json`,\n\t\t\t\t\texpOutput: `c:\\base\\another\\base\\path.json#/definitions/Pet`,\n\t\t\t\t},\n\t\t\t}...)\n\t\t\treturn testCases\n\t\t}\n\t\t// linux case\n\t\ttestCases = append(testCases, testNormalizePathsTestCases{\n\t\t\t{\n\t\t\t\t// file basePath, absolute refPath, no fragment\n\t\t\t\trefPath: \"/another/base/path.json\",\n\t\t\t\tbase: \"/base/path.json\",\n\t\t\t\texpOutput: \"/another/base/path.json\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t// file basePath, absolute refPath\n\t\t\t\trefPath: \"/another/base/path.json#/definitions/Pet\",\n\t\t\t\tbase: \"/base/path.json\",\n\t\t\t\texpOutput: \"/another/base/path.json#/definitions/Pet\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t// file 
basePath, relative refPath\n\t\t\t\trefPath: \"another/base/path.json#/definitions/Pet\",\n\t\t\t\tbase: \"/base/path.json\",\n\t\t\t\texpOutput: \"/base/another/base/path.json#/definitions/Pet\",\n\t\t\t},\n\t\t}...)\n\t\treturn testCases\n\t}()\n\n\tfor _, tcase := range testCases {\n\t\tout := normalizePaths(tcase.refPath, tcase.base)\n\t\tassert.Equal(t, tcase.expOutput, out)\n\t}\n}", "func AbsPathify(workingDir, inPath string) string {\n\tif filepath.IsAbs(inPath) {\n\t\treturn filepath.Clean(inPath)\n\t}\n\treturn filepath.Join(workingDir, inPath)\n}", "func GetUserBaseDir(usr_info []string) string {\n\tusr_dir := EncryptString(usr_info[USR_INFO_USERNAME], usr_info[USR_INFO_KEY])\n\treturn GetBaseDir(usr_dir)\n}", "func (h *handler) HandleCWD(dir string) {\n\t// convert to absolute path\n\tp := dir\n\tif !path.IsAbs(dir) {\n\t\tp = path.Join(h.dir, dir)\n\t}\n\n\t// ensure path is valid\n\tinfo, err := os.Lstat(p)\n\tif err != nil {\n\t\th.logError(err)\n\t\th.writeReply(newReply(\"550\", \"Directory change failed.\"))\n\t\treturn\n\t}\n\n\t// ensure path is directory\n\tif !info.IsDir() {\n\t\th.writeReply(newReply(\"550\", fmt.Sprintf(\"%s: Not a directory.\", dir)))\n\t\treturn\n\t}\n\n\th.dir = p\n\n\th.writeReply(newReply(\"250\", \"Directory change successful.\"))\n}", "func normalizeDomain(domain string) (string, error) {\n\tdomain = strings.Trim(strings.ToLower(domain), \" \")\n\t// not checking if it belongs to icann\n\tsuffix, _ := publicsuffix.PublicSuffix(domain)\n\tif domain != \"\" && suffix == domain { // input is publicsuffix\n\t\treturn \"\", errors.New(\"domain [\" + domain + \"] is public suffix\")\n\t}\n\tif !strings.HasPrefix(domain, \"http\") {\n\t\tdomain = fmt.Sprintf(\"http://%s\", domain)\n\t}\n\turl, err := url.Parse(domain)\n\tif nil == err && url.Host != \"\" {\n\t\treturn strings.Replace(url.Host, \"www.\", \"\", 1), nil\n\t}\n\treturn \"\", err\n}", "func ResolveUserFriendlyPath(path string, relativeToHome bool) string 
{\n\thome, _ := os.UserHomeDir()\n\tif home != \"\" && strings.HasPrefix(path, \"~\") {\n\t\treturn home + path[1:]\n\t}\n\n\tif IsAbs(path) {\n\t\treturn path\n\t}\n\n\tif relativeToHome {\n\t\treturn filepath.Join(home, path)\n\t}\n\n\treturn path\n}", "func Canonicalize(nsPath string) string {\n\tif nsPath == \"\" {\n\t\treturn \"\"\n\t}\n\n\t// Canonicalize the path to not have a '/' prefix\n\tnsPath = strings.TrimPrefix(nsPath, \"/\")\n\n\t// Canonicalize the path to always having a '/' suffix\n\tif !strings.HasSuffix(nsPath, \"/\") {\n\t\tnsPath += \"/\"\n\t}\n\n\treturn nsPath\n}", "func cleanPath(path *string, isrelativeclean bool) string {\n\tif isrelativeclean {\n\t\tabspath, _ := filepath.Abs(*path)\n\t\treturn addPathSeparator(filepath.Clean(abspath))\n\t}\n\n\treturn addPathSeparator(filepath.Clean(*path))\n}", "func GetCurrentDirectory(nBufferLength DWORD, lpBuffer LPWSTR) DWORD {\n\tret1 := syscall3(getCurrentDirectory, 2,\n\t\tuintptr(nBufferLength),\n\t\tuintptr(unsafe.Pointer(lpBuffer)),\n\t\t0)\n\treturn DWORD(ret1)\n}", "func GetValidFolderPath(url string) string {\n\tsafePath := filepath.Join(GetUserHome(), appHome)\n\tfullQualifyPath, err := filepath.Abs(filepath.Join(GetUserHome(), appHome, tempFolderName(url)))\n\tHandleError(err)\n\n\t//must ensure full qualify path is CHILD of safe path\n\t//to prevent directory traversal attack\n\t//using Rel function to get relative between parent and child\n\t//if relative join base == child, then child path MUST BE real child\n\trelative, err := filepath.Rel(safePath, fullQualifyPath)\n\tHandleError(err)\n\n\tif strings.Contains(relative, \"..\") {\n\t\tHandleError(errors.New(\"you may be a victim of directory traversal path attack\"))\n\t\treturn \"\" //return is redundant because in fatal check we have panic, but compiler does not able to check\n\t}\n\n\treturn fullQualifyPath\n}", "func (o BuildRunStatusBuildSpecRuntimePtrOutput) WorkDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
*BuildRunStatusBuildSpecRuntime) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.WorkDir\n\t}).(pulumi.StringPtrOutput)\n}", "func GetWindowsDirectory(lpBuffer LPWSTR, uSize UINT) UINT {\n\tret1 := syscall3(getWindowsDirectory, 2,\n\t\tuintptr(unsafe.Pointer(lpBuffer)),\n\t\tuintptr(uSize),\n\t\t0)\n\treturn UINT(ret1)\n}", "func NormalizeURL(url string) string {\n\tfor strings.HasSuffix(url, \"/\") {\n\t\turl = url[:len(url)-1]\n\t}\n\treturn url\n}", "func UserConfigDir(tc Context) (string, error) {\n\treturn os.UserConfigDir()\n}", "func (u *Uploader) normalizeFilenameFromCache(dir, file string) string {\n\tfile = filepath.Base(file)\n\treturn filepath.Join(dir, file)\n}", "func (chrome *Chrome) Workdir() string {\n\tif \"\" == chrome.workdir {\n\t\tchrome.workdir = filepath.Join(os.TempDir(), \"headless-chrome\")\n\t}\n\treturn chrome.workdir\n}" ]
[ "0.63211966", "0.6105871", "0.59868485", "0.59674704", "0.5965855", "0.5911019", "0.5908416", "0.5823606", "0.57539886", "0.56102383", "0.55527806", "0.5523205", "0.54883075", "0.5439085", "0.541707", "0.5357461", "0.5356698", "0.53321666", "0.53159106", "0.53089756", "0.53089756", "0.52804816", "0.5260132", "0.5259103", "0.52432245", "0.5203338", "0.51933604", "0.51883274", "0.5171347", "0.5158378", "0.51288795", "0.51195717", "0.5107291", "0.51013947", "0.50673926", "0.5063504", "0.50595874", "0.50490826", "0.50369257", "0.50194836", "0.50194836", "0.50135225", "0.49660063", "0.49651775", "0.49651244", "0.4961692", "0.4945923", "0.4943933", "0.49427494", "0.49369746", "0.49232432", "0.49074602", "0.48818368", "0.48815885", "0.48724794", "0.4864616", "0.48606196", "0.4849818", "0.48438412", "0.48387092", "0.48314276", "0.48284978", "0.4819891", "0.48172227", "0.4813936", "0.47957796", "0.47780657", "0.47764516", "0.47636285", "0.4763331", "0.47627133", "0.47564012", "0.47381678", "0.47186106", "0.47109652", "0.4710349", "0.47056845", "0.46965802", "0.46764615", "0.4664338", "0.46635365", "0.46563378", "0.46452522", "0.46214584", "0.4616063", "0.46104723", "0.46069735", "0.45879716", "0.4580819", "0.45739534", "0.45738098", "0.4572041", "0.45716426", "0.45611882", "0.45504194", "0.45444345", "0.45428315", "0.45385522", "0.45363513", "0.45352593" ]
0.8082518
0
resolveCmdLine takes a command line arg set and optionally prepends a platformspecific shell in front of it.
func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os, _, _ string) ([]string, bool) { result := cmd.CmdLine if cmd.PrependShell && result != nil { result = append(getShell(runConfig, os), result...) } return result, false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func makeCmdLine(args []string) string {\n\tvar s string\n\tfor _, v := range args {\n\t\tif s != \"\" {\n\t\t\ts += \" \"\n\t\t}\n\t\ts += windows.EscapeArg(v)\n\t}\n\treturn s\n}", "func prepareArgs(subCmdName string, fSet *flag.FlagSet, override map[string]string) []string {\n\tvar args []string\n\t// Add all args up until (and including) the sub command.\n\tfor _, arg := range os.Args {\n\t\targs = append(args, arg)\n\t\tif arg == subCmdName {\n\t\t\tbreak\n\t\t}\n\t}\n\t// Set sub command flags. Iterate through all the explicitly set flags.\n\tfSet.Visit(func(gf *flag.Flag) {\n\t\t// If a conflict is found with override, then prefer override flag.\n\t\tif ov, ok := override[gf.Name]; ok {\n\t\t\targs = append(args, fmt.Sprintf(\"--%s=%s\", gf.Name, ov))\n\t\t\tdelete(override, gf.Name)\n\t\t\treturn\n\t\t}\n\t\t// Otherwise pass through the original flag.\n\t\targs = append(args, fmt.Sprintf(\"--%s=%s\", gf.Name, gf.Value))\n\t})\n\t// Apply remaining override flags (that didn't conflict above).\n\tfor of, ov := range override {\n\t\targs = append(args, fmt.Sprintf(\"--%s=%s\", of, ov))\n\t}\n\t// Add the non-flag arguments at the end.\n\targs = append(args, fSet.Args()...)\n\treturn args\n}", "func (c *container) SpecSetProcessArgs(imageOCIConfig *v1.Image) error {\n\tkubeCommands := c.config.Command\n\tkubeArgs := c.config.Args\n\n\t// merge image config and kube config\n\t// same as docker does today...\n\tif imageOCIConfig != nil {\n\t\tif len(kubeCommands) == 0 {\n\t\t\tif len(kubeArgs) == 0 {\n\t\t\t\tkubeArgs = imageOCIConfig.Config.Cmd\n\t\t\t}\n\t\t\tif kubeCommands == nil {\n\t\t\t\tkubeCommands = imageOCIConfig.Config.Entrypoint\n\t\t\t}\n\t\t}\n\t}\n\n\t// create entrypoint and args\n\tvar entrypoint string\n\tvar args []string\n\tswitch {\n\tcase len(kubeCommands) != 0:\n\t\tentrypoint = kubeCommands[0]\n\t\targs = kubeCommands[1:]\n\t\targs = append(args, kubeArgs...)\n\tcase len(kubeArgs) != 0:\n\t\tentrypoint = kubeArgs[0]\n\t\targs = 
kubeArgs[1:]\n\tdefault:\n\t\treturn errors.New(\"no command specified\")\n\t}\n\n\tc.spec.SetProcessArgs(append([]string{entrypoint}, args...))\n\treturn nil\n}", "func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {\n\tif process.CommandLine != \"\" {\n\t\tcreateProcessParms.CommandLine = process.CommandLine\n\t} else {\n\t\tcreateProcessParms.CommandLine = system.EscapeArgs(process.Args)\n\t}\n}", "func splitKernelCmdLine(cmdLine string) ([]map[string]string, error) {\n\tvar cmdLines []map[string]string\n\tif len(cmdLine) == 0 {\n\t\treturn cmdLines, nil\n\t}\n\n\tcurrCmdLine := make(map[string]string)\n\tkeylessCmdLineArgs := []string{}\n\n\targVals := strings.Split(cmdLine, \" \")\n\tfor _, argVal := range argVals {\n\t\tif !strings.Contains(argVal, \"=\") {\n\t\t\t// keyless cmd line (eg: nosplash)\n\t\t\tkeylessCmdLineArgs = append(keylessCmdLineArgs, argVal)\n\t\t\tcontinue\n\t\t}\n\n\t\tkv := strings.SplitN(argVal, \"=\", 2)\n\t\tk, v := kv[0], kv[1]\n\t\t// if the key is duplicate, start a new map\n\t\tif _, ok := currCmdLine[k]; ok {\n\t\t\tcmdLines = append(cmdLines, currCmdLine)\n\t\t\tcurrCmdLine = make(map[string]string)\n\t\t}\n\t\tcurrCmdLine[k] = v\n\t}\n\tif len(currCmdLine) > 0 {\n\t\tcmdLines = append(cmdLines, currCmdLine)\n\t}\n\tif len(keylessCmdLineArgs) > 0 {\n\t\tcl := make(map[string]string)\n\t\tcl[\"_\"] = strings.Join(keylessCmdLineArgs, \" \")\n\t\tcmdLines = append(cmdLines, cl)\n\t}\n\treturn cmdLines, nil\n}", "func normalizeCMDLineArgs(args []string) []string {\n\tif len(args) == 1 {\n\t\treturn args\n\t}\n\n\taction := \"\"\n\tpos := -1\n\tfor i, arg := range args[1:] {\n\t\tif !strings.HasPrefix(arg, \"-\") {\n\t\t\taction = arg\n\t\t\tif i == 0 || (i == pos+1 && i != 1) {\n\t\t\t\t// exit loop if 1st argument or if two consecuting values appear\n\t\t\t\tpos = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpos = i\n\t\t}\n\t}\n\n\tvar result []string\n\tif pos == -1 {\n\t\tresult = args\n\t} else 
{\n\t\tresult = args[:pos+1]\n\t\tif pos < len(args) {\n\t\t\tresult = append(result, args[pos+2:]...)\n\t\t}\n\t\tresult = append(result, action)\n\t}\n\treturn result\n}", "func ResolveShell(path string, args []string) (*Shell, error) {\n\tbuilder := builder{\"\"}\n\tosshell := os.Getenv(SHELL_ENV)\n\n\tbuilder.\n\t\tuse(path).\n\t\tuse(osshell).\n\t\tuse(SHELL_DEFAULT)\n\n\tif binary, err := builder.build(); nil == err {\n\t\tD(\"using shell\", binary)\n\n\t\treturn &Shell{binary, args}, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func (c *configuration) cmdlineOverride(opts *cliOptions) {\n\t// Populate options that can be provided on both the commandline and config.\n\tif opts.Port > 0 {\n\t\tc.Port = int(opts.Port)\n\t}\n\tif opts.Rank != nil {\n\t\t// global rank parameter should only apply to first I/O service\n\t\tc.Servers[0].Rank = opts.Rank\n\t}\n\tif opts.Insecure {\n\t\tc.TransportConfig.AllowInsecure = true\n\t}\n\t// override each per-server config\n\tfor i := range c.Servers {\n\t\tsrv := &c.Servers[i]\n\n\t\tif opts.MountPath != \"\" {\n\t\t\t// override each per-server config in addition to global value\n\t\t\tc.ScmMountPath = opts.MountPath\n\t\t\tsrv.ScmMount = opts.MountPath\n\t\t} else if srv.ScmMount == \"\" {\n\t\t\t// if scm not specified for server, apply global\n\t\t\tsrv.ScmMount = c.ScmMountPath\n\t\t}\n\t\tif opts.Cores > 0 {\n\t\t\tlog.Debugf(\"-c option deprecated, please use -t instead\")\n\t\t\tsrv.Targets = int(opts.Cores)\n\t\t}\n\t\t// Targets should override Cores if specified in cmdline or\n\t\t// config file.\n\t\tif opts.Targets > 0 {\n\t\t\tsrv.Targets = int(opts.Targets)\n\t\t}\n\t\tif opts.NrXsHelpers != nil {\n\t\t\tsrv.NrXsHelpers = int(*opts.NrXsHelpers)\n\t\t}\n\t\tif opts.FirstCore > 0 {\n\t\t\tsrv.FirstCore = int(opts.FirstCore)\n\t\t}\n\t}\n\n\tif opts.Group != \"\" {\n\t\tc.SystemName = opts.Group\n\t}\n\tif opts.SocketDir != \"\" {\n\t\tc.SocketDir = opts.SocketDir\n\t}\n\tif opts.Modules != nil 
{\n\t\tc.Modules = *opts.Modules\n\t}\n\tif opts.Attach != nil {\n\t\tc.Attach = *opts.Attach\n\t}\n\tif opts.Map != nil {\n\t\tc.SystemMap = *opts.Map\n\t}\n}", "func HelperInitRelatedExecCommand(cmd string, args []string) (string, error) {\n\tif cmd == \"kubeadm\" {\n\t\tif len(args) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"Test setup error - missing argument(s) for kubeadm command\")\n\t\t}\n\t\tswitch args[0] {\n\t\tcase \"version\":\n\t\t\treturn \"v1.11.0\", nil\n\t\tcase \"token\":\n\t\t\treturn \"zs6do0.rlyf5fbz9abknbc4\", nil\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"Test setup error - need mock for kubeadm %q command\", args[0])\n\t\t}\n\n\t}\n\tout, err := lazyjack.OsExecCommand(cmd, args)\n\treturn out, err\n}", "func NewCmdLine() CmdLine {\n\t// We use cmdLineReader so tests can inject here\n\tonce.Do(cmdLineOpener)\n\treturn procCmdLine\n}", "func (d *Diagnosis) cmdlineHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tfmt.Fprintf(w, strings.Join(os.Args, \"\\x00\"))\n}", "func BindCommandLineFlags(prefix string, opts *Options) {\n\tflagSet := pflag.CommandLine\n\tflags.BindRunCmdFlags(prefix, flagSet, opts)\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n}", "func cmdLine() string {\n\treturn \"go run mksyscall_aix_ppc64.go \" + strings.Join(os.Args[1:], \" \")\n}", "func AddPersistentStringFlag(cmd *cobra.Command, name string, aliases, localAliases, persistentAliases []string, aliasToBeInherited *pflag.FlagSet, value string, env, usage string) {\n\tif env != \"\" {\n\t\tusage = fmt.Sprintf(\"%s [$%s]\", usage, env)\n\t}\n\tif envV, ok := os.LookupEnv(env); ok {\n\t\tvalue = envV\n\t}\n\taliasesUsage := fmt.Sprintf(\"Alias of --%s\", name)\n\tp := new(string)\n\n\t// flags is full set of flag(s)\n\t// flags can redefine alias already used in subcommands\n\tflags := cmd.Flags()\n\tfor _, a := range aliases {\n\t\tif 
len(a) == 1 {\n\t\t\t// pflag doesn't support short-only flags, so we have to register long one as well here\n\t\t\tflags.StringVarP(p, a, a, value, aliasesUsage)\n\t\t} else {\n\t\t\tflags.StringVar(p, a, value, aliasesUsage)\n\t\t}\n\t\t// non-persistent flags are not added to the InheritedFlags, so we should add them manually\n\t\tf := flags.Lookup(a)\n\t\taliasToBeInherited.AddFlag(f)\n\t}\n\n\t// localFlags are local to the rootCmd\n\tlocalFlags := cmd.LocalFlags()\n\tfor _, a := range localAliases {\n\t\tif len(a) == 1 {\n\t\t\t// pflag doesn't support short-only flags, so we have to register long one as well here\n\t\t\tlocalFlags.StringVarP(p, a, a, value, aliasesUsage)\n\t\t} else {\n\t\t\tlocalFlags.StringVar(p, a, value, aliasesUsage)\n\t\t}\n\t}\n\n\t// persistentFlags cannot redefine alias already used in subcommands\n\tpersistentFlags := cmd.PersistentFlags()\n\tpersistentFlags.StringVar(p, name, value, usage)\n\tfor _, a := range persistentAliases {\n\t\tif len(a) == 1 {\n\t\t\t// pflag doesn't support short-only flags, so we have to register long one as well here\n\t\t\tpersistentFlags.StringVarP(p, a, a, value, aliasesUsage)\n\t\t} else {\n\t\t\tpersistentFlags.StringVar(p, a, value, aliasesUsage)\n\t\t}\n\t}\n}", "func parseCmdLine() []string{\n\t// Anything that wasn't command, argument, or option will be returned\n\t// as a filename.\n\t// Upon return:\n\t// if a command like init was encountered,\n\tvar filenames []string\n\tvar parsingArgument bool\n\tfor pos, nextArg := range os.Args {\n\t\t//fmt.Printf(\"\\n\\nTop of loop. os.Args[%v]: %v. 
parsingArgument: %v\\n\", pos, nextArg,parsingArgument)\n\t\tswitch nextArg {\n\t\tcase \"init\":\n\t\t\t// initCmd.Parsed() is now true\n\t\t\tinitCmd.Parse(os.Args[pos+1:])\n\t\t\tif *initSiteName != \"\" {\n\t\t\t\t// *initSitename now points to the string value.\n\t\t\t\t// It's empty if -sitename wasn't -sitename specified\n\t\t\t\tparsingArgument = true\n\t\t\t}\n\t\tcase \"build\":\n\t\t\tbuildCmd.Parse(os.Args[pos+1:])\n\t\t\tif *buildOutputDir != \"\" {\n\t\t\t\tparsingArgument = true\n\t\t\t}\n\t\t\tif *buildBaseURL != \"\" {\n\t\t\t\tparsingArgument = true\n\t\t\t}\n\t\tdefault:\n\t\t\t// If not in the middle of parsing a command-like subargument,\n\t\t\t// like the -sitename=test in this command line:\n\t\t\t// foo init -sitename=test\n\t\t\t// Where foo is the name of the program, and -sitename is\n\t\t\t// an optional subcommand to init,\n\t\t\t//\n\t\t\t// os.Args[0] falls through so exclude it, since it's\n\t\t\t// the name of the invoking program.\n\t\t\tif !parsingArgument && pos > 0{\n\t\t\t\tfilenames = append(filenames, nextArg)\n\t\t\t} else {\n\t\t\t\tparsingArgument = false\n\t\t\t}\n\n\t\t}\n\t}\n\treturn filenames\n}", "func SetShellCmd() {\n\tif runtime.GOOS == \"windows\" {\n\t\tShellCmd = os.Getenv(\"COMSPEC\")\n\t\treturn\n\t}\n\ttry := []string{\"/usr/bin/bash\"}\n\tif !FileExists(ShellCmd) {\n\t\tfor i := range try {\n\t\t\tb := try[i]\n\t\t\tif FileExists(b) {\n\t\t\t\tShellCmd = b\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func ensureDockerPluginExecPath(ctx context.Context) {\n\tLogc(ctx).Trace(\">>>> ensureDockerPluginExecPath\")\n\tdefer Logc(ctx).Trace(\"<<<< ensureDockerPluginExecPath\")\n\n\tpath := os.Getenv(\"PATH\")\n\tif !strings.Contains(path, \"/netapp\") {\n\t\tLogc(ctx).Trace(\"PATH did not contain /netapp, putting it there.\")\n\t\tpath = \"/netapp:\" + path\n\t\t_ = os.Setenv(\"PATH\", path)\n\t}\n}", "func handleSpecialCliOptions(args []string) {\n\t// --dumpversion\n\tif opts.ShowOnlyVersion 
{\n\t\tfmt.Println(gitTag)\n\t\tos.Exit(0)\n\t}\n\n\t// --version\n\tif opts.ShowVersion {\n\t\tfmt.Printf(\"go-replace version %s (%s)\\n\", gitTag, gitCommit)\n\t\tfmt.Printf(\"Copyright (C) 2022 %s\\n\", Author)\n\t\tos.Exit(0)\n\t}\n\n\t// --help\n\tif opts.ShowHelp {\n\t\targparser.WriteHelp(os.Stdout)\n\t\tos.Exit(0)\n\t}\n\n\t// --mode\n\tswitch mode := opts.Mode; mode {\n\tcase \"replace\":\n\t\topts.ModeIsReplaceMatch = true\n\t\topts.ModeIsReplaceLine = false\n\t\topts.ModeIsLineInFile = false\n\t\topts.ModeIsTemplate = false\n\tcase \"line\":\n\t\topts.ModeIsReplaceMatch = false\n\t\topts.ModeIsReplaceLine = true\n\t\topts.ModeIsLineInFile = false\n\t\topts.ModeIsTemplate = false\n\tcase \"lineinfile\":\n\t\topts.ModeIsReplaceMatch = false\n\t\topts.ModeIsReplaceLine = false\n\t\topts.ModeIsLineInFile = true\n\t\topts.ModeIsTemplate = false\n\tcase \"template\":\n\t\topts.ModeIsReplaceMatch = false\n\t\topts.ModeIsReplaceLine = false\n\t\topts.ModeIsLineInFile = false\n\t\topts.ModeIsTemplate = true\n\t}\n\n\t// --output\n\tif opts.Output != \"\" && len(args) > 1 {\n\t\tlogFatalErrorAndExit(errors.New(\"Only one file is allowed when using --output\"), 1)\n\t}\n\n\tif opts.LineinfileBefore != \"\" || opts.LineinfileAfter != \"\" {\n\t\tif !opts.ModeIsLineInFile {\n\t\t\tlogFatalErrorAndExit(errors.New(\"--lineinfile-after and --lineinfile-before only valid in --mode=lineinfile\"), 1)\n\t\t}\n\n\t\tif opts.LineinfileBefore != \"\" && opts.LineinfileAfter != \"\" {\n\t\t\tlogFatalErrorAndExit(errors.New(\"Only --lineinfile-after or --lineinfile-before is allowed in --mode=lineinfile\"), 1)\n\t\t}\n\t}\n}", "func (container *Container) getCommandLine(projectName string, runningPods rkt.Pods, logger *log.Logger) ([]string, error) {\n\t// generate the different components\n\tcommand := make([]string, 0)\n\n\t// get the appName\n\tappName, err := rkt.GetAppName(projectName, container.Name)\n\tif err != nil {\n\t\treturn command, err\n\t}\n\tappNameLine := 
fmt.Sprintf(\"--name=%s\", appName)\n\n\t// generate environment strings\n\tenvArray := make([]string, 0)\n\tfor varName, varValue := range container.Environment {\n\t\tenvArray = append(envArray, fmt.Sprintf(\"--environment=%s=%s\", varName, varValue))\n\t}\n\n\t// exec string\n\texecArray := make([]string, 0)\n\tif container.Exec != \"\" {\n\t\texec_parts := util.ShellSplit(container.Exec)\n\n\t\tif len(exec_parts) > 0 {\n\t\t\t// first prime our array\n\t\t\texecArray = append(execArray, \"--exec\")\n\t\t\t// split our string into parts\n\t\t\texecArray = append(execArray, exec_parts[0])\n\t\t}\n\t\t// if there is more than one part the rkt command requires that other compoments come after a double hyphen\n\t\tif len(exec_parts) > 1 {\n\t\t\texecArray = append(execArray, \"--\")\n\t\t\texecArray = append(execArray, exec_parts[1:]...)\n\t\t}\n\t}\n\n\t// mount strings\n\tmountArray := make([]string, 0)\n\tfor _, mount := range container.Mounts {\n\t\tmountArray = append(mountArray, mount.GenerateCommandLine()...)\n\t}\n\n\tdepIPMap, err := container.GetDepChainIPs(projectName, runningPods, logger)\n\tif err != nil {\n\t\treturn command, err\n\t}\n\n\thostsArray := make([]string, 0)\n\tfor name, IPs := range depIPMap {\n\t\tfor _, IP := range IPs {\n\t\t\thostsArray = append(hostsArray, fmt.Sprintf(\"--hosts-entry=%s=%s\", IP, name))\n\t\t}\n\t}\n\n\t// create the hostname\n\thostnameLine := fmt.Sprintf(\"--hostname=%s\", container.Name)\n\n\t// combine our command parts\n\tcommand = append(command, container.Image)\n\tcommand = append(command, hostnameLine)\n\tcommand = append(command, envArray...)\n\tcommand = append(command, mountArray...)\n\tcommand = append(command, hostsArray...)\n\tcommand = append(command, appNameLine)\n\tcommand = append(command, execArray...)\n\n\treturn command, nil\n}", "func parsePatroniCmdline(cmdline string, cwd string) string {\n\tparts := strings.Fields(cmdline)\n\n\tvar configFilePath string\n\n\tfor _, s := range parts[1:] 
{\n\t\tif strings.HasSuffix(s, \".yml\") || strings.HasSuffix(s, \".yaml\") {\n\t\t\tconfigFilePath = s\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Return value if it is an absolute path.\n\tif strings.HasPrefix(configFilePath, \"/\") || configFilePath == \"\" {\n\t\treturn configFilePath\n\t}\n\n\t// For relative paths, prepend value with current working directory.\n\treturn cwd + \"/\" + strings.TrimLeft(configFilePath, \"./\")\n}", "func processParamCommandLineToOCIArgs(commandLine string) ([]string, error) {\n\targs, err := shellwords.Parse(commandLine)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to parse command line string \\\"%s\\\"\", commandLine)\n\t}\n\treturn args, nil\n}", "func BootstrapCommand(ctx context.Context, name string, arg ...string) *testexec.Cmd {\n\t// Refuse to find an executable with $PATH.\n\t// android-sh inserts /vendor/bin before /system/bin in $PATH, and /vendor/bin\n\t// contains very similar executables as /system/bin on some boards (e.g. nocturne).\n\t// In particular, /vendor/bin/sh is rarely what you want since it drops\n\t// /system/bin from $PATH. To avoid such mistakes, refuse to run executables\n\t// without explicitly specifying absolute paths. 
To run shell commands,\n\t// specify /system/bin/sh.\n\t// See: http://crbug.com/949853\n\tif !strings.HasPrefix(name, \"/\") {\n\t\tpanic(\"Refusing to search $PATH; specify an absolute path instead\")\n\t}\n\treturn testexec.CommandContext(ctx, \"android-sh\", append([]string{\"-c\", \"exec \\\"$@\\\"\", \"-\", name}, arg...)...)\n}", "func InitArgs(args ...string) func(*LinuxFactory) error {\n\treturn func(l *LinuxFactory) error {\n\t\tname := args[0]\n\t\tif filepath.Base(name) == name {\n\t\t\tif lp, err := exec.LookPath(name); err == nil {\n\t\t\t\tname = lp\n\t\t\t}\n\t\t} else {\n\t\t\tabs, err := filepath.Abs(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tname = abs\n\t\t}\n\t\tl.InitPath = \"/proc/self/exe\"\n\t\tl.InitArgs = append([]string{name}, args[1:]...)\n\t\treturn nil\n\t}\n}", "func (b *AdapterBase) initFlagSet() {\n\tif b.FlagSet == nil {\n\t\t// default to the normal commandline flags\n\t\tb.FlagSet = pflag.CommandLine\n\t}\n}", "func setupAddCommand(cmd *cobra.Command) {\n\tcmd.Flags().String(\"record\", \"\", \"Record Name\")\n\n\tif err := cmd.MarkFlagRequired(\"record\"); err != nil {\n\t\tlog.Fatalf(\"Lethal damage: %s\\n\\n\", err)\n\t}\n\n\tcmd.Flags().String(\"zone\", \"\", \"Zone Name\")\n\n\tif err := cmd.MarkFlagRequired(\"zone\"); err != nil {\n\t\tlog.Fatalf(\"Lethal damage: %s\\n\\n\", err)\n\t}\n\n\tcmd.Flags().String(\"dns-provider\", \"\", \"DNS Provider\")\n\n\tif err := cmd.MarkFlagRequired(\"dns-provider\"); err != nil {\n\t\tlog.Fatalf(\"Lethal damage: %s\\n\\n\", err)\n\t}\n\n\tcmd.Flags().String(\"ip-provider\", \"google\", \"IP Provider\")\n\tcmd.Flags().Int(\"interval\", 1, \"Interval in Minutes\")\n\tcmd.Flags().Bool(\"daemon\", false, \"Daemon\")\n}", "func Setup(c *exec.Cmd) {}", "func setupEnv(stack []byte) {\n\targs := stack\n\tbo := binary.LittleEndian\n\tbo.PutUint64(args, 1) // 1 argument, the process name.\n\targs = args[8:]\n\t// First argument, address of binary name.\n\tbinAddr := 
args[:8]\n\targs = args[8:]\n\tbo.PutUint64(args, 0) // NULL separator.\n\targs = args[8:]\n\tbo.PutUint64(args, 0) // No envp.\n\targs = args[8:]\n\t// Build auxillary vector.\n\t// Page size.\n\tbo.PutUint64(args, _AT_PAGESZ)\n\targs = args[8:]\n\tbo.PutUint64(args, pageSize)\n\targs = args[8:]\n\t// End of auxv.\n\tbo.PutUint64(args, _AT_NULL)\n\targs = args[8:]\n\tbo.PutUint64(args, 0)\n\t// Binary name.\n\tbo.PutUint64(binAddr, uint64(uintptr(unsafe.Pointer(&args[0]))))\n\tn := copy(args, []byte(\"kernel\\x00\"))\n\targs = args[n:]\n}", "func augmentPathEnv() {\n\t// We only have Linux executables.\n\tif runtime.GOOS != \"linux\" {\n\t\treturn\n\t}\n\n\t// If the executables are already present, skip.\n\t_, errdevdraw := exec.LookPath(\"devdraw\")\n\t_, err9pserve := exec.LookPath(\"9pserve\")\n\tif errdevdraw == nil && err9pserve == nil {\n\t\treturn\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpath := os.Getenv(\"PATH\") + \":\" + filepath.Join(wd, \"build\", \"bin\")\n\tos.Setenv(\"PATH\", path)\n\n\t// We also need fonts.\n\tif _, hzp9 := os.LookupEnv(\"PLAN9\"); !hzp9 {\n\t\tos.Setenv(\"PLAN9\", filepath.Join(wd, \"build\"))\n\t}\n}", "func getPlatformStringsAddFunction(c *config.Config, info fileInfo, cgoTags *cgoTagsAndOpts) func(sb *platformStringsBuilder, ss ...string) {\n\tisOSSpecific, isArchSpecific := isOSArchSpecific(info, cgoTags)\n\tv := getGoConfig(c).rulesGoVersion\n\tconstraintPrefix := \"@\" + getGoConfig(c).rulesGoRepoName + \"//go/platform:\"\n\n\tswitch {\n\tcase !isOSSpecific && !isArchSpecific:\n\t\tif checkConstraints(c, \"\", \"\", info.goos, info.goarch, info.tags, cgoTags) {\n\t\t\treturn func(sb *platformStringsBuilder, ss ...string) {\n\t\t\t\tfor _, s := range ss {\n\t\t\t\t\tsb.addGenericString(s)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase isOSSpecific && !isArchSpecific:\n\t\tvar osMatch []string\n\t\tfor _, os := range rule.KnownOSs {\n\t\t\tif rulesGoSupportsOS(v, os) &&\n\t\t\t\tcheckConstraints(c, os, 
\"\", info.goos, info.goarch, info.tags, cgoTags) {\n\t\t\t\tosMatch = append(osMatch, os)\n\t\t\t}\n\t\t}\n\t\tif len(osMatch) > 0 {\n\t\t\treturn func(sb *platformStringsBuilder, ss ...string) {\n\t\t\t\tfor _, s := range ss {\n\t\t\t\t\tsb.addOSString(s, osMatch, constraintPrefix)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase !isOSSpecific && isArchSpecific:\n\t\tvar archMatch []string\n\t\tfor _, arch := range rule.KnownArchs {\n\t\t\tif rulesGoSupportsArch(v, arch) &&\n\t\t\t\tcheckConstraints(c, \"\", arch, info.goos, info.goarch, info.tags, cgoTags) {\n\t\t\t\tarchMatch = append(archMatch, arch)\n\t\t\t}\n\t\t}\n\t\tif len(archMatch) > 0 {\n\t\t\treturn func(sb *platformStringsBuilder, ss ...string) {\n\t\t\t\tfor _, s := range ss {\n\t\t\t\t\tsb.addArchString(s, archMatch, constraintPrefix)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tvar platformMatch []rule.Platform\n\t\tfor _, platform := range rule.KnownPlatforms {\n\t\t\tif rulesGoSupportsPlatform(v, platform) &&\n\t\t\t\tcheckConstraints(c, platform.OS, platform.Arch, info.goos, info.goarch, info.tags, cgoTags) {\n\t\t\t\tplatformMatch = append(platformMatch, platform)\n\t\t\t}\n\t\t}\n\t\tif len(platformMatch) > 0 {\n\t\t\treturn func(sb *platformStringsBuilder, ss ...string) {\n\t\t\t\tfor _, s := range ss {\n\t\t\t\t\tsb.addPlatformString(s, platformMatch, constraintPrefix)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn func(_ *platformStringsBuilder, _ ...string) {}\n}", "func (app ApplicationArguments) SplitManaged(args []string) (managed []string, unmanaged []string) {\nArg:\n\tfor i := 1; i < len(args); i++ {\n\t\targ := args[i]\n\t\tif arg == \"--\" {\n\t\t\tunmanaged = append(unmanaged, args[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t\tif strings.HasPrefix(arg, \"--\") {\n\t\t\targSplit := strings.Split(args[i][2:], \"=\")\n\t\t\tif isSwitch, ok := app.longs[argSplit[0]]; ok {\n\t\t\t\tmanaged = append(managed, arg)\n\t\t\t\tif !isSwitch && len(argSplit) == 1 {\n\t\t\t\t\t// This is not a switch (bool flag) and 
there is no argument with\n\t\t\t\t\t// the flag, so the argument must be after and we add it to\n\t\t\t\t\t// the managed args if there is.\n\t\t\t\t\ti++\n\t\t\t\t\tif i < len(args) {\n\t\t\t\t\t\tmanaged = append(managed, args[i])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tunmanaged = append(unmanaged, arg)\n\t\t\t}\n\t\t} else if strings.HasPrefix(arg, \"-\") {\n\t\t\twithArg := false\n\t\t\tfor pos, opt := range arg[1:] {\n\t\t\t\tif isSwitch, ok := app.shorts[opt]; ok {\n\t\t\t\t\tif !isSwitch {\n\t\t\t\t\t\t// This is not a switch (bool flag), so we check if there are characters\n\t\t\t\t\t\t// following the current flag in the same word. If it is not the case,\n\t\t\t\t\t\t// then the argument must be after and we add it to the managed args\n\t\t\t\t\t\t// if there is. If it is the case, then, the argument is included in\n\t\t\t\t\t\t// the current flag and we consider the whole word as a managed argument.\n\t\t\t\t\t\twithArg = pos == len(arg[1:])-1\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tunmanaged = append(unmanaged, arg)\n\t\t\t\t\tcontinue Arg\n\t\t\t\t}\n\t\t\t}\n\t\t\tmanaged = append(managed, arg)\n\t\t\tif withArg {\n\t\t\t\t// The next argument must be an argument to the current flag\n\t\t\t\ti++\n\t\t\t\tif i < len(args) {\n\t\t\t\t\tmanaged = append(managed, args[i])\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tunmanaged = append(unmanaged, arg)\n\t\t}\n\t}\n\treturn\n}", "func setupCLI() string {\n\n\tvar input string\n\n\tInputPtr := flag.String(\"i\", \"\", \"location of raw binary data cipher text file\")\n\n\tif len(os.Args) < 2 {\n\n\t\tmissingParametersError()\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\n\t}\n\n\tflag.Parse()\n\n\tinput = *InputPtr\n\n\tif input == \"\" {\n\t\tmissingParametersError()\n\t\tflag.PrintDefaults()\n\t\tos.Exit(1)\n\t}\n\n\treturn input\n\n}", "func legacyArgs(cmd *Command, args []string) error {\n\t// no subcommand, always take args\n\tif !cmd.HasSubCommands() {\n\t\treturn 
nil\n\t}\n\n\t// root command with subcommands, do subcommand checking.\n\tif !cmd.HasParent() && len(args) > 0 {\n\t\treturn fmt.Errorf(\"unknown command %q for %q%s\", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))\n\t}\n\treturn nil\n}", "func FullCmdLine() string {\n\tonce.Do(cmdLineOpener)\n\treturn procCmdLine.Raw\n}", "func ShellOptional(dock *docker.Docker, n *naming.Naming) error {\n\tlog.Info(\"Launching shell\")\n\tlog.Drop()\n\n\targs := docker.ContainerExecArgs{\n\t\tInteractive: true,\n\t\tAsRoot: true,\n\t\tNetwork: true,\n\t\tName: n.Container,\n\t}\n\terr := dock.ContainerExec(args)\n\tif err != nil {\n\t\treturn log.Failed(err)\n\t}\n\n\treturn log.Done()\n}", "func escapeShellArg(arg string) string {\n var quotedArg bytes.Buffer\n quotedArg.Grow(len(arg))\n\n if runtime.GOOS == \"windows\" {\n quotedArg.WriteString(`\"`)\n } else {\n quotedArg.WriteString(`'`)\n }\n\n for _, runeVal := range arg {\n if runtime.GOOS == \"windows\" {\n if runeVal == '\"' || runeVal == '%' {\n quotedArg.WriteRune(' ')\n continue\n }\n } else {\n if runeVal == '\\'' {\n quotedArg.WriteString(`'\\'`)\n }\n }\n quotedArg.WriteRune(runeVal)\n }\n if runtime.GOOS == \"windows\" {\n quotedArg.WriteString(`\"`)\n } else {\n quotedArg.WriteString(`'`)\n }\n\n return quotedArg.String()\n}", "func prepareStagePath(path string, m *mount.SafeFormatAndMount) error {\n\treturn nil\n}", "func (p *Plugin) PrepareCommand(command string, extraArgs []string) (main string, argv []string, err error) {\n\tparts := strings.Split(os.ExpandEnv(command), \" \")\n\tmain = parts[0]\n\tif len(parts) > 1 {\n\t\targv = parts[1:]\n\t}\n\tif !p.Metadata.IgnoreGlobalFlags && extraArgs != nil {\n\t\targv = append(argv, extraArgs...)\n\t}\n\treturn\n}", "func (d *Driver) SetupRunArgs(root *cobra.Command) {\n\t// Summon needs to pass the help flag down to the proxied\n\t// command, but cobra is very agressive in wanting to manage the help.\n\t// To workaround this, remove the help, but 
reintroduce it only if the user\n\t// defined a help for his command in the config file. If the help is removed,\n\t// it can be positionned explicitely by the user with flagValue \"help\".\n\t// Otherwize the help is reintroduced when calling the proxied command. It\n\t// is reinserted at the same position (before a recorded arg), if this arg\n\t// was not manipulated by a template rendering. In the latter case, help\n\t// is appended to the proxied command.\n\n\t// all args after arg[0] which is the main program name\n\tif len(d.opts.args) == 0 {\n\t\tpanic(\"missing Args call to Configure\")\n\t}\n\tallArgs := d.opts.args[1:]\n\n\t// check if we have help and remove it. Keep it's position\n\tmanagedHelp := []string{}\n\tvar helpPos int\n\tvar helpFlag string\n\tfor pos, a := range allArgs {\n\t\tif a == \"--help\" || a == \"-h\" {\n\t\t\thelpPos = pos\n\t\t\thelpFlag = a\n\t\t\tcontinue\n\t\t}\n\t\tmanagedHelp = append(managedHelp, a)\n\t}\n\n\t// if help is requested on:\n\t// * a managed command that has a help line\n\t// * on the root (no parameters)\n\tvar ownHelp bool\n\tif helpFlag != \"\" {\n\t\tcmd, _, _ := root.Root().Find(allArgs[:helpPos])\n\t\tif cmd != root && cmd.Short != \"\" || len(managedHelp) == 0 {\n\t\t\townHelp = true\n\t\t}\n\t}\n\n\tvar fl *flagValue\n\tif !ownHelp {\n\t\t// if --help is anywhere but near the summon root, help should go to\n\t\t// the proxied command\n\t\td.opts.helpWanted.helpFlag = helpFlag\n\t\tif helpPos+1 < len(allArgs) {\n\t\t\td.opts.helpWanted.nextToHelp = allArgs[helpPos+1]\n\t\t}\n\t\tfl = d.AddFlag(root, \"help\", &config.FlagSpec{Effect: \"--help\", Explicit: true}, global, func() {\n\t\t\t// we were called in by rendering, disable implicit add effect\n\t\t\td.opts.helpWanted.helpFlag = \"\"\n\t\t})\n\t\td.flagsToRender = append(d.flagsToRender, fl)\n\t\tfl.initializing = true\n\t} else {\n\t\t// let cobra manage help\n\t\tmanagedHelp = allArgs\n\t}\n\n\troot.Root().PersistentPreRun = func(cmd *cobra.Command, 
args []string) {\n\t\t_, d.opts.args, _ = cmd.Root().Find(managedHelp)\n\n\t\tif fl != nil {\n\t\t\tfl.initializing = false\n\t\t}\n\t}\n\td.opts.initialArgs = slices.Clone(managedHelp)\n\troot.Root().SetArgs(managedHelp)\n}", "func setPlatform(ea *ExtractionArgs) (success bool) {\n\tswitch platform := runtime.GOOS; platform {\n\tcase osFREEBSD, osLINUX:\n\t\tea.Extractor = extractSectionUnix\n\t\tif ea.Verbose {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"xv\")\n\t\t} else {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"x\")\n\t\t}\n\t\tea.ObjectTypeInArchive = fileTypeELFOBJECT\n\t\tsuccess = true\n\tcase osDARWIN:\n\t\tea.Extractor = extractSectionDarwin\n\t\tea.ArArgs = append(ea.ArArgs, \"-x\")\n\t\tif ea.Verbose {\n\t\t\tea.ArArgs = append(ea.ArArgs, \"-v\")\n\t\t}\n\t\tea.ObjectTypeInArchive = fileTypeMACHOBJECT\n\t\tsuccess = true\n\tdefault:\n\t\tLogError(\"Unsupported platform: %s.\", platform)\n\t}\n\treturn\n}", "func (client BaseClient) EnableConsolePreparer(ctx context.Context, defaultParameter string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"default\": autorest.Encode(\"path\", defaultParameter),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2018-05-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsPost(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.SerialConsole/consoleServices/{default}/enableConsole\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func InsertBeforeSysPath(p string) string {\n sysModule := python.PyImport_ImportModule(\"sys\")\n path := sysModule.GetAttrString(\"path\")\n python.PyList_Insert(path, 0, PyStr(p))\n return GoStr(path.Repr())\n}", "func kern_procargs(pid int,\n\texe 
func(string),\n\targv func(string),\n\tenv func(string, string)) error {\n\n\tmib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}\n\targmax := uintptr(C.ARG_MAX)\n\tbuf := make([]byte, argmax)\n\terr := sysctl(mib, &buf[0], &argmax, nil, 0)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tbbuf := bytes.NewBuffer(buf)\n\tbbuf.Truncate(int(argmax))\n\n\tvar argc int32\n\tbinary.Read(bbuf, binary.LittleEndian, &argc)\n\n\tpath, err := bbuf.ReadBytes(0)\n\tif exe != nil {\n\t\texe(string(chop(path)))\n\t}\n\n\t// skip trailing \\0's\n\tfor {\n\t\tc, _ := bbuf.ReadByte()\n\t\tif c != 0 {\n\t\t\tbbuf.UnreadByte()\n\t\t\tbreak // start of argv[0]\n\t\t}\n\t}\n\n\tfor i := 0; i < int(argc); i++ {\n\t\targ, err := bbuf.ReadBytes(0)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif argv != nil {\n\t\t\targv(string(chop(arg)))\n\t\t}\n\t}\n\n\tif env == nil {\n\t\treturn nil\n\t}\n\n\tdelim := []byte{61} // \"=\"\n\n\tfor {\n\t\tline, err := bbuf.ReadBytes(0)\n\t\tif err == io.EOF || line[0] == 0 {\n\t\t\tbreak\n\t\t}\n\t\tpair := bytes.SplitN(chop(line), delim, 2)\n\t\tenv(string(pair[0]), string(pair[1]))\n\t}\n\n\treturn nil\n}", "func MaybeRunExternalCommand(command string, args []string) {\n\ttryRunExternalCommand(config.ZenlogSrcTopDir()+\"/subcommands\", command, args)\n\n\tfor _, path := range strings.Split(os.Getenv(\"PATH\"), \":\") {\n\t\ttryRunExternalCommand(path, command, args)\n\t}\n}", "func flagHackLookup(flagName string) string {\n\t// e.g. 
\"-d\" for \"--driver\"\n\tflagPrefix := flagName[1:3]\n\n\t// TODO: Should we support -flag-name (single hyphen) syntax as well?\n\tfor i, arg := range os.Args {\n\t\tif strings.Contains(arg, flagPrefix) {\n\t\t\t// format '--driver foo' or '-d foo'\n\t\t\tif arg == flagPrefix || arg == flagName {\n\t\t\t\tif i+1 < len(os.Args) {\n\t\t\t\t\treturn os.Args[i+1]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// format '--driver=foo' or '-d=foo'\n\t\t\tif strings.HasPrefix(arg, flagPrefix+\"=\") || strings.HasPrefix(arg, flagName+\"=\") {\n\t\t\t\treturn strings.Split(arg, \"=\")[1]\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}", "func updateBootCmdline(cl string) string {\n\tf := cmdline.NewUpdateFilter(*appendCmdline, strings.Split(*removeCmdlineItem, \",\"), strings.Split(*reuseCmdlineItem, \",\"))\n\treturn f.Update(cmdline.NewCmdLine(), cl)\n}", "func insertArgs(baseArgs []string, newArgs []string) []string {\n\tvar result []string\n\tinserted := false\n\tfor _, arg := range baseArgs {\n\t\tif !inserted && arg == \"--\" {\n\t\t\tresult = append(result, newArgs...)\n\t\t\tinserted = true\n\t\t}\n\t\tresult = append(result, arg)\n\t}\n\n\tif !inserted {\n\t\tresult = append(result, newArgs...)\n\t}\n\treturn result\n}", "func NewCmdLine() CmdLiner {\n\treturn &CmdLine{reader: bufio.NewReader(os.Stdin)}\n}", "func shellWrapArgs(args []string) []string {\n\tif len(args) > 0 {\n\t\tcmdStr := fmt.Sprintf(\"\\\"%s\\\"\", strings.Join(args, \" \"))\n\t\treturn []string{\"-c\", cmdStr}\n\t} else {\n\t\treturn []string{}\n\t}\n}", "func helperCommand(t *testing.T, s ...string) *exec.Cmd {\n\tcs := []string{\"-test.run=TestHelperProcess\", \"--\"}\n\tcs = append(cs, s...)\n\tcmd := exec.Command(os.Args[0], cs...)\n\tcmd.Env = []string{\"GO_WANT_HELPER_PROCESS=1\"}\n\treturn cmd\n}", "func shellExecutor(rootCmd *cobra.Command, printer *Printer, meta *meta) func(s string) {\n\treturn func(s string) {\n\t\targs := strings.Fields(s)\n\n\t\tsentry.AddCommandContext(strings.Join(removeOptions(args), 
\" \"))\n\n\t\trootCmd.SetArgs(meta.CliConfig.Alias.ResolveAliases(args))\n\n\t\terr := rootCmd.Execute()\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*interactive.InterruptError); ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tprintErr := printer.Print(err, nil)\n\t\t\tif printErr != nil {\n\t\t\t\t_, _ = fmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// command is nil if it does not have a Run function\n\t\t// ex: instance -h\n\t\tif meta.command == nil {\n\t\t\treturn\n\t\t}\n\n\t\tautoCompleteCache.Update(meta.command.Namespace)\n\n\t\tprintErr := printer.Print(meta.result, meta.command.getHumanMarshalerOpt())\n\t\tif printErr != nil {\n\t\t\t_, _ = fmt.Fprintln(os.Stderr, printErr)\n\t\t}\n\t}\n}", "func (opts *ToolOptions) NormalizeOptionsAndURI() error {\n\tif opts.URI == nil || opts.URI.ConnectionString == \"\" {\n\t\t// If URI not provided, get replica set name and generate connection string\n\t\t_, opts.ReplicaSetName = util.SplitHostArg(opts.Host)\n\t\turi, err := NewURI(util.BuildURI(opts.Host, opts.Port))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\topts.URI = uri\n\t}\n\n\tcs, err := connstring.Parse(opts.URI.ConnectionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = opts.setOptionsFromURI(cs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// finalize auth options, filling in missing passwords\n\tif opts.Auth.ShouldAskForPassword() {\n\t\tpass, err := password.Prompt(\"mongo user\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading password: %v\", err)\n\t\t}\n\t\topts.Auth.Password = pass\n\t\topts.ConnString.Password = pass\n\t}\n\n\tshouldAskForSSLPassword, err := opts.SSL.ShouldAskForPassword()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error determining whether client cert needs password: %v\", err)\n\t}\n\tif shouldAskForSSLPassword {\n\t\tpass, err := password.Prompt(\"client certificate\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading password: %v\", 
err)\n\t\t}\n\t\topts.SSL.SSLPEMKeyPassword = pass\n\t}\n\n\terr = opts.ConnString.Validate()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"connection string failed validation\")\n\t}\n\n\t// Connect directly to a host if there's no replica set specified, or\n\t// if the connection string already specified a direct connection.\n\t// Do not connect directly if loadbalanced.\n\tif !opts.ConnString.LoadBalanced {\n\t\topts.Direct = (opts.ReplicaSetName == \"\") || opts.Direct\n\t}\n\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterSet_command(ctx *Set_commandContext) {}", "func renderArgAndMount(existingArg interface{}, existingMount interface{}, runtime string, defaultSecurePort string, defaultCertDir string) ([]string, []string) {\n\tretArg := convertInterfaceToStringSlice(existingArg)\n\tretMount := convertInterfaceToStringSlice(existingMount)\n\trenderedCertDir := fmt.Sprintf(defaultCertDir, runtime)\n\t// Set a default value for certDirArg and certDirMount (for the case where the user does not set these values)\n\t// If a user sets these values, we will set them to an empty string and check to make sure they are not empty\n\t// strings before adding them to the rendered arg/mount slices.\n\tcertDirMount := fmt.Sprintf(\"%s:%s\", renderedCertDir, renderedCertDir)\n\tcertDirArg := fmt.Sprintf(\"%s=%s\", CertDirArgument, renderedCertDir)\n\tsecurePortArg := fmt.Sprintf(\"%s=%s\", SecurePortArgument, defaultSecurePort)\n\tif len(retArg) > 0 {\n\t\ttlsCF := getArgValue(retArg, TLSCertFileArgument, \"=\")\n\t\tif tlsCF == \"\" {\n\t\t\t// If the --tls-cert-file Argument was not set in the config for this component, we can look to see if\n\t\t\t// the --cert-dir was set. 
--tls-cert-file (if set) will take precedence over --tls-cert-file\n\t\t\tcertDir := getArgValue(retArg, CertDirArgument, \"=\")\n\t\t\tif certDir != \"\" {\n\t\t\t\t// If --cert-dir was set, we use the --cert-dir that the user provided and should set certDirArg to \"\"\n\t\t\t\t// so that we don't append it.\n\t\t\t\tcertDirArg = \"\"\n\t\t\t\t// Set certDirMount to an intelligently interpolated value based off of the custom certDir set by the\n\t\t\t\t// user.\n\t\t\t\tcertDirMount = fmt.Sprintf(\"%s:%s\", certDir, certDir)\n\t\t\t}\n\t\t} else {\n\t\t\t// If the --tls-cert-file argument was set by the user, we don't need to set --cert-dir, but still should\n\t\t\t// render a --cert-dir-mount that is based on the --tls-cert-file argument to map the files necessary\n\t\t\t// to the static pod (in the RKE2 case)\n\t\t\tcertDirArg = \"\"\n\t\t\tdir := filepath.Dir(tlsCF)\n\t\t\tcertDirMount = fmt.Sprintf(\"%s:%s\", dir, dir)\n\t\t}\n\t\tsPA := getArgValue(retArg, SecurePortArgument, \"=\")\n\t\tif sPA != \"\" {\n\t\t\t// If the user set a custom --secure-port, set --secure-port to an empty string so we don't override\n\t\t\t// their custom value\n\t\t\tsecurePortArg = \"\"\n\t\t}\n\t}\n\tif certDirArg != \"\" {\n\t\tlogrus.Debugf(\"renderArgAndMount adding %s to component arguments\", certDirArg)\n\t\tretArg = appendToInterface(existingArg, certDirArg)\n\t}\n\tif securePortArg != \"\" {\n\t\tlogrus.Debugf(\"renderArgAndMount adding %s to component arguments\", securePortArg)\n\t\tretArg = appendToInterface(retArg, securePortArg)\n\t}\n\tif runtime == capr.RuntimeRKE2 {\n\t\t// todo: make sure the certDirMount is not already set by the user to some custom value before we set it for the static pod extraMount\n\t\tlogrus.Debugf(\"renderArgAndMount adding %s to component mounts\", certDirMount)\n\t\tretMount = appendToInterface(existingMount, certDirMount)\n\t}\n\treturn retArg, retMount\n}", "func handlePantheonFlags(cmd *cobra.Command, args []string, app 
*ddevapp.DdevApp) error {\n\tprovider, err := app.GetProvider()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to GetProvider: %v\", err)\n\t}\n\tpantheonProvider := provider.(*ddevapp.PantheonProvider)\n\terr = pantheonProvider.SetSiteNameAndEnv(pantheonEnvironmentName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func fixInterpreterArgs(interpreter string, args []string) []string {\n\tire := regexp.MustCompile(`.*[\\/\\\\!](.*)`)\n\tvar i string\n\timatch := ire.FindStringSubmatch(interpreter)\n\tif len(imatch) == 0 {\n\t\ti = interpreter\n\t} else {\n\t\ti = imatch[1]\n\t}\n\tswitch i {\n\tcase \"powershell\", \"powershell.exe\":\n\t\tfor i := range args {\n\t\t\targs[i] = strings.Replace(args[i], \" \", \"` \", -1)\n\t\t\targs[i] = strings.Replace(args[i], \",\", \"`,\", -1)\n\t\t\targs[i] = strings.Replace(args[i], \";\", \"`;\", -1)\n\t\t\tif args[i] == \"\" {\n\t\t\t\targs[i] = \"''\"\n\t\t\t}\n\t\t}\n\t}\n\treturn args\n}", "func getCommand(meta *meta, args []string, suggest string) *Command {\n\trawCommand := removeOptions(args)\n\tsuggestIsOption := argIsOption(suggest)\n\n\tif !suggestIsOption {\n\t\trawCommand = append(rawCommand, suggest)\n\t}\n\n\trawCommand = meta.CliConfig.Alias.ResolveAliases(rawCommand)\n\n\t// Find the closest command in case there is multiple positional arguments\n\tfor ; len(rawCommand) > 1; rawCommand = rawCommand[:len(rawCommand)-1] {\n\t\tcommand, foundCommand := meta.Commands.find(rawCommand...)\n\t\tif foundCommand {\n\t\t\treturn command\n\t\t}\n\t}\n\treturn nil\n}", "func runUsingChrootExecMain() {\n\targs := os.Args[1:]\n\tvar options runUsingChrootExecSubprocOptions\n\tvar err error\n\n\truntime.LockOSThread()\n\n\t// Set logging.\n\tif level := os.Getenv(\"LOGLEVEL\"); level != \"\" {\n\t\tif ll, err := strconv.Atoi(level); err == nil {\n\t\t\tlogrus.SetLevel(logrus.Level(ll))\n\t\t}\n\t\tos.Unsetenv(\"LOGLEVEL\")\n\t}\n\n\t// Unpack our configuration.\n\tconfPipe := os.NewFile(3, 
\"confpipe\")\n\tif confPipe == nil {\n\t\tfmt.Fprintf(os.Stderr, \"error reading options pipe\\n\")\n\t\tos.Exit(1)\n\t}\n\tdefer confPipe.Close()\n\tif err := json.NewDecoder(confPipe).Decode(&options); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error decoding options: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t// Set the hostname. We're already in a distinct UTS namespace and are admins in the user\n\t// namespace which created it, so we shouldn't get a permissions error, but seccomp policy\n\t// might deny our attempt to call sethostname() anyway, so log a debug message for that.\n\tif options.Spec == nil || options.Spec.Process == nil {\n\t\tfmt.Fprintf(os.Stderr, \"invalid options spec passed in\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tif options.Spec.Hostname != \"\" {\n\t\tsetContainerHostname(options.Spec.Hostname)\n\t}\n\n\t// Try to chroot into the root. Do this before we potentially\n\t// block the syscall via the seccomp profile. Allow the\n\t// platform to override this - on FreeBSD, we use a simple\n\t// jail to set the hostname in the container\n\tif err := createPlatformContainer(options); err != nil {\n\t\tvar oldst, newst unix.Stat_t\n\t\tif err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error stat()ing intended root directory %q: %v\\n\", options.Spec.Root.Path, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := unix.Chdir(options.Spec.Root.Path); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error chdir()ing to intended root directory %q: %v\\n\", options.Spec.Root.Path, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := unix.Chroot(options.Spec.Root.Path); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error chroot()ing into directory %q: %v\\n\", options.Spec.Root.Path, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := unix.Stat(\"/\", &newst); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error stat()ing current root directory: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif oldst.Dev != newst.Dev || oldst.Ino != newst.Ino 
{\n\t\t\tfmt.Fprintf(os.Stderr, \"unknown error chroot()ing into directory %q: %v\\n\", options.Spec.Root.Path, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlogrus.Debugf(\"chrooted into %q\", options.Spec.Root.Path)\n\t}\n\n\t// not doing because it's still shared: creating devices\n\t// not doing because it's not applicable: setting annotations\n\t// not doing because it's still shared: setting sysctl settings\n\t// not doing because cgroupfs is read only: configuring control groups\n\t// -> this means we can use the freezer to make sure there aren't any lingering processes\n\t// -> this means we ignore cgroups-based controls\n\t// not doing because we don't set any in the config: running hooks\n\t// not doing because we don't set it in the config: setting rootfs read-only\n\t// not doing because we don't set it in the config: setting rootfs propagation\n\tlogrus.Debugf(\"setting apparmor profile\")\n\tif err = setApparmorProfile(options.Spec); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting apparmor profile for process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif err = setSelinuxLabel(options.Spec); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting SELinux label for process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogrus.Debugf(\"setting resource limits\")\n\tif err = setRlimits(options.Spec, false, false); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting process resource limits for process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t// Try to change to the directory.\n\tcwd := options.Spec.Process.Cwd\n\tif !filepath.IsAbs(cwd) {\n\t\tcwd = \"/\" + cwd\n\t}\n\tcwd = filepath.Clean(cwd)\n\tif err := unix.Chdir(\"/\"); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error chdir()ing into new root directory %q: %v\\n\", options.Spec.Root.Path, err)\n\t\tos.Exit(1)\n\t}\n\tif err := unix.Chdir(cwd); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error chdir()ing into directory %q under root %q: %v\\n\", cwd, options.Spec.Root.Path, 
err)\n\t\tos.Exit(1)\n\t}\n\tlogrus.Debugf(\"changed working directory to %q\", cwd)\n\n\t// Drop privileges.\n\tuser := options.Spec.Process.User\n\tif len(user.AdditionalGids) > 0 {\n\t\tgids := make([]int, len(user.AdditionalGids))\n\t\tfor i := range user.AdditionalGids {\n\t\t\tgids[i] = int(user.AdditionalGids[i])\n\t\t}\n\t\tlogrus.Debugf(\"setting supplemental groups\")\n\t\tif err = syscall.Setgroups(gids); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"error setting supplemental groups list: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tsetgroups, _ := os.ReadFile(\"/proc/self/setgroups\")\n\t\tif strings.Trim(string(setgroups), \"\\n\") != \"deny\" {\n\t\t\tlogrus.Debugf(\"clearing supplemental groups\")\n\t\t\tif err = syscall.Setgroups([]int{}); err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"error clearing supplemental groups list: %v\", err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.Debugf(\"setting gid\")\n\tif err = unix.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting GID: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif err = setSeccomp(options.Spec); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting seccomp filter for process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogrus.Debugf(\"setting capabilities\")\n\tvar keepCaps []string\n\tif user.UID != 0 {\n\t\tkeepCaps = []string{\"CAP_SETUID\"}\n\t}\n\tif err := setCapabilities(options.Spec, keepCaps...); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting capabilities for process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogrus.Debugf(\"setting uid\")\n\tif err = unix.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error setting UID: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\t// Actually run the specified command.\n\tcmd := exec.Command(args[0], args[1:]...)\n\tsetPdeathsig(cmd)\n\tcmd.Env = options.Spec.Process.Env\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, 
os.Stdout, os.Stderr\n\tcmd.Dir = cwd\n\tlogrus.Debugf(\"Running %#v (PATH = %q)\", cmd, os.Getenv(\"PATH\"))\n\tinterrupted := make(chan os.Signal, 100)\n\tif err = cmd.Start(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"process failed to start with error: %v\", err)\n\t}\n\tgo func() {\n\t\tfor range interrupted {\n\t\t\tif err := cmd.Process.Signal(syscall.SIGKILL); err != nil {\n\t\t\t\tlogrus.Infof(\"%v while attempting to send SIGKILL to child process\", err)\n\t\t\t}\n\t\t}\n\t}()\n\tsignal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)\n\terr = cmd.Wait()\n\tsignal.Stop(interrupted)\n\tclose(interrupted)\n\tif err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\tif waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {\n\t\t\t\tif waitStatus.Exited() {\n\t\t\t\t\tif waitStatus.ExitStatus() != 0 {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"subprocess exited with status %d\\n\", waitStatus.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t\tos.Exit(waitStatus.ExitStatus())\n\t\t\t\t} else if waitStatus.Signaled() {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"subprocess exited on %s\\n\", waitStatus.Signal())\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"process exited with error: %v\", err)\n\t\tos.Exit(1)\n\t}\n}", "func langserverRunShim(ctx context.Context, langs []string) error {\n\tif len(langs) == 0 || langs[0] == \"\" {\n\t\treturn errors.New(\"--lang not specified\")\n\t}\n\n\tworkdir, err := resolveWorkdirFromLSInvocation()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// workdir is not always the same as process's initial workdir.\n\t// after this, we can resolve the chosen project's details.\n\tif err := os.Chdir(workdir); err != nil {\n\t\treturn err\n\t}\n\n\t// in Bob dev containers we might have /workspace mount (i.e. 
different mount point than source\n\t// path in host), but editors send file references to LS's with the paths they're seeing, so we\n\t// must use the same path in containers (unless we want to do tricks with symlinks etc.)\n\tmountDir := workdir\n\n\t// access chosen project's details (so we know which programming language's langserver to start)\n\tbobfile, err := readBobfile()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlangserverCmd, builder, err := func() ([]string, *BuilderSpec, error) {\n\t\tfor _, builder := range bobfile.Builders {\n\t\t\t// FIXME: this assumes\n\t\t\tbaseImageConf, err := loadNonOptionalBaseImageConf(builder)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"loadNonOptionalBaseImageConf: %w\", err)\n\t\t\t}\n\n\t\t\tif baseImageConf.Langserver == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif anyOfLanguagesMatch(langs, baseImageConf.Langserver.Languages) {\n\t\t\t\treturn baseImageConf.Langserver.Command, &builder, nil\n\t\t\t}\n\t\t}\n\n\t\treturn nil, nil, fmt.Errorf(\n\t\t\t\"%s doesn't define a compatible language server for %v\",\n\t\t\tbobfile.ProjectName,\n\t\t\tlangs)\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkind, dockerImage, err := parseBuilderUsesType(builder.Uses)\n\tif err != nil || kind != builderUsesTypeImage {\n\t\treturn fmt.Errorf(\"not Docker image or failure parsing uses: %w\", err)\n\t}\n\n\t// not using \"--tty\" because with it we got \"gopls: the input device is not a TTY\"\n\tdockerized := append([]string{\"docker\", \"run\",\n\t\t\"--rm\", // so resources get released. 
(this process is ephemeral in nature)\n\t\t\"--shm-size=512M\",\n\t\t\"--interactive\", // use stdin (it is the transport for one direction in LSP)\n\t\t\"--name=\" + langServerContainerName(bobfile, *builder),\n\t\t\"--volume\", fmt.Sprintf(\"%s:%s\", workdir, mountDir),\n\t\tdockerImage,\n\t}, langserverCmd...)\n\n\t//nolint:gosec // ok\n\tlangserver := exec.CommandContext(ctx, dockerized[0], dockerized[1:]...)\n\tlangserver.Stdin = os.Stdin\n\tlangserver.Stdout = os.Stdout\n\tlangserver.Stderr = os.Stderr\n\n\treturn langserver.Run()\n}", "func SetupShell() {\n\tsh = InitShell(\"$\", \" \")\n}", "func DryRun(args []string, env map[string]string) (string, *exec.Cmd, error) {\n\tskipSite := env[v1.EnvVarCohSkipSite]\n\tdetails := &RunDetails{\n\t\tOsArgs: args,\n\t\tEnv: env,\n\t\tCoherenceHome: env[v1.EnvVarCoherenceHome],\n\t\tUtilsDir: env[v1.EnvVarCohUtilDir],\n\t\tJavaHome: env[v1.EnvVarJavaHome],\n\t\tAppType: strings.ToLower(env[v1.EnvVarAppType]),\n\t\tDir: env[v1.EnvVarCohAppDir],\n\t\tMainClass: DCS,\n\t\tGetSite: strings.ToLower(skipSite) != \"true\",\n\t}\n\n\tprintHeader(details)\n\n\t// add any Classpath items\n\tdetails.AddClasspath(env[v1.EnvVarJvmExtraClasspath])\n\tdetails.AddClasspath(env[v1.EnvVarJavaClasspath])\n\n\tif len(details.OsArgs) == 1 {\n\t\tdetails.Command = CommandServer\n\t} else {\n\t\tswitch details.OsArgs[1] {\n\t\tcase CommandServer:\n\t\t\tserver(details)\n\t\tcase CommandConsole:\n\t\t\tconsole(details)\n\t\tcase CommandQueryPlus:\n\t\t\tqueryPlus(details)\n\t\tcase CommandMBeanServer:\n\t\t\tmbeanServer(details)\n\t\tcase v1.RunnerInit:\n\t\t\terr := Initialise()\n\t\t\treturn \"\", nil, err\n\t\tcase CommandVersion:\n\t\t\treturn \"\", nil, nil\n\t\tdefault:\n\t\t\tusage()\n\t\t\treturn \"\", nil, fmt.Errorf(\"invalid command %s\", details.OsArgs[1])\n\t\t}\n\t}\n\n\treturn start(details)\n}", "func packratPlatform(p string) string {\n\tswitch p {\n\tcase \"x86_64-w64-mingw32/x64\":\n\t\treturn 
\"x86_64-w64-mingw32\"\n\tdefault:\n\t\treturn p\n\t}\n}", "func InternalExecFor(rootCmd *RootCommand, args []string) (err error) {\n\tvar (\n\t\tpkg = new(ptpkg)\n\t\tgoCommand = &rootCmd.Command\n\t\tstop bool\n\t\tmatched bool\n\t\t// helpFlag = rootCmd.allFlags[UnsortedGroup][\"help\"]\n\t)\n\n\tif rootCommand == nil {\n\t\tsetRootCommand(rootCmd)\n\t}\n\n\tdefer func() {\n\t\t_ = rootCmd.ow.Flush()\n\t\t_ = rootCmd.oerr.Flush()\n\t}()\n\n\terr = preprocess(rootCmd, args)\n\n\tif err == nil {\n\t\tfor pkg.i = 1; pkg.i < len(args); pkg.i++ {\n\t\t\tpkg.Reset()\n\t\t\tpkg.a = args[pkg.i]\n\n\t\t\t// --debug: long opt\n\t\t\t// -D: short opt\n\t\t\t// -nv: double chars short opt\n\t\t\t// ~~debug: long opt without opt-entry prefix.\n\t\t\t// ~D: short opt without opt-entry prefix.\n\t\t\t// -abc: the combined short opts\n\t\t\t// -nvabc, -abnvc: a,b,c,nv the four short opts, if no -n & -v defined.\n\t\t\t// --name=consul, --name consul, --name=consul: opt with a string, int, string slice argument\n\t\t\t// -nconsul, -n consul, -n=consul: opt with an argument.\n\t\t\t// - -nconsul is not good format, but it could get somewhat works.\n\t\t\t// - -n'consul', -n\"consul\" could works too.\n\t\t\t// -t3: opt with an argument.\n\t\t\tmatched, stop, err = xxTestCmd(pkg, &goCommand, rootCmd, args)\n\t\t\tif e, ok := err.(*ErrorForCmdr); ok {\n\t\t\t\tferr(\"%v\", e)\n\t\t\t\tif !e.Ignorable {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif stop {\n\t\t\t\tif pkg.lastCommandHeld || (matched && pkg.flg == nil) {\n\t\t\t\t\terr = afterInternalExec(pkg, rootCmd, goCommand, args)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\terr = afterInternalExec(pkg, rootCmd, goCommand, args)\n\t}\n\treturn\n}", "func setupEnv(args *execdriver.InitArgs) error {\n\t// Get env\n\tvar env []string\n\tcontent, err := ioutil.ReadFile(\".dockerenv\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to load environment variables: %v\", err)\n\t}\n\tif err := json.Unmarshal(content, &env); 
err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshal environment variables: %v\", err)\n\t}\n\t// Propagate the plugin-specific container env variable\n\tenv = append(env, \"container=\"+os.Getenv(\"container\"))\n\n\targs.Env = env\n\n\tos.Clearenv()\n\tfor _, kv := range args.Env {\n\t\tparts := strings.SplitN(kv, \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = append(parts, \"\")\n\t\t}\n\t\tos.Setenv(parts[0], parts[1])\n\t}\n\n\treturn nil\n}", "func LowSubcmd(subcmd string) (bool, int) {\n\targs := []string{}\n\tif len(os.Args) > 1 {\n\t\targs = os.Args\n\t} else {\n\t\treturn false, 0\n\t}\n\tfor i, v := range args {\n\t\tif v == subcmd {\n\t\t\treturn true, i\n\t\t}\n\t}\n\treturn false, 0\n}", "func (s *BaseGShellListener) EnterCommandLine(ctx *CommandLineContext) {}", "func setFakeExecCommandContext(t *testing.T) func() {\n\texecCommandContext = func(ctx context.Context, command string, args ...string) *exec.Cmd {\n\t\tcs := []string{\"-test.run=TestHelperProcess\", \"--\", command}\n\t\tcs = append(cs, args...)\n\t\tcmd := exec.CommandContext(ctx, os.Args[0], cs...)\n\t\tcmd.Env = []string{\"GO_RUN_HELPER_PROCESS=1\"}\n\t\treturn cmd\n\t}\n\n\treturn func() {\n\t\texecCommandContext = exec.CommandContext\n\t}\n}", "func (opts *ToolOptions) setOptionsFromURI(cs connstring.ConnString) error {\n\topts.URI.ConnString = cs\n\n\tif opts.enabledOptions.Connection {\n\t\t// Port can be set in --port, --host, or URI\n\t\t// Each host/port pair in the options must match the URI host/port pairs\n\t\tif opts.Port != \"\" {\n\t\t\t// if --port is set, check that each host:port pair in the URI the port defined in --port\n\t\t\tfor i, host := range cs.Hosts {\n\t\t\t\tif strings.Index(host, \":\") != -1 {\n\t\t\t\t\thostPort := strings.Split(host, \":\")[1]\n\t\t\t\t\tif hostPort != opts.Port {\n\t\t\t\t\t\treturn ConflictingArgsErrorFormat(\"port\", strings.Join(cs.Hosts, \",\"), opts.Port, \"--port\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// if the URI 
hosts have no ports, append them\n\t\t\t\t\tcs.Hosts[i] = cs.Hosts[i] + \":\" + opts.Port\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif opts.Host != \"\" {\n\t\t\t// build hosts from --host and --port\n\t\t\tseedlist, replicaSetName := util.SplitHostArg(opts.Host)\n\t\t\topts.ReplicaSetName = replicaSetName\n\n\t\t\tif opts.Port != \"\" {\n\t\t\t\tfor i := range seedlist {\n\t\t\t\t\tif strings.Index(seedlist[i], \":\") == -1 { // no port\n\t\t\t\t\t\tseedlist[i] = seedlist[i] + \":\" + opts.Port\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// create a set of hosts since the order of a seedlist doesn't matter\n\t\t\tcsHostSet := make(map[string]bool)\n\t\t\tfor _, host := range cs.Hosts {\n\t\t\t\tcsHostSet[host] = true\n\t\t\t}\n\n\t\t\toptionHostSet := make(map[string]bool)\n\t\t\tfor _, host := range seedlist {\n\t\t\t\toptionHostSet[host] = true\n\t\t\t}\n\n\t\t\t// check the sets are equal\n\t\t\tif len(csHostSet) != len(optionHostSet) {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"host\", strings.Join(cs.Hosts, \",\"), opts.Host, \"--host\")\n\t\t\t}\n\n\t\t\tfor host := range csHostSet {\n\t\t\t\tif _, ok := optionHostSet[host]; !ok {\n\t\t\t\t\treturn ConflictingArgsErrorFormat(\"host\", strings.Join(cs.Hosts, \",\"), opts.Host, \"--host\")\n\t\t\t\t}\n\t\t\t}\n\t\t} else if len(cs.Hosts) > 0 {\n\t\t\tif cs.ReplicaSet != \"\" {\n\t\t\t\topts.Host = cs.ReplicaSet + \"/\"\n\t\t\t}\n\n\t\t\t// check if there is a <host:port> pair with a port that matches --port <port>\n\t\t\tconflictingPorts := true\n\t\t\tfor _, host := range cs.Hosts {\n\t\t\t\thostPort := strings.Split(host, \":\")\n\t\t\t\topts.Host += hostPort[0] + \",\"\n\n\t\t\t\t// a port might not be specified, e.g. 
`mongostat --discover`\n\t\t\t\tif len(hostPort) == 2 {\n\t\t\t\t\tif opts.Port != \"\" {\n\t\t\t\t\t\tif hostPort[1] == opts.Port {\n\t\t\t\t\t\t\tconflictingPorts = false\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\topts.Port = hostPort[1]\n\t\t\t\t\t\tconflictingPorts = false\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tconflictingPorts = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif conflictingPorts {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"port\", strings.Join(cs.Hosts, \",\"), opts.Port, \"--port\")\n\t\t\t}\n\t\t\t// remove trailing comma\n\t\t\topts.Host = opts.Host[:len(opts.Host)-1]\n\t\t}\n\n\t\tif len(cs.Hosts) > 1 && cs.LoadBalanced {\n\t\t\treturn fmt.Errorf(\"loadBalanced cannot be set to true if multiple hosts are specified\")\n\t\t}\n\n\t\tif opts.Connection.ServerSelectionTimeout != 0 && cs.ServerSelectionTimeoutSet {\n\t\t\tif (time.Duration(opts.Connection.ServerSelectionTimeout) * time.Millisecond) != cs.ServerSelectionTimeout {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"serverSelectionTimeout\", strconv.Itoa(int(cs.ServerSelectionTimeout/time.Millisecond)), strconv.Itoa(opts.Connection.ServerSelectionTimeout), \"--serverSelectionTimeout\")\n\t\t\t}\n\t\t}\n\t\tif opts.Connection.ServerSelectionTimeout != 0 && !cs.ServerSelectionTimeoutSet {\n\t\t\tcs.ServerSelectionTimeout = time.Duration(opts.Connection.ServerSelectionTimeout) * time.Millisecond\n\t\t\tcs.ServerSelectionTimeoutSet = true\n\t\t}\n\t\tif opts.Connection.ServerSelectionTimeout == 0 && cs.ServerSelectionTimeoutSet {\n\t\t\topts.Connection.ServerSelectionTimeout = int(cs.ServerSelectionTimeout / time.Millisecond)\n\t\t}\n\n\t\tif opts.Connection.Timeout != 3 && cs.ConnectTimeoutSet {\n\t\t\tif (time.Duration(opts.Connection.Timeout) * time.Millisecond) != cs.ConnectTimeout {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"connectTimeout\", strconv.Itoa(int(cs.ConnectTimeout/time.Millisecond)), strconv.Itoa(opts.Connection.Timeout), \"--dialTimeout\")\n\t\t\t}\n\t\t}\n\t\tif 
opts.Connection.Timeout != 3 && !cs.ConnectTimeoutSet {\n\t\t\tcs.ConnectTimeout = time.Duration(opts.Connection.Timeout) * time.Millisecond\n\t\t\tcs.ConnectTimeoutSet = true\n\t\t}\n\t\tif opts.Connection.Timeout == 3 && cs.ConnectTimeoutSet {\n\t\t\topts.Connection.Timeout = int(cs.ConnectTimeout / time.Millisecond)\n\t\t}\n\n\t\tif opts.Connection.SocketTimeout != 0 && cs.SocketTimeoutSet {\n\t\t\tif (time.Duration(opts.Connection.SocketTimeout) * time.Millisecond) != cs.SocketTimeout {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"SocketTimeout\", strconv.Itoa(int(cs.SocketTimeout/time.Millisecond)), strconv.Itoa(opts.Connection.SocketTimeout), \"--socketTimeout\")\n\t\t\t}\n\t\t}\n\t\tif opts.Connection.SocketTimeout != 0 && !cs.SocketTimeoutSet {\n\t\t\tcs.SocketTimeout = time.Duration(opts.Connection.SocketTimeout) * time.Millisecond\n\t\t\tcs.SocketTimeoutSet = true\n\t\t}\n\t\tif opts.Connection.SocketTimeout == 0 && cs.SocketTimeoutSet {\n\t\t\topts.Connection.SocketTimeout = int(cs.SocketTimeout / time.Millisecond)\n\t\t}\n\n\t\tif len(cs.Compressors) != 0 {\n\t\t\tif opts.Connection.Compressors != \"none\" && opts.Connection.Compressors != strings.Join(cs.Compressors, \",\") {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"compressors\", strings.Join(cs.Compressors, \",\"), opts.Connection.Compressors, \"--compressors\")\n\t\t\t}\n\t\t} else {\n\t\t\tcs.Compressors = strings.Split(opts.Connection.Compressors, \",\")\n\t\t}\n\t}\n\n\tif opts.enabledOptions.Auth {\n\n\t\tif opts.Username != \"\" && cs.Username != \"\" {\n\t\t\tif opts.Username != cs.Username {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"username\", cs.Username, opts.Username, \"--username\")\n\t\t\t}\n\t\t}\n\t\tif opts.Username != \"\" && cs.Username == \"\" {\n\t\t\tcs.Username = opts.Username\n\t\t}\n\t\tif opts.Username == \"\" && cs.Username != \"\" {\n\t\t\topts.Username = cs.Username\n\t\t}\n\n\t\tif opts.Password != \"\" && cs.PasswordSet {\n\t\t\tif opts.Password != cs.Password 
{\n\t\t\t\treturn fmt.Errorf(\"Invalid Options: Cannot specify different password in connection URI and command-line option\")\n\t\t\t}\n\t\t}\n\t\tif opts.Password != \"\" && !cs.PasswordSet {\n\t\t\tcs.Password = opts.Password\n\t\t\tcs.PasswordSet = true\n\t\t}\n\t\tif opts.Password == \"\" && cs.PasswordSet {\n\t\t\topts.Password = cs.Password\n\t\t}\n\n\t\tif opts.Source != \"\" && cs.AuthSourceSet {\n\t\t\tif opts.Source != cs.AuthSource {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"authSource\", cs.AuthSource, opts.Source, \"--authenticationDatabase\")\n\t\t\t}\n\t\t}\n\t\tif opts.Source != \"\" && !cs.AuthSourceSet {\n\t\t\tcs.AuthSource = opts.Source\n\t\t\tcs.AuthSourceSet = true\n\t\t}\n\t\tif opts.Source == \"\" && cs.AuthSourceSet {\n\t\t\topts.Source = cs.AuthSource\n\t\t}\n\n\t\tif opts.Mechanism != \"\" && cs.AuthMechanism != \"\" {\n\t\t\tif opts.Mechanism != cs.AuthMechanism {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"authMechanism\", cs.AuthMechanism, opts.Mechanism, \"--authenticationMechanism\")\n\t\t\t}\n\t\t}\n\t\tif opts.Mechanism != \"\" && cs.AuthMechanism == \"\" {\n\t\t\tcs.AuthMechanism = opts.Mechanism\n\t\t}\n\t\tif opts.Mechanism == \"\" && cs.AuthMechanism != \"\" {\n\t\t\topts.Mechanism = cs.AuthMechanism\n\t\t}\n\n\t}\n\n\tif opts.enabledOptions.Namespace {\n\n\t\tif opts.DB != \"\" && cs.Database != \"\" {\n\t\t\tif opts.DB != cs.Database {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"database\", cs.Database, opts.DB, \"--db\")\n\t\t\t}\n\t\t}\n\t\tif opts.DB != \"\" && cs.Database == \"\" {\n\t\t\tcs.Database = opts.DB\n\t\t}\n\t\tif opts.DB == \"\" && cs.Database != \"\" {\n\t\t\topts.DB = cs.Database\n\t\t}\n\t}\n\n\t// check replica set name equality\n\tif opts.ReplicaSetName != \"\" && cs.ReplicaSet != \"\" {\n\t\tif opts.ReplicaSetName != cs.ReplicaSet {\n\t\t\treturn ConflictingArgsErrorFormat(\"replica set name\", cs.ReplicaSet, opts.Host, \"--host\")\n\t\t}\n\t\tif opts.ConnString.LoadBalanced {\n\t\t\treturn 
fmt.Errorf(\"loadBalanced cannot be set to true if the replica set name is specified\")\n\t\t}\n\t}\n\tif opts.ReplicaSetName != \"\" && cs.ReplicaSet == \"\" {\n\t\tcs.ReplicaSet = opts.ReplicaSetName\n\t}\n\tif opts.ReplicaSetName == \"\" && cs.ReplicaSet != \"\" {\n\t\topts.ReplicaSetName = cs.ReplicaSet\n\t}\n\n\t// Connect directly to a host if indicated by the connection string.\n\topts.Direct = cs.DirectConnection || (cs.Connect == connstring.SingleConnect)\n\tif opts.Direct && opts.ConnString.LoadBalanced {\n\t\treturn fmt.Errorf(\"loadBalanced cannot be set to true if the direct connection option is specified\")\n\t}\n\n\tif (cs.SSL || opts.UseSSL) && !BuiltWithSSL {\n\t\tif strings.HasPrefix(cs.Original, \"mongodb+srv\") {\n\t\t\treturn fmt.Errorf(\"SSL enabled by default when using SRV but tool not built with SSL: \" +\n\t\t\t\t\"SSL must be explicitly disabled with ssl=false in the connection string\")\n\t\t}\n\t\treturn fmt.Errorf(\"cannot use ssl: tool not built with SSL support\")\n\t}\n\n\tif cs.RetryWritesSet {\n\t\topts.RetryWrites = &cs.RetryWrites\n\t}\n\n\tif cs.SSLSet {\n\t\tif opts.UseSSL && !cs.SSL {\n\t\t\treturn ConflictingArgsErrorFormat(\"ssl\", strconv.FormatBool(cs.SSL), strconv.FormatBool(opts.UseSSL), \"--ssl\")\n\t\t} else if !opts.UseSSL && cs.SSL {\n\t\t\topts.UseSSL = cs.SSL\n\t\t}\n\t}\n\n\t// ignore opts.UseSSL being false due to zero-value problem (TOOLS-2459 PR for details)\n\t// Ignore: opts.UseSSL = false, cs.SSL = true (have cs take precedence)\n\t// Treat as conflict: opts.UseSSL = true, cs.SSL = false\n\tif opts.UseSSL && cs.SSLSet {\n\t\tif !cs.SSL {\n\t\t\treturn ConflictingArgsErrorFormat(\"ssl or tls\", strconv.FormatBool(cs.SSL), strconv.FormatBool(opts.UseSSL), \"--ssl\")\n\t\t}\n\t}\n\tif opts.UseSSL && !cs.SSLSet {\n\t\tcs.SSL = opts.UseSSL\n\t\tcs.SSLSet = true\n\t}\n\t// If SSL set in cs but not in opts,\n\tif !opts.UseSSL && cs.SSLSet {\n\t\topts.UseSSL = cs.SSL\n\t}\n\n\tif opts.SSLCAFile != \"\" && 
cs.SSLCaFileSet {\n\t\tif opts.SSLCAFile != cs.SSLCaFile {\n\t\t\treturn ConflictingArgsErrorFormat(\"sslCAFile\", cs.SSLCaFile, opts.SSLCAFile, \"--sslCAFile\")\n\t\t}\n\t}\n\tif opts.SSLCAFile != \"\" && !cs.SSLCaFileSet {\n\t\tcs.SSLCaFile = opts.SSLCAFile\n\t\tcs.SSLCaFileSet = true\n\t}\n\tif opts.SSLCAFile == \"\" && cs.SSLCaFileSet {\n\t\topts.SSLCAFile = cs.SSLCaFile\n\t}\n\n\tif opts.SSLPEMKeyFile != \"\" && cs.SSLClientCertificateKeyFileSet {\n\t\tif opts.SSLPEMKeyFile != cs.SSLClientCertificateKeyFile {\n\t\t\treturn ConflictingArgsErrorFormat(\"sslClientCertificateKeyFile\", cs.SSLClientCertificateKeyFile, opts.SSLPEMKeyFile, \"--sslPEMKeyFile\")\n\t\t}\n\t}\n\tif opts.SSLPEMKeyFile != \"\" && !cs.SSLClientCertificateKeyFileSet {\n\t\tcs.SSLClientCertificateKeyFile = opts.SSLPEMKeyFile\n\t\tcs.SSLClientCertificateKeyFileSet = true\n\t}\n\tif opts.SSLPEMKeyFile == \"\" && cs.SSLClientCertificateKeyFileSet {\n\t\topts.SSLPEMKeyFile = cs.SSLClientCertificateKeyFile\n\t}\n\n\tif opts.SSLPEMKeyPassword != \"\" && cs.SSLClientCertificateKeyPasswordSet {\n\t\tif opts.SSLPEMKeyPassword != cs.SSLClientCertificateKeyPassword() {\n\t\t\treturn ConflictingArgsErrorFormat(\"sslPEMKeyFilePassword\", cs.SSLClientCertificateKeyPassword(), opts.SSLPEMKeyPassword, \"--sslPEMKeyFilePassword\")\n\t\t}\n\t}\n\tif opts.SSLPEMKeyPassword != \"\" && !cs.SSLClientCertificateKeyPasswordSet {\n\t\tcs.SSLClientCertificateKeyPassword = func() string { return opts.SSLPEMKeyPassword }\n\t\tcs.SSLClientCertificateKeyPasswordSet = true\n\t}\n\tif opts.SSLPEMKeyPassword == \"\" && cs.SSLClientCertificateKeyPasswordSet {\n\t\topts.SSLPEMKeyPassword = cs.SSLClientCertificateKeyPassword()\n\t}\n\n\t// Note: SSLCRLFile is not parsed by the go driver\n\n\t// ignore (opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost) being false due to zero-value problem (TOOLS-2459 PR for details)\n\t// Have cs take precedence in cases where it is unclear\n\tif (opts.SSLAllowInvalidCert || 
opts.SSLAllowInvalidHost || opts.TLSInsecure) && cs.SSLInsecureSet {\n\t\tif !cs.SSLInsecure {\n\t\t\treturn ConflictingArgsErrorFormat(\"sslInsecure or tlsInsecure\", \"false\", \"true\", \"--sslAllowInvalidCert or --sslAllowInvalidHost\")\n\t\t}\n\t}\n\tif (opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost || opts.TLSInsecure) && !cs.SSLInsecureSet {\n\t\tcs.SSLInsecure = true\n\t\tcs.SSLInsecureSet = true\n\t}\n\tif (!opts.SSLAllowInvalidCert && !opts.SSLAllowInvalidHost || !opts.TLSInsecure) && cs.SSLInsecureSet {\n\t\topts.SSLAllowInvalidCert = cs.SSLInsecure\n\t\topts.SSLAllowInvalidHost = cs.SSLInsecure\n\t\topts.TLSInsecure = cs.SSLInsecure\n\t}\n\n\tif strings.ToLower(cs.AuthMechanism) == \"gssapi\" {\n\t\tif !BuiltWithGSSAPI {\n\t\t\treturn fmt.Errorf(\"cannot specify gssapiservicename: tool not built with kerberos support\")\n\t\t}\n\n\t\tgssapiServiceName, _ := cs.AuthMechanismProperties[\"SERVICE_NAME\"]\n\n\t\tif opts.Kerberos.Service != \"\" && cs.AuthMechanismPropertiesSet {\n\t\t\tif opts.Kerberos.Service != gssapiServiceName {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"Kerberos service name\", gssapiServiceName, opts.Kerberos.Service, \"--gssapiServiceName\")\n\t\t\t}\n\t\t}\n\t\tif opts.Kerberos.Service != \"\" && !cs.AuthMechanismPropertiesSet {\n\t\t\tif cs.AuthMechanismProperties == nil {\n\t\t\t\tcs.AuthMechanismProperties = make(map[string]string)\n\t\t\t}\n\t\t\tcs.AuthMechanismProperties[\"SERVICE_NAME\"] = opts.Kerberos.Service\n\t\t\tcs.AuthMechanismPropertiesSet = true\n\t\t}\n\t\tif opts.Kerberos.Service == \"\" && cs.AuthMechanismPropertiesSet {\n\t\t\topts.Kerberos.Service = gssapiServiceName\n\t\t}\n\t}\n\n\tif strings.ToLower(cs.AuthMechanism) == \"mongodb-aws\" {\n\t\tawsSessionToken, _ := cs.AuthMechanismProperties[\"AWS_SESSION_TOKEN\"]\n\n\t\tif opts.AWSSessionToken != \"\" && cs.AuthMechanismPropertiesSet {\n\t\t\tif opts.AWSSessionToken != awsSessionToken {\n\t\t\t\treturn ConflictingArgsErrorFormat(\"AWS Session 
Token\", awsSessionToken, opts.AWSSessionToken, \"--awsSessionToken\")\n\t\t\t}\n\t\t}\n\t\tif opts.AWSSessionToken != \"\" && !cs.AuthMechanismPropertiesSet {\n\t\t\tif cs.AuthMechanismProperties == nil {\n\t\t\t\tcs.AuthMechanismProperties = make(map[string]string)\n\t\t\t}\n\t\t\tcs.AuthMechanismProperties[\"AWS_SESSION_TOKEN\"] = opts.AWSSessionToken\n\t\t\tcs.AuthMechanismPropertiesSet = true\n\t\t}\n\t\tif opts.AWSSessionToken == \"\" && cs.AuthMechanismPropertiesSet {\n\t\t\topts.AWSSessionToken = awsSessionToken\n\t\t}\n\t}\n\tfor _, extraOpts := range opts.URI.extraOptionsRegistry {\n\t\tif uriSetter, ok := extraOpts.(URISetter); ok {\n\t\t\terr := uriSetter.SetOptionsFromURI(cs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// set the connString on opts so it can be validated later\n\topts.ConnString = cs\n\n\treturn nil\n}", "func (client ConsoleClient) EnableConsolePreparer(ctx context.Context) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"default\": autorest.Encode(\"path\", \"default\"),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2018-05-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsPost(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.SerialConsole/consoleServices/{default}/enableConsole\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func constructHostInstallerArgs(cluster *common.Cluster, host *models.Host, inventory *models.Inventory, infraEnv *common.InfraEnv, log logrus.FieldLogger) (string, error) {\n\n\tvar installerArgs []string\n\tvar err error\n\thasStaticNetwork := (infraEnv != nil && infraEnv.StaticNetworkConfig != \"\") || 
cluster.StaticNetworkConfigured\n\n\tif host.InstallerArgs != \"\" {\n\t\terr = json.Unmarshal([]byte(host.InstallerArgs), &installerArgs)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif !hasStaticNetwork {\n\t\t// The set of ip=<nic>:dhcp kernel arguments should be added only if there is no static\n\t\t// network configured by the user. This is because this parameter will configure RHCOS to\n\t\t// try to obtain IP address from the DHCP server even if we provide a static addressing.\n\t\t// As in majority of cases it's not an issue because of the priorities set in the config\n\t\t// of NetworkManager, in some specific scenarios (e.g. BZ-2106110) this causes machines to\n\t\t// lose their connectivity because priorities get mixed.\n\t\tinstallerArgs, err = appendDHCPArgs(cluster, host, inventory, installerArgs, log)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tfor _, disk := range inventory.Disks {\n\t\tif disk.DriveType == models.DriveTypeMultipath && disk.ID == host.InstallationDiskID {\n\t\t\tinstallerArgs = append(installerArgs, \"--append-karg\", \"root=/dev/disk/by-label/dm-mpath-root\", \"--append-karg\", \"rw\", \"--append-karg\", \"rd.multipath=default\")\n\t\t}\n\t}\n\n\tif hasStaticNetwork && !funk.Contains(installerArgs, \"--copy-network\") {\n\t\t// network not configured statically or\n\t\t// installer args already contain command for network configuration\n\t\tinstallerArgs = append(installerArgs, \"--copy-network\")\n\t}\n\n\treturn toJSONString(installerArgs)\n}", "func setupCall(h handlerType, rule ruleType, lfStr, rtStr string,\n\trep httpserver.Replacer, hdr http.Header, username string) (cgiHnd cgi.Handler) {\n\tcgiHnd.Root = \"/\"\n\tcgiHnd.Dir = h.root\n\trep.Set(\"root\", h.root)\n\trep.Set(\"match\", lfStr)\n\trep.Set(\".\", currentDir())\n\tcgiHnd.Path = rep.Replace(rule.exe)\n\tif rule.dir != \"\" {\n\t\tcgiHnd.Dir = rule.dir\n\t}\n\tcgiHnd.Env = append(cgiHnd.Env, \"REMOTE_USER=\"+username)\n\tenvAdd := 
func(key, val string) {\n\t\tval = rep.Replace(val)\n\t\tcgiHnd.Env = append(cgiHnd.Env, key+\"=\"+val)\n\t}\n\tfor _, env := range rule.envs {\n\t\tenvAdd(env[0], env[1])\n\t}\n\tfor _, env := range rule.emptyEnvs {\n\t\tcgiHnd.Env = append(cgiHnd.Env, env+\"=\")\n\t}\n\tenvAdd(\"PATH_INFO\", rtStr)\n\tenvAdd(\"SCRIPT_FILENAME\", cgiHnd.Path)\n\tenvAdd(\"SCRIPT_NAME\", lfStr)\n\tif rule.passAll {\n\t\tcgiHnd.InheritEnv = passAll()\n\t} else {\n\t\tcgiHnd.InheritEnv = append(cgiHnd.InheritEnv, rule.passEnvs...)\n\t}\n\tfor _, str := range rule.args {\n\t\tcgiHnd.Args = append(cgiHnd.Args, rep.Replace(str))\n\t}\n\tenvAdd(\"SCRIPT_EXEC\", trim(sprintf(\"%s %s\", cgiHnd.Path, join(cgiHnd.Args, \" \"))))\n\treturn\n}", "func LoadArgs(argConfs []snlapi.Arg, cliArgs []string) (map[string]*string, error) {\n\targRes := map[string]*string{}\n\n\tam := map[string]snlapi.Arg{}\n\tposSl := []snlapi.Arg{}\n\tfor _, ac := range argConfs {\n\t\tif ac.Type == \"\" || ac.Type == \"bool\" || ac.Type == \"named\" {\n\t\t\tam[ac.Name] = ac\n\t\t\tcontinue\n\t\t}\n\t\tif ac.Type == \"pos\" {\n\t\t\tposSl = append(posSl, ac)\n\t\t\tcontinue\n\t\t}\n\t\t//TODO: Validation\n\t\treturn nil, fmt.Errorf(\"unknown argument type: name: '%s', type: '%s' should be one of: pos, bool, named\", ac.Name, ac.Type)\n\t}\n\n\tprevHandled := false\n\tpassedPosArg := 0\n\tfor i, cliArg := range cliArgs {\n\t\tif prevHandled {\n\t\t\tprevHandled = false\n\t\t\tcontinue\n\t\t}\n\t\t// Whitespace separated named or bool flag\n\t\tif match := argWsSeparatedRegex.MatchString(cliArg); match {\n\t\t\targName := strings.TrimLeft(cliArg, \"-\")\n\t\t\tc, exists := am[argName]\n\t\t\tif !exists {\n\t\t\t\treturn nil, fmt.Errorf(\"named argument does not exist: name '%s'\", argName)\n\t\t\t}\n\t\t\t// Bool flag\n\t\t\tif c.Type != \"\" && c.Type == \"bool\" {\n\t\t\t\ttrueVal := \"1\"\n\t\t\t\targRes[argName] = &trueVal\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Type == \"\" || c.Type == \"named\" {\n\t\t\t\t// Named 
flag whitespace separated\n\t\t\t\tif i+1 >= len(cliArgs) {\n\t\t\t\t\treturn nil, fmt.Errorf(\"missing value after last named argument: name '%s'\", cliArg)\n\t\t\t\t}\n\t\t\t\targRes[argName] = &cliArgs[i+1]\n\t\t\t\tprevHandled = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Equal sign separated named argument\n\t\tif match := argEqSeparatedRegex.MatchString(cliArg); match {\n\t\t\ttmpS := strings.TrimLeft(cliArg, \"-\")\n\t\t\tspl := strings.SplitN(tmpS, \"=\", 2)\n\t\t\targName, argValue := spl[0], spl[1]\n\t\t\tc, exists := am[argName]\n\t\t\tif !exists {\n\t\t\t\treturn nil, fmt.Errorf(\"named argument does not exist: '%s'\", argName)\n\t\t\t}\n\t\t\tif !(c.Type == \"\" || c.Type == \"named\") {\n\t\t\t\treturn nil, fmt.Errorf(\"value provided for non-named argument %s: '%s'\", argName, cliArg)\n\t\t\t}\n\t\t\targRes[argName] = &argValue\n\t\t\tcontinue\n\t\t}\n\n\t\t// Positional arguments\n\t\tif len(posSl) > passedPosArg {\n\t\t\ta := posSl[passedPosArg]\n\t\t\targRes[a.Name] = &cliArgs[i]\n\t\t\tpassedPosArg++\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"too many positional arguments given: '%s'\", cliArg)\n\t\t}\n\t}\n\n\tfor i := range argConfs {\n\t\targName := argConfs[i].Name\n\t\tif _, exists := argRes[argName]; !exists {\n\t\t\tif argConfs[i].FromEnvVar != nil {\n\t\t\t\tvalue, isSet := os.LookupEnv(argName)\n\t\t\t\tif isSet {\n\t\t\t\t\targRes[argName] = &value\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif argConfs[i].Default != nil {\n\t\t\t\targRes[argName] = argConfs[i].Default\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif argConfs[i].Type == \"bool\" {\n\t\t\t\tfalseVal := \"0\"\n\t\t\t\targRes[argName] = &falseVal\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif argConfs[i].Type == \"pos\" {\n\t\t\t\treturn nil, fmt.Errorf(\"value for positional argument missing: '%s'\", argName)\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"value for argument missing: not given via parameter, environment variable or default value: '%s'\", 
argName)\n\t\t}\n\t}\n\tlogrus.Trace(\"parsed args\", argRes)\n\n\treturn argRes, nil\n}", "func (cli *CLI) setup(args []string) (*structs.Config, error) {\n\n\t// If the length of the CLI args is greater than one then there is an error.\n\tif len(args) > 1 {\n\t\treturn nil, fmt.Errorf(\"too many command line args\\n %v\", usage)\n\t}\n\n\t// If no cli flags are passed then we just return a default configuration\n\t// struct for use.\n\tif len(args) == 0 {\n\t\treturn config.DefaultConfig(), nil\n\t}\n\n\t// If one CLI argument is passed this is split using the equals delimiter and\n\t// the right hand side used as the configuration file/path to parse.\n\tsplit := strings.Split(args[0], \"=\")\n\n\tswitch p := split[0]; p {\n\tcase \"-config\":\n\t\tc, err := config.FromPath(split[1])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unable to correctly determine config location %v\", split[1])\n\t}\n}", "func objectNameAndCommandLine(env *cmdline.Env, args []string) (string, string, error) {\n\tif len(args) < 1 {\n\t\treturn \"\", \"\", errors.New(\"object name missing\")\n\t}\n\tname := args[0]\n\targs = args[1:]\n\t// For compatibility with tools like rsync. Because object names\n\t// don't look like traditional hostnames, tools that work with rsh and\n\t// ssh can't work directly with vsh. 
This trick makes the following\n\t// possible:\n\t// $ VSH_NAME=<object name> rsync -avh -e vsh /foo/* v23:/foo/\n\t// The \"v23\" host will be substituted with <object name>.\n\tif envName := env.Vars[\"VSH_NAME\"]; envName != \"\" && name == \"v23\" {\n\t\tname = envName\n\t}\n\tcmd := strings.Join(args, \" \")\n\treturn name, cmd, nil\n}", "func StartPoint(name string) func(*types.Cmd) {\n\treturn func(g *types.Cmd) {\n\t\tg.AddOptions(name)\n\t}\n}", "func argsInit() {\n\tArgs = make([]string, 0, 0)\n\tArgs = append(Args, os.Args...)\n\tExecFile = options.GetExecFileByPid(os.Getpid())\n\t// default opt Parser\n\t// do not include ExecFile\n\topts = options.NewOptParser(Args[1:])\n\tArgLine = options.ArgsToSpLine(Args)\n\tArgFullLine = options.CleanArgLine(os.Args[0] + \" \" + opts.String())\n\t//\n}", "func Preload(L *lua.LState) {\n\tL.PreloadModule(\"argparse\", Loader)\n}", "func (c *Vrouter) PrepareDaemonSet(ds *appsv1.DaemonSet,\n\tcommonConfiguration *PodConfiguration,\n\trequest reconcile.Request,\n\tscheme *runtime.Scheme,\n\tclient client.Client) error {\n\tinstanceType := \"vrouter\"\n\tSetDSCommonConfiguration(ds, commonConfiguration)\n\tds.SetName(request.Name + \"-\" + instanceType + \"-daemonset\")\n\tds.SetNamespace(request.Namespace)\n\tds.SetLabels(map[string]string{\"contrail_manager\": instanceType,\n\t\tinstanceType: request.Name})\n\tds.Spec.Selector.MatchLabels = map[string]string{\"contrail_manager\": instanceType,\n\t\tinstanceType: request.Name}\n\tds.Spec.Template.SetLabels(map[string]string{\"contrail_manager\": instanceType,\n\t\tinstanceType: request.Name})\n\tds.Spec.Template.Spec.Affinity = &corev1.Affinity{\n\t\tPodAntiAffinity: &corev1.PodAntiAffinity{\n\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{\n\t\t\t\tLabelSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchExpressions: []metav1.LabelSelectorRequirement{{\n\t\t\t\t\t\tKey: instanceType,\n\t\t\t\t\t\tOperator: 
\"Exists\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tTopologyKey: \"kubernetes.io/hostname\",\n\t\t\t}},\n\t\t},\n\t}\n\terr := controllerutil.SetControllerReference(c, ds, scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func addCredentialProviderFlags(fs *pflag.FlagSet) {\n\t// lookup flags in global flag set and re-register the values with our flagset\n\tglobal := pflag.CommandLine\n\tlocal := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)\n\n\taddLegacyCloudProviderCredentialProviderFlags(global, local)\n\n\tfs.AddFlagSet(local)\n}", "func sortOptions(meta *meta, args []string, toSuggest string, suggestions []string) []string {\n\tcommand := getCommand(meta, args, toSuggest)\n\tif command == nil {\n\t\treturn suggestions\n\t}\n\n\targSpecs := []ShellSuggestion(nil)\n\tfor _, suggest := range suggestions {\n\t\targSpec := command.ArgSpecs.GetByName(optionToArgSpecName(suggest))\n\t\targSpecs = append(argSpecs, ShellSuggestion{\n\t\t\tText: suggest,\n\t\t\tArg: argSpec,\n\t\t})\n\t}\n\n\tsort.Slice(argSpecs, func(i, j int) bool {\n\t\tif argSpecs[i].Arg != nil && argSpecs[j].Arg != nil && argSpecs[i].Arg.Required != argSpecs[j].Arg.Required {\n\t\t\treturn argSpecs[i].Arg.Required\n\t\t}\n\t\treturn argSpecs[i].Text < argSpecs[j].Text\n\t})\n\n\tsuggests := []string(nil)\n\tfor _, argSpec := range argSpecs {\n\t\tsuggests = append(suggests, argSpec.Text)\n\t}\n\n\treturn suggests\n}", "func (pw *linuxProcess) CmdLine(withArgs bool) (string, error) {\n\tif pw.cmdLine != \"\" {\n\t\treturn pw.cmdLine, nil\n\t}\n\n\tcmdPath := helpers.HostProc(strconv.Itoa(int(pw.pid)), \"cmdline\")\n\tprocCmdline, err := ioutil.ReadFile(cmdPath)\n\tif err != nil {\n\t\tprocCmdline = nil // we can't be sure internal libraries return nil on error\n\t}\n\n\tif len(procCmdline) == 0 {\n\t\treturn \"\", nil // zombie process\n\t}\n\n\t// Ignoring dash on session commands\n\tif procCmdline[0] == '-' {\n\t\tprocCmdline = procCmdline[1:]\n\t}\n\n\tcmdLineBytes := 
make([]byte, 0, len(procCmdline))\n\tfor i := 0; i < len(procCmdline); i++ {\n\t\tif procCmdline[i] == 0 {\n\t\t\t// ignoring the trailing zero that ends /proc/<pid>/cmdline, but adding the last character if the file\n\t\t\t// does not end in zero\n\t\t\tif withArgs && i < len(procCmdline)-1 {\n\t\t\t\tcmdLineBytes = append(cmdLineBytes, ' ')\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tcmdLineBytes = append(cmdLineBytes, procCmdline[i])\n\t\t}\n\t}\n\tpw.cmdLine = helpers.SanitizeCommandLine(string(cmdLineBytes))\n\treturn pw.cmdLine, nil\n}", "func genCURLCmdLine(t *testing.T, resURL, proxyURL string, targets meta.NodeMap) []string {\n\tvar noProxy []string\n\tfor _, t := range targets {\n\t\tif !cos.StringInSlice(t.PubNet.Hostname, noProxy) {\n\t\t\tnoProxy = append(noProxy, t.PubNet.Hostname)\n\t\t}\n\t}\n\n\t// TODO: \"--proxy-insecure\" requires `curl` 7.58.0+ and is needed when we USE_HTTPS (see #885)\n\treturn []string{\n\t\t\"-L\", \"-X\", \"GET\",\n\t\tresURL,\n\t\t\"-o\", filepath.Join(t.TempDir(), \"curl.file\"),\n\t\t\"-x\", proxyURL,\n\t\t\"--max-redirs\", \"3\",\n\t\t\"--noproxy\", strings.Join(noProxy, \",\"),\n\t\t\"--insecure\",\n\t}\n}", "func setupFlags(params, paramsJSON string) *pflag.FlagSet {\n\tflagSet := pflag.NewFlagSet(\"TestGetParamsFromFlags\", pflag.PanicOnError)\n\tregisterParamsFlags(flagSet)\n\t// mirror actual usage by using Parse rather than Set\n\tcmdline := []string{\"apply\"}\n\tif params != \"\" {\n\t\tcmdline = append(cmdline, \"--params\", params)\n\t}\n\tif paramsJSON != \"\" {\n\t\tcmdline = append(cmdline, \"--paramsJSON\", paramsJSON)\n\t}\n\n\tif err := flagSet.Parse(append(cmdline, \"samples/test.hcl\")); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn flagSet\n}", "func setHypercoreCmd() string {\n\tcmd := \"linux\"\n\tif path, err := exec.LookPath(cmd); err == nil {\n\t\treturn path\n\t}\n\treturn cmd\n}", "func processLine(cmdLine string) {\n\tcmdLine = strings.TrimSpace(cmdLine)\n\n\tcommandList := 
make([]exec.Cmd, 0)\n\n\tif len(cmdLine) == 0 {\n\t\treturn\n\t}\n\n\tpipeStages := strings.Split(cmdLine, pipeChar)\n\n\terr := createPipeStages(&commandList, pipeStages)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = connectPipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error with pipes: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = executePipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error during execution: %v\\n\", shellName, err)\n\t\treturn\n\t}\n}", "func parseEnvArgs(candidate string) string {\n\tif !strings.Contains(candidate, \"=\") {\n\t\treturn subsituteEnvArg(candidate)\n\t}\n\tparts := strings.Split(candidate, \"=\")\n\tparts[1] = subsituteEnvArg(parts[1])\n\treturn strings.Join(parts, \"=\")\n}", "func connect() cli.Command { // nolint: gocyclo\n\tcommand := cli.Command{\n\t\tName: \"connect\",\n\t\tAliases: []string{\"conn\"},\n\t\tUsage: \"Get a shell from a vm\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"user\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"ssh login user\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"key\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"private key path (default: ~/.ssh/id_rsa)\",\n\t\t\t},\n\t\t},\n\t\tAction: func(c *cli.Context) error {\n\t\t\tvar name, loginUser, key string\n\t\t\tvar vmID int\n\t\t\tnameFound := false\n\t\t\tnargs := c.NArg()\n\t\t\tswitch {\n\t\t\tcase nargs == 1:\n\t\t\t\t// Parse flags\n\t\t\t\tif c.String(\"user\") != \"\" {\n\t\t\t\t\tloginUser = c.String(\"user\")\n\t\t\t\t} else {\n\t\t\t\t\tusr, _ := user.Current()\n\t\t\t\t\tloginUser = usr.Name\n\t\t\t\t}\n\n\t\t\t\tif c.String(\"key\") != \"\" {\n\t\t\t\t\tkey, _ = filepath.Abs(c.String(\"key\"))\n\t\t\t\t} else {\n\t\t\t\t\tusr, err := user.Current()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t}\n\n\t\t\t\t\tkey = usr.HomeDir + \"/.ssh/id_rsa\"\n\t\t\t\t}\n\t\t\t\tname = 
c.Args().First()\n\t\t\t\tcli, err := client.NewEnvClient()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tlistArgs := filters.NewArgs()\n\t\t\t\tlistArgs.Add(\"ancestor\", VMLauncherContainerImage)\n\t\t\t\tcontainers, err := cli.ContainerList(context.Background(),\n\t\t\t\t\ttypes.ContainerListOptions{\n\t\t\t\t\t\tQuiet: false,\n\t\t\t\t\t\tSize: false,\n\t\t\t\t\t\tAll: true,\n\t\t\t\t\t\tLatest: false,\n\t\t\t\t\t\tSince: \"\",\n\t\t\t\t\t\tBefore: \"\",\n\t\t\t\t\t\tLimit: 0,\n\t\t\t\t\t\tFilters: listArgs,\n\t\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tfor id, container := range containers {\n\t\t\t\t\tif container.Names[0][1:] == name {\n\t\t\t\t\t\tnameFound = true\n\t\t\t\t\t\tvmID = id\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !nameFound {\n\t\t\t\t\tfmt.Printf(\"Unable to find a running vm with name: %s\", name)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t} else {\n\t\t\t\t\tvmIP := containers[vmID].NetworkSettings.Networks[\"bridge\"].IPAddress\n\t\t\t\t\tgetNewSSHConn(loginUser, vmIP, key)\n\t\t\t\t}\n\n\t\t\tcase nargs == 0:\n\t\t\t\tfmt.Println(\"No name provided as argument.\")\n\t\t\t\tos.Exit(1)\n\n\t\t\tcase nargs > 1:\n\t\t\t\tfmt.Println(\"Only one argument is allowed\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\treturn command\n}", "func createCommand(pipeStage string) (*exec.Cmd, error) {\n\targs := strings.Fields(pipeStage)\n\texecutionArgs := make([]string, 0)\n\tvar cmd *exec.Cmd\n\n\t// Deliminating by pipes exposed an empty command\n\tif len(args) == 0 {\n\t\treturn nil, errors.New(\"Pipeline stage cannot be empty\")\n\t}\n\n\tvar inRedirectFile, outRedirectFile string\n\n\t// Any redirection specifiers (</>) will get parsed and saved\n\ti := 0\n\tfor i < len(args) {\n\t\tif strings.HasPrefix(args[i], inRedirectChar) {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil, errors.New(\"Command must precede input redirection\")\n\t\t\t}\n\t\t\tif len(args[i]) > 1 {\n\t\t\t\tinRedirectFile = 
args[i][1:]\n\t\t\t\ti++\n\t\t\t} else if i == (len(args) - 1) {\n\t\t\t\treturn nil, errors.New(\"Redirection must include input file name\")\n\t\t\t} else {\n\t\t\t\tinRedirectFile = args[i+1]\n\t\t\t\ti += 2\n\t\t\t}\n\t\t} else if strings.HasPrefix(args[i], outRedirectChar) {\n\t\t\tif i == 0 {\n\t\t\t\treturn nil, errors.New(\"Command must precede output redirection\")\n\t\t\t}\n\t\t\tif len(args[i]) > 1 {\n\t\t\t\toutRedirectFile = args[i][1:]\n\t\t\t\ti++\n\t\t\t} else if i == (len(args) - 1) {\n\t\t\t\treturn nil, errors.New(\"Redirection must include output file name\")\n\t\t\t} else {\n\t\t\t\toutRedirectFile = args[i+1]\n\t\t\t\ti += 2\n\t\t\t}\n\t\t} else if i < len(args) {\n\t\t\t// Save command arguments only if they arent references to redirection\n\t\t\texecutionArgs = append(executionArgs, args[i])\n\t\t\ti++\n\t\t}\n\t}\n\t// Create a command using only the command name and arguments\n\tcmd = exec.Command(executionArgs[0], executionArgs[1:]...)\n\n\t// Set the redirect output files from the data we parsed\n\terr := setRedirects(cmd, inRedirectFile, outRedirectFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cmd, nil\n}", "func (o *os) GetCmdlineArgs() gdnative.PoolStringArray {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetCmdlineArgs()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_cmdline_args\")\n\n\t// Call the parent method.\n\t// PoolStringArray\n\tretPtr := gdnative.NewEmptyPoolStringArray()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewPoolStringArrayFromPointer(retPtr)\n\treturn ret\n}", "func importPathsNoDotExpansion(args []string) []string {\n\tif len(args) == 0 {\n\t\treturn nil\n\t}\n\tvar out []string\n\tfor _, a := range args {\n\t\t// 
Arguments are supposed to be import paths, but\n\t\t// as a courtesy to Windows developers, rewrite \\ to /\n\t\t// in command-line arguments. Handles .\\... and so on.\n\t\tif filepath.Separator == '\\\\' {\n\t\t\ta = strings.Replace(a, `\\`, `/`, -1)\n\t\t}\n\n\t\t// Put argument in canonical form, but preserve leading ./.\n\t\tif strings.HasPrefix(a, \"./\") {\n\t\t\ta = \"./\" + path.Clean(a)\n\t\t\tif a == \"./.\" {\n\t\t\t\ta = \".\"\n\t\t\t}\n\t\t} else {\n\t\t\ta = path.Clean(a)\n\t\t}\n\t\tif a == allPackage || a == standardPackages {\n\t\t\tout = append(out, allPackages(a)...)\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, a)\n\t}\n\treturn out\n}", "func normalizeArg(args []string, arg string) []string {\n\tidx := -1\n\tfor i, v := range args {\n\t\tif v == arg {\n\t\t\tidx = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif idx == -1 || idx == len(args)-1 { // not found OR -arg has no succeding element\n\t\treturn args\n\t}\n\tnewArg := fmt.Sprintf(\"%s=%s\", args[idx], args[idx+1]) // merge values\n\targs[idx] = newArg // modify the arg\n\treturn append(args[:idx+1], args[idx+2:]...)\n}", "func PrependEnvPath(variable, seperator, path string) error {\n originalContents, varSet := os.LookupEnv(variable)\n\n var contents string\n if varSet == false || originalContents == \"\" {\n contents = path\n } else {\n contents = variable + seperator + path\n }\n\n if err := os.Setenv(variable, contents); err != nil {\n return err\n }\n\n return nil\n}", "func getCommandline(argv []string) string {\n\tvar str string\n\tfor _, s := range argv {\n\t\tstr += s + \" \"\n\t}\n\treturn str[0 : len(str)-1]\n}", "func NewCmdSet(f kcmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {\n\tset := &cobra.Command{\n\t\tUse: \"set COMMAND\",\n\t\tShort: \"Commands that help set specific features on objects\",\n\t\tLong: setLong,\n\t\tRun: kcmdutil.DefaultSubCommandRun(streams.ErrOut),\n\t}\n\n\tgroups := ktemplates.CommandGroups{\n\t\t{\n\t\t\tMessage: \"Manage 
workloads:\",\n\t\t\tCommands: []*cobra.Command{\n\t\t\t\tNewCmdDeploymentHook(f, streams),\n\t\t\t\tNewCmdEnv(f, streams),\n\t\t\t\tNewCmdImage(f, streams),\n\t\t\t\t// TODO: this seems reasonable to upstream\n\t\t\t\tNewCmdProbe(f, streams),\n\t\t\t\tNewCmdResources(f, streams),\n\t\t\t\tNewCmdSelector(f, streams),\n\t\t\t\tNewCmdServiceAccount(f, streams),\n\t\t\t\tNewCmdVolume(f, streams),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tMessage: \"Manage secrets and config:\",\n\t\t\tCommands: []*cobra.Command{\n\t\t\t\tNewCmdData(f, streams),\n\t\t\t\tNewCmdBuildSecret(f, streams),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tMessage: \"Manage application flows:\",\n\t\t\tCommands: []*cobra.Command{\n\t\t\t\tNewCmdBuildHook(f, streams),\n\t\t\t\tNewCmdImageLookup(f, streams),\n\t\t\t\tNewCmdTriggers(f, streams),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tMessage: \"Manage load balancing:\",\n\t\t\tCommands: []*cobra.Command{\n\t\t\t\tNewCmdRouteBackends(f, streams),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tMessage: \"Manage authorization policy:\",\n\t\t\tCommands: []*cobra.Command{\n\t\t\t\tNewCmdSubject(f, streams),\n\t\t\t},\n\t\t},\n\t}\n\tgroups.Add(set)\n\treturn set\n}", "func marshalOptions(opts *initClusterRegistryOptions) error {\n\tif opts.apiServerOverridesString == \"\" {\n\t\treturn nil\n\t}\n\n\targsMap := make(map[string]string)\n\toverrideArgs := strings.Split(opts.apiServerOverridesString, \",\")\n\tfor _, overrideArg := range overrideArgs {\n\t\tsplitArg := strings.SplitN(overrideArg, \"=\", 2)\n\t\tif len(splitArg) != 2 {\n\t\t\treturn fmt.Errorf(\"wrong format for override arg: %s\", overrideArg)\n\t\t}\n\t\tkey := strings.TrimSpace(splitArg[0])\n\t\tval := strings.TrimSpace(splitArg[1])\n\t\tif len(key) == 0 {\n\t\t\treturn fmt.Errorf(\"wrong format for override arg: %s, arg name cannot be empty\", overrideArg)\n\t\t}\n\t\targsMap[key] = val\n\t}\n\n\topts.apiServerOverrides = argsMap\n\n\treturn nil\n}", "func SetupCmd(cmd *exec.Cmd) {\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: 
true}\n}", "func NewCmdSet(name, fullName string) *cobra.Command {\n\tsto := NewSetOptions()\n\n\tvar componentSetCmd = &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Set active component.\",\n\t\tLong: \"Set component as active.\",\n\t\tExample: fmt.Sprintf(setExample, fullName),\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\todoutil.LogErrorAndExit(sto.Complete(name, cmd, args), \"\")\n\t\t\todoutil.LogErrorAndExit(sto.Validate(), \"\")\n\t\t\todoutil.LogErrorAndExit(sto.Run(), \"\")\n\t\t},\n\t}\n\n\t//Adding `--project` flag\n\tproject.AddProjectFlag(componentSetCmd)\n\t//Adding `--application` flag\n\tappCmd.AddApplicationFlag(componentSetCmd)\n\n\tcompletion.RegisterCommandHandler(componentSetCmd, completion.ComponentNameCompletionHandler)\n\n\treturn componentSetCmd\n}", "func WinExec(lpCmdLine /*const*/ LPCSTR, uCmdShow UINT) UINT {\n\tret1 := syscall3(winExec, 2,\n\t\tuintptr(unsafe.Pointer(lpCmdLine)),\n\t\tuintptr(uCmdShow),\n\t\t0)\n\treturn UINT(ret1)\n}", "func (Golang) Prep(gopath string, meta Metadata, local bool) (err error) {\n\tlogrus.Debug(\"Running Prep Commands\")\n\tvar codepath string\n\tif local {\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed getting CWD\")\n\t\t\treturn err\n\t\t}\n\n\t\tcodepath = wd\n\n\t} else {\n\t\tcodepath = fmt.Sprintf(\"%s/src/%s\", gopath, meta.Package)\n\n\t\terr = os.Chdir(codepath)\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed to cwd to %s\", gopath)\n\t\t\treturn err\n\t\t}\n\n\t\t// set the gopath in the environment so that we can interpolate it below\n\t\t_ = os.Setenv(\"GOPATH\", gopath)\n\t}\n\n\tfor _, cmdString := range meta.BuildInfo.PrepCommands {\n\t\t// interpolate any environment variables into the command string\n\t\tcmdString, err = envsubst.String(cmdString)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"failed to substitute env vars\")\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := 
exec.Command(\"bash\", \"-c\", cmdString)\n\n\t\tlogrus.Debugf(\"Running %q with GOPATH=%s\", cmdString, gopath)\n\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\terr = cmd.Run()\n\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed running %q\", cmdString)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogrus.Debugf(\"Prep steps for %s complete\", meta.Package)\n\n\treturn err\n}", "func poshCmd(args ...string) (string, string, error) {\n\targs = append([]string{\"-NoProfile\", \"-NonInteractive\"}, args...)\n\tcmd := exec.Command(powershell, args...)\n\tlog.Debugf(\"[POSH]: %s %s\", powershell, strings.Join(args, \" \"))\n\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\treturn stdout.String(), stderr.String(), err\n}", "func newCmdSetNamePrefix(fSys filesys.FileSystem) *cobra.Command {\n\tvar o setNamePrefixOptions\n\n\tcmd := &cobra.Command{\n\t\tUse: \"nameprefix\",\n\t\tShort: \"Sets the value of the namePrefix field in the kustomization file\",\n\t\tExample: `\nThe command\n set nameprefix acme-\nwill add the field \"namePrefix: acme-\" to the kustomization file if it doesn't exist,\nand overwrite the value with \"acme-\" if the field does exist.\n`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\terr := o.Validate(args)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn o.RunSetNamePrefix(fSys)\n\t\t},\n\t}\n\treturn cmd\n}" ]
[ "0.5523025", "0.51151484", "0.4743443", "0.46826875", "0.46628645", "0.45783198", "0.45290822", "0.45157805", "0.44479582", "0.4447057", "0.44447815", "0.4437473", "0.44140285", "0.43926194", "0.43795717", "0.4370517", "0.4349403", "0.4305569", "0.42815304", "0.42798728", "0.42791402", "0.4259367", "0.42545775", "0.42489693", "0.4237089", "0.42247957", "0.4223139", "0.4216774", "0.4195755", "0.4185485", "0.41780832", "0.41773817", "0.41335446", "0.4116307", "0.41162574", "0.41156048", "0.41088", "0.40993646", "0.4092828", "0.4048739", "0.40460667", "0.4037503", "0.4033525", "0.40321743", "0.40299588", "0.40114665", "0.39999455", "0.3994744", "0.39895117", "0.39810702", "0.3979511", "0.3971303", "0.39705828", "0.39614448", "0.39552417", "0.39426532", "0.39251998", "0.39246887", "0.39089414", "0.39072466", "0.38986903", "0.3897921", "0.38972726", "0.38921484", "0.3887821", "0.38829768", "0.38818434", "0.388004", "0.3868088", "0.38671535", "0.38666996", "0.38550657", "0.38492125", "0.38474986", "0.38473314", "0.38416478", "0.38361084", "0.38271675", "0.38265663", "0.3824979", "0.38223287", "0.38203183", "0.38157088", "0.38136753", "0.3811066", "0.38092574", "0.38067868", "0.38039896", "0.38018274", "0.37984595", "0.3798248", "0.3797599", "0.37907588", "0.37893006", "0.37862143", "0.3786036", "0.37843463", "0.37784663", "0.37783846", "0.37571287" ]
0.7633684
0
ServeDNS implements the plugin.Handler interface.
func (d DNS64) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { drr := &ResponseWriter{d, w} return d.Next.ServeDNS(ctx, drr, r) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *DNSHandler) ServeDNS(dc *ctx.Context) {\n\tw, req := dc.DNSWriter, dc.DNSRequest\n\n\tmsg := h.handle(w.Proto(), req)\n\n\tw.WriteMsg(msg)\n}", "func (e *Delay) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\n\t// Debug log that we've have seen the query. This will only be shown when the debug plugin is loaded.\n\tlog.Debug(\"Received response\")\n\n\t// Pause execution for configured interval\n\ttime.Sleep(e.Delay * time.Millisecond)\n\n\t// Call next plugin (if any).\n\treturn plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)\n}", "func (h Dnstap) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\trw := &ResponseWriter{\n\t\tResponseWriter: w,\n\t\tDnstap: h,\n\t\tquery: r,\n\t\tctx: ctx,\n\t\tqueryTime: time.Now(),\n\t}\n\n\t// The query tap message should be sent before sending the query to the\n\t// forwarder. Otherwise, the tap messages will come out out of order.\n\th.tapQuery(ctx, w, r, rw.queryTime)\n\n\treturn plugin.NextOrFailure(h.Name(), h.Next, ctx, rw, r)\n}", "func (v *View) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\treturn plugin.NextOrFailure(v.Name(), v.Next, ctx, w, r)\n}", "func (p ipecho) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tif p.echoIP(w, r) {\n\t\treturn dns.RcodeSuccess, nil\n\t}\n\treturn plugin.NextOrFailure(p.Name(), p.Next, ctx, w, r)\n}", "func (g GeoIP) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\treturn plugin.NextOrFailure(pluginName, g.Next, ctx, w, r)\n}", "func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {\n\tvar h Handler\n\tif len(req.Question) >= 1 { // allow more than one question\n\t\th = mux.match(req.Question[0].Name, req.Question[0].Qtype)\n\t}\n\n\tif h != nil {\n\t\th.ServeDNS(w, req)\n\t} else {\n\t\thandleRefused(w, req)\n\t}\n}", "func (p *unifinames) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) 
(int, error) {\n\tif !p.haveRoutine.Load() {\n\t\tp.haveRoutine.Store(true)\n\t\tgo func() {\n\t\t\tupdate := func() {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tif p.Config.Debug {\n\t\t\t\t\tlog.Println(\"[unifi-names] updating clients\")\n\t\t\t\t}\n\t\t\t\tif err := p.getClients(context.Background()); err != nil {\n\t\t\t\t\tp.mu.Unlock()\n\t\t\t\t\tlog.Printf(\"[unifi-names] unable to get clients: %v\\n\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tp.mu.Unlock()\n\t\t\t\tlog.Printf(\"[unifi-names] got %d hosts\", len(p.aClients)+len(p.aaaaClients))\n\t\t\t\tp.lastUpdate = time.Now()\n\t\t\t}\n\t\t\tupdate()\n\t\t\tt := time.NewTicker(time.Duration(p.Config.TTL) * time.Second)\n\t\t\tfor range t.C {\n\t\t\t\tupdate()\n\t\t\t}\n\t\t}()\n\t}\n\tif p.resolve(w, r) {\n\t\treturn dns.RcodeSuccess, nil\n\t}\n\treturn plugin.NextOrFailure(p.Name(), p.Next, ctx, w, r)\n}", "func (c Chaos) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\tif state.QClass() != dns.ClassCHAOS || state.QType() != dns.TypeTXT {\n\t\treturn plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r)\n\t}\n\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\n\thdr := dns.RR_Header{Name: state.QName(), Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}\n\tswitch state.Name() {\n\tdefault:\n\t\treturn plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r)\n\tcase \"authors.bind.\":\n\t\trnd := rand.New(rand.NewSource(time.Now().Unix()))\n\n\t\tfor _, i := range rnd.Perm(len(c.Authors)) {\n\t\t\tm.Answer = append(m.Answer, &dns.TXT{Hdr: hdr, Txt: []string{c.Authors[i]}})\n\t\t}\n\tcase \"version.bind.\", \"version.server.\":\n\t\tm.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{c.Version}}}\n\tcase \"hostname.bind.\", \"id.server.\":\n\t\thostname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\thostname = \"localhost\"\n\t\t}\n\t\tm.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{trim(hostname)}}}\n\t}\n\tw.WriteMsg(m)\n\treturn 0, nil\n}", "func (e 
*External) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\n\tzone := plugin.Zones(e.Zones).Matches(state.Name())\n\tif zone == \"\" {\n\t\treturn plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)\n\t}\n\n\tstate.Zone = zone\n\tfor _, z := range e.Zones {\n\t\t// TODO(miek): save this in the External struct.\n\t\tif state.Name() == z { // apex query\n\t\t\tret, err := e.serveApex(state)\n\t\t\treturn ret, err\n\t\t}\n\t\tif dns.IsSubDomain(e.apex+\".\"+z, state.Name()) {\n\t\t\t// dns subdomain test for ns. and dns. queries\n\t\t\tret, err := e.serveSubApex(state)\n\t\t\treturn ret, err\n\t\t}\n\t}\n\n\tsvc, rcode := e.externalFunc(state, e.headless)\n\n\tm := new(dns.Msg)\n\tm.SetReply(state.Req)\n\tm.Authoritative = true\n\n\tif len(svc) == 0 {\n\t\tif e.Fall.Through(state.Name()) && rcode == dns.RcodeNameError {\n\t\t\treturn plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)\n\t\t}\n\n\t\tm.Rcode = rcode\n\t\tm.Ns = []dns.RR{e.soa(state)}\n\t\tw.WriteMsg(m)\n\t\treturn 0, nil\n\t}\n\n\tswitch state.QType() {\n\tcase dns.TypeA:\n\t\tm.Answer, m.Truncated = e.a(ctx, svc, state)\n\tcase dns.TypeAAAA:\n\t\tm.Answer, m.Truncated = e.aaaa(ctx, svc, state)\n\tcase dns.TypeSRV:\n\t\tm.Answer, m.Extra = e.srv(ctx, svc, state)\n\tcase dns.TypePTR:\n\t\tm.Answer = e.ptr(svc, state)\n\tdefault:\n\t\tm.Ns = []dns.RR{e.soa(state)}\n\t}\n\n\t// If we did have records, but queried for the wrong qtype return a nodata response.\n\tif len(m.Answer) == 0 {\n\t\tm.Ns = []dns.RR{e.soa(state)}\n\t}\n\n\tw.WriteMsg(m)\n\treturn 0, nil\n}", "func (s *Server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {\n\tm := new(dns.Msg)\n\tm.SetReply(req)\n\tm.Compress = false\n\n\tswitch req.Opcode {\n\tcase dns.OpcodeQuery:\n\t\tm.Authoritative = true\n\t\ts.parseQuery(m)\n\t}\n\n\terr := w.WriteMsg(m)\n\tif err != nil {\n\t\tlog.Warn().Err(err).Msg(\"failed to write response message\")\n\t}\n}", "func (s *Server) 
ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {\n\t// The default dns.Mux checks the question section size, but we have our\n\t// own mux here. Check if we have a question section. If not drop them here.\n\tif r == nil || len(r.Question) == 0 {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\treturn\n\t}\n\n\tif !s.debug {\n\t\tdefer func() {\n\t\t\t// In case the user doesn't enable error plugin, we still\n\t\t\t// need to make sure that we stay alive up here\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tif s.stacktrace {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\\n%s\", s.Addr, rec, string(debug.Stack()))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\", s.Addr, rec)\n\t\t\t\t}\n\t\t\t\tvars.Panic.Inc()\n\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif !s.classChaos && r.Question[0].Qclass != dns.ClassINET {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\tif m, err := edns.Version(r); err != nil { // Wrong EDNS version, return at once.\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\n\t// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.\n\tw = request.NewScrubWriter(r, w)\n\n\tq := strings.ToLower(r.Question[0].Name)\n\tvar (\n\t\toff int\n\t\tend bool\n\t\tdshandler *Config\n\t)\n\n\tfor {\n\t\tif z, ok := s.zones[q[off:]]; ok {\n\t\t\tfor _, h := range z {\n\t\t\t\tif h.pluginChain == nil { // zone defined, but has not got any plugins\n\t\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif h.metaCollector != nil {\n\t\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t\t}\n\n\t\t\t\t// If all filter funcs pass, use this config.\n\t\t\t\tif passAllFilterFuncs(ctx, 
h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t\t// if there was a view defined for this Config, set the view name in the context\n\t\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t\t}\n\t\t\t\t\tif r.Question[0].Qtype != dns.TypeDS {\n\t\t\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// The type is DS, keep the handler, but keep on searching as maybe we are serving\n\t\t\t\t\t// the parent as well and the DS should be routed to it - this will probably *misroute* DS\n\t\t\t\t\t// queries to a possibly grand parent, but there is no way for us to know at this point\n\t\t\t\t\t// if there is an actual delegation from grandparent -> parent -> zone.\n\t\t\t\t\t// In all fairness: direct DS queries should not be needed.\n\t\t\t\t\tdshandler = h\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toff, end = dns.NextLabel(q, off)\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {\n\t\t// DS request, and we found a zone, use the handler for the query.\n\t\trcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)\n\t\tif !plugin.ClientWrite(rcode) {\n\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t}\n\t\treturn\n\t}\n\n\t// Wildcard match, if we have found nothing try the root zone as a last resort.\n\tif z, ok := s.zones[\".\"]; ok {\n\t\tfor _, h := range z {\n\t\t\tif h.pluginChain == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif h.metaCollector != nil {\n\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t}\n\n\t\t\t// If all filter funcs pass, use this config.\n\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\tif h.ViewName 
!= \"\" {\n\t\t\t\t\t// if there was a view defined for this Config, set the view name in the context\n\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t}\n\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Still here? Error out with REFUSED.\n\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n}", "func (s *server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {\n\tmsg := dns.Msg{}\n\tmsg.SetReply(req)\n\tmsg.Authoritative = true\n\t// Stuff must be in the answer section\n\tfor _, a := range s.query(req) {\n\t\tlog.Info().Msgf(\"%v\\n\", a)\n\t\tmsg.Answer = append(msg.Answer, a)\n\t}\n\n\t_ = w.WriteMsg(&msg)\n}", "func (h errorHandler) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tdefer h.recovery(ctx, w, r)\n\n\trcode, err := h.Next.ServeDNS(ctx, w, r)\n\n\tif err != nil {\n\t\tstate := request.Request{W: w, Req: r}\n\t\terrMsg := fmt.Sprintf(\"%s [ERROR %d %s %s] %v\", time.Now().Format(timeFormat), rcode, state.Name(), state.Type(), err)\n\n\t\tif h.Debug {\n\t\t\t// Write error to response as a txt message instead of to log\n\t\t\tanswer := debugMsg(rcode, r)\n\t\t\ttxt, _ := dns.NewRR(\". 
IN 0 TXT \" + errMsg)\n\t\t\tanswer.Answer = append(answer.Answer, txt)\n\t\t\tstate.SizeAndDo(answer)\n\t\t\tw.WriteMsg(answer)\n\t\t\treturn 0, err\n\t\t}\n\t\th.Log.Println(errMsg)\n\t}\n\n\treturn rcode, err\n}", "func (c *Cache) ServeDNS(ctx context.Context, w MessageWriter, r *Query) {\n\tvar (\n\t\tmiss bool\n\n\t\tnow = time.Now()\n\t)\n\n\tc.mu.RLock()\n\tfor _, q := range r.Questions {\n\t\tif hit := c.lookup(q, w, now); !hit {\n\t\t\tmiss = true\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\tif !miss {\n\t\treturn\n\t}\n\n\tmsg, err := w.Recur(ctx)\n\tif err != nil || msg == nil {\n\t\tw.Status(ServFail)\n\t\treturn\n\t}\n\tif msg.RCode == NoError {\n\t\tc.insert(msg, now)\n\t}\n\twriteMessage(w, msg)\n}", "func (s *Server) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {\n\tc := s.ctx\n\tdefer func() {\n\t\t// Closing the response tells the DNS service to terminate\n\t\tif c.Err() != nil {\n\t\t\t_ = w.Close()\n\t\t}\n\t}()\n\n\tq := &r.Question[0]\n\tatomic.AddInt64(&s.requestCount, 1)\n\n\tanswerString := func(a []dns.RR) string {\n\t\tif a == nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tswitch len(a) {\n\t\tcase 0:\n\t\t\treturn \"EMPTY\"\n\t\tcase 1:\n\t\t\treturn a[0].String()\n\t\tdefault:\n\t\t\treturn fmt.Sprintf(\"%v\", a)\n\t\t}\n\t}\n\n\tqts := dns.TypeToString[q.Qtype]\n\tanswer, err := s.cacheResolve(q)\n\tvar rc int\n\tvar pfx dfs = func() string { return \"\" }\n\tvar txt dfs = func() string { return \"\" }\n\tvar rct dfs = func() string { return dns.RcodeToString[rc] }\n\n\tvar msg *dns.Msg\n\n\tdefer func() {\n\t\tdlog.Debugf(c, \"%s%-6s %s -> %s %s\", pfx, qts, q.Name, rct, txt)\n\t\t_ = w.WriteMsg(msg)\n\t}()\n\n\tif err == nil && answer != nil {\n\t\trc = dns.RcodeSuccess\n\t\tmsg = new(dns.Msg)\n\t\tmsg.SetReply(r)\n\t\tmsg.Answer = answer\n\t\tmsg.Authoritative = true\n\t\t// mac dns seems to fallback if you don't\n\t\t// support recursion, if you have more than a\n\t\t// single dns server, this will prevent us\n\t\t// from intercepting all 
queries\n\t\tmsg.RecursionAvailable = true\n\t\ttxt = func() string { return answerString(msg.Answer) }\n\t\treturn\n\t}\n\n\t// The recursion check query, or queries that end with the cluster domain name, are not dispatched to the\n\t// fallback DNS-server.\n\tif s.fallback == nil || strings.HasPrefix(q.Name, recursionCheck) || strings.HasSuffix(q.Name, s.clusterDomain) {\n\t\tif err == nil {\n\t\t\trc = dns.RcodeNameError\n\t\t} else {\n\t\t\trc = dns.RcodeServerFailure\n\t\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\t\ttxt = func() string { return \"timeout\" }\n\t\t\t} else {\n\t\t\t\ttxt = err.Error\n\t\t\t}\n\t\t}\n\t\tmsg = new(dns.Msg)\n\t\tmsg.SetRcode(r, rc)\n\t\treturn\n\t}\n\n\tpfx = func() string { return fmt.Sprintf(\"(%s) \", s.fallback.RemoteAddr()) }\n\tdc := dns.Client{Net: \"udp\", Timeout: 2 * time.Second}\n\tmsg, _, err = dc.ExchangeWithConn(r, s.fallback)\n\tif err != nil {\n\t\tmsg = new(dns.Msg)\n\t\trc = dns.RcodeServerFailure\n\t\ttxt = err.Error\n\t\tif err, ok := err.(net.Error); ok {\n\t\t\tswitch {\n\t\t\tcase err.Timeout():\n\t\t\t\ttxt = func() string { return \"timeout\" }\n\t\t\tcase err.Temporary():\n\t\t\t\trc = dns.RcodeRefused\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\tmsg.SetRcode(r, rc)\n\t} else {\n\t\trc = msg.Rcode\n\t\ttxt = func() string { return answerString(msg.Answer) }\n\t}\n}", "func (lim Limit) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\n\t// Use a nonwriter to capture the response.\n\tnw := nonwriter.New(w)\n\n\trcode, err := plugin.NextOrFailure(lim.Name(), lim.Next, ctx, nw, r)\n\tif err != nil {\n\t\t// Simply return if there was an error.\n\t\treturn rcode, err\n\t}\n\n\tlim.limit(nw.Msg)\n\n\t// Then write it to the client.\n\tw.WriteMsg(nw.Msg)\n\treturn rcode, err\n}", "func (s *server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {\n m := s.getMsgResource(req)\n\tdefer s.msgPool.EnQueue(m)\n\ttimeNow := time.Now().Local()\n\tq := req.Question[0]\n\tname := 
strings.ToLower(q.Name)\n\ttcp := false\n\tif tcp = isTCPQuery(w); tcp {\n\t\tatomic.AddInt64(&statsRequestCountTcp, 1)\n\t} else {\n\t\tatomic.AddInt64(&statsRequestCountUdp, 1)\n\t}\n\tatomic.AddInt64(&statsRequestCount, 1)\n\n\tglog.V(3).Infof(\"received DNS Request for %q from %q with type %d\", q.Name, w.RemoteAddr(), q.Qtype)\n\n\t// Check cache first.\n\tremoteAddr := w.RemoteAddr().String() //10.8.65.158:42158\n\tremoteIp := strings.Split(remoteAddr, \":\")\n\tm1 := s.rcache.SearchRecordInCache(q, tcp, m.Id, remoteIp[0], timeNow)\n\tif m1 != nil {\n\t\tatomic.AddInt64(&statsRequestCountCached, 1)\n\t\tglog.V(4).Infof(\"cache hit %q: %v\\n \", q.Name, m1)\n\t\ts.checkAndWtiteMsg(w,req,m1,tcp,true)\n\t\tMsgCachePool.EnQueue(m1)\n\t\treturn\n\t}\n\n\tif q.Qclass == dns.ClassCHAOS || q.Qtype == dns.TypePTR {\n\t\tm.SetReply(req)\n\t\tm.SetRcode(req, dns.RcodeServerFailure)\n\t\tif err := w.WriteMsg(m); err != nil {\n\t\t\tglog.Infof(\"failure to return reply %q\", err)\n\t\t}\n\t\treturn\n\t}\n\tatomic.AddInt64(&statsCacheMissResponse, 1)\n\t// cluster domain forward\n\tfor subKey, subVal := range s.subDomainServers {\n\t\tif strings.HasSuffix(name, subKey) {\n\t\t\tresp := s.dnsDomainForward(w, req, subVal,remoteIp[0], timeNow)\n\t\t\tglog.V(4).Infof(\"ServeSubDomainForward %q: %v \\n \", q.Name, resp.Answer)\n\t\t\treturn\n\t\t}\n\t}\n\t// domain local\n\tfor _, domain := range s.dnsDomains {\n\t\tif strings.HasSuffix(name, domain) {\n\t\t\t// find local record and insert to cache\n\t\t\ts.processLocalDomainRecord(w,req,m,remoteIp[0],domain ,timeNow)\n\t\t\treturn\n\t\t}\n\t}\n // ex-domain froward\n\tresp := s.dnsDomainForward(w, req,s.forwardNameServers, remoteIp[0], timeNow)\n\tglog.V(4).Infof(\"ServeDNSForward %q: %v \\n \", q.Name, resp.Answer)\n\treturn\n}", "func (h *dnsHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {\n\n\tmsg := dns.Msg{}\n\tmsg.SetReply(r)\n\tmsg.Authoritative = true\n\n\torigin := r.Question[0].Name\n\n\tdomain := 
h.getDomain(origin)\n\tlog.Info().Msgf(\"Received DNS query for %s: \\n\", domain)\n\n\tconfig, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tc := new(dns.Client)\n\n\tm := new(dns.Msg)\n\tm.SetQuestion(domain, r.Question[0].Qtype)\n\tm.RecursionDesired = true\n\n\tserver := config.Servers[0]\n\tport := config.Port\n\n\tlog.Info().Msgf(\"Exchange message for domain %s to dns server %s:%s\\n\", domain, server, port)\n\n\tres, _, err := c.Exchange(m, net.JoinHostPort(server, port))\n\n\tif res == nil {\n\t\tlog.Error().Msgf(\"*** error: %s\\n\", err.Error())\n\t}\n\n\tif res.Rcode != dns.RcodeSuccess {\n\t\tlog.Error().Msgf(\" *** invalid answer name %s after %d query for %s\\n\", domain, r.Question[0].Qtype, domain)\n\t}\n\n\t// Stuff must be in the answer section\n\tfor _, a := range res.Answer {\n\t\tlog.Info().Msgf(\"%v\\n\", a)\n\t\tmsg.Answer = append(msg.Answer, a)\n\t}\n\n\tw.WriteMsg(&msg)\n}", "func (h *CloudDNS) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\tqname := state.Name()\n\tzName := plugin.Zones(h.zoneNames).Matches(qname)\n\n\tif zName == \"\" {\n\t\treturn plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r)\n\t}\n\n\tz, ok := h.zones[zName]\n\tif !ok || z == nil {\n\n\t\treturn dns.RcodeServerFailure, nil\n\t}\n\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Authoritative, m.RecursionAvailable = true, true\n\tvar result file.Result\n\tfor _, managedZone := range z {\n\t\th.zMu.RLock()\n\t\tm.Answer, m.Ns, m.Extra, result = managedZone.z.Lookup(state, qname)\n\t\th.zMu.RUnlock()\n\t\tif len(m.Answer) != 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(m.Answer) == 0 && h.Fall.Through(qname) {\n\t\treturn plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r)\n\t}\n\n\tswitch result {\n\tcase file.Success:\n\tcase file.NoData:\n\tcase file.NameError:\n\t\tm.Rcode = dns.RcodeNameError\n\tcase file.Delegation:\n\t\tm.Authoritative = false\n\tcase file.ServerFailure:\n\t\treturn 
dns.RcodeServerFailure, nil\n\t}\n\n\tw.WriteMsg(m)\n\treturn dns.RcodeSuccess, nil\n}", "func (e *EDNS) ServeDNS(dc *ctx.Context) {\n\tw, req := dc.DNSWriter, dc.DNSRequest\n\n\tnoedns := req.IsEdns0() == nil\n\n\topt, size, do := dnsutil.SetEdns0(req)\n\tif opt.Version() != 0 {\n\t\topt.SetVersion(0)\n\t\topt.SetExtendedRcode(dns.RcodeBadVers)\n\n\t\tw.WriteMsg(dnsutil.HandleFailed(req, dns.RcodeBadVers, do))\n\n\t\tdc.Abort()\n\t\treturn\n\t}\n\n\tif w.Proto() == \"tcp\" {\n\t\tsize = dns.MaxMsgSize\n\t}\n\n\tdc.DNSWriter = &DNSResponseWriter{ResponseWriter: w, opt: opt, size: size, do: do, noedns: noedns, noad: !req.AuthenticatedData}\n\n\tdc.NextDNS()\n\n\tdc.DNSWriter = w\n}", "func (p *plug) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstart := time.Now()\n\trequests.Inc()\n\tstate := request.Request{W: w, Req: r}\n\tip := state.IP()\n\n\t// capture the written answer\n\trrw := dnstest.NewRecorder(w)\n\trcode, result, err := p.serveDNSInternal(ctx, rrw, r)\n\tif rcode > 0 {\n\t\t// actually send the answer if we have one\n\t\tanswer := new(dns.Msg)\n\t\tanswer.SetRcode(r, rcode)\n\t\tstate.SizeAndDo(answer)\n\t\terr = w.WriteMsg(answer)\n\t\tif err != nil {\n\t\t\treturn dns.RcodeServerFailure, err\n\t\t}\n\t}\n\n\t// increment counters\n\tswitch {\n\tcase err != nil:\n\t\terrorsTotal.Inc()\n\tcase result.Reason == dnsfilter.FilteredBlackList:\n\t\tfiltered.Inc()\n\t\tfilteredLists.Inc()\n\tcase result.Reason == dnsfilter.FilteredSafeBrowsing:\n\t\tfiltered.Inc()\n\t\tfilteredSafebrowsing.Inc()\n\tcase result.Reason == dnsfilter.FilteredParental:\n\t\tfiltered.Inc()\n\t\tfilteredParental.Inc()\n\tcase result.Reason == dnsfilter.FilteredInvalid:\n\t\tfiltered.Inc()\n\t\tfilteredInvalid.Inc()\n\tcase result.Reason == dnsfilter.FilteredSafeSearch:\n\t\t// the request was passsed through but not filtered, don't increment filtered\n\t\tsafesearch.Inc()\n\tcase result.Reason == 
dnsfilter.NotFilteredWhiteList:\n\t\twhitelisted.Inc()\n\tcase result.Reason == dnsfilter.NotFilteredNotFound:\n\t\t// do nothing\n\tcase result.Reason == dnsfilter.NotFilteredError:\n\t\ttext := \"SHOULD NOT HAPPEN: got DNSFILTER_NOTFILTERED_ERROR without err != nil!\"\n\t\tlog.Println(text)\n\t\terr = errors.New(text)\n\t\trcode = dns.RcodeServerFailure\n\t}\n\n\t// log\n\telapsed := time.Since(start)\n\telapsedTime.Observe(elapsed.Seconds())\n\tif p.settings.QueryLogEnabled {\n\t\tlogRequest(r, rrw.Msg, result, time.Since(start), ip)\n\t}\n\treturn rcode, err\n}", "func (s *Server) ServeDNS(w dns.ResponseWriter, m *dns.Msg) {\n\treply := new(dns.Msg)\n\n\tif m.MsgHdr.Opcode != dns.OpcodeQuery {\n\t\treply.SetRcode(m, dns.RcodeRefused)\n\t\tif err := w.WriteMsg(reply); err != nil {\n\t\t\ts.Log.Printf(\"WriteMsg: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\treply.SetReply(m)\n\treply.RecursionAvailable = true\n\tif s.Authoritative {\n\t\treply.Authoritative = true\n\t\treply.RecursionAvailable = false\n\t}\n\n\tq := m.Question[0]\n\n\tqname := strings.ToLower(dns.Fqdn(q.Name))\n\n\tif q.Qclass != dns.ClassINET {\n\t\treply.SetRcode(m, dns.RcodeNotImplemented)\n\t\tif err := w.WriteMsg(reply); err != nil {\n\t\t\ts.Log.Printf(\"WriteMsg: %v\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tqnameZone, ok := s.r.Zones[qname]\n\tif !ok {\n\t\ts.writeErr(w, reply, notFound(qname))\n\t\treturn\n\t}\n\n\t// This does the lookup twice (including lookup* below).\n\t// TODO: Avoid this.\n\tad, rname, _, err := s.r.targetZone(qname)\n\tif err != nil {\n\t\ts.writeErr(w, reply, err)\n\t\treturn\n\t}\n\treply.AuthenticatedData = ad\n\n\tif rname != qname {\n\t\treply.Answer = append(reply.Answer, mkCname(qname, rname))\n\t}\n\n\tswitch q.Qtype {\n\tcase dns.TypeA:\n\t\t_, addrs, err := s.r.lookupA(context.Background(), qname)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tparsed := net.ParseIP(addr)\n\t\t\tif parsed == nil 
{\n\t\t\t\tpanic(\"ServeDNS: malformed IP in records\")\n\t\t\t}\n\t\t\treply.Answer = append(reply.Answer, &dns.A{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeA,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tA: parsed,\n\t\t\t})\n\t\t}\n\tcase dns.TypeAAAA:\n\t\t_, addrs, err := s.r.lookupAAAA(context.Background(), q.Name)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tparsed := net.ParseIP(addr)\n\t\t\tif parsed == nil {\n\t\t\t\tpanic(\"ServeDNS: malformed IP in records\")\n\t\t\t}\n\t\t\treply.Answer = append(reply.Answer, &dns.AAAA{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeAAAA,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tAAAA: parsed,\n\t\t\t})\n\t\t}\n\tcase dns.TypeMX:\n\t\t_, mxs, err := s.r.lookupMX(context.Background(), q.Name)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, mx := range mxs {\n\t\t\treply.Answer = append(reply.Answer, &dns.MX{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeMX,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tPreference: mx.Pref,\n\t\t\t\tMx: mx.Host,\n\t\t\t})\n\t\t}\n\tcase dns.TypeNS:\n\t\tcname, nss, err := s.r.lookupNS(context.Background(), q.Name)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tif cname != \"\" {\n\t\t\treply.Answer = append(reply.Answer, mkCname(q.Name, cname))\n\t\t}\n\t\tfor _, ns := range nss {\n\t\t\treply.Answer = append(reply.Answer, &dns.NS{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeNS,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tNs: ns.Host,\n\t\t\t})\n\t\t}\n\tcase dns.TypeSRV:\n\t\t_, srvs, err := s.r.lookupSRV(context.Background(), q.Name)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, 
reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, srv := range srvs {\n\t\t\treply.Answer = append(reply.Answer, &dns.SRV{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeSRV,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tPriority: srv.Priority,\n\t\t\t\tPort: srv.Port,\n\t\t\t\tTarget: srv.Target,\n\t\t\t})\n\t\t}\n\tcase dns.TypeCNAME:\n\t\treply.AuthenticatedData = qnameZone.AD\n\tcase dns.TypeTXT:\n\t\t_, txts, err := s.r.lookupTXT(context.Background(), q.Name)\n\t\tif err != nil {\n\t\t\ts.writeErr(w, reply, err)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, txt := range txts {\n\t\t\treply.Answer = append(reply.Answer, &dns.TXT{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypeTXT,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tTxt: splitTXT(txt),\n\t\t\t})\n\t\t}\n\tcase dns.TypePTR:\n\t\trzone, ok := s.r.Zones[q.Name]\n\t\tif !ok {\n\t\t\ts.writeErr(w, reply, notFound(q.Name))\n\t\t\treturn\n\t\t}\n\n\t\tfor _, name := range rzone.PTR {\n\t\t\treply.Answer = append(reply.Answer, &dns.PTR{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: rname,\n\t\t\t\t\tRrtype: dns.TypePTR,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tPtr: name,\n\t\t\t})\n\t\t}\n\tcase dns.TypeSOA:\n\t\treply.Answer = []dns.RR{\n\t\t\t&dns.SOA{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: q.Name,\n\t\t\t\t\tRrtype: dns.TypeSOA,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 9999,\n\t\t\t\t},\n\t\t\t\tNs: \"localhost.\",\n\t\t\t\tMbox: \"hostmaster.localhost.\",\n\t\t\t\tSerial: 1,\n\t\t\t\tRefresh: 900,\n\t\t\t\tRetry: 900,\n\t\t\t\tExpire: 1800,\n\t\t\t\tMinttl: 60,\n\t\t\t},\n\t\t}\n\tdefault:\n\t\trzone, ok := s.r.Zones[q.Name]\n\t\tif !ok {\n\t\t\ts.writeErr(w, reply, notFound(q.Name))\n\t\t\treturn\n\t\t}\n\n\t\treply.Answer = append(reply.Answer, rzone.Misc[dns.Type(q.Qtype)]...)\n\t}\n\n\ts.Log.Printf(\"DNS TRACE %v\", reply.String())\n\n\tif 
err := w.WriteMsg(reply); err != nil {\n\t\ts.Log.Printf(\"WriteMsg: %v\", err)\n\t}\n}", "func (re *Records) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\tqname := state.Name()\n\tzone := plugin.Zones(re.origins).Matches(qname)\n\tif zone == \"\" {\n\t\treturn plugin.NextOrFailure(re.Name(), re.Next, ctx, w, r)\n\t}\n\n\t// New we should have some data for this zone, as we just have a list of RR, iterate through them, find the qname\n\t// and see if the qtype exists. If so reply, if not do the normal DNS thing and return either NXDOMAIN or NODATA.\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Authoritative = true\n\n\tnxdomain := true\n\tvar soa dns.RR\n\tfor _, r := range re.m[zone] {\n\t\tif r.Header().Rrtype == dns.TypeSOA && soa == nil {\n\t\t\tsoa = r\n\t\t}\n\t\tif r.Header().Name == qname {\n\t\t\tnxdomain = false\n\t\t\tif r.Header().Rrtype == state.QType() {\n\t\t\t\tm.Answer = append(m.Answer, r)\n\t\t\t}\n\t\t}\n\t}\n\n\t// handle NXDOMAIN, NODATA and normal response here.\n\tif nxdomain {\n\t\tm.Rcode = dns.RcodeNameError\n\t\tif soa != nil {\n\t\t\tm.Ns = []dns.RR{soa}\n\t\t}\n\t\tw.WriteMsg(m)\n\t\treturn dns.RcodeSuccess, nil\n\t}\n\n\tif len(m.Answer) == 0 {\n\t\tif soa != nil {\n\t\t\tm.Ns = []dns.RR{soa}\n\t\t}\n\t}\n\n\tw.WriteMsg(m)\n\treturn dns.RcodeSuccess, nil\n}", "func (rl *RateLimit) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\n\tif state.Proto() == \"tcp\" {\n\t\t// No ratelimit is applied for TCP clients,\n\t\t// pass the request to the next plugin.\n\t\treturn plugin.NextOrFailure(rl.Name(), rl.Next, ctx, w, r)\n\t}\n\n\tallow, err := rl.check(state.IP())\n\tif err != nil {\n\t\treturn dns.RcodeServerFailure, err\n\t}\n\n\tif allow {\n\t\treturn plugin.NextOrFailure(rl.Name(), rl.Next, ctx, w, r)\n\t}\n\n\tDropCount.WithLabelValues(metrics.WithServer(ctx)).Inc()\n\treturn 
dns.RcodeRefused, nil\n}", "func (e ENS) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\n\ta := new(dns.Msg)\n\ta.SetReply(r)\n\ta.Compress = true\n\ta.Authoritative = true\n\tvar result Result\n\ta.Answer, a.Ns, a.Extra, result = Lookup(e, state)\n\tswitch result {\n\tcase Success:\n\t\tstate.SizeAndDo(a)\n\t\tw.WriteMsg(a)\n\t\treturn dns.RcodeSuccess, nil\n\tcase NoData:\n\t\tif e.Next == nil {\n\t\t\tstate.SizeAndDo(a)\n\t\t\tw.WriteMsg(a)\n\t\t\treturn dns.RcodeSuccess, nil\n\t\t}\n\t\treturn plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)\n\tcase NameError:\n\t\ta.Rcode = dns.RcodeNameError\n\tcase ServerFailure:\n\t\treturn dns.RcodeServerFailure, nil\n\t}\n\t// Unknown result...\n\treturn dns.RcodeServerFailure, nil\n\n}", "func (ipr *Ipref) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstate := request.Request{W: w, Req: r}\n\n\tif !ipr.match(state) {\n\t\treturn plugin.NextOrFailure(ipr.Name(), ipr.Next, ctx, w, r)\n\t}\n\n\tvar res *unbound.Result\n\tvar err error\n\n\tswitch {\n\n\tcase state.QClass() == dns.ClassINET && (state.QType() == dns.TypeA || state.QType() == dns.TypeAAAA):\n\n\t\tif res, err = ipr.resolve_aa(state); err == nil { // try AA first\n\t\t\tbreak\n\t\t}\n\n\t\tfallthrough\n\n\tdefault:\n\n\t\tswitch state.Proto() {\n\t\tcase \"tcp\":\n\t\t\tres, err = ipr.t.Resolve(state.QName(), state.QType(), state.QClass())\n\t\tcase \"udp\":\n\t\t\tres, err = ipr.u.Resolve(state.QName(), state.QType(), state.QClass())\n\t\t}\n\t}\n\n\trcode := dns.RcodeServerFailure\n\tif err == nil {\n\t\trcode = res.AnswerPacket.Rcode\n\t}\n\trc, ok := dns.RcodeToString[rcode]\n\tif !ok {\n\t\trc = strconv.Itoa(rcode)\n\t}\n\n\tserver := metrics.WithServer(ctx)\n\tRcodeCount.WithLabelValues(server, rc).Add(1)\n\tRequestDuration.WithLabelValues(server).Observe(res.Rtt.Seconds())\n\n\tif err != nil {\n\t\treturn dns.RcodeServerFailure, 
err\n\t}\n\n\t// If the client *didn't* set the opt record, and specifically not the DO bit,\n\t// strip this from the reply (unbound default to setting DO).\n\tif !state.Do() {\n\t\t// technically we can still set bufsize and fluff, for now remove the entire OPT record.\n\t\tfor i := 0; i < len(res.AnswerPacket.Extra); i++ {\n\t\t\trr := res.AnswerPacket.Extra[i]\n\t\t\tif _, ok := rr.(*dns.OPT); ok {\n\t\t\t\tres.AnswerPacket.Extra = append(res.AnswerPacket.Extra[:i], res.AnswerPacket.Extra[i+1:]...)\n\t\t\t\tbreak // TODO(miek): more than one? Think TSIG?\n\t\t\t}\n\t\t}\n\t\tfilter(res.AnswerPacket, dnssec)\n\t}\n\n\tres.AnswerPacket.Id = r.Id\n\n\t// If the advertised size of the client is smaller than we got, unbound either retried with TCP or something else happened.\n\tif state.Size() < res.AnswerPacket.Len() {\n\t\tres.AnswerPacket = state.Scrub(res.AnswerPacket)\n\t\tres.AnswerPacket.Truncated = true\n\t\tw.WriteMsg(res.AnswerPacket)\n\n\t\treturn 0, nil\n\t}\n\n\tstate.SizeAndDo(res.AnswerPacket)\n\tw.WriteMsg(res.AnswerPacket)\n\n\treturn 0, nil\n}", "func (s *DnsServer) Run() error {\n\tmux := dns.NewServeMux()\n\tfor _, domain := range s.Domains {\n\t\tmux.HandleFunc(dns.Fqdn(domain), s.HandleIncoming)\n\t}\n\ts.dnsServer = &dns.Server{Handler: mux}\n\ts.dnsServer.Net = \"udp\"\n\tif s.Flags.Listener != nil {\n\t\ts.dnsServer.Listener = s.Flags.Listener\n\t\ts.dnsServer.Addr = s.Flags.Listener.Addr().String()\n\t} else {\n\t\tl, err := net.Listen(net.JoinHostPort(s.host, strconv.Itoa(s.Port)), \"tcp\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.dnsServer.Listener = l\n\t}\n\tif s.Port != 0 {\n\t\ts.dnsServer.Addr = net.JoinHostPort(s.host, strconv.Itoa(s.Port))\n\t}\n\tgo s.HandleControllers()\n\ts.Logger.Infof(\"Serving Dns on %s for domains %v\", s.dnsServer.Addr, s.Domains )\n\treturn s.dnsServer.ListenAndServe()\n}", "func DnsHandler(ctx *gin.Context) {\n\tvar r resources.DnsRequest\n\t// Trying to unmarshal the incoming POST request 
body to DnsRequest struct\n\t// It also validates the Json\n\tif err := ctx.ShouldBindJSON(&r); err != nil {\n\t\tctx.JSON(http.StatusBadRequest, resources.ErrorResponse{Message: \"bad request body\"})\n\t\treturn\n\t}\n\n\t// Calculate the location with given coordinates and velocity\n\tl := services.FindLocation(*r.X, *r.Y, *r.Z, *r.Vel)\n\n\t// Create the response object and returns it as json with 200 status code\n\tres := resources.DnsResponse{Loc: l}\n\tctx.JSON(http.StatusOK, res)\n}", "func (s *server) ServeDNSForward(w dns.ResponseWriter, req *dns.Msg) *dns.Msg {\n\tif s.config.NoRec {\n\t\tm := s.ServerFailure(req)\n\t\tw.WriteMsg(m)\n\t\treturn m\n\t}\n\n\tif len(s.config.Nameservers) == 0 || dns.CountLabel(req.Question[0].Name) < s.config.Ndots {\n\t\tif s.config.Verbose {\n\t\t\tif len(s.config.Nameservers) == 0 {\n\t\t\t\tlogf(\"can not forward, no nameservers defined\")\n\t\t\t} else {\n\t\t\t\tlogf(\"can not forward, name too short (less than %d labels): `%s'\", s.config.Ndots, req.Question[0].Name)\n\t\t\t}\n\t\t}\n\t\tm := s.ServerFailure(req)\n\t\tm.RecursionAvailable = true // this is still true\n\t\tw.WriteMsg(m)\n\t\treturn m\n\t}\n\n\tvar (\n\t\tr *dns.Msg\n\t\terr error\n\t)\n\n\tnsid := s.randomNameserverID(req.Id)\n\ttry := 0\nRedo:\n\tif isTCP(w) {\n\t\tr, err = exchangeWithRetry(s.dnsTCPclient, req, s.config.Nameservers[nsid])\n\t} else {\n\t\tr, err = exchangeWithRetry(s.dnsUDPclient, req, s.config.Nameservers[nsid])\n\t}\n\tif err == nil {\n\t\tr.Compress = true\n\t\tr.Id = req.Id\n\t\tw.WriteMsg(r)\n\t\treturn r\n\t}\n\t// Seen an error, this can only mean, \"server not reached\", try again\n\t// but only if we have not exausted our nameservers.\n\tif try < len(s.config.Nameservers) {\n\t\ttry++\n\t\tnsid = (nsid + 1) % len(s.config.Nameservers)\n\t\tgoto Redo\n\t}\n\n\tlogf(\"failure to forward request %q\", err)\n\tm := s.ServerFailure(req)\n\treturn m\n}", "func (this Server) ServeHTTP(w http.ResponseWriter, r *http.Request) 
{\n\tTransProString := r.Header.Get(\"X-Proxy-DNS-Transport\")\n\tif TransProString == \"tcp\" {\n\t\tthis.TransPro = TCPcode\n\t} else if TransProString == \"udp\" {\n\t\tthis.TransPro = UDPcode\n\t} else {\n\t\t_D(\"Transport protol not udp or tcp\")\n\t\thttp.Error(w, \"unknown transport protocol\", 415)\n\t\treturn\n\t}\n\tcontentTypeStr := r.Header.Get(\"Content-Type\")\n\tif contentTypeStr != \"application/X-DNSoverHTTP\" {\n\t\t_D(\"Content-Type illegal\")\n\t\thttp.Error(w, \"unknown content type\", 415)\n\t\treturn\n\t}\n\tvar requestBody []byte\n\trequestBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"error in reading request\", 400)\n\t\t_D(\"error in reading HTTP request, error message: %s\", err)\n\t\treturn\n\t}\n\tif len(requestBody) < (int)(r.ContentLength) {\n\t\thttp.Error(w, \"error in reading request\", 400)\n\t\t_D(\"fail to read all HTTP content\")\n\t\treturn\n\t}\n\tvar dnsRequest dns.Msg\n\terr = dnsRequest.Unpack(requestBody)\n\tif err != nil {\n\t\thttp.Error(w, \"bad DNS request\", 400)\n\t\t_D(\"error in packing HTTP response to DNS, error message: %s\", err)\n\t\treturn\n\t}\n\tdnsClient := new(dns.Client)\n\tif dnsClient == nil {\n\t\thttp.Error(w, \"Server Error\", 500)\n\t\t_D(\"cannot create DNS client\")\n\t\treturn\n\t}\n\tdnsClient.ReadTimeout = this.timeout\n\tdnsClient.WriteTimeout = this.timeout\n\tdnsClient.Net = TransProString\n\t//will use a parameter to let user address resolver in future\n\tdnsResponse, RTT, err := dnsClient.Exchange(&dnsRequest, this.SERVERS[rand.Intn(len(this.SERVERS))])\n\t//dnsResponse, RTT, err := dnsClient.Exchange(&dnsRequest, this.SERVERS[0])\n\tif err != nil {\n\t\t_D(\"error in communicate with resolver, error message: %s\", err)\n\t\thttp.Error(w, \"Server Error\", 500)\n\t\treturn\n\t} else {\n\t\t_D(\"request took %s\", RTT)\n\t}\n\tif dnsResponse == nil {\n\t\t_D(\"no response back\")\n\t\thttp.Error(w, \"Server Error:No Recursive response\", 
500)\n\t\treturn\n\t}\n\tresponse_bytes, err := dnsResponse.Pack()\n\tif err != nil {\n\t\thttp.Error(w, \"error packing reply\", 500)\n\t\t_D(\"error in packing request, error message: %s\", err)\n\t\treturn\n\t}\n\t_, err = w.Write(response_bytes)\n\tif err != nil {\n\t\t_D(\"Can not write response rightly, error message: %s\", err)\n\t\treturn\n\t}\n\t//don't know how to creat a response here\n}", "func (f HandlerQueryFunc) QueryDNS(w RequestWriter, r *Msg) {\n\tgo f(w, r)\n}", "func OptDNS(ip ...net.IP) Option {\n\treturn &optDNS{NameServers: ip}\n}", "func (h *Handler) serveDBUser(w http.ResponseWriter, r *http.Request) {}", "func runDNSServer() {\n\n\t// load the blocked domains\n\tblacklist := LoadBlacklistOrFail(blacklistPath)\n\tfmt.Printf(\"Loading list of %d blocked domains...\\n\", blacklist.Size())\n\n\t// make the custom handler function to reply to DNS queries\n\tupstream := getEnvOrDefault(\"UPSTREAM_DNS\", \"1.1.1.1:53\")\n\tlogging := getEnvOrDefault(\"DEBUG\", \"\") == \"true\"\n\thandler := makeDNSHandler(blacklist, upstream, logging)\n\n\t// start the server\n\tport := getEnvOrDefault(\"DNS_PORT\", \"53\")\n\tfmt.Printf(\"Starting DNS server on UDP port %s (logging = %t)...\\n\", port, logging)\n\tserver := &dns.Server{Addr: \":\" + port, Net: \"udp\"}\n\tdns.HandleFunc(\".\", handler)\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func makeDNSHandler(blacklist *Blacklist, upstream string, logging bool) func(dns.ResponseWriter, *dns.Msg) {\n\n\t// create the logger functions\n\tlogger := func(res *dns.Msg, duration time.Duration, how string) {}\n\terrorLogger := func(err error, description string) {\n\t\tlog.Print(description, err)\n\t}\n\tif logging {\n\t\tlogger = func(msg *dns.Msg, rtt time.Duration, how string) {\n\t\t\tlog.Printf(\"Using %s, response time %s:\\n%s\\n\", how, rtt.String(), msg.String())\n\t\t}\n\t\terrorLogger = func(err error, description string) {\n\n\t\t}\n\t}\n\n\t// cache for the 
DNS replies from the DNS server\n\tcache := NewCache()\n\n\t// we use a single client to resolve queries against the upstream DNS\n\tclient := new(dns.Client)\n\n\t// create the real handler\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\tstart := time.Now()\n\n\t\t// the standard allows multiple DNS questions in a single query... but nobody uses it, so we disallow it\n\t\t// https://stackoverflow.com/questions/4082081/requesting-a-and-aaaa-records-in-single-dns-query/4083071\n\t\tif len(req.Question) != 1 {\n\n\t\t\t// reply with a format error\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, dns.RcodeFormatError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// collect metrics\n\t\t\tduration := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"malformed_query\", \"-\").Observe(duration)\n\n\t\t\treturn\n\t\t}\n\n\t\t// extract the DNS question\n\t\tquery := req.Question[0]\n\t\tdomain := strings.TrimRight(query.Name, \".\")\n\t\tqueryType := dns.TypeToString[query.Qtype]\n\n\t\t// check the cache first: if a domain is in the cache, it cannot be blocked\n\t\t// this optimized response times for allowed domains over the blocked domains\n\t\tcached, found := cache.Get(&query)\n\t\tif found {\n\n\t\t\t// cache found, use the cached answer\n\t\t\tres := cached.SetReply(req)\n\t\t\tres.Answer = cached.Answer\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"cache\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"cache\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// then, check if the domain is blocked\n\t\tblocked := blacklist.Contains(domain)\n\t\tif blocked 
{\n\n\t\t\t// reply with \"domain not found\"\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, dns.RcodeNameError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"block\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"block\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// finally, query an upstream DNS\n\t\tres, rtt, err := client.Exchange(req, upstream)\n\t\tif err == nil {\n\n\t\t\t// reply to the query\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// cache the result if any\n\t\t\tif len(res.Answer) > 0 {\n\t\t\t\texpiration := time.Duration(res.Answer[0].Header().Ttl) * time.Second\n\t\t\t\tcache.Set(&query, res, expiration)\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tlogger(res, rtt, \"upstream\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream\", queryType).Observe(durationSeconds)\n\n\t\t} else {\n\n\t\t\t// log the error\n\t\t\terrorLogger(err, \"Error in resolve query against upstream DNS \"+upstream)\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream_error\", queryType).Observe(durationSeconds)\n\t\t}\n\t}\n}", "func (s *server) ServeDNSReverse(w dns.ResponseWriter, req *dns.Msg) *dns.Msg {\n\tm := new(dns.Msg)\n\tm.SetReply(req)\n\tm.Compress = true\n\tm.Authoritative = false // Set to false, because I don't know what to do wrt DNSSEC.\n\tm.RecursionAvailable = true\n\tvar err error\n\tif m.Answer, err = s.PTRRecords(req.Question[0]); err == nil {\n\t\t// TODO(miek): Reverse DNSSEC. 
We should sign this, but requires a key....and more\n\t\t// Probably not worth the hassle?\n\t\tif err := w.WriteMsg(m); err != nil {\n\t\t\tlogf(\"failure to return reply %q\", err)\n\t\t}\n\t\treturn m\n\t}\n\t// Always forward if not found locally.\n\treturn s.ServeDNSForward(w, req)\n}", "func startDNS() {\n\t// init meta client\n\tmetaClient = client.New()\n\t// get dns listen ip\n\tlip, err := common.GetInterfaceIP(ifi)\n\tif err != nil {\n\t\tklog.Errorf(\"[EdgeMesh] get dns listen ip err: %v\", err)\n\t\treturn\n\t}\n\n\tladdr := &net.UDPAddr{\n\t\tIP: lip,\n\t\tPort: 53,\n\t}\n\tudpConn, err := net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tklog.Errorf(\"[EdgeMesh] dns server listen on %v error: %v\", laddr, err)\n\t\treturn\n\t}\n\tdefer udpConn.Close()\n\tdnsConn = udpConn\n\tfor {\n\t\treq := make([]byte, bufSize)\n\t\tn, from, err := dnsConn.ReadFromUDP(req)\n\t\tif err != nil || n <= 0 {\n\t\t\tklog.Errorf(\"[EdgeMesh] dns server read from udp error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tque, err := parseDNSQuery(req[:n])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tque.from = from\n\n\t\trsp := make([]byte, 0)\n\t\trsp, err = recordHandle(que, req[:n])\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"[EdgeMesh] failed to resolve dns: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif _, err = dnsConn.WriteTo(rsp, from); err != nil {\n\t\t\tklog.Warningf(\"[EdgeMesh] failed to write: %v\", err)\n\t\t}\n\t}\n}", "func HandleDNS(w dns.ResponseWriter, r *dns.Msg) {\n\n\t/* Response packet */\n\tm := new(dns.Msg)\n\n\tdefer func() {\n\t\tm.SetReply(r)\n\t\tm.MsgHdr.Authoritative = true\n\t\tw.WriteMsg(m)\n\t}()\n\n\t/* If there's not one question in the packet, it's not for us */\n\tif 1 != len(r.Question) {\n\t\tm = m.SetRcode(r, dns.RcodeNameError)\n\t\treturn\n\t}\n\tq := r.Question[0]\n\tq.Name = strings.ToLower(q.Name)\n\n\t/* If the question's for the A record of the bare domain, return it. 
*/\n\tif DOMAIN == q.Name {\n\t\tif dns.TypeA == q.Qtype && nil != AREC {\n\t\t\tm.Answer = append(m.Answer, AREC)\n\t\t}\n\t\treturn\n\t}\n\n\t/* We can really only process one of these at once */\n\tdnsCacheLock.Lock()\n\tdefer dnsCacheLock.Unlock()\n\n\t/* If we already have this one, use it again */\n\tif v, ok := dnsCache.Get(q.Name); ok {\n\t\trr, ok := v.(*dns.TXT)\n\t\tif !ok {\n\t\t\tlog.Panicf(\"invalid RR type %T\", v)\n\t\t}\n\t\t/* nil means no tasking */\n\t\tif nil != rr {\n\t\t\tm.Answer = append(m.Answer, rr)\n\t\t}\n\t\treturn\n\t}\n\n\t/* Get interesting parts of request. There should be 4 */\n\tparts := strings.SplitN(dnsutil.TrimDomainName(q.Name, DOMAIN), \".\", 4)\n\tif 4 != len(parts) {\n\t\tm.SetRcode(r, dns.RcodeFormatError)\n\t\treturn\n\t}\n\tvar (\n\t\toutHex = parts[0] /* Output, in hex */\n\t\tcounter = parts[1] /* Cachebuster */\n\t\tmt = parts[2] /* Message Type */\n\t\tid = strings.ToLower(parts[3]) /* Implant ID */\n\t)\n\n\t/* Only TXT records are supported, and only message types t and o */\n\tif !((mt == TASKINGLABEL && dns.TypeTXT == q.Qtype) ||\n\t\tmt == OUTPUTLABEL) ||\n\t\t\"\" == id {\n\t\tm.SetRcode(r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\t/* Make sure we have an expected message type */\n\tswitch mt {\n\tcase OUTPUTLABEL: /* Output, no need to respond with anything */\n\t\tdnsCache.Add(q.Name, (*dns.TXT)(nil))\n\t\tupdateLastSeen(id)\n\t\tgo handleOutput(outHex, id)\n\t\treturn\n\tcase TASKINGLABEL: /* Tasking */\n\t\tbreak /* Handled below */\n\tdefault: /* Not something we expect */\n\t\tlog.Panicf(\"unpossible message type %q\", mt)\n\t}\n\n\t/* Update the last seen time for this implant */\n\tupdateLastSeen(id)\n\n\t/* Send beacon to interested clients */\n\tgo sendBeaconToClients(id, counter)\n\n\t/* Get the next tasking for this implant */\n\tt := GetTasking(id)\n\tif \"\" == t {\n\t\tdnsCache.Add(q.Name, (*dns.TXT)(nil))\n\t\treturn\n\t}\n\t/* Sanitize tasking */\n\ts := strings.Replace(t, \"`\", \"``\", 
-1)\n\ts = strings.Replace(s, `\\`, `\\\\`, -1)\n\tm.Answer = append(m.Answer, &dns.TXT{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: q.Name,\n\t\t\tRrtype: dns.TypeTXT,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: TTL,\n\t\t},\n\t\tTxt: []string{s},\n\t})\n\tdnsCache.Add(q.Name, m.Answer[0])\n\tlog.Printf(\"[ID-%v] TASKING: %s (%s)\", id, t, s)\n}", "func (sc *ServiceConfig) Serve(shutdown <-chan (interface{})) {\n\n\tglog.V(1).Infof(\"starting vhost synching\")\n\t//start getting vhost endpoints\n\tgo sc.syncVhosts(shutdown)\n\n\t// Reverse proxy to the web UI server.\n\tuihandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tuiURL, err := url.Parse(\"http://127.0.0.1:7878\")\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Can't parse UI URL: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tui := httputil.NewSingleHostReverseProxy(uiURL)\n\t\tif ui == nil {\n\t\t\tglog.Errorf(\"Can't proxy UI request: %v\", err)\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tui.ServeHTTP(w, r)\n\t}\n\n\tr := mux.NewRouter()\n\n\tif hnm, err := os.Hostname(); err == nil {\n\t\tsc.hostaliases = append(sc.hostaliases, hnm)\n\t}\n\n\tcmd := exec.Command(\"hostname\", \"--fqdn\")\n\tif hnm, err := cmd.CombinedOutput(); err == nil {\n\t\tsc.hostaliases = append(sc.hostaliases, string(hnm[:len(hnm)-1]))\n\t}\n\n\tdefaultHostAlias = sc.hostaliases[0]\n\n\tfor _, ha := range sc.hostaliases {\n\t\tglog.V(1).Infof(\"Use vhosthandler for: %s\", fmt.Sprintf(\"{subdomain}.%s\", ha))\n\t\tr.HandleFunc(\"/{path:.*}\", sc.vhosthandler).Host(fmt.Sprintf(\"{subdomain}.%s\", ha))\n\t\tr.HandleFunc(\"/\", sc.vhosthandler).Host(fmt.Sprintf(\"{subdomain}.%s\", ha))\n\t}\n\n\tr.HandleFunc(\"/{path:.*}\", uihandler)\n\n\thttp.Handle(\"/\", r)\n\n\t// FIXME: bubble up these errors to the caller\n\tcertfile, err := proxy.TempCertFile()\n\tif err != nil {\n\t\tglog.Fatalf(\"Could not prepare cert.pem file: %s\", 
err)\n\t}\n\tkeyfile, err := proxy.TempKeyFile()\n\tif err != nil {\n\t\tglog.Fatalf(\"Could not prepare key.pem file: %s\", err)\n\t}\n\terr = http.ListenAndServeTLS(sc.bindPort, certfile, keyfile, nil)\n\tif err != nil {\n\t\tglog.Fatalf(\"could not setup webserver: %s\", err)\n\t}\n}", "func hostnameHandler(w http.ResponseWriter, r *http.Request) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"unable to get hostname: %s\", err)\n\t}\n\tfmt.Fprintf(w, \"You are querying host %s\\n\", h)\n}", "func Resolve(q string) (ip net.IP, port uint16, target string, err error) {\n c := new(dns.Client)\n m := new(dns.Msg)\n m.SetQuestion(dns.Fqdn(q), dns.TypeSRV)\n m.RecursionDesired = true\n\n dns_server := \"127.0.0.1:8600\"\n if len(os.Args) > 1 {\n dns_server = os.Args[1]\n }\n fmt.Printf(\"Using dns server: %v\\n\", dns_server)\n\n r, _, err := c.Exchange(m, dns_server)\n if r == nil {\n log.Fatalf(\"error: %s\\n\", err.Error())\n }\n\n if r.Rcode != dns.RcodeSuccess {\n log.Fatalf(\"dns lookup failed\\n\")\n }\n\n for _, srv := range r.Answer {\n port = srv.(*dns.SRV).Port\n target = srv.(*dns.SRV).Target\n\n fmt.Printf(\"%v %v\\n\", port, target)\n\n for _, a := range r.Extra {\n if target != a.(*dns.A).Hdr.Name {\n continue\n }\n ip = a.(*dns.A).A\n fmt.Printf(\"%v %v\\n\", target, ip)\n return\n }\n }\n\n log.Fatalf(\"no DNS record found\\n\")\n return\n}", "func DNS(options ...ServicerFunc) Servicer {\n\ts := &dnsService{}\n\tfor _, o := range options {\n\t\to(s)\n\t}\n\treturn s\n}", "func (r *Riddler) OnDNSRequest(ctx context.Context, req *requests.DNSRequest) {\n\tcfg := ctx.Value(requests.ContextConfig).(*config.Config)\n\tbus := ctx.Value(requests.ContextEventBus).(*eventbus.EventBus)\n\tif cfg == nil || bus == nil {\n\t\treturn\n\t}\n\n\tre := cfg.DomainRegex(req.Domain)\n\tif re == nil {\n\t\treturn\n\t}\n\n\tr.CheckRateLimit()\n\tbus.Publish(requests.SetActiveTopic, r.String())\n\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"Querying %s 
for %s subdomains\", r.String(), req.Domain))\n\n\turl := r.getURL(req.Domain)\n\tpage, err := http.RequestWebPage(url, nil, nil, \"\", \"\")\n\tif err != nil {\n\t\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"%s: %s: %v\", r.String(), url, err))\n\t\treturn\n\t}\n\n\tfor _, name := range re.FindAllString(page, -1) {\n\t\tbus.Publish(requests.NewNameTopic, &requests.DNSRequest{\n\t\t\tName: cleanName(name),\n\t\t\tDomain: req.Domain,\n\t\t\tTag: r.SourceType,\n\t\t\tSource: r.String(),\n\t\t})\n\t}\n}", "func (a *AlienVault) OnDNSRequest(ctx context.Context, req *requests.DNSRequest) {\n\tif !a.System().Config().IsDomainInScope(req.Domain) {\n\t\treturn\n\t}\n\n\tbus := ctx.Value(requests.ContextEventBus).(*eventbus.EventBus)\n\tif bus == nil {\n\t\treturn\n\t}\n\n\ta.CheckRateLimit()\n\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"Querying %s for %s subdomains\", a.String(), req.Domain))\n\ta.executeDNSQuery(ctx, req)\n\n\ta.CheckRateLimit()\n\ta.executeURLQuery(ctx, req)\n}", "func (h hosts) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thost := strings.Split(r.Host, \":\")[0]\n\tparts := strings.Split(host, \".\")\n\tfor len(parts) > 0 {\n\t\tname := strings.Join(parts, \".\")\n\t\tlogr.Debug(name)\n\t\tif e, ok := h[name]; ok && e != nil {\n\t\t\te.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tparts = parts[0 : len(parts)-1]\n\t}\n\n\thttp.Error(w, \"Domain not recognised\", http.StatusNotFound)\n}", "func (s *Server) Serve(l net.Listener) error {\n\ts.m.Lock()\n\n\ts.server[tcp] = &dns.Server{Listener: l,\n\t\tNet: \"tcp\",\n\t\tTsigSecret: s.tsigSecret,\n\t\tMaxTCPQueries: tcpMaxQueries,\n\t\tReadTimeout: s.readTimeout,\n\t\tWriteTimeout: s.writeTimeout,\n\t\tIdleTimeout: func() time.Duration {\n\t\t\treturn s.idleTimeout\n\t\t},\n\t\tHandler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {\n\t\t\tctx := context.WithValue(context.Background(), Key{}, s)\n\t\t\tctx = context.WithValue(ctx, LoopKey{}, 0)\n\t\t\ts.ServeDNS(ctx, w, 
r)\n\t\t})}\n\n\ts.m.Unlock()\n\n\treturn s.server[tcp].ActivateAndServe()\n}", "func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.TLS == nil {\n\t\thttp.Error(w, \"TLS required\", 403)\n\t\treturn\n\t}\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"wrong method; want GET\", 400)\n\t\treturn\n\t}\n\tserverName := r.FormValue(\"servername\")\n\tif !validDelegateServerName(serverName) {\n\t\tlog.Printf(\"autocertdelegate: invalid server name %q\", serverName)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\tif err := s.am.HostPolicy(r.Context(), serverName); err != nil {\n\t\tlog.Printf(\"autocertdelegate: %q denied by configured HostPolicy: %v\", serverName, err)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\n\tswitch r.FormValue(\"mode\") {\n\tdefault:\n\t\thttp.Error(w, \"unknown or missing mode argument\", 400)\n\t\treturn\n\tcase \"getchallenge\":\n\t\tt := time.Now()\n\t\tfmt.Fprintf(w, \"%s/%d/%s\\n\", serverName, t.Unix(), challengeAnswer(s.key, serverName, t))\n\t\treturn\n\tcase \"getcert\":\n\t}\n\n\t// Verify serverName resolves to a local IP.\n\tlookupCtx, cancel := context.WithTimeout(r.Context(), 5*time.Second)\n\tdefer cancel()\n\tvar resolver net.Resolver\n\tresolver.PreferGo = true\n\taddrs, err := resolver.LookupHost(lookupCtx, serverName)\n\tif err != nil {\n\t\tlog.Printf(\"autocertdelegate: lookup %q error: %v\", serverName, err)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\tif len(addrs) != 1 {\n\t\tlog.Printf(\"autocertDelegate: invalid server name %q; wrong number of resolved addrs. 
Want 1; got: %q\", serverName, addrs)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\tchallengeIP := addrs[0]\n\tif !validChallengeAddr(challengeIP) {\n\t\tlog.Printf(\"autocertDelegate: server name %q resolved to invalid challenge IP %q\", serverName, challengeIP)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\n\tchallengePort, err := strconv.Atoi(r.FormValue(\"challengeport\"))\n\tif err != nil || challengePort < 0 || challengePort > 64<<10 {\n\t\thttp.Error(w, \"invalid challengeport param\", 400)\n\t\treturn\n\t}\n\tchallengeScheme := r.FormValue(\"challengescheme\")\n\tswitch challengeScheme {\n\tcase \"http\", \"https\":\n\tcase \"\":\n\t\tchallengeScheme = \"http\"\n\tdefault:\n\t\thttp.Error(w, \"invalid challengescheme param\", 400)\n\t\treturn\n\t}\n\tchallengeURL := fmt.Sprintf(\"%s://%s:%d/.well-known/autocertdelegate-challenge\",\n\t\tchallengeScheme, challengeIP, challengePort)\n\n\tif err := s.verifyChallengeURL(r.Context(), challengeURL, serverName); err != nil {\n\t\tlog.Printf(\"autocertdelegate: failed challenge for %q: %v\", serverName, err)\n\t\tbadServerName(w)\n\t\treturn\n\t}\n\n\twantRSA, _ := strconv.ParseBool(r.FormValue(\"rsa\"))\n\n\tvar cipherSuites []uint16\n\tif !wantRSA {\n\t\tcipherSuites = append(cipherSuites, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256)\n\t}\n\t// Prime the cache:\n\tif _, err := s.am.GetCertificate(&tls.ClientHelloInfo{\n\t\tServerName: r.FormValue(\"servername\"),\n\t\tCipherSuites: cipherSuites,\n\t}); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tkey := serverName\n\tif wantRSA {\n\t\tkey += \"+rsa\"\n\t}\n\t// But what we really want is the on-disk PEM representation:\n\tpems, err := s.am.Cache.Get(r.Context(), key)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tw.Write(pems)\n}", "func (s *Server) Serve(pc net.PacketConn, addr net.Addr, datagram []byte) {\n\t// Test response with an A RR\n\taRr := 
dns.A{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: \"refs.com\",\n\t\t},\n\t\tA: net.IPv4(0, 4, 2, 0),\n\t}\n\n\tdata, err := json.Marshal(aRr)\n\tif err != nil {\n\t\ts.Log.Error().Err(err).Msg(\"marshaling A RR\")\n\t\tpc.Close()\n\t\tos.Exit(1)\n\t}\n\n\tn, err := pc.WriteTo(data, addr)\n\tif err != nil {\n\t\ts.Log.Error().Err(err).Msg(\"error sending the package to origin\")\n\t}\n\n\ts.Log.Info().Msgf(\"%v bytes written to %v\", n, addr.String())\n}", "func stdDNSHandler(t *testing.T, w dns.ResponseWriter, r *dns.Msg, s *dnsTestServer, invertAnswers bool) {\n\tdoDNSAnswer(t, w, r, s.DNSDatabase, invertAnswers)\n}", "func (s *Server) ServePacket(p net.PacketConn) error {\n\ts.m.Lock()\n\ts.server[udp] = &dns.Server{PacketConn: p, Net: \"udp\", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {\n\t\tctx := context.WithValue(context.Background(), Key{}, s)\n\t\tctx = context.WithValue(ctx, LoopKey{}, 0)\n\t\ts.ServeDNS(ctx, w, r)\n\t}), TsigSecret: s.tsigSecret}\n\ts.m.Unlock()\n\n\treturn s.server[udp].ActivateAndServe()\n}", "func (s *Service) serve() {\n\tvar err error\n\tif s.tls {\n\t\tlog.Printf(\" > httpd https://%s\", s.addr)\n\t\terr = s.ln.ListenAndServeTLS(s.certFile, s.keyFile)\n\t} else {\n\t\tlog.Printf(\" > httpd http://%s\", s.addr)\n\t\terr = s.ln.ListenAndServe()\n\t}\n\tif err != nil && !strings.Contains(err.Error(), \"closed\") {\n\t\ts.err <- fmt.Errorf(\"httpd http://%s\\n%s\", s.addr, err)\n\t}\n\t<-s.shutdownChan\n}", "func (m *mDNS) AddHandler(f func(net.Interface, net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func (i *Instance) Serve(ctx context.Context, addr string) (string, error) {\n\tstdlog.SetFlags(stdlog.Lshortfile)\n\tif addr == \"\" {\n\t\treturn \"\", nil\n\t}\n\ti.serveMu.Lock()\n\tdefer i.serveMu.Unlock()\n\n\tif i.listenedDebugAddress != \"\" {\n\t\t// Already serving. 
Return the bound address.\n\t\treturn i.listenedDebugAddress, nil\n\t}\n\n\ti.debugAddress = addr\n\tlistener, err := net.Listen(\"tcp\", i.debugAddress)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ti.listenedDebugAddress = listener.Addr().String()\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\tif strings.HasSuffix(i.debugAddress, \":0\") {\n\t\tstdlog.Printf(\"debug server listening at http://localhost:%d\", port)\n\t}\n\tevent.Log(ctx, \"Debug serving\", tag.Port.Of(port))\n\tgo func() {\n\t\tmux := http.NewServeMux()\n\t\tmux.HandleFunc(\"/\", render(MainTmpl, func(*http.Request) interface{} { return i }))\n\t\tmux.HandleFunc(\"/debug/\", render(DebugTmpl, nil))\n\t\tmux.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\t\tmux.HandleFunc(\"/debug/pprof/cmdline\", cmdline)\n\t\tmux.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\t\tmux.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\t\tmux.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n\t\tif i.prometheus != nil {\n\t\t\tmux.HandleFunc(\"/metrics/\", i.prometheus.Serve)\n\t\t}\n\t\tif i.rpcs != nil {\n\t\t\tmux.HandleFunc(\"/rpc/\", render(RPCTmpl, i.rpcs.getData))\n\t\t}\n\t\tif i.traces != nil {\n\t\t\tmux.HandleFunc(\"/trace/\", render(TraceTmpl, i.traces.getData))\n\t\t}\n\t\tmux.HandleFunc(\"/analysis/\", render(AnalysisTmpl, i.getAnalysis))\n\t\tmux.HandleFunc(\"/cache/\", render(CacheTmpl, i.getCache))\n\t\tmux.HandleFunc(\"/session/\", render(SessionTmpl, i.getSession))\n\t\tmux.HandleFunc(\"/view/\", render(ViewTmpl, i.getView))\n\t\tmux.HandleFunc(\"/client/\", render(ClientTmpl, i.getClient))\n\t\tmux.HandleFunc(\"/server/\", render(ServerTmpl, i.getServer))\n\t\tmux.HandleFunc(\"/file/\", render(FileTmpl, i.getFile))\n\t\tmux.HandleFunc(\"/info\", render(InfoTmpl, i.getInfo))\n\t\tmux.HandleFunc(\"/memory\", render(MemoryTmpl, getMemory))\n\n\t\t// Internal debugging helpers.\n\t\tmux.HandleFunc(\"/gc\", func(w http.ResponseWriter, r *http.Request) 
{\n\t\t\truntime.GC()\n\t\t\truntime.GC()\n\t\t\truntime.GC()\n\t\t\thttp.Redirect(w, r, \"/memory\", http.StatusTemporaryRedirect)\n\t\t})\n\t\tmux.HandleFunc(\"/_makeabug\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\tbug.Report(\"bug here\")\n\t\t\thttp.Error(w, \"made a bug\", http.StatusOK)\n\t\t})\n\n\t\tif err := http.Serve(listener, mux); err != nil {\n\t\t\tevent.Error(ctx, \"Debug server failed\", err)\n\t\t\treturn\n\t\t}\n\t\tevent.Log(ctx, \"Debug server finished\")\n\t}()\n\treturn i.listenedDebugAddress, nil\n}", "func DNSsvc(d string) Option {\n\treturn func(c *Config) Option {\n\t\tprevious := c.DNSsvc\n\t\tc.DNSsvc = d\n\t\treturn DNSsvc(previous)\n\t}\n}", "func (s *Server) handleServe() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\thost := router.StripHostPort(r.Host)\n\t\tcfg := s.Cfg\n\n\t\t// If virtual hosting is enabled, the configuration is switched to the\n\t\t// configuration of the vhost\n\t\tif cfg.Core.VirtualHosting {\n\t\t\tif _, ok := cfg.Core.VirtualHosts[host]; ok {\n\t\t\t\tcfg = s.Vhosts[host]\n\t\t\t}\n\t\t}\n\n\t\tpath := r.URL.Path\n\n\t\t// If path ends with a slash, add ServeIndex\n\t\tif path[len(path)-1] == '/' {\n\t\t\tpath = path + cfg.Serve.ServeIndex\n\t\t}\n\n\t\t// Serve the file that is requested by path if it esists in ServeDir.\n\t\t// If the requested path doesn't exist, return a 404 error\n\t\tif _, err := os.Stat(cfg.Serve.ServeDir + path); err == nil {\n\t\t\ts.setHeaders(w, cfg.Serve.Headers, false)\n\t\t\tw.Header().Set(\"Content-Type\", getMIMEType(path, cfg.Serve.MIMETypes))\n\t\t\thttp.ServeFile(w, r, cfg.Serve.ServeDir+path)\n\t\t\ts.LogNetwork(200, r)\n\t\t} else {\n\n\t\t\t// Path wasn't found, so we return a 404 not found error\n\t\t\ts.HandleError(w, r, 404)\n\t\t\treturn\n\t\t}\n\t}\n}", "func svcHandler()", "func serve(addr string) error {\n\thttp.HandleFunc(\"/serve\", func(w http.ResponseWriter, r *http.Request) {\n\t\t_ = r.ParseForm()\n\n\t\taddr 
:= r.Form.Get(\"addr\")\n\t\tif addr == \"\" {\n\t\t\t_, _ = w.Write([]byte(\"no addr\"))\n\t\t\treturn\n\t\t}\n\n\t\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"./main\", \"-addr\", addr)\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t//Setsid: true,\n\t\t\tSetpgid: true,\n\t\t}\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\t_, _ = w.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t\t// write the output to the http response\n\t\t_, _ = w.Write(out)\n\t})\n\n\tlog.Printf(\"Listening on: %s\\n\", addr)\n\treturn http.ListenAndServe(addr, nil)\n}", "func (m *mDNS) AddHandler(f func(net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func (u *Umbrella) OnDNSRequest(ctx context.Context, req *requests.DNSRequest) {\n\tcfg := ctx.Value(requests.ContextConfig).(*config.Config)\n\tbus := ctx.Value(requests.ContextEventBus).(*eventbus.EventBus)\n\tif cfg == nil || bus == nil {\n\t\treturn\n\t}\n\n\tif u.API == nil || u.API.Key == \"\" {\n\t\treturn\n\t}\n\n\tif !cfg.IsDomainInScope(req.Domain) {\n\t\treturn\n\t}\n\n\tu.CheckRateLimit()\n\tbus.Publish(requests.SetActiveTopic, u.String())\n\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"Querying %s for %s subdomains\", u.String(), req.Domain))\n\n\theaders := u.restHeaders()\n\turl := u.restDNSURL(req.Domain)\n\tpage, err := http.RequestWebPage(url, nil, headers, \"\", \"\")\n\tif err != nil {\n\t\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"%s: %s: %v\", u.String(), url, err))\n\t\treturn\n\t}\n\t// Extract the subdomain names from the REST API results\n\tvar subs struct {\n\t\tMatches []struct {\n\t\t\tName string `json:\"name\"`\n\t\t} `json:\"matches\"`\n\t}\n\tif err := json.Unmarshal([]byte(page), &subs); err != nil {\n\t\treturn\n\t}\n\n\tfor _, m := range subs.Matches {\n\t\tif d := cfg.WhichDomain(m.Name); d != \"\" {\n\t\t\tbus.Publish(requests.NewNameTopic, &requests.DNSRequest{\n\t\t\t\tName: m.Name,\n\t\t\t\tDomain: d,\n\t\t\t\tTag: u.SourceType,\n\t\t\t\tSource: 
u.String(),\n\t\t\t})\n\t\t}\n\t}\n}", "func (mux *ServeMux) Handle(pattern string, handler Handler) {\n\tif pattern == \"\" {\n\t\tpanic(\"dns: invalid pattern \" + pattern)\n\t}\n\tmux.m.Lock()\n\tif mux.z == nil {\n\t\tmux.z = make(map[string]Handler)\n\t}\n\tmux.z[CanonicalName(pattern)] = handler\n\tmux.m.Unlock()\n}", "func Serve(l net.Listener, h Handler, appname, hostname string) (err error) {\n\tserv := Server{\n\t\tAppname: appname,\n\t\tHostname: hostname,\n\t\tHandler: h,\n\t}\n\treturn serv.Serve(l)\n}", "func Register(name string, action caddy.SetupFunc) {\n\tcaddy.RegisterPlugin(name, caddy.Plugin{\n\t\tServerType: \"dns\",\n\t\tAction: action,\n\t})\n}", "func DNSHealth() error { return get(SysDNS) }", "func (ss *SNSServer) DnsReady() (e error) {\n\n\t// if an SOA provider isn't given, we're done\n\tif ss.SOAProvider == \"\" {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif ss.waitForDns > 0 {\n\t\tctx, cancel = context.WithTimeout(context.Background(), ss.waitForDns)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel()\n\n\t// Creating the dns client for our query\n\tclient := dns.Client{\n\t\tNet: \"tcp\", // tcp to connect to the SOA provider? 
or udp (default)?\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: ss.waitForDns,\n\t\t},\n\t}\n\t// the message contains what we are looking for - the SOA record of the host\n\tmsg := dns.Msg{}\n\tmsg.SetQuestion(strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\", dns.TypeANY)\n\n\tdefer cancel()\n\n\tvar check = func() <-chan struct{} {\n\t\tvar channel = make(chan struct{})\n\n\t\tgo func(c chan struct{}) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tresponse *dns.Msg\n\t\t\t)\n\n\t\t\tfor {\n\t\t\t\t// sending the dns query to the soa provider\n\t\t\t\tresponse, _, err = client.Exchange(&msg, ss.SOAProvider)\n\t\t\t\t// if we found a record, then we are done\n\t\t\t\tif err == nil && response != nil && response.Rcode == dns.RcodeSuccess && len(response.Answer) > 0 {\n\t\t\t\t\tc <- struct{}{}\n\t\t\t\t\tss.metrics.DnsReady.Add(1.0)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// otherwise, we keep trying\n\t\t\t\tss.metrics.DnsReadyQueryCount.Add(1.0)\n\t\t\t\tss.logger.Info(\"checking if server's DNS is ready\",\n\t\t\t\t\tzap.String(\"endpoint\", strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\"), zap.Error(err), zap.Any(\"response\", response))\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}(channel)\n\n\t\treturn channel\n\t}\n\n\tselect {\n\tcase <-check():\n\tcase <-ctx.Done():\n\t\te = ctx.Err()\n\t}\n\n\treturn\n}", "func Dns(host string) *net.IP {\n\tfor _, dnsServer := range appConfig.Dnsservers {\n\t\tIP := dnss(host, dnsServer+\":53\")\n\t\tif IP != nil {\n\t\t\treturn IP\n\t\t}\n\t}\n\treturn nil\n}", "func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) {\n\tcleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})\n\t// if IPv6 is not enabled, also clean out any IPv6 address nameserver\n\tif !ipv6Enabled {\n\t\tcleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})\n\t}\n\t// if the resulting resolvConf has no more nameservers defined, add appropriate\n\t// default DNS servers for IPv4 and (optionally) 
IPv6\n\tif len(GetNameservers(cleanedResolvConf, IP)) == 0 {\n\t\tlog.G(context.TODO()).Infof(\"No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v\", defaultIPv4Dns)\n\t\tdns := defaultIPv4Dns\n\t\tif ipv6Enabled {\n\t\t\tlog.G(context.TODO()).Infof(\"IPv6 enabled; Adding default IPv6 external servers: %v\", defaultIPv6Dns)\n\t\t\tdns = append(dns, defaultIPv6Dns...)\n\t\t}\n\t\tcleanedResolvConf = append(cleanedResolvConf, []byte(\"\\n\"+strings.Join(dns, \"\\n\"))...)\n\t}\n\treturn &File{Content: cleanedResolvConf, Hash: hashData(cleanedResolvConf)}, nil\n}", "func (d *DHCPv4) DNS() []net.IP {\n\treturn GetIPs(OptionDomainNameServer, d.Options)\n}", "func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {}", "func (m *FPGADevicePluginServer) Serve(resourceName string) error {\n\tlog.Debugf(\"In Serve(%s)\", m.socket)\n\terr := m.Start()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not start device plugin: %v\", err)\n\t\treturn err\n\t}\n\tlog.Infof(\"Starting to serve on %s\", m.socket)\n\n\terr = m.Register(pluginapi.KubeletSocket, resourceName)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not register device plugin: %v\", err)\n\t\tm.Stop()\n\t\treturn err\n\t}\n\tlog.Infof(\"Registered device plugin with Kubelet %s\", resourceName)\n\n\treturn nil\n}", "func Server(domain, addr string, rawDataSize uint8, pr chan ParsedRequest) {\n\tlog.Println(\"Starting Server.\")\n\tladdr, err := net.ResolveUDPAddr(\"udp\", addr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tconn, err := net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\t// Compressed domain name without dots.\n\tcDomain := strings.Replace(domain, \".\", \"\", -1)\n\t// UDP packet buffer\n\tbuf := make([]byte, 65536)\n\t// Read UDP package loop.\nNextRequest:\n\tfor {\n\t\t// Read the next package from the connection.\n\t\tn, raddr, err := conn.ReadFromUDP(buf)\n\t\tif err != nil 
{\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Extract the domain name from the DNS package.\n\t\tdnsName := buf[12 : n-4]\n\n\t\t// prDomains will be used in the ParsedRequest returned on the pr channel.\n\t\tprDomains := make([]byte, len(dnsName))\n\t\tcopy(prDomains, dnsName)\n\n\t\t// Extract the base36 encoded data from the dnsName.\n\t\t// dnsName starts and ends with one byte that will not be used in subDomains.\n\t\tsubDomains := make([]byte, len(dnsName)-2)\n\t\ti, tdots := 0, -1\n\t\tfor dnsName[0] != 0x00 {\n\t\t\t// Max size of one subdomain (from one dot to the next) is 63 characters.\n\t\t\tif int(dnsName[0]) > 63 || i > len(subDomains) {\n\t\t\t\tlog.Printf(\"Invalid DNS request in %s server.\\n\", domain)\n\t\t\t\tcontinue NextRequest\n\t\t\t}\n\t\t\tcopy(subDomains[i:], dnsName[1:1+int(dnsName[0])])\n\t\t\ti += int(dnsName[0])\n\t\t\tdnsName = dnsName[1+dnsName[0]:]\n\t\t\ttdots++\n\t\t}\n\t\t// Validate that the request is made to the domain specified in the domain constant.\n\t\tif string(subDomains[len(subDomains)-len(cDomain)-tdots:len(subDomains)-tdots]) != cDomain {\n\t\t\tlog.Printf(\"Discard request. Request not intended for %s\\n\", domain)\n\t\t\tcontinue NextRequest\n\t\t}\n\t\tsubDomains = subDomains[:len(subDomains)-len(cDomain)-tdots]\n\n\t\t// decode the base36 encoded data from the subdomains.\n\t\tdecoded, err := base.Decode(subDomains, 36)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Discard request. 
Illegal characters in subdomains detected in %s server.\\n\", domain)\n\t\t\tcontinue NextRequest\n\t\t}\n\n\t\t// Copy the decoded data in to rawData to add any nulls in the beginnig of the original data.\n\t\trawData := make([]byte, int(rawDataSize))\n\t\tcopy(rawData[int(rawDataSize)-len(decoded):], decoded)\n\n\t\t// Extract the Responce ID\n\t\trespId := [2]byte{buf[0], buf[1]}\n\n\t\t// Send the ParsedRequest to the responce channel.\n\t\tpr <- ParsedRequest{respId, raddr, prDomains, rawData}\n\t}\n}", "func (h *Handler) serveServers(w http.ResponseWriter, r *http.Request) {}", "func (l *LogEntry) addDns(d *guardduty.DnsRequestAction) {\n\tl.DnsDomain = aws.StringValue(d.Domain)\n}", "func runTestDNSServer(t *testing.T, port string) *dnsTestServer {\n\tlistener, err := net.ListenPacket(\"udp\", \"127.0.0.1:\"+port)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmux := dns.NewServeMux()\n\tserver := &dns.Server{PacketConn: listener, Net: \"udp\", Handler: mux}\n\n\tgo func() {\n\t\tif err := server.ActivateAndServe(); err != nil {\n\t\t\tlog.Printf(\"Error in local DNS server: %s\", err)\n\t\t}\n\t}()\n\n\treturn newDNSTestServer(server)\n}", "func handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) {\n\tdefer w.Close()\n\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Compress = false\n\n\tswitch r.Opcode {\n\tcase dns.OpcodeQuery:\n\t\tparseQuery(m)\n\t}\n\n\tw.WriteMsg(m)\n}", "func (s *Server) ServeDHCP(p dhcp4.Packet, msgType dhcp4.MessageType, options dhcp4.Options) dhcp4.Packet {\n\ts.printLeases()\n\n\tswitch msgType {\n\tcase dhcp4.Discover: // Broadcast Packet From Client - Can I have an IP?\n\t\treturn s.handleDiscover(p, options)\n\n\tcase dhcp4.Request: // Broadcast From Client - I'll take that IP (Also start for renewals)\n\t\t// start/renew a lease -- update lease time\n\t\t// some clients (OSX) just go right ahead and do Request first from previously known IP, if they get NAK, they restart full cycle with Discover then Request\n\t\treturn 
s.handleDHCP4Request(p, options)\n\n\tcase dhcp4.Decline: // Broadcast From Client - Sorry I can't use that IP\n\t\treturn s.handleDecline(p, options)\n\n\tcase dhcp4.Release: // From Client, I don't need that IP anymore\n\t\treturn s.handleRelease(p, options)\n\n\tcase dhcp4.Inform: // From Client, I have this IP and there's nothing you can do about it\n\t\treturn s.handleInform(p, options)\n\n\t// from server -- ignore those but enumerate just in case\n\tcase dhcp4.Offer: // Broadcast From Server - Here's an IP\n\t\tlog.Printf(\"DHCP: received message from %s: Offer\", p.CHAddr())\n\n\tcase dhcp4.ACK: // From Server, Yes you can have that IP\n\t\tlog.Printf(\"DHCP: received message from %s: ACK\", p.CHAddr())\n\n\tcase dhcp4.NAK: // From Server, No you cannot have that IP\n\t\tlog.Printf(\"DHCP: received message from %s: NAK\", p.CHAddr())\n\n\tdefault:\n\t\tlog.Printf(\"DHCP: unknown packet %v from %s\", msgType, p.CHAddr())\n\t\treturn nil\n\t}\n\treturn nil\n}", "func (i *DHCPInterface) ServeDHCP(p dhcp.Packet, msgType dhcp.MessageType, options dhcp.Options) dhcp.Packet {\n\tvar respMsg dhcp.MessageType\n\n\tswitch msgType {\n\tcase dhcp.Discover:\n\t\trespMsg = dhcp.Offer\n\tcase dhcp.Request:\n\t\trespMsg = dhcp.ACK\n\t}\n\n\tif respMsg != 0 {\n\t\trequestingMAC := p.CHAddr().String()\n\n\t\tif requestingMAC == i.MACFilter {\n\t\t\topts := dhcp.Options{\n\t\t\t\tdhcp.OptionSubnetMask: []byte(i.VMIPNet.Mask),\n\t\t\t\tdhcp.OptionRouter: []byte(*i.GatewayIP),\n\t\t\t\tdhcp.OptionDomainNameServer: i.dnsServers,\n\t\t\t\tdhcp.OptionHostName: []byte(i.Hostname),\n\t\t\t}\n\n\t\t\tif netRoutes := formClasslessRoutes(&i.Routes); netRoutes != nil {\n\t\t\t\topts[dhcp.OptionClasslessRouteFormat] = netRoutes\n\t\t\t}\n\n\t\t\tif i.ntpServers != nil {\n\t\t\t\topts[dhcp.OptionNetworkTimeProtocolServers] = i.ntpServers\n\t\t\t}\n\n\t\t\toptSlice := opts.SelectOrderOrAll(options[dhcp.OptionParameterRequestList])\n\n\t\t\treturn dhcp.ReplyPacket(p, respMsg, *i.GatewayIP, 
i.VMIPNet.IP, leaseDuration, optSlice)\n\t\t}\n\t}\n\n\treturn nil\n}", "func handleDHCPFindActiveServer(w http.ResponseWriter, r *http.Request) {\n\tlog.Tracef(\"%s %v\", r.Method, r.URL)\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terrorText := fmt.Sprintf(\"failed to read request body: %s\", err)\n\t\tlog.Error(errorText)\n\t\thttp.Error(w, errorText, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tinterfaceName := strings.TrimSpace(string(body))\n\tif interfaceName == \"\" {\n\t\terrorText := fmt.Sprintf(\"empty interface name specified\")\n\t\tlog.Error(errorText)\n\t\thttp.Error(w, errorText, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfound, err := dhcpd.CheckIfOtherDHCPServersPresent(interfaceName)\n\n\tothSrv := map[string]interface{}{}\n\tfoundVal := \"no\"\n\tif found {\n\t\tfoundVal = \"yes\"\n\t} else if err != nil {\n\t\tfoundVal = \"error\"\n\t\tothSrv[\"error\"] = err.Error()\n\t}\n\tothSrv[\"found\"] = foundVal\n\n\tstaticIP := map[string]interface{}{}\n\tisStaticIP, err := hasStaticIP(interfaceName)\n\tstaticIPStatus := \"yes\"\n\tif err != nil {\n\t\tstaticIPStatus = \"error\"\n\t\tstaticIP[\"error\"] = err.Error()\n\t} else if !isStaticIP {\n\t\tstaticIPStatus = \"no\"\n\t\tstaticIP[\"ip\"] = getFullIP(interfaceName)\n\t}\n\tstaticIP[\"static\"] = staticIPStatus\n\n\tresult := map[string]interface{}{}\n\tresult[\"other_server\"] = othSrv\n\tresult[\"static_ip\"] = staticIP\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\terr = json.NewEncoder(w).Encode(result)\n\tif err != nil {\n\t\thttpError(w, http.StatusInternalServerError, \"Failed to marshal DHCP found json: %s\", err)\n\t\treturn\n\t}\n}", "func Serve(opts ServeOpts) error {\n\tpluginOpts := grpcplugin.ServeOpts{\n\t\tDiagnosticsServer: newDiagnosticsSDKAdapter(prometheus.DefaultGatherer, opts.CheckHealthHandler),\n\t}\n\n\tif opts.CallResourceHandler != nil {\n\t\tpluginOpts.ResourceServer = newResourceSDKAdapter(opts.CallResourceHandler)\n\t}\n\n\tif 
opts.QueryDataHandler != nil {\n\t\tpluginOpts.DataServer = newDataSDKAdapter(opts.QueryDataHandler)\n\t}\n\n\tif opts.TransformDataHandler != nil {\n\t\tpluginOpts.TransformServer = newTransformSDKAdapter(opts.TransformDataHandler)\n\t}\n\n\treturn grpcplugin.Serve(pluginOpts)\n}", "func NewHandler(config *conf.DNSResolverConfig, cache c.Cache) *DNSHandler {\n\tvar (\n\t\tclientConfig *dns.ClientConfig\n\t\tresolver *Resolver\n\t)\n\n\tresolver = &Resolver{clientConfig}\n\n\thandler := &DNSHandler{\n\t\tresolver: resolver,\n\t\tcache: cache,\n\t\tconfig: config,\n\t}\n\n\tif config.Hosts.Enable {\n\t\thandler.hosts = h.NewHosts(&config.Hosts)\n\t}\n\n\treturn handler\n}", "func (acm *AcmeFS) Serve(def http.Handler) http.Handler {\n\treturn handler{func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.HasPrefix(r.URL.Path, acmeChallengeSubPath) {\n\t\t\tdef.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tscheme := \"http\"\n\t\tif r.TLS != nil {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tupstream, err := url.Parse(fmt.Sprintf(\"%s://%s:%d\", scheme, acm.config.ListenerAddr, acm.config.HTTPChallengePort))\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tproxy := httputil.NewSingleHostReverseProxy(upstream)\n\t\tproxy.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tproxy.ServeHTTP(w, r)\n\t}}\n}", "func CreateDNSMux() *dns.ServeMux {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tmux := dns.NewServeMux()\n\tmux.HandleFunc(\".\", handleRequest)\n\treturn mux\n}", "func (writer *connectivityHooks) dnsStartHook(di httptrace.DNSStartInfo) {\n\tfmt.Fprint(writer.w, dnsColorFunc(\"--- Starting DNS lookup to resolve '%v' ---\\n\", di.Host))\n}", "func (j * JoinHelper) ConfigureDNS () error {\n\tlog.Info().Msg(\"Configuring DNS\")\n\n\tips, err := net.LookupHost(j.EicToken.DnsUrl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// update resolved.conf\n\t// 
[Resolve]\n\t// DNS=...\n\t// Cache=no\n\tcmdStr := fmt.Sprintf(\"echo \\\"DNS= %s 8.8.8.8 8.8.4.4\\nCache=no\\\" >> %s\", strings.Join(ips,\" \"), resolvedFile)\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", cmdStr)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tlog.Error().Str(\"error\", err.Error()).Msg(\"error executing\")\n\t\treturn err\n\t}\n\n\t// restart the service\n\tlog.Info().Msg(\"restart systemd-resolved service\")\n\tcmd = exec.Command(\"/bin/sh\", \"-c\", \"systemctl restart systemd-resolved\")\n\terr = cmd.Run()\n\tif err != nil {\n\t\tlog.Error().Str(\"error\", err.Error()).Msg(\"error restarting service systemd-resolved\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (writer *connectivityHooks) dnsDoneHook(di httptrace.DNSDoneInfo) {\n\tstatusString := color.GreenString(\"OK\")\n\tif di.Err != nil {\n\t\tstatusString = color.RedString(\"ERROR\")\n\t\tfmt.Fprint(writer.w, dnsColorFunc(\"Unable to resolve the address : %v\\n\", scrubber.ScrubLine(di.Err.Error())))\n\t}\n\tfmt.Fprintf(writer.w, \"* %v [%v]\\n\\n\", dnsColorFunc(\"DNS Lookup\"), statusString)\n}", "func SetDNSHealth(err error) { set(SysDNS, err) }", "func StartDNSDaemon() (err error) {\n\tsrv := &dns.Server{Addr: \":\" + strconv.Itoa(53), Net: \"udp\"}\n\tsrv.Handler = &dnsHandler{}\n\n\tconfig, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tlog.Info().Msgf(\"Successful load local /etc/resolv.conf\")\n\tfor _, server := range config.Servers {\n\t\tlog.Info().Msgf(\"Success load nameserver %s\\n\", server)\n\t}\n\n\tfmt.Printf(\"DNS Server Start At 53...\\n\")\n\terr = srv.ListenAndServe()\n\tif err != nil {\n\t\tlog.Error().Msgf(\"Failed to set udp listener %s\\n\", err.Error())\n\t}\n\treturn\n}", "func (p *FuncPool) Serve(ctx context.Context, fID, imageName, payload string) (*hpb.FwdHelloResp, *metrics.Metric, error) {\n\tf := p.getFunction(fID, imageName)\n\n\treturn f.Serve(ctx, fID, imageName, payload)\n}", "func Handle(h Handler) 
{\n\tHandleFunc(HandlerFunc(h.HandleSNS))\n}", "func (s *Server) ListenAndServe() error {\n\tlog.Printf(\"dns server listening on %s [%s]\", s.Config.DNS.Listen, s.Config.DNS.Protocol)\n\treturn s.proxy.ListenAndServe(s.Config.DNS.Listen, s.Config.DNS.Protocol)\n}", "func (s *server) serve() error {\n\thost, port, err := net.SplitHostPort(s.a.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.server.Addr = host + \":\" + port\n\ts.server.Handler = s\n\ts.server.ReadTimeout = s.a.ReadTimeout\n\ts.server.ReadHeaderTimeout = s.a.ReadHeaderTimeout\n\ts.server.WriteTimeout = s.a.WriteTimeout\n\ts.server.IdleTimeout = s.a.IdleTimeout\n\ts.server.MaxHeaderBytes = s.a.MaxHeaderBytes\n\ts.server.ErrorLog = s.a.ErrorLogger\n\n\trealPort := port\n\thh := http.Handler(http.HandlerFunc(func(\n\t\trw http.ResponseWriter,\n\t\tr *http.Request,\n\t) {\n\t\thost, _, err := net.SplitHostPort(r.Host)\n\t\tif err != nil {\n\t\t\thost = r.Host\n\t\t}\n\n\t\tif realPort != \"443\" {\n\t\t\thost = fmt.Sprint(host, \":\", realPort)\n\t\t}\n\n\t\thttp.Redirect(\n\t\t\trw,\n\t\t\tr,\n\t\t\t\"https://\"+host+r.RequestURI,\n\t\t\thttp.StatusMovedPermanently,\n\t\t)\n\t}))\n\n\tif s.a.DebugMode {\n\t\tfmt.Println(\"air: serving in debug mode\")\n\t}\n\n\tif s.a.TLSCertFile != \"\" && s.a.TLSKeyFile != \"\" {\n\t\tc, err := tls.LoadX509KeyPair(s.a.TLSCertFile, s.a.TLSKeyFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.server.TLSConfig = &tls.Config{\n\t\t\tCertificates: []tls.Certificate{c},\n\t\t}\n\t} else if !s.a.DebugMode && s.a.ACMEEnabled {\n\t\tacm := &autocert.Manager{\n\t\t\tPrompt: autocert.AcceptTOS,\n\t\t\tCache: autocert.DirCache(s.a.ACMECertRoot),\n\t\t\tClient: &acme.Client{\n\t\t\t\tDirectoryURL: s.a.ACMEDirectoryURL,\n\t\t\t},\n\t\t\tEmail: s.a.MaintainerEmail,\n\t\t}\n\t\tif s.a.ACMEHostWhitelist != nil {\n\t\t\tacm.HostPolicy = autocert.HostWhitelist(\n\t\t\t\ts.a.ACMEHostWhitelist...,\n\t\t\t)\n\t\t}\n\n\t\thh = acm.HTTPHandler(hh)\n\t\ts.a.HTTPSEnforced = 
true\n\n\t\ts.server.TLSConfig = acm.TLSConfig()\n\t} else {\n\t\th2s := &http2.Server{\n\t\t\tIdleTimeout: s.a.IdleTimeout,\n\t\t}\n\t\tif h2s.IdleTimeout == 0 {\n\t\t\th2s.IdleTimeout = s.a.ReadTimeout\n\t\t}\n\n\t\ts.server.Handler = h2c.NewHandler(s.server.Handler, h2s)\n\n\t\tl := newListener(s.a)\n\t\tif err := l.listen(s.server.Addr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer l.Close()\n\n\t\ts.addressMap[l.Addr().String()] = 0\n\t\tdefer delete(s.addressMap, l.Addr().String())\n\n\t\tif realPort == \"0\" {\n\t\t\t_, realPort, _ = net.SplitHostPort(l.Addr().String())\n\t\t\tfmt.Printf(\"air: listening on %v\\n\", s.addresses())\n\t\t}\n\n\t\treturn s.server.Serve(l)\n\t}\n\n\tif s.a.HTTPSEnforced {\n\t\ths := &http.Server{\n\t\t\tAddr: host + \":\" + s.a.HTTPSEnforcedPort,\n\t\t\tHandler: hh,\n\t\t\tReadTimeout: s.a.ReadTimeout,\n\t\t\tReadHeaderTimeout: s.a.ReadHeaderTimeout,\n\t\t\tWriteTimeout: s.a.WriteTimeout,\n\t\t\tIdleTimeout: s.a.IdleTimeout,\n\t\t\tMaxHeaderBytes: s.a.MaxHeaderBytes,\n\t\t\tErrorLog: s.a.ErrorLogger,\n\t\t}\n\n\t\tl := newListener(s.a)\n\t\tif err := l.listen(hs.Addr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer l.Close()\n\n\t\ts.addressMap[l.Addr().String()] = 1\n\t\tdefer delete(s.addressMap, l.Addr().String())\n\n\t\tgo hs.Serve(l)\n\t\tdefer hs.Close()\n\t}\n\n\tl := newListener(s.a)\n\tif err := l.listen(s.server.Addr); err != nil {\n\t\treturn err\n\t}\n\tdefer l.Close()\n\n\ts.addressMap[l.Addr().String()] = 0\n\tdefer delete(s.addressMap, l.Addr().String())\n\n\tif realPort == \"0\" {\n\t\t_, realPort, _ = net.SplitHostPort(l.Addr().String())\n\t\tfmt.Printf(\"air: listening on %v\\n\", s.addresses())\n\t}\n\n\treturn s.server.ServeTLS(l, \"\", \"\")\n}", "func (o KubernetesClusterWindowsProfileGmsaOutput) DnsServer() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterWindowsProfileGmsa) string { return v.DnsServer }).(pulumi.StringOutput)\n}", "func newdnsController(kubeClient 
kubernetes.Interface, namespace, zone string, rulesCallback func([]rewrite.Rule)) *dnsControl {\n\tdns := &dnsControl{\n\t\tstopCh: make(chan struct{}),\n\t\tready: make(chan struct{}),\n\t}\n\n\tstore := cache.NewUndeltaStore(func(is []interface{}) {\n\t\tdns.readyOnce.Do(func() {\n\t\t\tclose(dns.ready)\n\t\t})\n\n\t\trules := make([]rewrite.Rule, 0, len(is))\n\n\t\tfor _, i := range is {\n\t\t\tsvc := i.(*api.Service)\n\t\t\tif svc == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfrom, ok := svc.Annotations[\"egress.monzo.com/dns-name\"]\n\t\t\tif !ok {\n\t\t\t\tlog.Warningf(\"%s is missing dns-name annotation\", svc.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tto := fmt.Sprintf(\"%s.%s.svc.%s\", svc.Name, svc.Namespace, zone)\n\n\t\t\trewriteQuestionFrom := plugin.Name(from).Normalize()\n\t\t\trewriteQuestionTo := plugin.Name(to).Normalize()\n\n\t\t\trewriteAnswerFromPattern, err := regexp.Compile(rewriteQuestionTo)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trules = append(rules, &exactNameRule{\n\t\t\t\tNextAction: \"stop\",\n\t\t\t\tFrom: rewriteQuestionFrom,\n\t\t\t\tTo: rewriteQuestionTo,\n\t\t\t\tResponseRule: rewrite.ResponseRule{\n\t\t\t\t\tActive: true,\n\t\t\t\t\tType: \"name\",\n\t\t\t\t\tPattern: rewriteAnswerFromPattern,\n\t\t\t\t\tReplacement: rewriteQuestionFrom,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\trulesCallback(rules)\n\t}, cache.MetaNamespaceKeyFunc)\n\n\ts := labels.SelectorFromSet(map[string]string{\n\t\t\"app\": \"egress-gateway\",\n\t\t\"egress.monzo.com/hijack-dns\": \"true\",\n\t})\n\n\tdns.reflector = cache.NewReflector(&cache.ListWatch{\n\t\tListFunc: serviceListFunc(kubeClient, namespace, s),\n\t\tWatchFunc: serviceWatchFunc(kubeClient, namespace, s),\n\t}, &api.Service{}, store, 0)\n\n\treturn dns\n}", "func (h *handler) Serve(ctx context.Context, _ *sync.WaitGroup) error {\n\tln, err := net.Listen(\"tcp\", h.addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo h.stop(ctx, ln)\n\n\th.registerRoutes()\n\n\treturn 
h.server.Serve(ln)\n}", "func analyzeDns(w io.Writer, server, hostname string, samples, waitMillis int) {\n\tm := new(dns.Msg)\n\tm.Id = dns.Id()\n\tm.RecursionDesired = true\n\tm.Question = make([]dns.Question, 1)\n\tm.Question[0] = dns.Question{Name: dns.Fqdn(hostname), Qtype: dns.TypeA, Qclass: dns.ClassINET}\n\twait := time.Duration(waitMillis) * time.Millisecond\n\n\tc := new(dns.Client)\n\n\tfmt.Printf(\"QUERY %v (@%v): %v data bytes\\n\", hostname, server, m.Len())\n\n\trtts := make(DurationSlice, samples, samples)\n\tfor i := 0; i < samples; i++ {\n\t\tin, rtt, err := c.Exchange(m, server+\":53\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\trtts[i] = rtt\n\t\tfmt.Fprintf(w, \"%v bytes from %v: ttl=%v time=%v\\n\", in.Len(), server, time.Second*6, rtt)\n\t\ttime.Sleep(wait)\n\t}\n\n\t// NOTE: Potentially Eating Performance for Pretties\n\tvar min, max, avg, stddev time.Duration\n\tmin = rtts.Min()\n\tmax = rtts.Max()\n\tavg = rtts.Avg()\n\tstddev = rtts.Std()\n\n\tfmt.Fprintf(w, \"round-trip min/avg/max/stddev = %v/%v/%v/%v\\n\", min, avg, max, stddev)\n}", "func (h *Handler) serveDeleteServer(w http.ResponseWriter, r *http.Request) {}", "func (ad *AutoDNS) HandleFunc(w dns.ResponseWriter, req *dns.Msg) {\n\tvar err error\n\t/* any questions? 
*/\n\tif len(req.Question) < 1 {\n\t\treturn\n\t}\n\n\trmsg := new(dns.Msg)\n\trmsg.SetReply(req)\n\tq := req.Question[0]\n\tswitch q.Qtype {\n\tcase dns.TypeA:\n\t\tglog.V(LINFO).Infoln(\"requesting:\", q.Name, dns.TypeToString[q.Qtype])\n\n\t\tfor qName := q.Name[:len(q.Name)-1]; strings.Count(qName, `.`) > 0; qName = qName[strings.Index(qName, `.`)+1:] {\n\t\t\toffsets := ad.outsideListIndex.Lookup([]byte(qName), 1)\n\t\t\tif len(offsets) > 0 {\n\t\t\t\tglog.V(LDEBUG).Infoln(qName, \"Hit OutsideList\")\n\t\t\t\tad.outsideHandleFunc(w, req)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tad.insideHandleFunc(w, req)\n\t\treturn\n\tcase dns.TypeANY:\n\t\tglog.V(LINFO).Infoln(\"request-block\", q.Name, dns.TypeToString[q.Qtype])\n\tdefault:\n\t\tglog.V(LINFO).Infoln(\"requesting:\", q.Name, dns.TypeToString[q.Qtype])\n\t\tad.outsideHandleFunc(w, req)\n\t\treturn\n\t}\n\n\t// fmt.Println(rmsg)\n\tif err = w.WriteMsg(rmsg); nil != err {\n\t\tglog.V(LINFO).Infoln(\"Response faild, rmsg:\", err)\n\t}\n}", "func (m *DomainDnsSrvRecord) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n err := m.DomainDnsRecord.Serialize(writer)\n if err != nil {\n return err\n }\n {\n err = writer.WriteStringValue(\"nameTarget\", m.GetNameTarget())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"port\", m.GetPort())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"priority\", m.GetPriority())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"protocol\", m.GetProtocol())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"service\", m.GetService())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"weight\", m.GetWeight())\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (o *PluginDnsClient) OnAccept(socket transport.SocketApi) transport.ISocketCb {\n\tif !o.IsNameServer() {\n\t\treturn 
nil\n\t}\n\to.stats.dnsFlowAccept++ // New flow for the Name Server.\n\to.socket = socket // Store socket so we can reply.\n\treturn o\n}", "func (s *ProvisionedServer) FQDN(domain string) string {\n\treturn FQDN(domain, s.Server.Hostname, s.AdvertiseIP)\n}" ]
[ "0.77668357", "0.7492943", "0.74582714", "0.7435545", "0.73842216", "0.7375142", "0.73032033", "0.71936864", "0.7132464", "0.7121125", "0.7036655", "0.6979837", "0.69361687", "0.69274604", "0.6808041", "0.67672426", "0.67540807", "0.6689928", "0.6674705", "0.6641609", "0.66346574", "0.6604336", "0.653691", "0.64477664", "0.64050657", "0.63486236", "0.6329724", "0.61433315", "0.57947195", "0.5762147", "0.5610917", "0.55937254", "0.5564424", "0.5493358", "0.5492161", "0.5444036", "0.54233223", "0.5408781", "0.5404327", "0.53667176", "0.5330569", "0.53070337", "0.52967024", "0.52690727", "0.5264098", "0.5253734", "0.52484965", "0.5231955", "0.5227422", "0.52170223", "0.5208048", "0.51906466", "0.51724863", "0.5164894", "0.5153129", "0.5143205", "0.5141282", "0.51351446", "0.5112543", "0.5092888", "0.50798535", "0.50698316", "0.5061729", "0.5057764", "0.5057167", "0.50458443", "0.50446737", "0.5026513", "0.5024434", "0.5020495", "0.500725", "0.49908566", "0.49735042", "0.49708593", "0.4964383", "0.49630645", "0.49476206", "0.49422026", "0.4926882", "0.49263254", "0.49209097", "0.49200365", "0.4910557", "0.49098206", "0.49085695", "0.4902899", "0.48972762", "0.48836204", "0.48827496", "0.4870402", "0.48690224", "0.48664713", "0.4863", "0.48597452", "0.48558065", "0.48548442", "0.48399037", "0.48374844", "0.48313543", "0.48107493" ]
0.7153225
8
Name implements the Handler interface.
func (d DNS64) Name() string { return "dns64" }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mg MessageHandler) Name() string {\n\treturn nameFromFunc(mg)\n}", "func (OKHandler) Name() string {\n\treturn NameOK\n}", "func (h Oauth1Handler) Name() string { return h.name }", "func (EchoHandler) Name() string {\n\treturn NameEcho\n}", "func (dlmg RawMessageHandler) Name() string {\n\treturn nameFromFunc(dlmg)\n}", "func (PanicHandler) Name() string {\n\treturn NamePanic\n}", "func (h *DNSHandler) Name() string { return name }", "func (m *methodDesc) HandlerName() string {\n\treturn fmt.Sprintf(\"%s_%d\", m.Name, m.Num)\n}", "func (mh *MessageHandler) Name() string {\n\tret := C.EnvGetDefmessageHandlerName(mh.class.env.env, mh.class.clptr, mh.index)\n\treturn C.GoString(ret)\n}", "func (ctx *Context) HandlerName() string {\n\treturn nameOfFunction(ctx.handlers.last())\n}", "func (s *server) Name(args interface{}, resp *string) error {\n\t*resp = s.impl.Name()\n\treturn nil\n}", "func (CheckHandler) Name() string {\n\treturn NameCheck\n}", "func (p DirectHandler) Name() string { return p.ProviderName }", "func (lb *LBHandler) Name() string {\n\treturn \"loadbalancer\"\n}", "func (h *HTTP) Name() string {\n\treturn ModuleName()\n}", "func (t *LogProviderHandler) Name() string {\n\treturn LogProvider\n}", "func (h *LRFHSSHandler) Name() (string, error) {\n\treturn \"LR-FHSS only ADR algorithm\", nil\n}", "func (handler *ConsoleLogHandler) Name() string {\r\n return \"console\"\r\n}", "func (e *HTMLApplet) Name(v string) *HTMLApplet {\n\te.a[\"name\"] = v\n\treturn e\n}", "func NameHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tname := nameResponse{\n\t\tName: \"world\",\n\t}\n\n\tjson.NewEncoder(w).Encode(name)\n}", "func (m *endpoint) Name() *string {\n\treturn m.nameField\n}", "func (r *Route) Name() string {\n\treturn r.name\n}", "func (e *Endpoint) Name() string {\n\treturn e.name\n}", "func (fnGet) Name() string {\n\treturn \"get\"\n}", "func 
(FailHandler) Name() string {\n\treturn NameFail\n}", "func (e *EndComponent) Name() string {\n\treturn \"name\"\n}", "func (r *Router) Name() string {\n\treturn r.name\n}", "func (r *Route) Name() string {\n\treturn \"route\"\n}", "func (c *Ctx) HandlerName() string {\n\treturn c.handlerName\n}", "func (r *Router) Name() string {\n\treturn \"echo mux\"\n}", "func Name(v interface{}) string {\n\treturn New(v).Name()\n}", "func (c client) Name() string {\n\t_, l4Type := c.name.GetLookupAndType()\n\treq := NameRequest{Type: l4Type}\n\tresp := NameResponse{}\n\n\tc.client.Call(\"L4.Name\", req, &resp)\n\treturn resp.Name\n}", "func (e *Definition) Name() string {\n\tstr := e.json.Get(\"name\").Value()\n\tif str == nil {\n\t\treturn \"\"\n\t}\n\treturn str.(string)\n}", "func (e *EntryBase) Name() string {\n\treturn e.name()\n}", "func (h *Handler) SetName(name string) {\n\th.name = name\n}", "func (g *generator) Name() string {\n\treturn g.typeName\n}", "func (r *Router) Name(name string) *Router {\n\tr.Noun = r.Noun + name\n\treturn r\n}", "func (t Type) Name() string {\n\treturn schemas[t%EvCount].Name\n}", "func (i *Index) Name() string { return i.name }", "func (vl VerbLevel) Name() string {\n\tswitch vl {\n\tcase VerbQuiet:\n\t\treturn \"quiet\"\n\tcase VerbError:\n\t\treturn \"error\"\n\tcase VerbWarn:\n\t\treturn \"warn\"\n\tcase VerbInfo:\n\t\treturn \"info\"\n\tcase VerbDebug:\n\t\treturn \"debug\"\n\tcase VerbCrazy:\n\t\treturn \"crazy\"\n\t}\n\treturn \"unknown\"\n}", "func (t *DynamicMessageType) Name() string {\n\treturn t.spec.FullName\n}", "func (l *JSONFileLogger) Name() string {\n\treturn Name\n}", "func (l *JSONFileLogger) Name() string {\n\treturn Name\n}", "func (svc *ProductHTTP) Name() string {\n\treturn \"ProductHTTP\"\n}", "func (s *SendEventToMeshAndCheckEventId) Name() string {\n\treturn \"Send event to mesh and check event id\"\n}", "func (h *StdHandle) Name() string { return h.name }", "func (c *Event) Name() string {\n\treturn c.name\n}", 
"func (m Method) Name() string {\n\treturn m.function.name\n}", "func (Client) HandlerName() string {\n\treturn \"CryptoClient\"\n}", "func (Middleware) Name() string {\n\treturn NameIBC\n}", "func (b DefaultBinder) Name() string {\n\treturn Name\n}", "func (b DefaultBinder) Name() string {\n\treturn Name\n}", "func (s *Server) Name() string {\n\treturn s.name\n}", "func (h *Salute) Name() string {\n\treturn \"Salute\"\n}", "func (e *EDNS) Name() string { return name }", "func (ii *IndexInfo) Name() string {\n\treturn ii.Index\n}", "func (server *Server) Name() string {\n\treturn server.name\n}", "func (q *Request) Name() string {\n\treturn q.Metadata.ServiceName + \":\" + q.Operation.Name\n}", "func (e DomainEvent) Name() string {\n\treturn e.name\n}", "func (c Client) Name() string {\n\treturn \"generic\"\n}", "func (e VerifyHandler) Name() string { return e.ProviderName }", "func (ilp *IlpFormatter) Name() string {\n\treturn ilp.name\n}", "func (e *Entry) Name() string {\n\treturn e.name\n}", "func (o MethodOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Method) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o LoggerEventhubOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LoggerEventhub) string { return v.Name }).(pulumi.StringOutput)\n}", "func (r *Route) Name(name string) {\n\tif name == \"\" {\n\t\treturn\n\t}\n\tp := make([]byte, 0, len(r.pattern))\n\tfor i := 0; i < len(r.pattern); i++ {\n\t\tif r.pattern[i] != ':' {\n\t\t\tp = append(p, r.pattern[i])\n\t\t\tcontinue\n\t\t}\n\t\tp = append(p, '%')\n\t\tp = append(p, 'v')\n\t\tfor i = i + 1; i < len(r.pattern); i++ {\n\t\t\tif !isParamChar(r.pattern[i]) {\n\t\t\t\ti--\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tr.router.routeNamedMap[name] = string(p)\n}", "func (o MethodResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MethodResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func (c *withNameAndCode) Name() string {\n\treturn 
c.name\n}", "func (n *BindFnNode) Name() string { return n.name }", "func (e *Entry) Name() string {\n\tif len(e.path) == 0 {\n\t\treturn \"\"\n\t}\n\treturn e.path[len(e.path)-1]\n}", "func (n *ExceptionNamer) Name(t *types.Type) string {\n\tkey := n.KeyFunc(t)\n\tif exception, ok := n.Exceptions[key]; ok {\n\t\treturn exception\n\t}\n\treturn n.Delegate.Name(t)\n}", "func (r *Route) Name(name string) *Route {\n\tr.route.Name(name)\n\treturn r\n}", "func (o WebhookOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Webhook) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (r *Template) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}", "func (server *Server) Name() string {\n\treturn \"Gophermine\"\n}", "func (*serverModule) Name() module.Name {\n\treturn ModuleName\n}", "func (h *simpleOperation) Name() string {\n\treturn h.name\n}", "func (e *BasicEvent) Name() string {\n\treturn e.name\n}", "func (a *RedisAction) Name() string {\n\treturn a.name\n}", "func (_Weth *WethCaller) Name(opts *bind.CallOpts) (string, error) {\n\tvar out []interface{}\n\terr := _Weth.contract.Call(opts, &out, \"name\")\n\n\tif err != nil {\n\t\treturn *new(string), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(string)).(*string)\n\n\treturn out0, err\n\n}", "func (ev *EventMeta) Name() string {\n\treturn \"EventMeta\"\n}", "func (p *GenericPlugin) Name() string {\n\n\tswitch p.state {\n\tcase stateNotLoaded:\n\t\treturn path.Base(p.filename)\n\tdefault:\n\t}\n\n\treturn p.name()\n}", "func (w *Wechaty) Name() string {\n\tif len(w.Option.name) != 0 {\n\t\treturn w.Option.name\n\t}\n\treturn \"wechaty\"\n}", "func (h Dnstap) Name() string { return \"dnstap\" }", "func (v *View) Name() string {\n\treturn v.name\n}", "func (v *View) Name() string {\n\treturn v.name\n}", "func (c *Content) Name() string {\n\treturn filepath.Base(c.Path)\n}", "func (r *Route) Name(name string) *Route {\n\tr.RouteName = 
name\n\n\treturn r\n}", "func (s *StateTemplate)Name() string {\n\treturn s.op\n}", "func (c *authorizer) Name() string {\n\treturn c.config.Name\n}", "func (e *Exchange) Name() string {\n\treturn e.name\n}", "func (n *SQSNotify) Name() string {\n\treturn n.name\n}", "func (h *Exechook) Name() string {\n\treturn \"exechook\"\n}", "func Name(ctx context.Context) string {\n\tf, ok := ctx.Value(stateKey).(*Func)\n\tif !ok {\n\t\treturn \"<Undefined>\"\n\t}\n\tname := runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name()\n\treturn strings.TrimRight(nameRe.FindStringSubmatch(name)[1], \")\")\n}", "func (b *BasicAuthenticationBackend) Name() string {\n\treturn b.name\n}", "func (n *piName) Name() string {\n\treturn n.name\n}", "func (b *ClientAdaptor) Name() string { return b.name }", "func (r *Route) Name(name string) *Route {\n\tr.Noun = r.Noun + name\n\tif _, ok := r.Router.Router.NamedRoutes[r.Noun]; ok {\n\t\tpanic(\"muxy: duplicated name: \" + r.Noun)\n\t}\n\tr.Router.Router.NamedRoutes[r.Noun] = r\n\treturn r\n}", "func (g Generator) Name() string {\n\treturn \"buffalo/generate-action\"\n}", "func (u *UserResolver) Name(ctx context.Context) *string {\n\treturn &u.m.Name\n}", "func (i *Index) Name() string {\n\treturn i.file.Name()\n}" ]
[ "0.8090795", "0.7832537", "0.77911645", "0.77820235", "0.77666354", "0.768719", "0.76012576", "0.7568156", "0.7523737", "0.7521228", "0.7371901", "0.73415166", "0.7281172", "0.72681665", "0.7267154", "0.72419786", "0.71643776", "0.71507925", "0.7054378", "0.6863543", "0.68605566", "0.68512046", "0.6837102", "0.68111277", "0.68047905", "0.6792094", "0.67558616", "0.67511225", "0.67331856", "0.6703439", "0.6659646", "0.6658268", "0.66409105", "0.6632565", "0.6626471", "0.66234326", "0.66155267", "0.6614352", "0.6614254", "0.66137975", "0.66077155", "0.6597399", "0.6597399", "0.6590271", "0.6570947", "0.65635055", "0.65496165", "0.6544772", "0.6543932", "0.6541567", "0.6532072", "0.6532072", "0.6525629", "0.65010506", "0.64962804", "0.6495022", "0.6490232", "0.6485126", "0.64810365", "0.6478676", "0.64691216", "0.64642507", "0.6462264", "0.64608926", "0.6454042", "0.6445222", "0.64441687", "0.64416265", "0.64368325", "0.64317673", "0.6419731", "0.6417011", "0.64143103", "0.64133143", "0.64123946", "0.6412073", "0.6410741", "0.6407884", "0.64030683", "0.64020354", "0.6397856", "0.639271", "0.63922083", "0.6389202", "0.63884985", "0.63884985", "0.6388303", "0.6387043", "0.63843244", "0.6380346", "0.6377187", "0.6376432", "0.637296", "0.6371444", "0.636914", "0.636753", "0.63636345", "0.63614225", "0.6355268", "0.63527113", "0.6352102" ]
0.0
-1
WriteMsg implements the dns.ResponseWriter interface.
func (r *ResponseWriter) WriteMsg(res *dns.Msg) error { state := request.Request{W: r, Req: res} // only respond with this when the request came in over IPv6. if state.Family() == 1 { // if it came in over v4, don't do anything. return r.ResponseWriter.WriteMsg(res) } // do not modify if query is not AAAA or not of class IN. if state.QType() != dns.TypeAAAA || state.QClass() != dns.ClassINET { return r.ResponseWriter.WriteMsg(res) } // do not modify if there are AAAA records or NameError. continue if NoData or any other error. ty, _ := response.Typify(res, time.Now().UTC()) if ty == response.NoError || ty == response.NameError { if hasAAAA(res) && ! r.translateAll { return r.ResponseWriter.WriteMsg(res) } } // perform request to upstream. res2, err := r.Proxy.Lookup(state, state.Name(), dns.TypeA) if err != nil { log.Warningf("[WARNING] Unable to query upstream DNS: %v", err) res.MsgHdr.Rcode = dns.RcodeServerFailure return r.ResponseWriter.WriteMsg(res) } // modify response. res.MsgHdr.Rcode = dns.RcodeSuccess nsTtl := uint32(600) for i := 0; i < len(res.Ns); i++ { if res.Ns[i].Header().Rrtype == dns.TypeSOA { nsTtl = res.Ns[i].Header().Ttl } } res.Answer = res2.Answer for i := 0; i < len(res.Answer); i++ { ans := res.Answer[i] hdr := ans.Header() if hdr.Rrtype == dns.TypeA { aaaa, _ := To6(r.Prefix, ans.(*dns.A).A) ttl := nsTtl if ans.Header().Ttl < ttl { ttl = ans.Header().Ttl } res.Answer[i] = &dns.AAAA{ Hdr: dns.RR_Header{ Name: hdr.Name, Rrtype: dns.TypeAAAA, Class: hdr.Class, Ttl: ttl, }, AAAA: aaaa, } } } res.Ns = []dns.RR{} return r.ResponseWriter.WriteMsg(res) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (w *DNSResponseWriter) WriteMsg(m *dns.Msg) error {\n\tif !w.do {\n\t\tm = dnsutil.ClearDNSSEC(m)\n\t}\n\tm = dnsutil.ClearOPT(m)\n\n\tif !w.noedns {\n\t\tw.opt.SetDo(w.do)\n\t\tm.Extra = append(m.Extra, w.opt)\n\t}\n\n\tif w.noad {\n\t\tm.AuthenticatedData = false\n\t}\n\n\tif w.Proto() == \"udp\" && m.Len() > w.size {\n\t\tm.Truncated = true\n\t\tm.Answer = []dns.RR{}\n\t\tm.Ns = []dns.RR{}\n\t\tm.AuthenticatedData = false\n\t}\n\n\treturn w.ResponseWriter.WriteMsg(m)\n}", "func WriteMsg(w http.ResponseWriter, str string) {\n\t//Todo: Format nicely\n\tfmt.Fprintf(w, str)\n}", "func (s *ScrubWriter) WriteMsg(m *dns.Msg) error {\n\tstate := Request{Req: s.req, W: s.ResponseWriter}\n\tstate.SizeAndDo(m)\n\tstate.Scrub(m)\n\treturn s.ResponseWriter.WriteMsg(m)\n}", "func (r *LoadBalanceResponseWriter) WriteMsg(res *dns.Msg) error {\n\tif res.Rcode != dns.RcodeSuccess {\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\tif res.Question[0].Qtype == dns.TypeAXFR || res.Question[0].Qtype == dns.TypeIXFR {\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\treturn r.ResponseWriter.WriteMsg(r.shuffle(res))\n}", "func (h *DNSHandler) WriteReplyMsg(w dns.ResponseWriter, message *dns.Msg) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlogger.Noticef(\"Recovered in WriteReplyMsg: %s\", r)\n\t\t}\n\t}()\n\n\terr := w.WriteMsg(message)\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t}\n}", "func WriteMsg(w io.Writer, b []byte) error {\n\t// TODO(bradfitz): this does two writes to w, which likely\n\t// does two writes on the wire, two frame generations, etc. 
We\n\t// should take a concrete buffered type, or use a sync.Pool to\n\t// allocate a buf and do one write.\n\tcb := make([]byte, 4)\n\tif len(b) > MaxMessageSize {\n\t\treturn fmt.Errorf(\"ipn.Write: message too large: %v bytes\", len(b))\n\t}\n\tbinary.LittleEndian.PutUint32(cb, uint32(len(b)))\n\tn, err := w.Write(cb)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != 4 {\n\t\treturn fmt.Errorf(\"ipn.Write: short write: %v bytes (wanted 4)\", n)\n\t}\n\tn, err = w.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != len(b) {\n\t\treturn fmt.Errorf(\"ipn.Write: short write: %v bytes (wanted %v)\", n, len(b))\n\t}\n\treturn nil\n}", "func (r *ResponseReverter) WriteMsg(res *dns.Msg) error {\n\tres.Question[0] = r.originalQuestion\n\tfor _, rr := range res.Answer {\n\t\tif rr.Header().Rrtype != dns.TypeA && rr.Header().Rrtype != dns.TypeAAAA {\n\t\t\tcontinue\n\t\t}\n\n\t\tss := strings.Split(rr.String(), \"\\t\")\n\t\tif len(ss) != 5 {\n\t\t\tcontinue\n\t\t}\n\t\tip := net.ParseIP(ss[4])\n\t\tfor _, listName := range r.listNames {\n\t\t\tif err := addIP(ip, listName); err != nil {\n\t\t\t\tlog.Error(\"add IP:\", ip, \" to ipset:\", listName, \", result:\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn r.ResponseWriter.WriteMsg(res)\n}", "func (c *conn) WriteMsg(data ...[]byte) (int, error) {\n\treturn c.base.WriteMsg(data...)\n}", "func (c *Conn) WriteMessage(msg interface{}) error {\n\tdata, err := xml.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcolor.Printf(\"@{|}<!-- REQUEST -->\\n%s\\n\\n\", string(data))\n\treturn c.WriteDataUnit(data)\n}", "func (b *BaseConn) writeMsg(msg *baseproto.Message) error {\n\tmsgBuffer, err := proto.Marshal(msg)\n\tif err != nil {\n\t\tlog.Print(\"shared/connect: error marshalling msg \", err)\n\t\treturn err\n\t}\n\n\tprefix := proto.EncodeVarint(uint64(len(msgBuffer)))\n\t_, err = b.conn.Write(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = b.conn.Write(msgBuffer)\n\treturn err\n}", "func (coll *Collector) 
Write(msg string, extra map[string]interface{}) (err error) {\n\n\tm := gelf.Message{\n\t\tVersion: \"1.1\",\n\t\tHost: coll.host,\n\t\tShort: msg,\n\t\tTimeUnix: float64(time.Now().Unix()),\n\t\tLevel: 6, // info always\n\t\tFacility: \"drone\",\n\t\tExtra: extra,\n\t}\n\n\tif err = coll.writer.WriteMessage(&m); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func (c *Client) writeMsg(msg string) error {\n\twritten, err := c.connection.Write([]byte(msg))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif written != len(msg) {\n\t\treturn fmt.Errorf(\"Invalid length of data written to connection, expected %v but only managed %v\", len(msg), written)\n\t}\n\n\treturn nil\n}", "func Write(conn denet.UDPConn, remoteAddr net.UDPAddr, msg *Message) error {\n\tb := bytes.Buffer{}\n\terr := bencode.NewEncoder(&b).Encode(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.WriteToUDP(b.Bytes(), &remoteAddr)\n\treturn err\n}", "func (w *FileLogWriter) WriteMsg(msg string, level logrus.Level) error {\n\tif level > w.C.Level {\n\t\treturn nil\n\t}\n\tw.Rl.Write([]byte(msg))\n\treturn nil\n}", "func writeFormattedMsg(conn net.Conn, msg interface{}) error {\n\t_, err := conn.Write([]byte(\"---------------------------\\n\"))\n\tt := reflect.ValueOf(msg)\n\tswitch t.Kind() {\n\tcase reflect.Map:\n\t\tfor k, v := range msg.(map[string]string) {\n\t\t\t_, err = conn.Write([]byte(k + \" : \" + v))\n\t\t}\n\t\tbreak\n\tcase reflect.String:\n\t\tv := reflect.ValueOf(msg).String()\n\t\t_, err = conn.Write([]byte(v + \"\\n\"))\n\t\tbreak\n\t} //switch\n\tconn.Write([]byte(\"---------------------------\\n\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeMessage(w io.Writer, msg []byte) error {\n\tlength := uint16(len(msg))\n\tif int(length) != len(msg) {\n\t\tpanic(len(msg))\n\t}\n\terr := binary.Write(w, binary.BigEndian, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(msg)\n\treturn err\n}", "func (wechatPush 
*WechatPush) WriteMsg(when time.Time, msg string, level int) error {\n\tif level > wechatPush.Level {\n\t\treturn nil\n\t}\n\n\tdata := InitPushData(msg)\n\n\tfor _, id := range wechatPush.WechatIds {\n\t\terr := wechatPush.message.Push(id, \"\", wechatPush.TmpId, data)\n\t\tfmt.Printf(\"push data to user:%v, error:%v\\n\", id, err)\n\t}\n\treturn nil\n}", "func (r *ResponseWriter) Write(buf []byte) (int, error) {\n\tlog.Warning(\"[WARNING] DNS64 called with Write: not performing DNS64\")\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, err\n}", "func WriteErrMsg(w http.ResponseWriter, r *http.Request, msg string, opts ...int) {\n\thttpErr := NewErrHTTP(r, msg, opts...)\n\thttpErr.write(w, r, len(opts) > 1 /*silent*/)\n\tFreeHTTPErr(httpErr)\n}", "func (session *TCPSession) WriteMsg(msg PackInf) error {\n\tif session.IsClosed() {\n\t\tFreePack(msg)\n\t\treturn ErrorSessionClosed\n\t}\n\tselect {\n\tcase session.sendChan <- msg:\n\t\tsession.lastActiveTime = time.Now().Unix()\n\t\treturn nil\n\tdefault:\n\t\treturn ErrorSessionWriteBlocked\n\t}\n}", "func (c *codec) WriteMessage(msg *birpc.Message) error {\n\tc.wmu.Lock()\n\tdefer c.wmu.Unlock()\n\n\tm := &mpc.Message{}\n\tm.ID = msg.ID\n\tm.Func = msg.Func\n\n\tif t, ok := msg.Args.(msgp.Marshaler); ok {\n\t\tb, err := t.MarshalMsg(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.Args = msgp.Raw(b)\n\t}\n\n\tif t, ok := msg.Result.(msgp.Marshaler); ok {\n\t\tb, err := t.MarshalMsg(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.Result = msgp.Raw(b)\n\t}\n\n\tif msg.Error != nil {\n\t\tm.Error = &mpc.Error{Msg: msg.Error.Msg}\n\t}\n\n\tconn, err := c.db.Connection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := m.MarshalMsg(c.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.buf = b[:0]\n\n\t_, err = conn.Do(\"PUBLISH\", c.ch, b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = conn.Return()\n\treturn err\n}", "func (w *reply) Write(m *Msg) {\n\tw.Client().ReplyChan <- 
&Exchange{Request: w.req, Reply: m}\n}", "func (session *TCPSession) WriteMsg(msg PackInf) error {\n\tif session.IsClosed() {\n\t\treturn ErrorSessionClosed\n\t}\n\tselect {\n\tcase session.sendChan <- msg:\n\t\tsession.lastActiveTime = time.Now().Unix()\n\t\treturn nil\n\tdefault:\n\t\treturn ErrorSessionWriteBlocked\n\t}\n}", "func (sf *Associate) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (n, oobn int, err error) {\n\treturn sf.getUnderAssociate().WriteMsgUDP(b, oob, addr)\n}", "func (fw *FileLogWriter) WriteMsg(msg string, level int) error {\n\tif fw.fd == nil || level < fw.config.LogLevel {\n\t\treturn nil\n\t}\n\tfw.lg.Println(msg)\n\tif fw.oneFile == false {\n\t\tfw.docheck()\n\t}\n\treturn nil\n}", "func (c *SodaClient) Write(sendMsg string) {\n\tmsg := strings.TrimSpace(sendMsg)\n\n\tbuf := []byte(msg)\n\n\t_, err := c.conn.Write(buf) // returns string length of write and potential write errors\n\n\tif err != nil {\n\t\tfmt.Println(msg, err)\n\t}\n}", "func (_m *MockResponseWriter) WriteMsg(_a0 *dns.Msg) error {\n\tret := _m.Called(_a0)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(*dns.Msg) error); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}", "func (rw *pssRPCRW) WriteMsg(msg p2p.Msg) error {\n\tlog.Trace(\"got writemsg pssclient\", \"msg\", msg)\n\tif rw.closed {\n\t\treturn fmt.Errorf(\"connection closed\")\n\t}\n\trlpdata := make([]byte, msg.Size)\n\tmsg.Payload.Read(rlpdata)\n\tpmsg, err := rlp.EncodeToBytes(pss.ProtocolMsg{\n\t\tCode: msg.Code,\n\t\tSize: msg.Size,\n\t\tPayload: rlpdata,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Get the keys we have\n\tvar symkeyids []string\n\terr = rw.Client.rpc.Call(&symkeyids, \"pss_getHandshakeKeys\", rw.pubKeyId, rw.topic, false, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check the capacity of the first key\n\tvar symkeycap uint16\n\tif len(symkeyids) > 0 {\n\t\terr = rw.Client.rpc.Call(&symkeycap, \"pss_getHandshakeKeyCapacity\", 
symkeyids[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = rw.Client.rpc.Call(nil, \"pss_sendSym\", symkeyids[0], rw.topic, hexutil.Encode(pmsg))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If this is the last message it is valid for, initiate new handshake\n\tif symkeycap == 1 {\n\t\tvar retries int\n\t\tvar sync bool\n\t\t// if it's the only remaining key, make sure we don't continue until we have new ones for further writes\n\t\tif len(symkeyids) == 1 {\n\t\t\tsync = true\n\t\t}\n\t\t// initiate handshake\n\t\t_, err := rw.handshake(retries, sync, false)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"failing\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Socket) WriteMsg() {\r\n\tfor {\r\n\t\tselect {\r\n\t\tcase data := <-s.WriteChan:\r\n\t\t\tpref := intToBytes(len(data))\r\n\t\t\tvar buffer bytes.Buffer\r\n\t\t\tbuffer.Write(pref)\r\n\t\t\tbuffer.Write(data)\r\n\t\t\t_, err := s.Conn.Write(buffer.Bytes())\r\n\t\t\tif err != nil {\r\n\t\t\t\tfmt.Println(\"Send Error,\", err)\r\n\t\t\t}\r\n\t\tcase <-s.Ctx.Done():\r\n\t\t\tfmt.Println(\"Quit WriteMsg()\")\r\n\t\t\treturn\r\n\t\t}\r\n\t}\r\n}", "func WriteMsg(ctx context.Context, crawlabIndex string, es *elastic.Client, when time.Time, msg string) error {\n\tvals := make(map[string]interface{})\n\tvals[\"@timestamp\"] = when.Format(time.RFC3339)\n\tvals[\"@msg\"] = msg\n\tuid := uuid.NewV4().String()\n\t_, err := es.Index().Index(crawlabIndex).Id(uid).BodyJson(vals).Refresh(\"wait_for\").Do(ctx)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn err\n}", "func (conn *Conn) Write(msgType int, data []byte) error {\n\tconn.writer.Lock()\n\tdefer conn.writer.Unlock()\n\treturn conn.socket.WriteMessage(msgType, data)\n}", "func Write(m *Message) error {\n\treturn system.Write(m)\n}", "func (wc *WSConnection) WriteMsg(ctx context.Context) {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tif r := recover(); r != nil 
{\n\t\t\tticker.Stop()\n\t\t\twc.wsConn.Close()\n\t\t\tlogger.Errorf(\"ws soeckt write Routine panic \", string(debug.Stack()))\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase message, ok := <-wc.RChan:\n\t\t\tlogger.Infof(\"ws write msg %v \", string(message))\n\t\t\twc.wsConn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif !ok {\n\t\t\t\t// if !ok {\n\t\t\t\t// The input closed the channel.\n\t\t\t\twc.wsConn.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw, err := wc.wsConn.NextWriter(websocket.TextMessage)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Write(message)\n\t\t\t// Add queued chat messages to the current websocket message.\n\t\t\tn := len(wc.RChan)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tw.Write(newline)\n\t\t\t\tw.Write(<-wc.RChan)\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\tlogger.Errorf(\"ws write msg close err %v \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\twc.wsConn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := wc.wsConn.WriteMessage(websocket.PingMessage, nil); err != nil {\n\t\t\t\tlogger.Errorf(\"ws write msg timeout \")\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func (cw *ConsoleWriter) WriteMsg(msg string, skip, level int) error {\r\n\tif cw.Level > level {\r\n\t\treturn nil\r\n\t}\r\n\tif runtime.GOOS == \"windows\" {\r\n\t\tcw.lg.Println(msg)\r\n\t} else {\r\n\t\tcw.lg.Println(colors[level](msg))\r\n\t}\r\n\treturn nil\r\n}", "func (l *Logger) syslogWrite(p Priority, msg string) (int, error) {\n\t// ensure it ends in a \\n\n\tnl := \"\"\n\tif !strings.HasSuffix(msg, \"\\n\") {\n\t\tnl = \"\\n\"\n\t}\n\n\terr := l.conn.writeString(p, l.hostname, l.tag, msg, nl)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\t// Note: return the length of the input, not the number of\n\t// bytes printed by Fprintf, because this must behave like\n\t// an io.Writer.\n\treturn len(msg), nil\n}", "func Write(laddr, raddr 
*net.UDPAddr, message []byte, redundancyFactor uint8) error {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.WithField(\"err\", r).Warn(\"Send failed\")\n\t\t}\n\t}()\n\n\t// Send from same IP that the UDP listener is bound on but choose random port\n\tladdr.Port = 0\n\tconn, err := net.DialUDP(\"udp4\", laddr, raddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = conn.SetWriteBuffer(writeBufferSize); err != nil {\n\t\tlog.WithError(err).Traceln(\"SetWriteBuffer socket problem\")\n\t}\n\n\terr = sendRaptorRFC5053(conn, message, redundancyFactor)\n\t_ = conn.Close()\n\treturn err\n}", "func (c *peerConn) writeMessage(mType uint8, data []byte) error {\n\tcLen := len(data)\n\tval := make([]byte, cLen+4)\n\tbinary.LittleEndian.PutUint32(val, uint32(cLen))\n\tcopy(val[4:], data)\n\tmsg := []byte{c.version, mType}\n\tmsg = append(msg, val...)\n\t_, err := c.rw.Write(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (mc *MsgCache) WriteMsg(msg *Msg) bool {\n\n\tif mc.cache == nil {\n\t\tmc.cache = make([]*Msg, 0, 8)\n\t}\n\n\tmin := sort.Search(len(mc.cache), func(mid int) bool {\n\t\treturn mc.comparator(msg.key, mc.cache[mid].key) <= 0\n\t})\n\t//if cache contain the key. 
replace it\n\tif min != len(mc.cache) && mc.comparator(msg.key, mc.cache[min].key) == 0 {\n\t\tmc.cache[min].value = msg.value\n\t\tmc.cache[min].msgType = msg.msgType\n\t\tmc.size += (msg.Size() - mc.cache[min].Size())\n\t\treturn false\n\t} else {\n\t\t//insert value to slice\n\t\tmc.cache = append(mc.cache, nil)\n\t\tcopy(mc.cache[min+1:], mc.cache[min:])\n\t\tmc.cache[min] = msg\n\t\tmc.size += msg.Size()\n\t\treturn true\n\t}\n}", "func (c *Client) Write(msgType int, message []byte) error {\n\n\tif msgType == 0 {\n\t\treturn errors.New(\"Message type 0 is reserved\")\n\t}\n\n\tif c.status != Connected {\n\t\treturn errors.New(c.status.String())\n\t}\n\n\tmlen := len(message)\n\tif mlen > c.maxMsgSize {\n\t\treturn errors.New(\"Message exceeds maximum message length\")\n\t}\n\n\tc.toWrite <- &Message{MsgType: msgType, Data: message}\n\n\treturn nil\n}", "func (enc *jsonEncoder) WriteMessage(sink io.Writer, lvl string, msg string, ts time.Time) error {\n\t// Grab an encoder from the pool so that we can re-use the underlying\n\t// buffer.\n\tfinal := newJSONEncoder()\n\tdefer final.Free()\n\n\tfinal.bytes = append(final.bytes, `{\"msg\":\"`...)\n\tfinal.safeAddString(msg)\n\tfinal.bytes = append(final.bytes, `\",\"level\":\"`...)\n\tfinal.bytes = append(final.bytes, lvl...)\n\tfinal.bytes = append(final.bytes, `\",\"ts\":`...)\n\tfinal.bytes = strconv.AppendInt(final.bytes, ts.UnixNano(), 10)\n\tfinal.bytes = append(final.bytes, `,\"fields\":{`...)\n\tfinal.bytes = append(final.bytes, enc.bytes...)\n\tfinal.bytes = append(final.bytes, \"}}\\n\"...)\n\n\tn, err := sink.Write(final.bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n != len(final.bytes) {\n\t\treturn fmt.Errorf(\"incomplete write: only wrote %v of %v bytes\", n, len(final.bytes))\n\t}\n\treturn nil\n}", "func write(msg string, conn net.Conn) error {\n\tlog.Print(msg)\n\tconn.Write([]byte(msg))\n\treturn errors.New(msg)\n}", "func WriteError(w http.ResponseWriter, str string, err error) 
{\n\tWriteMsg(w, fmt.Sprintf(\"An Error occured - %s: %v\", str, err))\n}", "func (c *Conn) WriteMessage(msg interface{}) error {\n\terr := protocommon.MessageEncode(c.writeBuf, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.writeBuf.Flush()\n}", "func (s *Subscriber) write(mt int, payload []byte) error {\n s.ws.SetWriteDeadline(time.Now().Add(writeWait))\n return s.ws.WriteMessage(mt, payload)\n}", "func WriteMessage(msg interface{}) (*bytes.Buffer, error) {\n\tbuf := new(bytes.Buffer)\n\n\terr := binary.Write(buf, binary.BigEndian, msg)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\treturn buf, nil\n}", "func (conn *Conn) WriteMessage(msgType MessageType, data interface{}) error {\n\tmsg := Packet{\n\t\tType: msgType,\n\t\tData: data,\n\t}\n\tbytes, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn conn.Write(websocket.TextMessage, bytes)\n}", "func (s *server) WriteReplyMessage(w io.Writer, xid uint32, acceptType AcceptType, ret interface{}) error {\n\tvar buf bytes.Buffer\n\n\t// Header\n\theader := Message{\n\t\tXid: xid,\n\t\tType: Reply,\n\t}\n\n\tif _, err := xdr.Marshal(&buf, header); err != nil {\n\t\treturn err\n\t}\n\n\t// \"Accepted\"\n\tif _, err := xdr.Marshal(&buf, ReplyBody{Type: Accepted}); err != nil {\n\t\treturn err\n\t}\n\n\t// \"Success\"\n\tif _, err := xdr.Marshal(&buf, AcceptedReply{Type: acceptType}); err != nil {\n\t\treturn err\n\t}\n\n\t// Return data\n\tif ret != nil {\n\t\tif _, err := xdr.Marshal(&buf, ret); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err := w.Write(buf.Bytes())\n\treturn err\n}", "func sendMsg(conn *net.UDPConn, raddr net.UDPAddr, query interface{}) {\n\ttotalSent.Add(1)\n\tvar b bytes.Buffer\n\tif err := bencode.Marshal(&b, query); err != nil {\n\t\treturn\n\t}\n\tif n, err := conn.WriteToUDP(b.Bytes(), &raddr); err != nil {\n\t\tlogger.Infof(\"DHT: node write failed to %+v, error=%s\", raddr, err)\n\t} else {\n\t\ttotalWrittenBytes.Add(int64(n))\n\t}\n\treturn\n}", "func 
(w *messageWriter) Write(rm *producer.RefCountedMessage) {\n\tvar (\n\t\tnowNanos = w.nowFn().UnixNano()\n\t\tmsg = w.newMessage()\n\t\tmetrics = w.Metrics()\n\t)\n\tw.Lock()\n\tif !w.isValidWriteWithLock(nowNanos, metrics) {\n\t\tw.Unlock()\n\t\tw.close(msg)\n\t\treturn\n\t}\n\trm.IncRef()\n\tw.msgID++\n\tmeta := metadata{\n\t\tmetadataKey: metadataKey{\n\t\t\tshard: w.replicatedShardID,\n\t\t\tid: w.msgID,\n\t\t},\n\t}\n\tmsg.Set(meta, rm, nowNanos)\n\tw.acks.add(meta, msg)\n\t// Make sure all the new writes are ordered in queue.\n\tmetrics.enqueuedMessages.Inc(1)\n\tif w.lastNewWrite != nil {\n\t\tw.lastNewWrite = w.queue.InsertAfter(msg, w.lastNewWrite)\n\t} else {\n\t\tw.lastNewWrite = w.queue.PushFront(msg)\n\t}\n\tw.Unlock()\n}", "func (c *Conn) WriteMessage(messageType int, data []byte) error {\n\n\tif c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {\n\t\t// Fast path with no allocations and single frame.\n\n\t\tvar mw messageWriter\n\t\tif err := c.beginMessage(&mw, messageType); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn := copy(c.writeBuf[mw.pos:], data)\n\t\tmw.pos += n\n\t\tdata = data[n:]\n\t\treturn mw.flushFrame(true, data)\n\t}\n\n\tw, err := c.NextWriter(messageType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err = w.Write(data); err != nil {\n\t\treturn err\n\t}\n\treturn w.Close()\n}", "func WriteResponseMessage(w http.ResponseWriter, message string, trace string, status int, success bool) {\n\tmsg := models.Message{MSG: message, Success: success, Trace: trace, Status: status}\n\tgo logger.LogString(msg)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(msg.Bytes())\n}", "func (o *AddNamespaceToGroupOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func 
Write(com Communicator, con *net.TCPConn, m string) bool {\n\tn := len(m)\n\ts := \"\"\n\tfor i := 0; i < MessageLengthBytes; i++ {\n\t\t//fmt.Println(n % MaxByte, byte(n % MaxByte))\n\t\ts = string(byte(n%MaxByte)) + s\n\t\tn /= MaxByte\n\t}\n\treturn WriteBytes(com, con, com.Key()+s+m)\n}", "func (m *ServerMsg) Encode(w io.Writer) error {\n\tid := make([]byte, 8)\n\ttyp := make([]byte, 8)\n\tdataLen := make([]byte, 8)\n\tsize := make([]byte, 8)\n\n\tlog.Infof(\"encoding msg %d\", m.ID)\n\t// Write the ID of the call.\n\tn := binary.PutUvarint(id, m.ID)\n\tid = id[:n]\n\n\t// Write the type.\n\tn = binary.PutUvarint(typ, m.Type)\n\ttyp = typ[:n]\n\n\t//Write length of data and then data.\n\tn = binary.PutUvarint(dataLen, uint64(len(m.Data)))\n\tdataLen = dataLen[:n]\n\n\t// Write the size of the call.\n\tn = binary.PutUvarint(size, uint64(len(id)+len(typ)+len(dataLen)+len(m.Data)))\n\tsize = size[:n]\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, 100))\n\tbuf.Write(size)\n\tbuf.Write(id)\n\tbuf.Write(typ)\n\tbuf.Write(dataLen)\n\tbuf.Write(m.Data)\n\n\tif _, err := w.Write(buf.Bytes()); err != nil {\n\t\treturn fmt.Errorf(\"problem encoding ServerMsg onto io.Writer: %s\", err)\n\t}\n\n\treturn nil\n}", "func (c *Chat) WriteSayMsg(msg string) {\n\tc.sayMsgPipe <- fmt.Sprintf(\"PRIVMSG #%s :%s\", c.viewers.GetRoomName(), msg)\n}", "func WriteMessage(msg Message) {\n\tVac.writeMessage(msg)\n}", "func (c *Conn) WriteMessage(content []byte) error {\n\tc.writeMutex.Lock()\n\tdefer c.writeMutex.Unlock()\n\n\t// write 2 bytes header\n\tlen := uint16(len(content) + 2)\n\tlenBuf := []byte{0, 0}\n\tlenBuf[1] = byte(len >> 8)\n\tlenBuf[0] = byte(len)\n\terr := writeAll(lenBuf, c.nc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// write remain content\n\treturn writeAll(content, c.nc)\n}", "func (w *MonResponseWriter) Write(data []byte) (int, error) {\n\treturn w.ResponseWriter.Write(data)\n}", "func (d *Data) Write (w http.ResponseWriter) {\n\tproto := Protocol{\n\t\tAuthorized: 
true,\n\t\tSuccess: d.Code == http.StatusOK || d.Code == 0,\n\t\tError: d.Msg,\n\t\tData: d.Data}\n\td.Data = &proto\n\t(*resp.Data)(d).Write(w)\n}", "func (c *Client) Write(message *Message) {\n\tfmt.Fprintf(c.Conn, message.ToString())\n}", "func (t *cliTransHandler) Write(ctx context.Context, conn net.Conn, sendMsg remote.Message) (err error) {\n\tvar bufWriter remote.ByteBuffer\n\tstats2.Record(ctx, sendMsg.RPCInfo(), stats.WriteStart, nil)\n\tdefer func() {\n\t\tt.ext.ReleaseBuffer(bufWriter, err)\n\t\tstats2.Record(ctx, sendMsg.RPCInfo(), stats.WriteFinish, err)\n\t}()\n\n\tbufWriter = t.ext.NewWriteByteBuffer(ctx, conn, sendMsg)\n\tsendMsg.SetPayloadCodec(t.opt.PayloadCodec)\n\terr = t.codec.Encode(ctx, sendMsg, bufWriter)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn bufWriter.Flush()\n}", "func (x *Message) Write(w io.Writer) error {\n\tif x.Compression != NoCompression {\n\t\tpanic(\"message compression not supported\")\n\t}\n\tvar _magic int32\n\tif x.Compression != NoCompression {\n\t\t_magic = 1\n\t}\n\tvar _length int32 = 1 /* magic */ + _magic /* compression */ + 4 /* checksum */ + int32(len(x.Payload))\n\tw.Write(int32Bytes(_length))\n\tw.Write([]byte{byte(_magic)})\n\tif _magic == 1 {\n\t\tw.Write([]byte{byte(x.Compression)})\n\t}\n\tw.Write(uint32Bytes(crc32.ChecksumIEEE(x.Payload)))\n\t_, err := w.Write(x.Payload)\n\treturn err\n}", "func (m *Msg) PrintMsg(w io.Writer) (n int, err error) {\n\tif len(m.Body) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif m.Body[len(m.Body)-1] != '\\n' {\n\t\treturn fmt.Fprintln(w, m.Body)\n\t}\n\treturn fmt.Fprint(w, m.Body)\n}", "func Write(w *bufio.Writer, m Message, l *log.Logger) error {\n\tdebugf := func(format string, v ...interface{}) {\n\t\tif l != nil {\n\t\t\tl.Printf(format, v...)\n\t\t}\n\t}\n\tswitch m := m.(type) {\n\tcase KeepAlive:\n\t\tdebugf(\"-> KeepAlive\")\n\t\t_, err := w.Write([]byte{0, 0, 0, 0})\n\t\treturn err\n\tcase Choke:\n\t\tdebugf(\"-> Choke\")\n\t\treturn sendMessage0(w, 0)\n\tcase 
Unchoke:\n\t\tdebugf(\"-> Unchoke\")\n\t\treturn sendMessage0(w, 1)\n\tcase Interested:\n\t\tdebugf(\"-> Interested\")\n\t\treturn sendMessage0(w, 2)\n\tcase NotInterested:\n\t\tdebugf(\"-> NotInterested\")\n\t\treturn sendMessage0(w, 3)\n\tcase Have:\n\t\tdebugf(\"-> Have %v\", m.Index)\n\t\treturn sendMessage1(w, 4, m.Index)\n\tcase Bitfield:\n\t\tdebugf(\"-> Bitfield %v\", len(m.Bitfield))\n\t\treturn sendMessage(w, 5, m.Bitfield, nil, nil)\n\tcase Request:\n\t\tdebugf(\"-> Request %v %v %v\", m.Index, m.Begin, m.Length)\n\t\treturn sendMessage3(w, 6, m.Index, m.Begin, m.Length)\n\tcase Piece:\n\t\tdebugf(\"-> Piece %v %v %v\", m.Index, m.Begin, len(m.Data))\n\t\tb := make([]byte, 8)\n\t\tformatUint32(b, m.Index)\n\t\tformatUint32(b[4:], m.Begin)\n\t\terr := sendMessage(w, 7, b, m.Data, nil)\n\t\tPutBuffer(m.Data)\n\t\tm.Data = nil\n\t\treturn err\n\tcase Cancel:\n\t\tdebugf(\"-> Cancel %v %v %v\", m.Index, m.Begin, m.Length)\n\t\treturn sendMessage3(w, 8, m.Index, m.Begin, m.Length)\n\tcase Port:\n\t\tdebugf(\"-> Port %v\", m.Port)\n\t\treturn sendMessageShort(w, 9, m.Port)\n\tcase SuggestPiece:\n\t\tdebugf(\"-> SuggestPiece %v\", m.Index)\n\t\treturn sendMessage1(w, 13, m.Index)\n\tcase HaveAll:\n\t\tdebugf(\"-> HaveAll\")\n\t\treturn sendMessage0(w, 14)\n\tcase HaveNone:\n\t\tdebugf(\"-> HaveNone\")\n\t\treturn sendMessage0(w, 15)\n\tcase RejectRequest:\n\t\tdebugf(\"-> RejectRequest %v %v %v\",\n\t\t\tm.Index, m.Begin, m.Length)\n\t\treturn sendMessage3(w, 16, m.Index, m.Begin, m.Length)\n\tcase AllowedFast:\n\t\tdebugf(\"-> AllowedFast %v\", m.Index)\n\t\treturn sendMessage1(w, 17, m.Index)\n\tcase Extended0:\n\t\tdebugf(\"-> Extended0\")\n\t\tvar f extensionInfo\n\t\tf.Version = m.Version\n\t\tif m.IPv6 != nil {\n\t\t\tf.IPv6 = m.IPv6.To16()\n\t\t}\n\t\tif m.IPv4 != nil {\n\t\t\tf.IPv4 = m.IPv4.To4()\n\t\t}\n\t\tf.Port = m.Port\n\t\tf.ReqQ = m.ReqQ\n\t\tf.MetadataSize = m.MetadataSize\n\t\tf.Messages = m.Messages\n\t\tf.UploadOnly = 
boolOrString(m.UploadOnly)\n\t\tf.Encrypt = boolOrString(m.Encrypt)\n\t\tb, err := bencode.EncodeBytes(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sendExtended(w, 0, b, nil)\n\tcase ExtendedMetadata:\n\t\tdebugf(\"-> ExtendedMetadata %v %v\", m.Type, m.Piece)\n\t\ttpe := m.Type\n\t\tpiece := m.Piece\n\t\tinfo := &metadataInfo{Type: &tpe, Piece: &piece}\n\t\tif m.TotalSize > 0 {\n\t\t\ttotalsize := m.TotalSize\n\t\t\tinfo.TotalSize = &totalsize\n\t\t}\n\t\tb, err := bencode.EncodeBytes(info)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif m.Subtype == 0 {\n\t\t\tpanic(\"ExtendedMetadata subtype is 0\")\n\t\t}\n\t\treturn sendExtended(w, m.Subtype, b, m.Data)\n\tcase ExtendedPex:\n\t\tdebugf(\"-> ExtendedPex %v %v\", len(m.Added), len(m.Dropped))\n\t\ta4, f4, a6, f6 := pex.FormatCompact(m.Added)\n\t\td4, _, d6, _ := pex.FormatCompact(m.Dropped)\n\t\tinfo := pexInfo{\n\t\t\tAdded: a4,\n\t\t\tAddedF: f4,\n\t\t\tAdded6: a6,\n\t\t\tAdded6F: f6,\n\t\t\tDropped: d4,\n\t\t\tDropped6: d6,\n\t\t}\n\t\tb, err := bencode.EncodeBytes(info)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sendExtended(w, m.Subtype, b, nil)\n\tcase ExtendedDontHave:\n\t\tdebugf(\"-> ExtendedDontHave %v\", m.Index)\n\t\tb := formatUint32(make([]byte, 4), m.Index)\n\t\tif m.Subtype == 0 {\n\t\t\tpanic(\"ExtendedDontHave subtype is 0\")\n\t\t}\n\t\treturn sendExtended(w, m.Subtype, b, nil)\n\tdefault:\n\t\tpanic(\"Unknown message\")\n\t}\n}", "func (p *Peer) writeMessage(msg wire.Message, enc wire.MessageEncoding) error {\n\t// Don't do anything if we're disconnecting.\n\tif atomic.LoadInt32(&p.disconnect) != 0 {\n\t\treturn nil\n\t}\n\n\t// Use closures to log expensive operations so they are only run when\n\t// the logging level requires it.\n\tlog.Debugf(\"%v\", newLogClosure(func() string {\n\t\t// Debug summary of message.\n\t\tsummary := messageSummary(msg)\n\t\tif len(summary) > 0 {\n\t\t\tsummary = \" (\" + summary + \")\"\n\t\t}\n\t\treturn 
fmt.Sprintf(\"Sending %v%s to %s\", msg.Command(),\n\t\t\tsummary, p)\n\t}))\n\tlog.Tracef(\"%v\", newLogClosure(func() string {\n\t\treturn spew.Sdump(msg)\n\t}))\n\tlog.Tracef(\"%v\", newLogClosure(func() string {\n\t\tvar buf bytes.Buffer\n\t\t_, err := wire.WriteMessageWithEncodingN(&buf, msg, p.ProtocolVersion(),\n\t\t\tp.cfg.ChainParams.Net, enc)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\treturn spew.Sdump(buf.Bytes())\n\t}))\n\n\t// Write the message to the peer.\n\tn, err := wire.WriteMessageWithEncodingN(p.conn, msg,\n\t\tp.ProtocolVersion(), p.cfg.ChainParams.Net, enc)\n\tatomic.AddUint64(&p.bytesSent, uint64(n))\n\tif p.cfg.Listeners.OnWrite != nil {\n\t\tp.cfg.Listeners.OnWrite(p, n, msg, err)\n\t}\n\treturn err\n}", "func writeMessage(c context.Context, w http.ResponseWriter, msg proto.Message, format Format) {\n\tif msg == nil {\n\t\tpanic(\"msg is nil\")\n\t}\n\n\tvar body []byte\n\tvar err error\n\tswitch format {\n\tcase FormatBinary:\n\t\tbody, err = proto.Marshal(msg)\n\n\tcase FormatJSONPB:\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(JSONPBPrefix)\n\t\tm := jsonpb.Marshaler{}\n\t\terr = m.Marshal(&buf, msg)\n\t\tif err == nil {\n\t\t\t_, err = buf.WriteRune('\\n')\n\t\t}\n\t\tbody = buf.Bytes()\n\n\tcase FormatText:\n\t\tvar buf bytes.Buffer\n\t\terr = proto.MarshalText(&buf, msg)\n\t\tbody = buf.Bytes()\n\n\tdefault:\n\t\tpanic(fmt.Errorf(\"impossible: invalid format %d\", format))\n\n\t}\n\tif err != nil {\n\t\twriteError(c, w, withCode(err, codes.Internal))\n\t\treturn\n\t}\n\n\tw.Header().Set(HeaderGRPCCode, strconv.Itoa(int(codes.OK)))\n\tw.Header().Set(headerContentType, format.MediaType())\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tif _, err := w.Write(body); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"prpc: failed to write response body\")\n\t}\n}", "func (app *App) Write(key string, val JsDict) {\n\tapp.msgs = append(app.msgs, Message{Key: key, Value: val})\n}", "func (c *Conn) 
Write(message proto.Message) (int, error) {\n\tpayload, err := proto.Marshal(message)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\t// Prepare *type* prefix\n\tmtype := messageTypes[reflect.TypeOf(message)]\n\tif _, err := buf.Write(uint16tbs(mtype)); err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Prepare *size* prefix\n\tsize := uint32(len(payload))\n\tif _, err := buf.Write(uint32tbs(size)); err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Prepare *payload* body\n\tif _, err := buf.Write(payload); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn c.conn.Write(buf.Bytes())\n}", "func (c *Connection) write(msg interface{}) {\n c.mutex.Lock()\n c.log(fmt.Sprintf(\"Sending message: %+v\", msg))\n c.socket.WriteJSON(msg)\n c.mutex.Unlock()\n}", "func (mx *MxedWebsocketConn) WriteMessage(channelId string, eventName string, message []byte) {\n\t// Encode channelId and eventName and bytes with encoder\n\toutput := mx.protocol.Encode(channelId, eventName, message)\n\tmx.conn.WriteMessage(2, output)\n}", "func (c *connection) Write(mt int, payload []byte) error {\r\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\r\n\treturn c.ws.WriteMessage(mt, payload)\r\n}", "func (mw RecoveryMiddleware) ServeMsg(nc *nats.Conn) func(msg *nats.Msg) {\n\thandler := mw.next.ServeMsg(nc)\n\treturn func(msg *nats.Msg) {\n\t\tdefer func() {\n\t\t\tif rvr := recover(); rvr != nil {\n\t\t\t\terr := errors.New(fmt.Sprintf(\"Panic: %+v\", rvr))\n\t\t\t\terr = errors.WithStack(err)\n\t\t\t\tmw.logger.Log(\n\t\t\t\t\t\"err\", fmt.Sprintf(\"%+v\", err),\n\t\t\t\t\t\"subject\", msg.Subject,\n\t\t\t\t)\n\t\t\t}\n\t\t}()\n\n\t\thandler(msg)\n\t}\n}", "func ReceiveWriteStrMsgResponse(n *net_node.Node, connection net.Conn) {\n\t// Get the filename\n\tfile_name_buff := make([]byte, 100)\n\tconnection.Read(file_name_buff)\n\tfilename := strings.Trim(string(file_name_buff), \" \")\n\tmutex.Lock()\n\tn.Files[filename].NumAckWriting += 1\n\tmutex.Unlock()\n}", "func putMsg(conn 
net.Conn, msg string){\n\tfmt.Printf(\"C %s\\n\", msg)\n\tio.WriteString(conn, msg)//send our message\n}", "func (v *ViewView) write(msg string) {\n\tfmt.Fprint(v.Writer, msg)\n}", "func (r *Response) Write(w io.Writer) error", "func Write(w *bufio.Writer, m *Message) (int, error) {\n\tmsg, err := proto.Marshal(m)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tsize := len(msg)\n\n\tif size > maxBodySize {\n\t\treturn 0, ErrBodyTooLong\n\t}\n\theader := make([]byte, headerSize)\n\tbinary.BigEndian.PutUint16(header, uint16(size))\n\tn, err := w.Write(header)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tnn, err := w.Write(msg)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tif err = w.Flush(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn nn + n, nil\n}", "func WriteMessage(b []byte) (n int, err error) {\n\treturn defaultWriter.Write(b)\n}", "func (stream *MAMWriteStream) Write(msg *Message) (trinary.Trytes, error) {\n\tstream.Lock()\n\tdefer stream.Unlock()\n\tbndl := bundle.Bundle{}\n\n\tvar err error\n\tvar msgID trinary.Trits\n\tbndl, msgID, err = stream.m.BundleWriteHeaderOnChannel(bndl, stream.currentChannelID, msg.psks, msg.ntruPks)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttrytesData, err := converter.ASCIIToTrytes(string(msg.data))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar checksum mam.MsgChecksum\n\tif msg.integrity {\n\t\tchecksum = mam.MsgChecksumMAC\n\t} else if msg.signed {\n\t\tchecksum = mam.MsgChecksumSig\n\t} else {\n\t\tchecksum = mam.MsgChecksumNone\n\t}\n\n\tbndl, err = stream.m.BundleWritePacket(msgID, trytesData, checksum, false, bndl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbndl, err = broadcastMessage(stream.iotaAPI, bndl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn bndl[0].Hash, nil\n}", "func (e *agentEndpoint) write(msg *submitws.Message) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\tif e.isClosed {\n\t\treturn\n\t}\n\tif err := e.conn.WriteMessage(websocket.BinaryMessage, msg.ToBinary()); 
err != nil {\n\t\tlogger.WithError(err).Errorf(\"error sending message to agent with id == %s: %v\", e.id, err)\n\t\tif err := e.conn.Close(); err != nil {\n\t\t\tlogger.WithError(err).Errorf(\"error closing connection to agent with id == %s after write error: %v\", e.id, err)\n\t\t}\n\t\te.isClosed = true\n\t}\n}", "func (wsc *WSConnection) writeResponseMessage(id int64, resp *bytes.Buffer) {\n\n\tlog := wsc.tun.log\n\t// Get writer's lock\n\twsWriterMutex.Lock()\n\tdefer wsWriterMutex.Unlock()\n\t// Write response into the tunnel\n\twsc.ws.SetWriteDeadline(time.Now().Add(time.Minute))\n\twriter, err := wsc.ws.NextWriter(websocket.BinaryMessage)\n\t// got an error, reply with a \"hey, retry\" to the request handler\n\tif err != nil {\n\t\tlog.Errorf(\"[id=%d] WS could not find writer: %s\", id, err.Error())\n\t\twsc.ws.Close()\n\t\treturn\n\t}\n\n\t// write the request Id\n\t_, err = fmt.Fprintf(writer, \"%04x\", id)\n\tif err != nil {\n\t\twsc.ws.Close()\n\t\treturn\n\t}\n\n\t// write the response itself\n\tnum, err := io.Copy(writer, resp)\n\tif err != nil {\n\t\tlog.Errorf(\"WS cannot write response: %s\", err.Error())\n\t\twsc.ws.Close()\n\t\treturn\n\t}\n\tlog.Tracef(\"[id=%d] Completed writing response of length: %d\", id, num)\n\n\t// done\n\terr = writer.Close()\n\tif err != nil {\n\t\twsc.ws.Close()\n\t\treturn\n\t}\n}", "func (w *Writer) WriteMessage(m *Message) (err error) {\n\tmBytes, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuff := bytes.NewBuffer(mBytes)\n\tbuff.Grow(1)\n\tbuff.WriteByte(delimiter)\n\trecord := buff.String()\n\n\tw.mu.Lock()\n\tn, err := w.conn.Write([]byte(record))\n\tif err != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t\t_ = w.conn.Close()\n\t}\n\tw.mu.Unlock()\n\n\tif n != len(record) {\n\t\treturn fmt.Errorf(\"bad write (%d/%d)\", n, len(mBytes))\n\t}\n\n\treturn nil\n}", "func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {\n\t// The default dns.Mux checks the question 
section size, but we have our\n\t// own mux here. Check if we have a question section. If not drop them here.\n\tif r == nil || len(r.Question) == 0 {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\treturn\n\t}\n\n\tif !s.debug {\n\t\tdefer func() {\n\t\t\t// In case the user doesn't enable error plugin, we still\n\t\t\t// need to make sure that we stay alive up here\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tif s.stacktrace {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\\n%s\", s.Addr, rec, string(debug.Stack()))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\", s.Addr, rec)\n\t\t\t\t}\n\t\t\t\tvars.Panic.Inc()\n\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif !s.classChaos && r.Question[0].Qclass != dns.ClassINET {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\tif m, err := edns.Version(r); err != nil { // Wrong EDNS version, return at once.\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\n\t// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.\n\tw = request.NewScrubWriter(r, w)\n\n\tq := strings.ToLower(r.Question[0].Name)\n\tvar (\n\t\toff int\n\t\tend bool\n\t\tdshandler *Config\n\t)\n\n\tfor {\n\t\tif z, ok := s.zones[q[off:]]; ok {\n\t\t\tfor _, h := range z {\n\t\t\t\tif h.pluginChain == nil { // zone defined, but has not got any plugins\n\t\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif h.metaCollector != nil {\n\t\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t\t}\n\n\t\t\t\t// If all filter funcs pass, use this config.\n\t\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t\t// if there was a 
view defined for this Config, set the view name in the context\n\t\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t\t}\n\t\t\t\t\tif r.Question[0].Qtype != dns.TypeDS {\n\t\t\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// The type is DS, keep the handler, but keep on searching as maybe we are serving\n\t\t\t\t\t// the parent as well and the DS should be routed to it - this will probably *misroute* DS\n\t\t\t\t\t// queries to a possibly grand parent, but there is no way for us to know at this point\n\t\t\t\t\t// if there is an actual delegation from grandparent -> parent -> zone.\n\t\t\t\t\t// In all fairness: direct DS queries should not be needed.\n\t\t\t\t\tdshandler = h\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toff, end = dns.NextLabel(q, off)\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {\n\t\t// DS request, and we found a zone, use the handler for the query.\n\t\trcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)\n\t\tif !plugin.ClientWrite(rcode) {\n\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t}\n\t\treturn\n\t}\n\n\t// Wildcard match, if we have found nothing try the root zone as a last resort.\n\tif z, ok := s.zones[\".\"]; ok {\n\t\tfor _, h := range z {\n\t\t\tif h.pluginChain == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif h.metaCollector != nil {\n\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t}\n\n\t\t\t// If all filter funcs pass, use this config.\n\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t// if there was a view defined for this Config, set the view name in the context\n\t\t\t\t\tctx 
= context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t}\n\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Still here? Error out with REFUSED.\n\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n}", "func (producer *Producer) Write(ctx context.Context, msg Msg, topic string) error {\n\tkmsg, result, err := producer.buildMessage(msg, topic)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not build message: %s\", err)\n\t}\n\n\tselect {\n\tcase producer.saramaProducer.Input() <- &kmsg:\n\tcase <-ctx.Done():\n\t\treturn errors.New(\"message write failed: no space in processing channel\")\n\t}\n\n\tselect {\n\tcase err, ok := <-result:\n\t\tif !ok {\n\t\t\treturn errors.New(\"result channel was closed, write result unknown\")\n\t\t}\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn errors.New(\"message write failed: context expired, write result unknown, not waiting for it\")\n\t}\n}", "func writeMessage(data interface{}, w io.Writer) error {\n\n\tresBytes, err := jsoniter.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn encodeByteSlice(w, resBytes)\n}", "func (c *client) writeMsg(msg Message, ackChan chan Message, ticker *time.Ticker) {\n\tbyteMsg, _ := json.Marshal(&msg)\n\tvar currentBackOff int = 0\n\tvar epochsPassed int = 0\n\tc.conn.Write(byteMsg)\n\n\t// indicate that we have written in this epoch\n\tselect {\n\tcase c.writtenChan <- true:\n\tdefault:\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.lostCxn:\n\t\t\treturn\n\t\tcase <-ackChan:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\t// handle exponential backoff rules\n\t\t\tif epochsPassed == currentBackOff {\n\t\t\t\tc.conn.Write(byteMsg)\n\t\t\t\tselect {\n\t\t\t\tcase c.writtenChan <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tepochsPassed = 0\n\t\t\t\tif currentBackOff != c.maxBackOff {\n\t\t\t\t\tif currentBackOff == 0 
{\n\t\t\t\t\t\tcurrentBackOff = 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcurrentBackOff *= 2\n\t\t\t\t\t\tif currentBackOff > c.maxBackOff {\n\t\t\t\t\t\t\tcurrentBackOff = c.maxBackOff\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tepochsPassed++\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *Msg) Write(p []byte) (n int, err error) {\n\tm.Data = append(m.Data, p...)\n\treturn len(m.Data), nil\n}", "func (clt HTTPClient) Write(msg []byte, args *WriteArgs) (err error) {\n\tvar readWriteTimeout uint\n\tif args.TmoRdS > args.TmoWrS {\n\t\treadWriteTimeout = args.TmoRdS\n\t} else {\n\t\treadWriteTimeout = args.TmoWrS\n\t}\n\n\thttpClt := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: timeoutDialer(args.TmoCxn, readWriteTimeout),\n\t\t},\n\t\tTimeout: time.Duration(args.TmoSec) * time.Second,\n\t}\n\n\tvar resp *http.Response\n\tif args.CltType == conf.HTTPPost {\n\t\t// Post request\n\t\tresp, err = httpClt.Post(args.IPAddress, \"application/x-www-form-urlencoded\", bytes.NewBuffer(msg))\n\t\tif err != nil {\n\t\t\tclt.logger.Out(logrus.ErrorLevel, logrus.Fields{\"error\": err}, \"HTTP client failed to send the request.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tclt.logger.Out(logrus.InfoLevel, logrus.Fields{\"numBytes\": len(body)}, \"Successfully received msg reply.\")\n\n\t// Save response to a file.\n\tif args.SaveRes {\n\t\tif args.SaveResFilepath, err = clt.filer.BuildFilePath(args.SaveResDir, fmt.Sprintf(\"%03d\", args.ReqID)+\".res\"); err != nil {\n\t\t\tclt.logger.Out(logrus.ErrorLevel, logrus.Fields{\"filepath\": args.SaveResFilepath}, \"Cannot save the request.\")\n\t\t\terr = errors.Wrap(err, \"os.Stat\")\n\t\t}\n\n\t\tbuf := bytes.NewBuffer(body)\n\n\t\tgo args.SaveResCallback(args.SaveResFilepath, buf)\n\t}\n\treturn\n}", "func WriteMessageResponse(w http.ResponseWriter, message string, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": 
map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t\t\"message\": message,\n\t\t},\n\t\t\"data\": data,\n\t}\n\n\treturn jsonResponse(w, env)\n}", "func (self logWriter) Write(buf[]byte) (int, error) {\n line := string(buf)\n\n logMsg := LogMsg{\n Line: strings.TrimRight(line, \"\\n\"),\n }\n\n self.writeChan <- logMsg\n\n return len(buf), nil\n}", "func (s *Stream) Write(m *router.Message) error {\n\tselect {\n\tcase s.ready = <-s.readyWrite:\n\tcase s.err = <-s.errChan:\n\tdefault:\n\t}\n\n\tswitch {\n\tcase s.err != nil:\n\t\treturn s.err\n\tcase s.ready:\n\t\ts.writer.write(m)\n\t\treturn nil\n\tdefault:\n\t\treturn &StreamNotReadyError{Stream: s.name}\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func (t *transaction) Write(msg Message) error {\n\tt.out = append(t.out, msg)\n\treturn nil\n}", "func (s *Server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {\n\tm := new(dns.Msg)\n\tm.SetReply(req)\n\tm.Compress = false\n\n\tswitch req.Opcode {\n\tcase dns.OpcodeQuery:\n\t\tm.Authoritative = true\n\t\ts.parseQuery(m)\n\t}\n\n\terr := w.WriteMsg(m)\n\tif err != nil {\n\t\tlog.Warn().Err(err).Msg(\"failed to write response message\")\n\t}\n}", "func (wr *WrappedResponseWriter) Write(d []byte) (int, error) {\n\treturn wr.gw.Write(d)\n}", "func (srv *Service) WriteOk(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (log *Logger) write(msg LogMessage) error {\n\t//check if its a zero LogMessage\n\tif msg.M == \"\" {\n\t\treturn nil\n\t}\n\n\ttimeStamp := time.Now().Format(time.RFC3339)\n\tlogMessage := timeStamp + \" - \" + strings.ToUpper(msg.S) + \" - \" + msg.M\n\n\tif msg.D == \"system\" {\n\t\t_, err := log.SystemLog.WriteString(logMessage + \"\\n\")\n\t\tif err != nil {\n\t\t\treturn 
errors.New(\"Logger: Failed to write to system log file, with error: \" + err.Error())\n\t\t}\n\t} else if msg.D == \"network\" {\n\t\t_, err := log.NetworkLog.WriteString(logMessage + \"\\n\")\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Logger: Failed to write to network log file, with error: \" + err.Error())\n\t\t}\n\t}\n\treturn nil\n}", "func (s *SyslogWriter) Write(p []byte) (n int, err error) {\n\tp1 := deletePrefix.ReplaceAllString(string(p), \"\")\n\tlevel := \"\"\n\ttolog := string(replaceLevel.ReplaceAllStringFunc(p1, func(l string) string {\n\t\tlevel = l\n\t\treturn \"\"\n\t}))\n\tswitch level {\n\tcase \"[DEBUG] \":\n\t\ts.w.Debug(tolog)\n\tcase \"[INFO] \":\n\t\ts.w.Info(tolog)\n\tcase \"[NOTICE] \":\n\t\ts.w.Notice(tolog)\n\tcase \"[WARNING] \", \"[WARN] \":\n\t\ts.w.Warning(tolog)\n\tcase \"[ERROR] \", \"[ERR] \":\n\t\ts.w.Err(tolog)\n\tcase \"[CRIT] \":\n\t\ts.w.Crit(tolog)\n\tcase \"[ALERT] \":\n\t\ts.w.Alert(tolog)\n\tcase \"[EMERG] \":\n\t\ts.w.Emerg(tolog)\n\tdefault:\n\t\ts.w.Notice(tolog)\n\t}\n\treturn len(p), nil\n}", "func (o *CheckUserGetNamespaceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func EncodeMsg(msg Message) []byte {\n\tm := marshalutil.New()\n\tm.WriteByte(byte(msg.Type()))\n\tmsg.Write(m)\n\treturn m.Bytes()\n}" ]
[ "0.8023605", "0.7636882", "0.74785084", "0.718114", "0.68073887", "0.680327", "0.6802047", "0.66146004", "0.6377222", "0.63633126", "0.6352378", "0.6326357", "0.6089254", "0.6060336", "0.6011126", "0.6008627", "0.59936947", "0.5983884", "0.59683204", "0.59326553", "0.590117", "0.5888014", "0.5886498", "0.58677465", "0.58573663", "0.58500516", "0.582012", "0.5791123", "0.576617", "0.57588655", "0.5706872", "0.56962657", "0.5691861", "0.56839335", "0.56706643", "0.56390303", "0.562342", "0.5578425", "0.55748", "0.55668294", "0.5566251", "0.5559767", "0.5548067", "0.55180717", "0.5513145", "0.5489631", "0.5489583", "0.5471415", "0.54713607", "0.54662734", "0.5464594", "0.5458371", "0.54567045", "0.5446406", "0.5446234", "0.5429545", "0.54140466", "0.5408477", "0.5400596", "0.538512", "0.5383428", "0.5382002", "0.537338", "0.53650206", "0.5354802", "0.5346916", "0.53418237", "0.5337962", "0.5317627", "0.5307593", "0.530619", "0.529677", "0.52805305", "0.52794445", "0.5265993", "0.52649176", "0.52617395", "0.5257927", "0.5257151", "0.52533776", "0.525135", "0.5249827", "0.5244767", "0.52409565", "0.52317977", "0.52274835", "0.52267253", "0.5219759", "0.52032375", "0.51935667", "0.51933444", "0.5190975", "0.51849717", "0.51845807", "0.5175571", "0.51659304", "0.51599175", "0.5156134", "0.51410484", "0.51344407" ]
0.64959806
8
Write implements the dns.ResponseWriter interface.
func (r *ResponseWriter) Write(buf []byte) (int, error) { log.Warning("[WARNING] DNS64 called with Write: not performing DNS64") n, err := r.ResponseWriter.Write(buf) return n, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (wr *WrappedResponseWriter) Write(d []byte) (int, error) {\n\treturn wr.gw.Write(d)\n}", "func (r *ResponseReverter) Write(buf []byte) (int, error) {\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, err\n}", "func (w *MonResponseWriter) Write(data []byte) (int, error) {\n\treturn w.ResponseWriter.Write(data)\n}", "func (w *customResponseWriter) Write(b []byte) (int, error) {\n\tif w.status == 0 {\n\t\tw.status = http.StatusOK\n\t}\n\tn, err := w.ResponseWriter.Write(b)\n\tw.length += n\n\treturn n, err\n}", "func (r *Response) Write(p []byte) (int, error) {\n\tr.Started = true\n\treturn r.ResponseWriter.Write(p)\n}", "func (gzipRespWtr gzipResponseWriter) Write(data []byte) (int, error) {\n\treturn gzipRespWtr.Writer.Write(data)\n}", "func (o *GetDistrictForSchoolOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (rw *ResponseWriter) Write(bytes []byte) (int, error) {\n\tbytesOut, err := rw.ResponseWriter.Write(bytes)\n\trw.BytesOut += bytesOut\n\treturn bytesOut, err\n}", "func (w *responseWriter) Write(data []byte) (int, error) {\n\tn, err := w.ResponseWriter.Write(data)\n\tif w.resp.StatusCode == 0 {\n\t\tw.resp.StatusCode = http.StatusOK\n\t}\n\treturn n, err\n}", "func (w *logResponseWritter) Write(data []byte) (int, error) {\n\n\twritten, err := w.ResponseWriter.Write(data)\n\tw.size += written\n\n\treturn written, err\n}", "func (o *NewDiscoveryDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (rwp 
*ResponseWriterProxy) Write(bs []byte) (int, error) {\n\trwp.buffer.Write(bs)\n\treturn rwp.under.Write(bs)\n}", "func (r *LogRecord) Write(p []byte) (int, error) {\n\twritten, err := r.ResponseWriter.Write(p)\n\tr.responseBytes += int64(written)\n\treturn written, err\n}", "func Write(w http.ResponseWriter, data interface{}, statusCode int) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.WriteHeader(statusCode)\n\n\t// For now, we're assuming json.Marshal succeeds...\n\tmarshalledData, _ := json.Marshal(data)\n\tw.Write(marshalledData)\n}", "func (w *writerWrapper) Write(buf []byte) (int, error) {\n\tif !w.wroteHeader {\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\tn, err := w.ResponseWriter.Write(buf)\n\tw.bytes += n\n\treturn n, err\n}", "func Write(w http.ResponseWriter, r *http.Request, data interface{}) {\n\tlog.Debug().\n\t\tStr(\"host\", r.Host).\n\t\tStr(\"address\", r.RemoteAddr).\n\t\tStr(\"method\", r.Method).\n\t\tStr(\"requestURI\", r.RequestURI).\n\t\tStr(\"proto\", r.Proto).\n\t\tStr(\"useragent\", r.UserAgent()).\n\t\tMsgf(\"%s\", data)\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(&data)\n\treturn\n}", "func (o *WatchCoreV1NamespacedEndpointsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CreateCoordinationV1NamespacedLeaseOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, 
payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (cr ConnectResponse) Write(connWriter *bufio.Writer) error {\n\n\terr := connWriter.WriteByte(cr.Ver)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = connWriter.WriteByte(cr.Method)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn connWriter.Flush()\n}", "func (handler Handler) Write(w http.ResponseWriter, b []byte) (int, error) {\n\treturn w.Write(b)\n}", "func (r *Response) Write(w io.Writer) error {\n\n\t// Status line\n\ttext := r.Status\n\tif text == \"\" {\n\t\tvar ok bool\n\t\ttext, ok = StatusText[r.StatusCode]\n\t\tif !ok {\n\t\t\ttext = \"status code \" + strconv.Itoa(r.StatusCode)\n\t\t}\n\t}\n\tprotoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor)\n\tstatusCode := strconv.Itoa(r.StatusCode) + \" \"\n\ttext = strings.TrimPrefix(text, statusCode)\n\tio.WriteString(w, \"HTTP/\"+protoMajor+\".\"+protoMinor+\" \"+statusCode+text+\"\\r\\n\")\n\n\t// Process Body,ContentLength,Close,Trailer\n\ttw, err := newTransferWriter(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = tw.WriteHeader(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Rest of header\n\terr = r.Header.WriteSubset(w, respExcludeHeader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// End-of-header\n\tio.WriteString(w, \"\\r\\n\")\n\n\t// Write body and trailer\n\t_, err = tw.WriteBody(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Success\n\treturn nil\n}", "func (w *BodylessResponseWriter) Write(b []byte) (int, error) {\n\treturn 0, nil\n}", "func (o *AddNamespaceToGroupOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *AppResponseWriter) Write(data []byte) (n int, err error) {\n\tif !w.written 
{\n\t\tw.statusCode = http.StatusOK\n\t\tw.written = true\n\t}\n\treturn w.ResponseWriter.Write(data)\n}", "func (o *CheckUserGetNamespaceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (c *customResponseWriter) Write(b []byte) (int, error) {\n\tsize, err := c.ResponseWriter.Write(b)\n\tc.size += size\n\treturn size, err\n}", "func (rw *responseWriter) Write(b []byte) (int, error) {\n\tn, err := rw.ResponseWriter.Write(b)\n\trw.written += n\n\n\treturn n, err\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *responseWriter) Write(b []byte) (int, error) {\n\tif w.Status == 0 {\n\t\tw.Status = 200\n\t}\n\tn, err := w.ResponseWriter.Write(b)\n\tw.Length += 
n\n\treturn n, err\n}", "func (o *WatchNetworkingV1NetworkPolicyListForAllNamespacesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (res *ResponseRecorder) Write(contents []byte) (int, error) {\n\tif res.statusCode == 0 { // if not setted set it here\n\t\tres.WriteHeader(http.StatusOK)\n\t}\n\tres.chunks = append(res.chunks, contents)\n\treturn res.underline.Write(contents)\n}", "func (d *Data) Write (w http.ResponseWriter) {\n\tproto := Protocol{\n\t\tAuthorized: true,\n\t\tSuccess: d.Code == http.StatusOK || d.Code == 0,\n\t\tError: d.Msg,\n\t\tData: d.Data}\n\td.Data = &proto\n\t(*resp.Data)(d).Write(w)\n}", "func (m *mockHTTPWriter) Write(buf []byte) (int, error) {\n\tw := m.ResponseWriter\n\tif !strings.Contains(string(buf), \"certs\") && !strings.Contains(string(buf), \"BEGIN CERTIFICATE\") && !strings.Contains(string(buf), \"caname\") {\n\t\tm.t.Error(\"Invalid response being sent back from certificates endpoint\")\n\t}\n\treturn w.Write(buf)\n}", "func (res *Response) Write(p []byte) (n int, err error) {\n\treturn res.c.Write(p)\n}", "func (w *LoggingResponseWriter) Write(b []byte) (int, error) {\n\treturn w.writer.Write(b)\n}", "func (o *CreateCoordinationV1NamespacedLeaseCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetV1RdssOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = models.RDSS{}\n\t}\n\n\tif err := producer.Produce(rw, 
payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (s *Status) Write(w http.ResponseWriter) error {\n\tw.WriteHeader(s.Code)\n\tswitch ct := w.Header().Get(\"Content-Type\"); ct {\n\tcase \"application/json\":\n\t\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\t\treturn err\n\tdefault:\n\t\t_, err := io.WriteString(w, s.String())\n\t\treturn err\n\t}\n}", "func (r *Response) Write(data []byte) (int, error) {\n\treturn r.rw.Write(data)\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (cr CmdResponse) Write(connWriter *bufio.Writer) error {\n\tvar err error\n\terr = connWriter.WriteByte(cr.Ver)\n\terr = connWriter.WriteByte(cr.Rep)\n\terr = connWriter.WriteByte(cr.Rsv)\n\terr = connWriter.WriteByte(cr.Bnd.Atyp)\n\t_, err = connWriter.Write(cr.Bnd.Ipv4Addr[:])\n\t_, err = connWriter.Write(cr.Bnd.Port[:])\n\terr = connWriter.Flush()\n\treturn err\n}", "func (DefaultDispatcher) Write(rw http.ResponseWriter, resp Response) error {\n\tswitch x := resp.(type) {\n\tcase JSONResponse:\n\t\trw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tio.WriteString(rw, \")]}',\\n\") // Break parsing of JavaScript in order to prevent XSSI.\n\t\treturn json.NewEncoder(rw).Encode(x.Data)\n\tcase *TemplateResponse:\n\t\tt, ok := (x.Template).(*template.Template)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"%T is not a safe template and it cannot be parsed and written\", t)\n\t\t}\n\t\trw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\tif len(x.FuncMap) == 0 {\n\t\t\tif x.Name == \"\" {\n\t\t\t\treturn t.Execute(rw, x.Data)\n\t\t\t}\n\t\t\treturn t.ExecuteTemplate(rw, x.Name, x.Data)\n\t\t}\n\t\tcloned, err 
:= t.Clone()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloned = cloned.Funcs(x.FuncMap)\n\t\tif x.Name == \"\" {\n\t\t\treturn cloned.Execute(rw, x.Data)\n\t\t}\n\t\treturn cloned.ExecuteTemplate(rw, x.Name, x.Data)\n\tcase safehtml.HTML:\n\t\trw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\t\t_, err := io.WriteString(rw, x.String())\n\t\treturn err\n\tcase FileServerResponse:\n\t\trw.Header().Set(\"Content-Type\", x.ContentType())\n\t\t// The http package will take care of writing the file body.\n\t\treturn nil\n\tcase RedirectResponse:\n\t\thttp.Redirect(rw, x.Request.req, x.Location, int(x.Code))\n\t\treturn nil\n\tcase NoContentResponse:\n\t\trw.WriteHeader(int(StatusNoContent))\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"%T is not a safe response type and it cannot be written\", resp)\n\t}\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", "func (r *response) Write(b []byte) (int, error) {\n\tr.wrote = true\n\treturn r.rw.Write(b)\n}", "func (o *CreateZoneCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WatchApiregistrationV1APIServiceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListAppsV1NamespacedDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := 
producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (rw *responseWriter) Write(b []byte) (int, error) {\n\tsize, err := rw.ResponseWriter.Write(b)\n\trw.size += size\n\treturn size, err\n}", "func (w *responseWriter) Write(data []byte) (int, error) {\n\tif w.status == 0 {\n\t\tw.status = http.StatusOK\n\t}\n\tsize, err := w.rw.Write(data)\n\tw.size += size\n\treturn size, err\n}", "func (o *PostRegisterDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateUserGardenDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header X-Request-Id\n\n\txRequestID := o.XRequestID\n\tif xRequestID != \"\" {\n\t\trw.Header().Set(\"X-Request-Id\", xRequestID)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateCoreV1NamespacedPodBindingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (req *Request) Write(b []byte) (int, error) {\n\treturn req.res.Write(b)\n}", "func (o *GetDistrictForSchoolNotFound) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(404)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *responseWriter) Write(p []byte) (int, error) {\n\tw.started = true\n\treturn w.writer.Write(p)\n}", "func (w *responseWriter) Write(p []byte) (int, error) {\n\tw.started = true\n\treturn w.writer.Write(p)\n}", "func (o *ConnectCoreV1GetNodeProxyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (coll *Collector) Write(msg string, extra map[string]interface{}) (err error) {\n\n\tm := gelf.Message{\n\t\tVersion: \"1.1\",\n\t\tHost: coll.host,\n\t\tShort: msg,\n\t\tTimeUnix: float64(time.Now().Unix()),\n\t\tLevel: 6, // info always\n\t\tFacility: \"drone\",\n\t\tExtra: extra,\n\t}\n\n\tif err = coll.writer.WriteMessage(&m); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func (rl *ResponseLogger) Write(b []byte) (int, error) {\n\t// If no status has been written default to OK\n\tif rl.Status == 0 {\n\t\trl.Status = http.StatusOK\n\t}\n\n\tsize, err := rl.ResponseWriter.Write(b)\n\trl.Length += size\n\treturn size, err\n}", "func (o *CreateCoreV1NamespacedServiceAccountTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *AddNamespaceToGroupNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(404)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != 
nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *Response) Write(b []byte) (int, error) {\n\treturn r.Writer.Write(b)\n}", "func (h *ResponseHeader) Write(w *bufio.Writer) error {\n\t_, err := w.Write(h.Header())\n\treturn err\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *loggingWriter) Write(p []byte) (int, error) {\n\tif r.accessStats.status == 0 {\n\t\t// The status will be StatusOK if WriteHeader has not been called yet\n\t\tr.accessStats.status = http.StatusOK\n\t}\n\twritten, err := r.ResponseWriter.Write(p)\n\tr.accessStats.size += written\n\treturn written, err\n}", "func (o *GetPersonsUsernameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateUserGardenCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header X-Request-Id\n\n\txRequestID := o.XRequestID\n\tif xRequestID != \"\" {\n\t\trw.Header().Set(\"X-Request-Id\", xRequestID)\n\t}\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil 
{\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (lrw *LoggingResponseWriter) Write(content []byte) (int, error) {\n\treturn lrw.wrapped.Write(content)\n}", "func (o *ConnectCoreV1OptionsNodeProxyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *AddOrgMembersV1OK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion 
:= o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateClusterDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *WithCodeResponseWriter) Write(bytes []byte) (int, error) {\n\treturn w.Writer.Write(bytes)\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (m *DomainDnsSrvRecord) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n err := m.DomainDnsRecord.Serialize(writer)\n if err != nil {\n return err\n }\n {\n err = writer.WriteStringValue(\"nameTarget\", m.GetNameTarget())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"port\", m.GetPort())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"priority\", m.GetPriority())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"protocol\", m.GetProtocol())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"service\", m.GetService())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteInt32Value(\"weight\", m.GetWeight())\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {\n\t// The default dns.Mux checks the 
question section size, but we have our\n\t// own mux here. Check if we have a question section. If not drop them here.\n\tif r == nil || len(r.Question) == 0 {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\treturn\n\t}\n\n\tif !s.debug {\n\t\tdefer func() {\n\t\t\t// In case the user doesn't enable error plugin, we still\n\t\t\t// need to make sure that we stay alive up here\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tif s.stacktrace {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\\n%s\", s.Addr, rec, string(debug.Stack()))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\", s.Addr, rec)\n\t\t\t\t}\n\t\t\t\tvars.Panic.Inc()\n\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif !s.classChaos && r.Question[0].Qclass != dns.ClassINET {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\tif m, err := edns.Version(r); err != nil { // Wrong EDNS version, return at once.\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\n\t// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.\n\tw = request.NewScrubWriter(r, w)\n\n\tq := strings.ToLower(r.Question[0].Name)\n\tvar (\n\t\toff int\n\t\tend bool\n\t\tdshandler *Config\n\t)\n\n\tfor {\n\t\tif z, ok := s.zones[q[off:]]; ok {\n\t\t\tfor _, h := range z {\n\t\t\t\tif h.pluginChain == nil { // zone defined, but has not got any plugins\n\t\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif h.metaCollector != nil {\n\t\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t\t}\n\n\t\t\t\t// If all filter funcs pass, use this config.\n\t\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t\t// if there 
was a view defined for this Config, set the view name in the context\n\t\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t\t}\n\t\t\t\t\tif r.Question[0].Qtype != dns.TypeDS {\n\t\t\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// The type is DS, keep the handler, but keep on searching as maybe we are serving\n\t\t\t\t\t// the parent as well and the DS should be routed to it - this will probably *misroute* DS\n\t\t\t\t\t// queries to a possibly grand parent, but there is no way for us to know at this point\n\t\t\t\t\t// if there is an actual delegation from grandparent -> parent -> zone.\n\t\t\t\t\t// In all fairness: direct DS queries should not be needed.\n\t\t\t\t\tdshandler = h\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toff, end = dns.NextLabel(q, off)\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {\n\t\t// DS request, and we found a zone, use the handler for the query.\n\t\trcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)\n\t\tif !plugin.ClientWrite(rcode) {\n\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t}\n\t\treturn\n\t}\n\n\t// Wildcard match, if we have found nothing try the root zone as a last resort.\n\tif z, ok := s.zones[\".\"]; ok {\n\t\tfor _, h := range z {\n\t\t\tif h.pluginChain == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif h.metaCollector != nil {\n\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t}\n\n\t\t\t// If all filter funcs pass, use this config.\n\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t// if there was a view defined for this Config, set the view name in the 
context\n\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t}\n\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Still here? Error out with REFUSED.\n\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n}", "func (r tokenResponseWriter) Write(b []byte) (int, error) {\n\treturn r.w.Write(b) // pass it to the original ResponseWriter\n}", "func (w *ResponseWriterTee) Write(b []byte) (int, error) {\n\tw.Buffer.Write(b)\n\treturn w.w.Write(b)\n}", "func (o *ReplaceHTTPErrorRuleOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateCoreV1NamespacedPodBindingCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateZoneInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(500)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(404)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif 
err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Write(w http.ResponseWriter, e *Event) error {\n\t_, err := e.WriteTo(w)\n\tw.(http.Flusher).Flush()\n\treturn err\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetStateAddressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := 
producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *AddAttendeeToTalkOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *AddResourceUsageOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTransportByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (resp *response) Write(b []byte) (int, error) {\n\tsize, err := resp.ResponseWriter.Write(b)\n\tresp.size += size\n\treturn size, err\n}", "func (o *CreateACLDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CheckUserGetNamespaceNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(404)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil 
{\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceHTTPErrorRuleDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}" ]
[ "0.7227558", "0.66389495", "0.6472529", "0.63813066", "0.63485616", "0.6334436", "0.63220686", "0.6317124", "0.63072747", "0.62975454", "0.6284686", "0.6268591", "0.6265778", "0.62511075", "0.62383574", "0.6223491", "0.62064946", "0.6202987", "0.61590576", "0.61532104", "0.61507", "0.6146798", "0.6130645", "0.6124806", "0.6123503", "0.61134136", "0.61116916", "0.6109494", "0.6100847", "0.6077432", "0.60755986", "0.6073886", "0.6065049", "0.6062377", "0.60561615", "0.6045786", "0.6035983", "0.6031247", "0.6029183", "0.60277253", "0.6019191", "0.5997433", "0.5992984", "0.5984206", "0.5981906", "0.59655035", "0.59527147", "0.59473693", "0.5945057", "0.5940438", "0.5939879", "0.59364593", "0.593306", "0.5932495", "0.5932302", "0.59109384", "0.59099203", "0.5906903", "0.59019977", "0.59019977", "0.59017783", "0.5896981", "0.5891286", "0.5890185", "0.5889252", "0.5883647", "0.5877534", "0.5872929", "0.5872225", "0.58716804", "0.5869266", "0.58659786", "0.5862782", "0.58620703", "0.58616763", "0.58610713", "0.58590215", "0.58587134", "0.58580077", "0.58567244", "0.5856705", "0.5856281", "0.58538175", "0.58534193", "0.5849717", "0.58495927", "0.5846161", "0.5845378", "0.5844805", "0.58394974", "0.58303285", "0.5826138", "0.5825536", "0.58241594", "0.58238363", "0.58225644", "0.5818685", "0.58185965", "0.5818548", "0.5813299" ]
0.72080183
1
Hijack implements the dns.ResponseWriter interface.
func (r *ResponseWriter) Hijack() { r.ResponseWriter.Hijack() return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (r *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (mrw *MonitoringResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hj, ok := mrw.ResponseWriter.(http.Hijacker); ok {\n\t\treturn hj.Hijack()\n\t}\n\treturn nil, nil, fmt.Errorf(\"http.Hijacker interface is not supported\")\n}", "func (w *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tw.hijacked = true\n\tconn := newNodeConn(w.Value, w.reqReader)\n\tbrw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))\n\treturn conn, brw, nil\n\n}", "func (resp *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif resp.size < 0 {\n\t\tresp.size = 0\n\t}\n\treturn resp.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (response *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn response.Writer.(http.Hijacker).Hijack()\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif w.size < 0 {\n\t\tw.size = 0\n\t}\n\treturn w.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif w.size < 0 {\n\t\tw.size = 0\n\t}\n\treturn w.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thj, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"webserver doesn't support hijacking\")\n\t}\n\treturn hj.Hijack()\n}", "func (w *WithCodeResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn w.Writer.(http.Hijacker).Hijack()\n}", "func serveHijack(w http.ResponseWriter, targetConn net.Conn) error {\n\thijacker, ok := w.(http.Hijacker)\n\tif !ok {\n\t\treturn caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"ResponseWriter does not 
implement http.Hijacker\"))\n\t}\n\tclientConn, bufReader, err := hijacker.Hijack()\n\tif err != nil {\n\t\treturn caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"hijack failed: %v\", err))\n\t}\n\tdefer clientConn.Close()\n\t// bufReader may contain unprocessed buffered data from the client.\n\tif bufReader != nil {\n\t\t// snippet borrowed from `proxy` plugin\n\t\tif n := bufReader.Reader.Buffered(); n > 0 {\n\t\t\trbuf, err := bufReader.Reader.Peek(n)\n\t\t\tif err != nil {\n\t\t\t\treturn caddyhttp.Error(http.StatusBadGateway, err)\n\t\t\t}\n\t\t\ttargetConn.Write(rbuf)\n\t\t}\n\t}\n\t// Since we hijacked the connection, we lost the ability to write and flush headers via w.\n\t// Let's handcraft the response and send it manually.\n\tres := &http.Response{StatusCode: http.StatusOK,\n\t\tProto: \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: make(http.Header),\n\t}\n\tres.Header.Set(\"Server\", \"Caddy\")\n\n\terr = res.Write(clientConn)\n\tif err != nil {\n\t\treturn caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"failed to send response to client: %v\", err))\n\t}\n\n\treturn dualStream(targetConn, clientConn, clientConn, false)\n}", "func (w *LoggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn w.writer.(http.Hijacker).Hijack()\n}", "func (l *logWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn l.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\th, ok := w.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"the response writer doesn't support the http.Hijacker interface\")\n\t}\n\treturn h.Hijack()\n}", "func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := c.writer.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"ResponseWriter doesn't support Hijacker interface\")\n\t}\n\treturn 
hijacker.Hijack()\n}", "func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hj, ok := w.ResponseWriter.(http.Hijacker); ok {\n\t\treturn hj.Hijack()\n\t}\n\treturn nil, nil, fmt.Errorf(\"http.Hijacker interface is not supported\")\n}", "func (w *interceptRW) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif w, ok := w.ResponseWriter.(http.Hijacker); ok {\n\t\treturn w.Hijack()\n\t}\n\treturn nil, nil, http.ErrNotSupported\n}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tr.rendered = true\n\thijacker, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"the ResponseWriter doesn't support the Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}", "func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\tif w.handlerDone.isSet() {\n\t\tpanic(\"net/http: Hijack called after ServeHTTP finished\")\n\t}\n\tif w.wroteHeader {\n\t\tw.cw.flush()\n\t}\n\n\tc := w.conn\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Release the bufioWriter that writes to the chunk writer, it is not\n\t// used after a connection has been hijacked.\n\trwc, buf, err = c.hijackLocked()\n\tif err == nil {\n\t\tputBufioWriter(w.w)\n\t\tw.w = nil\n\t}\n\treturn rwc, buf, err\n}", "func (g *GzipResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn g.r.Hijack()\n}", "func (w *FlushingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hijacker, ok := w.WriterFlusher.(http.Hijacker); ok {\n\t\tw.hijacked = true\n\t\treturn hijacker.Hijack()\n\t}\n\treturn nil, nil, errors.New(\"cannot hijack connection\")\n}", "func (r *recorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\trw := bufio.NewReadWriter(bufio.NewReader(r.conn), bufio.NewWriter(r.conn))\n\treturn r.conn, rw, nil\n}", "func (d4w *d4Writer) hijackHeader() bool {\n\td4w.fb[1] = 2\n\treturn true\n}", "func (pe *providerEndpoint) setHijack(cb HijackFunc) {\n\tpe.hijack = cb\n}", "func 
wrapWriter(w http.ResponseWriter) writerProxy {\n\tvar _, cn = w.(http.CloseNotifier) // nolint\n\tvar _, fl = w.(http.Flusher)\n\tvar _, hj = w.(http.Hijacker)\n\tvar _, rf = w.(io.ReaderFrom)\n\n\tvar bw = basicWriter{ResponseWriter: w}\n\tif cn && fl && hj && rf {\n\t\treturn &fancyWriter{&bw}\n\t}\n\tif fl {\n\t\treturn &flushWriter{&bw}\n\t}\n\treturn &bw\n}", "func newResponseWriter(w http.ResponseWriter, mimePolicy MimePolicy, writerFactory WriterFactory, minSizeToCompress int) ResponseWriter {\n\tif _, ok := w.(http.Hijacker); ok {\n\t\t// w is an http.Hijacker, the return value must be also a hijackerResponseWriter.\n\t\tcached := hijackerResponseWriterPool.Get()\n\t\tif cached != nil {\n\t\t\twriter := cached.(*hijackerResponseWriter)\n\t\t\twriter.Reset(w, mimePolicy, writerFactory, minSizeToCompress)\n\t\t\treturn writer\n\t\t}\n\t\treturn internalNewHijackerResponseWriter(w, mimePolicy, writerFactory, minSizeToCompress)\n\t}\n\n\tcached := responseWriterPool.Get()\n\tif cached != nil {\n\t\twriter := cached.(*responseWriter)\n\t\twriter.Reset(w, mimePolicy, writerFactory, minSizeToCompress)\n\t\treturn writer\n\t}\n\treturn internalNewResponseWriter(w, mimePolicy, writerFactory, minSizeToCompress)\n\n}", "func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {\n\tvar (\n\t\thj, i0 = t.writer.(http.Hijacker)\n\t\tcn, i1 = t.writer.(http.CloseNotifier)\n\t\tpu, i2 = t.writer.(http.Pusher)\n\t\tfl, i3 = t.writer.(http.Flusher)\n\t\trf, i4 = t.writer.(io.ReaderFrom)\n\t)\n\n\tswitch {\n\tcase !i0 && !i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\tcase !i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\tio.ReaderFrom\n\t\t}{t, rf}\n\tcase !i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t}{t, fl}\n\tcase !i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct 
{\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, fl, rf}\n\tcase !i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t}{t, pu}\n\tcase !i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, rf}\n\tcase !i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, pu, fl}\n\tcase !i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, fl, rf}\n\tcase !i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t}{t, cn}\n\tcase !i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, rf}\n\tcase !i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, cn, fl}\n\tcase !i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, fl, rf}\n\tcase !i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, cn, pu}\n\tcase !i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, rf}\n\tcase !i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, cn, pu, fl}\n\tcase !i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, fl, rf}\n\tcase i0 && !i1 && !i2 && !i3 
&& !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t}{t, hj}\n\tcase i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, rf}\n\tcase i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{t, hj, fl}\n\tcase i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, fl, rf}\n\tcase i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t}{t, hj, pu}\n\tcase i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, rf}\n\tcase i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, pu, fl}\n\tcase i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, fl, rf}\n\tcase i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{t, hj, cn}\n\tcase i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, rf}\n\tcase i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, fl}\n\tcase i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, fl, rf}\n\tcase i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct 
{\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, hj, cn, pu}\n\tcase i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, rf}\n\tcase i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, pu, fl}\n\tcase i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, fl, rf}\n\tdefault:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\t}\n}", "func (wr *WrappedResponseWriter) Write(d []byte) (int, error) {\n\treturn wr.gw.Write(d)\n}", "func (u *HyperConn) SockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) {\n\treq, client, err := u.newRequestHyperConn(method, endpoint, data, ct)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient.Do(req)\n\tconn, br := client.Hijack()\n\treturn conn, br, nil\n}", "func Middleware(options ...HijackOptions) func(http.Handler) http.Handler {\n\topt := DefaultHijackOptions\n\tif len(options) > 0 {\n\t\topt = options[0]\n\t}\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx, log := chi.RouteContext(r.Context()), middleware.GetLogEntry(r)\n\t\t\tif ctx == nil || r.Method != \"OPTIONS\" {\n\t\t\t\t// Just proxy to the next handler\n\t\t\t\th.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Hijack request\n\t\t\tvar routes Routes\n\t\t\tu := getStringSliceFromURI(r.RequestURI)\n\t\t\tchi.Walk(ctx.Routes, walkFn(u, &routes))\n\t\t\traw, err := opt.Render(routes)\n\t\t\tif err != nil 
{\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Panic(fmt.Sprintf(\"rendering OPTIONS description failed: %s\", err), nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Header().Add(\"Content-Type\", opt.ContentType)\n\t\t\tw.Write(raw)\n\t\t})\n\t}\n}", "func (r *Response) SetWriter(w http.ResponseWriter) { r.ResponseWriter = w }", "func (h *Handler) prepResponse(w http.ResponseWriter) {\n\tw.Header().Add(varyHeader, originHeader)\n}", "func Proxy(addr string, logger Logger) dns.Handler {\n\treturn dns.HandlerFunc(func(w dns.ResponseWriter, req *dns.Msg) {\n\t\t// log request\n\t\tif logger != nil {\n\t\t\tlogger(ProxyRequest, req, nil, \"\")\n\t\t}\n\n\t\t// forward request to fallback\n\t\trs, err := dns.Exchange(req, addr)\n\t\tif err != nil {\n\t\t\tif logger != nil {\n\t\t\t\tlogger(ProxyError, nil, err, \"\")\n\t\t\t}\n\t\t\t_ = w.Close()\n\t\t\treturn\n\t\t}\n\n\t\t// log response\n\t\tif logger != nil {\n\t\t\tlogger(ProxyResponse, rs, nil, \"\")\n\t\t}\n\n\t\t// write response\n\t\terr = w.WriteMsg(rs)\n\t\tif err != nil {\n\t\t\tif logger != nil {\n\t\t\t\tlogger(NetworkError, nil, err, \"\")\n\t\t\t}\n\t\t\t_ = w.Close()\n\t\t}\n\t})\n}", "func (r *ResponseWriter) Write(buf []byte) (int, error) {\n\tlog.Warning(\"[WARNING] DNS64 called with Write: not performing DNS64\")\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, err\n}", "func (handler Handler) WriteHeader(w http.ResponseWriter, code int) {\n\n\tif apiConfig.TLSConfig.TLSEnabled {\n\t\tw.Header().Add(\"Strict-Transport-Security\", \"max-age=63072000; includeSubDomains\")\n\t}\n\n\tw.WriteHeader(code)\n\thandler.wroteHeader = true\n}", "func (aw AWrapper) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(w, r)\n\t\tw.Write([]byte(\"A wrapper wrote this\\n\"))\n\t})\n}", "func WrapHeaderHack(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req 
*http.Request) {\n\t\tvar wrote bool\n\t\tww := httpsnoop.Wrap(w, httpsnoop.Hooks{\n\t\t\tWrite: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn next\n\t\t\t},\n\t\t\tWriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn next\n\t\t\t},\n\t\t\tReadFrom: func(next httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn func(src io.Reader) (int64, error) {\n\t\t\t\t\tn, err := next(src)\n\t\t\t\t\tif n > 0 {\n\t\t\t\t\t\twrote = true\n\t\t\t\t\t}\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\n\t\th.ServeHTTP(ww, req)\n\n\t\tif !wrote {\n\t\t\tw.WriteHeader(204)\n\t\t}\n\t})\n}", "func (pe *providerEndpoint) getHijack() HijackFunc {\n\treturn pe.hijack\n}", "func (rww *ResponseWriterWrapper) Unwrap() http.ResponseWriter {\n\treturn rww.ResponseWriter\n}", "func WrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *Response) {\n\trw := responseWriter{\n\t\tResponseWriter: w,\n\t\tresp: Response{\n\t\t\tHeaders: w.Header(),\n\t\t},\n\t}\n\n\th, _ := w.(http.Hijacker)\n\tp, _ := w.(http.Pusher)\n\trf, _ := w.(io.ReaderFrom)\n\n\tswitch {\n\tcase h != nil && p != nil:\n\t\trwhp := responseWriterHijackerPusher{\n\t\t\tresponseWriter: rw,\n\t\t\tHijacker: h,\n\t\t\tPusher: p,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwhprf := responseWriterHijackerPusherReaderFrom{rwhp, rf}\n\t\t\treturn &rwhprf, &rwhprf.resp\n\t\t}\n\t\treturn &rwhp, &rwhp.resp\n\tcase h != nil:\n\t\trwh := responseWriterHijacker{\n\t\t\tresponseWriter: rw,\n\t\t\tHijacker: h,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwhrf := responseWriterHijackerReaderFrom{rwh, rf}\n\t\t\treturn &rwhrf, &rwhrf.resp\n\t\t}\n\t\treturn &rwh, &rwh.resp\n\tcase p != nil:\n\t\trwp := responseWriterPusher{\n\t\t\tresponseWriter: rw,\n\t\t\tPusher: p,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwprf := responseWriterPusherReaderFrom{rwp, rf}\n\t\t\treturn &rwprf, &rwprf.resp\n\t\t}\n\t\treturn 
&rwp, &rwp.resp\n\tdefault:\n\t\tif rf != nil {\n\t\t\trwrf := responseWriterReaderFrom{rw, rf}\n\t\t\treturn &rwrf, &rwrf.resp\n\t\t}\n\t\treturn &rw, &rw.resp\n\t}\n}", "func (b *basicWriter) Unwrap() http.ResponseWriter {\n\treturn b.ResponseWriter\n}", "func (rw *responseWriter) Unwrap() http.ResponseWriter {\n\treturn rw.ResponseWriter\n}", "func (o *GetIdentityIDUnreachable) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(520)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (ac challenge) SetHeaders(w http.ResponseWriter) {\n\n}", "func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {\n\t// The default dns.Mux checks the question section size, but we have our\n\t// own mux here. Check if we have a question section. If not drop them here.\n\tif r == nil || len(r.Question) == 0 {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\treturn\n\t}\n\n\tif !s.debug {\n\t\tdefer func() {\n\t\t\t// In case the user doesn't enable error plugin, we still\n\t\t\t// need to make sure that we stay alive up here\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tif s.stacktrace {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\\n%s\", s.Addr, rec, string(debug.Stack()))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Errorf(\"Recovered from panic in server: %q %v\", s.Addr, rec)\n\t\t\t\t}\n\t\t\t\tvars.Panic.Inc()\n\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)\n\t\t\t}\n\t\t}()\n\t}\n\n\tif !s.classChaos && r.Question[0].Qclass != dns.ClassINET {\n\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\tif m, err := edns.Version(r); err != nil { // Wrong EDNS version, return at once.\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\n\t// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.\n\tw = 
request.NewScrubWriter(r, w)\n\n\tq := strings.ToLower(r.Question[0].Name)\n\tvar (\n\t\toff int\n\t\tend bool\n\t\tdshandler *Config\n\t)\n\n\tfor {\n\t\tif z, ok := s.zones[q[off:]]; ok {\n\t\t\tfor _, h := range z {\n\t\t\t\tif h.pluginChain == nil { // zone defined, but has not got any plugins\n\t\t\t\t\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif h.metaCollector != nil {\n\t\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t\t}\n\n\t\t\t\t// If all filter funcs pass, use this config.\n\t\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t\t// if there was a view defined for this Config, set the view name in the context\n\t\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t\t}\n\t\t\t\t\tif r.Question[0].Qtype != dns.TypeDS {\n\t\t\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// The type is DS, keep the handler, but keep on searching as maybe we are serving\n\t\t\t\t\t// the parent as well and the DS should be routed to it - this will probably *misroute* DS\n\t\t\t\t\t// queries to a possibly grand parent, but there is no way for us to know at this point\n\t\t\t\t\t// if there is an actual delegation from grandparent -> parent -> zone.\n\t\t\t\t\t// In all fairness: direct DS queries should not be needed.\n\t\t\t\t\tdshandler = h\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toff, end = dns.NextLabel(q, off)\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {\n\t\t// DS request, and we found a zone, use the handler for the query.\n\t\trcode, _ := 
dshandler.pluginChain.ServeDNS(ctx, w, r)\n\t\tif !plugin.ClientWrite(rcode) {\n\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t}\n\t\treturn\n\t}\n\n\t// Wildcard match, if we have found nothing try the root zone as a last resort.\n\tif z, ok := s.zones[\".\"]; ok {\n\t\tfor _, h := range z {\n\t\t\tif h.pluginChain == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif h.metaCollector != nil {\n\t\t\t\t// Collect metadata now, so it can be used before we send a request down the plugin chain.\n\t\t\t\tctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})\n\t\t\t}\n\n\t\t\t// If all filter funcs pass, use this config.\n\t\t\tif passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {\n\t\t\t\tif h.ViewName != \"\" {\n\t\t\t\t\t// if there was a view defined for this Config, set the view name in the context\n\t\t\t\t\tctx = context.WithValue(ctx, ViewKey{}, h.ViewName)\n\t\t\t\t}\n\t\t\t\trcode, _ := h.pluginChain.ServeDNS(ctx, w, r)\n\t\t\t\tif !plugin.ClientWrite(rcode) {\n\t\t\t\t\terrorFunc(s.Addr, w, r, rcode)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Still here? 
Error out with REFUSED.\n\terrorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)\n}", "func forwardResponse(w http.ResponseWriter, response *http.Response) error {\n\tw.Header().Del(\"Server\") // remove Server: Caddy, append via instead\n\tw.Header().Add(\"Via\", strconv.Itoa(response.ProtoMajor)+\".\"+strconv.Itoa(response.ProtoMinor)+\" caddy\")\n\n\tfor header, values := range response.Header {\n\t\tfor _, val := range values {\n\t\t\tw.Header().Add(header, val)\n\t\t}\n\t}\n\tremoveHopByHop(w.Header())\n\tw.WriteHeader(response.StatusCode)\n\tbuf := bufferPool.Get().([]byte)\n\tbuf = buf[0:cap(buf)]\n\t_, err := io.CopyBuffer(w, response.Body, buf)\n\tbufferPool.Put(buf)\n\treturn err\n}", "func WrapResponse(w http.ResponseWriter, request types.InterxRequest, response types.ProxyResponse, statusCode int, saveToCache bool) {\n\tif statusCode == 0 {\n\t\tstatusCode = 503 // Service Unavailable Error\n\t}\n\tif saveToCache {\n\t\t// GetLogger().Info(\"[gateway] Saving in the cache\")\n\n\t\tchainIDHash := GetBlake2bHash(response.Chainid)\n\t\tendpointHash := GetBlake2bHash(request.Endpoint)\n\t\trequestHash := GetBlake2bHash(request)\n\t\tif conf, ok := RPCMethods[request.Method][request.Endpoint]; ok {\n\t\t\terr := PutCache(chainIDHash, endpointHash, requestHash, types.InterxResponse{\n\t\t\t\tResponse: response,\n\t\t\t\tStatus: statusCode,\n\t\t\t\tExpireAt: time.Now().Add(time.Duration(conf.CachingDuration) * time.Second),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t// GetLogger().Error(\"[gateway] Failed to save in the cache: \", err.Error())\n\t\t\t}\n\t\t\t// GetLogger().Info(\"[gateway] Save finished\")\n\t\t}\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Header().Add(\"Interx_chain_id\", response.Chainid)\n\tw.Header().Add(\"Interx_block\", strconv.FormatInt(response.Block, 10))\n\tw.Header().Add(\"Interx_blocktime\", response.Blocktime)\n\tw.Header().Add(\"Interx_timestamp\", strconv.FormatInt(response.Timestamp, 
10))\n\tw.Header().Add(\"Interx_request_hash\", response.RequestHash)\n\tif request.Endpoint == config.QueryDataReference {\n\t\treference, err := database.GetReference(string(request.Params))\n\t\tif err == nil {\n\t\t\tw.Header().Add(\"Interx_ref\", \"/download/\"+reference.FilePath)\n\t\t}\n\t}\n\n\tif response.Response != nil {\n\t\tresponse.Signature, response.Hash = GetResponseSignature(response)\n\n\t\tw.Header().Add(\"Interx_signature\", response.Signature)\n\t\tw.Header().Add(\"Interx_hash\", response.Hash)\n\t\tw.WriteHeader(statusCode)\n\n\t\tjson.NewEncoder(w).Encode(response.Response)\n\t} else {\n\t\tw.WriteHeader(statusCode)\n\n\t\tif response.Error == nil {\n\t\t\tresponse.Error = \"service not available\"\n\t\t}\n\t\tjson.NewEncoder(w).Encode(response.Error)\n\t}\n}", "func execmServerConnHijack(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*httputil.ServerConn).Hijack()\n\tp.Ret(1, ret, ret1)\n}", "func (o *GetCurrentUserUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header WWW_Authenticate\n\n\twWWAuthenticate := o.WWWAuthenticate\n\tif wWWAuthenticate != \"\" {\n\t\trw.Header().Set(\"WWW_Authenticate\", wWWAuthenticate)\n\t}\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *MonResponseWriter) Write(data []byte) (int, error) {\n\treturn w.ResponseWriter.Write(data)\n}", "func Middleware(h http.Handler) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n log.Println(\"Running PRE plugin\")\n r.Header.Set(\"X-Trace-ID\", strconv.Itoa(int(rand.Int63())))\n h.ServeHTTP(w, r)\n })\n}", "func (rec *Recoverer) Wrap(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := 
recover(); err != nil {\n\t\t\t\tw.Header().Set(\"Connection\", \"close\")\n\t\t\t\trestErr := &rest.Error{\n\t\t\t\t\tErr: fmt.Errorf(\"there was a panic: %s\", err),\n\t\t\t\t\tStatus: http.StatusInternalServerError,\n\t\t\t\t\tMessage: \"Internal Server Error\",\n\t\t\t\t\tIsSilent: false,\n\t\t\t\t\tInternalLogs: []zapcore.Field{\n\t\t\t\t\t\tzap.Stack(\"stack\"),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\trest.SendRESTError(w, r, rec.Logger, restErr)\n\t\t\t}\n\t\t}()\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func wrapWriter(w http.ResponseWriter) writerProxy {\n\tbw := basicWriter{ResponseWriter: w}\n\treturn &bw\n}", "func (o *UpdateHostIgnitionUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (a *Middleware) Wrap(h http.Handler) {\n\ta.Handler = h\n}", "func Wrap(w http.ResponseWriter) JResponseWriter {\n\tif w, ok := w.(JResponseWriter); ok {\n\t\treturn w\n\t}\n\n\tif w.Header().Get(\"Content-Type\") == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t}\n\n\treturn &Response{rw: w, fields: make(map[string]interface{})}\n}", "func makeDNSHandler(blacklist *Blacklist, upstream string, logging bool) func(dns.ResponseWriter, *dns.Msg) {\n\n\t// create the logger functions\n\tlogger := func(res *dns.Msg, duration time.Duration, how string) {}\n\terrorLogger := func(err error, description string) {\n\t\tlog.Print(description, err)\n\t}\n\tif logging {\n\t\tlogger = func(msg *dns.Msg, rtt time.Duration, how string) {\n\t\t\tlog.Printf(\"Using %s, response time %s:\\n%s\\n\", how, rtt.String(), msg.String())\n\t\t}\n\t\terrorLogger = func(err error, description string) {\n\n\t\t}\n\t}\n\n\t// cache for the DNS replies from the DNS server\n\tcache := NewCache()\n\n\t// we use a single client to resolve 
queries against the upstream DNS\n\tclient := new(dns.Client)\n\n\t// create the real handler\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\tstart := time.Now()\n\n\t\t// the standard allows multiple DNS questions in a single query... but nobody uses it, so we disallow it\n\t\t// https://stackoverflow.com/questions/4082081/requesting-a-and-aaaa-records-in-single-dns-query/4083071\n\t\tif len(req.Question) != 1 {\n\n\t\t\t// reply with a format error\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, dns.RcodeFormatError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// collect metrics\n\t\t\tduration := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"malformed_query\", \"-\").Observe(duration)\n\n\t\t\treturn\n\t\t}\n\n\t\t// extract the DNS question\n\t\tquery := req.Question[0]\n\t\tdomain := strings.TrimRight(query.Name, \".\")\n\t\tqueryType := dns.TypeToString[query.Qtype]\n\n\t\t// check the cache first: if a domain is in the cache, it cannot be blocked\n\t\t// this optimized response times for allowed domains over the blocked domains\n\t\tcached, found := cache.Get(&query)\n\t\tif found {\n\n\t\t\t// cache found, use the cached answer\n\t\t\tres := cached.SetReply(req)\n\t\t\tres.Answer = cached.Answer\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"cache\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"cache\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// then, check if the domain is blocked\n\t\tblocked := blacklist.Contains(domain)\n\t\tif blocked {\n\n\t\t\t// reply with \"domain not found\"\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, 
dns.RcodeNameError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"block\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"block\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// finally, query an upstream DNS\n\t\tres, rtt, err := client.Exchange(req, upstream)\n\t\tif err == nil {\n\n\t\t\t// reply to the query\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// cache the result if any\n\t\t\tif len(res.Answer) > 0 {\n\t\t\t\texpiration := time.Duration(res.Answer[0].Header().Ttl) * time.Second\n\t\t\t\tcache.Set(&query, res, expiration)\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tlogger(res, rtt, \"upstream\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream\", queryType).Observe(durationSeconds)\n\n\t\t} else {\n\n\t\t\t// log the error\n\t\t\terrorLogger(err, \"Error in resolve query against upstream DNS \"+upstream)\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream_error\", queryType).Observe(durationSeconds)\n\t\t}\n\t}\n}", "func normalResponse(w http.ResponseWriter, r *http.Request){\n\trespStr := `<html>\n<head><title> My Custom Response </title> </head>\n<body> <h1> Testing the response headers ...... 
</h1></body>\n</html>`\nw.Write([]byte(respStr))\n}", "func (o *ReplicateUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func wrapHandler(handler http.Handler, conf Config) http.HandlerFunc {\n return func(write http.ResponseWriter, req *http.Request) {\n\n // Add response headers\n setupResponse(&write, req, conf)\n if (*req).Method == \"OPTIONS\" {\n return\n }\n\n nfrw := &NotFoundRedirectRespWr{ResponseWriter: write}\n handler.ServeHTTP(nfrw, req)\n\n if conf.Debug {\n log.Debug(conf.DebugMsgPref, \"%s %s Response header: %s\", \"Serving:\", req.RequestURI, nfrw.status)\n }\n\n // If status code is 404 - crete logger output and redirect request to custom 404 page\n if nfrw.status == 404 {\n if conf.Debug {\n log.Debug(conf.DebugMsgPref, \"Redirecting %s to %s\", req.RequestURI, conf.NotFoundPage)\n }\n http.Redirect(write, req, conf.NotFoundPage, http.StatusFound)\n }\n }\n}", "func (bw BWrapper) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(w, r)\n\t\tw.Write([]byte(\"B wrapper wrote this\\n\"))\n\t})\n\n}", "func (rwp *ResponseWriterProxy) Write(bs []byte) (int, error) {\n\trwp.buffer.Write(bs)\n\treturn rwp.under.Write(bs)\n}", "func (dc DefaultContainer) GetResponseWriter() http.ResponseWriter { return dc.ResponseWriter }", "func defaulthandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Hello surfer!\")\n}", "func (a *SessionAuthenticator) Challenge(*http.Request, http.ResponseWriter) {\n}", "func (r *Response) Write(p []byte) (int, error) {\n\tr.Started = true\n\treturn r.ResponseWriter.Write(p)\n}", "func (r *ResponseReverter) Write(buf []byte) (int, error) {\n\tn, err := r.ResponseWriter.Write(buf)\n\treturn n, 
err\n}", "func (o *GetServiceInstanceByNameUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (w *writerWrapper) Unwrap() http.ResponseWriter {\n\treturn w.ResponseWriter\n}", "func (r *ResponseWriter) WriteMsg(res *dns.Msg) error {\n\tstate := request.Request{W: r, Req: res}\n\n\t// only respond with this when the request came in over IPv6.\n\tif state.Family() == 1 { // if it came in over v4, don't do anything.\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// do not modify if query is not AAAA or not of class IN.\n\tif state.QType() != dns.TypeAAAA || state.QClass() != dns.ClassINET {\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// do not modify if there are AAAA records or NameError. continue if NoData or any other error.\n\tty, _ := response.Typify(res, time.Now().UTC())\n\tif ty == response.NoError || ty == response.NameError {\n\t\tif hasAAAA(res) && ! 
r.translateAll {\n\t\t\treturn r.ResponseWriter.WriteMsg(res)\n\t\t}\n\t}\n\n\t// perform request to upstream.\n\tres2, err := r.Proxy.Lookup(state, state.Name(), dns.TypeA)\n\tif err != nil {\n\t\tlog.Warningf(\"[WARNING] Unable to query upstream DNS: %v\", err)\n\t\tres.MsgHdr.Rcode = dns.RcodeServerFailure\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// modify response.\n\tres.MsgHdr.Rcode = dns.RcodeSuccess\n\tnsTtl := uint32(600)\n\tfor i := 0; i < len(res.Ns); i++ {\n\t\tif res.Ns[i].Header().Rrtype == dns.TypeSOA {\n\t\t\tnsTtl = res.Ns[i].Header().Ttl\n\t\t}\n\t}\n\tres.Answer = res2.Answer\n\tfor i := 0; i < len(res.Answer); i++ {\n\t\tans := res.Answer[i]\n\t\thdr := ans.Header()\n\t\tif hdr.Rrtype == dns.TypeA {\n\t\t\taaaa, _ := To6(r.Prefix, ans.(*dns.A).A)\n\t\t\tttl := nsTtl\n\t\t\tif ans.Header().Ttl < ttl {\n\t\t\t\tttl = ans.Header().Ttl\n\t\t\t}\n\t\t\tres.Answer[i] = &dns.AAAA{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: hdr.Name,\n\t\t\t\t\tRrtype: dns.TypeAAAA,\n\t\t\t\t\tClass: hdr.Class,\n\t\t\t\t\tTtl: ttl,\n\t\t\t\t},\n\t\t\t\tAAAA: aaaa,\n\t\t\t}\n\t\t}\n\t}\n\tres.Ns = []dns.RR{}\n\n\treturn r.ResponseWriter.WriteMsg(res)\n}", "func (o *ReplaceHTTPErrorRuleDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetNamespacedNotebooksUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\r\n\r\n\trw.WriteHeader(401)\r\n\tif o.Payload != nil {\r\n\t\tpayload := o.Payload\r\n\t\tif err := producer.Produce(rw, payload); err != nil 
{\r\n\t\t\tpanic(err) // let the recovery middleware deal with this\r\n\t\t}\r\n\t}\r\n}", "func ProxyRootHandler(\n\tclient *http.Client,\n\ttargetURL, selfURL *url.URL,\n\tlogger *log.Logger,\n) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tnewURL := &url.URL{\n\t\t\tScheme: targetURL.Scheme,\n\t\t\tHost: targetURL.Host,\n\t\t\t// In incoming r.URL only these 2 fields are set:\n\t\t\tPath: r.URL.Path,\n\t\t\tRawQuery: r.URL.RawQuery,\n\t\t}\n\t\treq, err := http.NewRequest(r.Method, newURL.String(), r.Body)\n\t\tif CheckError(logger, w, err) {\n\t\t\treturn\n\t\t}\n\t\treq.Header.Set(\"X-Forwarded-For\", r.RemoteAddr)\n\t\tCopyRequestHeaders(r, req, requestHeadersToCopy)\n\n\t\tresp, err := client.Do(req)\n\t\tif CheckError(logger, w, err) {\n\t\t\treturn\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif CheckError(logger, w, err) {\n\t\t\treturn\n\t\t}\n\n\t\theader := w.Header()\n\t\tfor key, values := range resp.Header {\n\t\t\tcanonicalKey := textproto.CanonicalMIMEHeaderKey(key)\n\t\t\tfor _, value := range values {\n\t\t\t\tif canonicalKey == \"Location\" {\n\t\t\t\t\tvalue = RewriteURL(logger, value, targetURL.Host, selfURL)\n\t\t\t\t}\n\t\t\t\theader.Add(canonicalKey, value)\n\t\t\t}\n\t\t}\n\t\tw.WriteHeader(resp.StatusCode)\n\t\tif _, err := w.Write(body); err != nil {\n\t\t\tif logger != nil {\n\t\t\t\tlogger.Print(err)\n\t\t\t}\n\t\t}\n\t\tif flusher, ok := w.(http.Flusher); ok {\n\t\t\tflusher.Flush()\n\t\t}\n\t}\n}", "func (o *unauthorizedErrorResponder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.WriteHeader(500)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func DefaultResponder(w http.ResponseWriter, r *http.Request, rh *Request) {\n\tif (len(rh.Response.Header)) > 0 
{\n\t\tfor k := range rh.Response.Header {\n\t\t\tw.Header().Add(k, rh.Response.Header.Get(k))\n\t\t}\n\t}\n\tif rh.Response.StatusCode > 0 {\n\t\tw.WriteHeader(rh.Response.StatusCode)\n\t}\n\tif (len(rh.Response.BodyBuffer)) > 0 {\n\t\tw.Write(rh.Response.BodyBuffer)\n\t}\n}", "func (o *GetIBAServerUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (b *Browser) HijackRequests() *HijackRouter {\n\treturn newHijackRouter(b, b).initEvents()\n}", "func (lw LoggingWrapper) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(w, r)\n\t\tlog.Info(fmt.Sprintf(\"request for %v with method %v\", r.RequestURI, r.Method))\n\t})\n}", "func (p *Page) HijackRequests() *HijackRouter {\n\treturn newHijackRouter(p.browser, p).initEvents()\n}", "func (l *Logger) Middleware(next goji.Handler) goji.Handler {\n\tmiddleware := func(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\t\tl.printRequest(ctx, r)\n\n\t\t// WrapWriter lets us peek at ResponseWriter outputs\n\t\tlw := mutil.WrapWriter(w)\n\n\t\tstartTime := time.Now()\n\t\tnext.ServeHTTPC(ctx, lw, r)\n\n\t\tif lw.Status() == 0 {\n\t\t\tlw.WriteHeader(http.StatusOK)\n\t\t}\n\n\t\tfinishTime := time.Now()\n\n\t\tl.printResponse(lw, finishTime.Sub(startTime))\n\t}\n\n\treturn goji.HandlerFunc(middleware)\n}", "func (fn MuxWrappable) Wrapped(tokenCheck bool) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tlogEndpoint(req)\n\t\tif tokenCheck {\n\t\t\t_, ok := AuthHandler(w, req)\n\t\t\tif !ok {\n\t\t\t\treturn // AuthHandler takes care of this stuff\n\t\t\t}\n\t\t}\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\tstatus, err 
:= fn(w, req)\n\t\tif err != nil {\n\t\t\temap := makeErrMap()\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, emap[status], status)\n\t\t}\n\t}\n}", "func (o *ReplicateForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(403)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *rememberingWriter) WriteHeader(code int) {\n\tif r.statusCode == 0 {\n\t\tr.statusCode = code\n\t}\n\tr.ResponseWriter.WriteHeader(code)\n}", "func (r *Response) Write(w io.Writer) error", "func (r *Responder) Conflict() { r.write(http.StatusConflict) }", "func (o *RegisterInfraEnvUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetWhoamiUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(401)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (e *engine) writeHeaders(ctx *Context) {\n\tfor k, v := range ctx.Reply().Hdr {\n\t\tfor _, vv := range v {\n\t\t\tctx.Res.Header().Add(k, vv)\n\t\t}\n\t}\n\n\tctx.Res.Header().Set(ahttp.HeaderServer, aahServerName)\n\n\t// Set the HSTS if SSL is enabled on aah server\n\t// Know more: https://www.owasp.org/index.php/HTTP_Strict_Transport_Security_Cheat_Sheet\n\tif AppIsSSLEnabled() {\n\t\tctx.Res.Header().Set(ahttp.HeaderStrictTransportSecurity, hstsHeaderValue)\n\t}\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (middleware *Middleware) Middleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif urlBlacklisted(r.RequestURI) {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tnow := time.Now().UTC()\n\t\tsw := &customResponseWriter{ResponseWriter: w}\n\t\trequestID := requestID()\n\t\tctx := context.WithValue(r.Context(), \"request-id\", requestID)\n\t\tr = r.WithContext(ctx)\n\t\tnext.ServeHTTP(sw, r)\n\t\tfinishTime := time.Now().UTC()\n\n\t\tdefer func() {\n\t\t\tgo func(req *http.Request, sw *customResponseWriter) {\n\t\t\t\tvar match mux.RouteMatch\n\t\t\t\tif middleware.router.Match(r, &match) && match.Route != nil {\n\t\t\t\t\tvar routeName string\n\t\t\t\t\trouteName = match.Route.GetName()\n\t\t\t\t\tif len(routeName) == 0 {\n\t\t\t\t\t\tif r, err := match.Route.GetPathTemplate(); err == nil {\n\t\t\t\t\t\t\trouteName = r\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\trecord := &Record{\n\t\t\t\t\t\tRouteName: routeName,\n\t\t\t\t\t\tIPAddr: req.RemoteAddr,\n\t\t\t\t\t\tTimestamp: finishTime,\n\t\t\t\t\t\tMethod: req.Method,\n\t\t\t\t\t\tURI: req.RequestURI,\n\t\t\t\t\t\tProtocol: req.Proto,\n\t\t\t\t\t\tReferer: req.Referer(),\n\t\t\t\t\t\tUserAgent: req.UserAgent(),\n\t\t\t\t\t\tStatus: sw.status,\n\t\t\t\t\t\tElapsedTime: finishTime.Sub(now),\n\t\t\t\t\t\tResponseBytes: sw.length,\n\t\t\t\t\t\tRequestID: requestID,\n\t\t\t\t\t}\n\n\t\t\t\t\tmiddleware.processHooks(record)\n\t\t\t\t}\n\t\t\t}(r, sw)\n\t\t}()\n\t})\n}", "func (p *Proxy) onResponse(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {\n\tfor _, h := range mubeng.HopHeaders {\n\t\tresp.Header.Del(h)\n\t}\n\n\treturn resp\n}", "func writeResponse(status int, w http.ResponseWriter, out io.Reader) error {\n\t// hijack the connection so we can write our own chunked output and trailers\n\thijacker, ok := 
w.(http.Hijacker)\n\tif !ok {\n\t\tlog.Error(\"Failed to create hijacker! cannot continue!\")\n\t\treturn errors.New(\"Could not create hijacker\")\n\t}\n\tconn, writer, err := hijacker.Hijack()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\t// write status\n\twriter.WriteString(fmt.Sprintf(\"HTTP/1.1 %d %s\\r\\n\", status, http.StatusText(status)))\n\n\t// Write out headers\n\tw.Header().Write(writer)\n\n\t// end of headers\n\twriter.WriteString(\"\\r\\n\")\n\n\t// write body\n\tstreamErr := writeChunks(out, writer)\n\n\t// close body\n\twriter.WriteString(\"0\\r\\n\")\n\n\t// if there was a stream error, write out an error trailer. hopefully\n\t// the client will pick it up!\n\tif streamErr != nil {\n\t\twriter.WriteString(StreamErrHeader + \": \" + sanitizedErrStr(streamErr) + \"\\r\\n\")\n\t}\n\twriter.WriteString(\"\\r\\n\") // close response\n\twriter.Flush()\n\treturn streamErr\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *GetIdentityIDNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func dummyServerResponse(w http.ResponseWriter, r *http.Request) {\n\tw.Write(dummyBytes())\n}", "func (o *UpdateHostIgnitionForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(403)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceHTTPErrorRuleNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := 
o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(404)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *Responder) UseProxy() { r.write(http.StatusUseProxy) }", "func Middleware(handler http.Handler) http.Handler {\n\twrappedHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\t// get a new context with our trace from the request, and add common fields\n\t\tctx, span := common.StartSpanOrTraceFromHTTP(r)\n\t\tdefer span.Send()\n\t\t// push the context with our trace and span on to the request\n\t\tr = r.WithContext(ctx)\n\n\t\t// replace the writer with our wrapper to catch the status code\n\t\twrappedWriter := common.NewResponseWriter(w)\n\n\t\t// get bits about the handler\n\t\thandler := middleware.Handler(ctx)\n\t\tif handler == nil {\n\t\t\tspan.AddField(\"handler.name\", \"http.NotFound\")\n\t\t\thandler = http.NotFoundHandler()\n\t\t} else {\n\t\t\thType := reflect.TypeOf(handler)\n\t\t\tspan.AddField(\"handler.type\", hType.String())\n\t\t\tname := runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name()\n\t\t\tspan.AddField(\"handler.name\", name)\n\t\t\tspan.AddField(\"name\", name)\n\t\t}\n\t\t// find any matched patterns\n\t\tpm := middleware.Pattern(ctx)\n\t\tif pm != nil {\n\t\t\t// TODO put a regex on `p.String()` to pull out any `:foo` and then\n\t\t\t// use those instead of trying to pull them out of the pattern some\n\t\t\t// other way\n\t\t\tif p, ok := pm.(*pat.Pattern); ok {\n\t\t\t\tspan.AddField(\"goji.pat\", p.String())\n\t\t\t\tspan.AddField(\"goji.methods\", p.HTTPMethods())\n\t\t\t\tspan.AddField(\"goji.path_prefix\", p.PathPrefix())\n\t\t\t\tpatvar := strings.TrimPrefix(p.String(), p.PathPrefix()+\":\")\n\t\t\t\tspan.AddField(\"goji.pat.\"+patvar, pat.Param(r, 
patvar))\n\t\t\t} else {\n\t\t\t\tspan.AddField(\"pat\", \"NOT pat.Pattern\")\n\n\t\t\t}\n\t\t}\n\t\t// TODO get all the parameters and their values\n\t\thandler.ServeHTTP(wrappedWriter.Wrapped, r)\n\t\tif wrappedWriter.Status == 0 {\n\t\t\twrappedWriter.Status = 200\n\t\t}\n\t\tspan.AddField(\"response.status_code\", wrappedWriter.Status)\n\t}\n\treturn http.HandlerFunc(wrappedHandler)\n}", "func mockConnUpgradeHandler(t *testing.T, upgradeType string, write []byte) http.Handler {\n\tt.Helper()\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trequire.Equal(t, constants.WebAPIConnUpgrade, r.URL.Path)\n\t\trequire.Equal(t, upgradeType, r.Header.Get(constants.WebAPIConnUpgradeHeader))\n\t\trequire.Equal(t, upgradeType, r.Header.Get(constants.WebAPIConnUpgradeTeleportHeader))\n\t\trequire.Equal(t, constants.WebAPIConnUpgradeConnectionType, r.Header.Get(constants.WebAPIConnUpgradeConnectionHeader))\n\n\t\thj, ok := w.(http.Hijacker)\n\t\trequire.True(t, ok)\n\n\t\tconn, _, err := hj.Hijack()\n\t\trequire.NoError(t, err)\n\t\tdefer conn.Close()\n\n\t\t// Upgrade response.\n\t\tresponse := &http.Response{\n\t\t\tStatusCode: http.StatusSwitchingProtocols,\n\t\t\tProtoMajor: 1,\n\t\t\tProtoMinor: 1,\n\t\t}\n\t\trequire.NoError(t, response.Write(conn))\n\n\t\t// Upgraded.\n\t\tswitch upgradeType {\n\t\tcase constants.WebAPIConnUpgradeTypeALPNPing:\n\t\t\t// Wrap conn with Ping and write some pings.\n\t\t\tpingConn := pingconn.New(conn)\n\t\t\tpingConn.WritePing()\n\t\t\t_, err = pingConn.Write(write)\n\t\t\trequire.NoError(t, err)\n\t\t\tpingConn.WritePing()\n\n\t\tdefault:\n\t\t\t_, err = conn.Write(write)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t})\n}" ]
[ "0.77528965", "0.7647703", "0.75203705", "0.74987763", "0.74898046", "0.74327314", "0.7413569", "0.7413569", "0.7370214", "0.7349438", "0.73259205", "0.7270454", "0.72371703", "0.71998346", "0.71146065", "0.71057314", "0.7030835", "0.7023406", "0.698105", "0.6809214", "0.6698819", "0.6391877", "0.6155211", "0.5706467", "0.56611305", "0.5638605", "0.56085646", "0.560144", "0.55330783", "0.552074", "0.5500485", "0.54725593", "0.54582095", "0.54353386", "0.5411048", "0.5410901", "0.53687567", "0.5309409", "0.52869534", "0.5263718", "0.5254896", "0.5252623", "0.521443", "0.5210659", "0.5175992", "0.51718515", "0.516211", "0.5151355", "0.5136498", "0.51362866", "0.51332325", "0.5127733", "0.51082194", "0.51015395", "0.51009536", "0.5098619", "0.5098296", "0.50902337", "0.50812477", "0.50773543", "0.5075248", "0.5072291", "0.5067274", "0.505827", "0.5054457", "0.50481975", "0.5045466", "0.5042081", "0.50417113", "0.50346065", "0.5026482", "0.50234324", "0.5023231", "0.5004391", "0.4990454", "0.49897757", "0.49890915", "0.4978275", "0.49779207", "0.49749643", "0.49551", "0.49494684", "0.49444723", "0.49433783", "0.49414068", "0.49379826", "0.49310353", "0.49224082", "0.49186614", "0.49156162", "0.49139678", "0.49138877", "0.49079368", "0.4906548", "0.49045685", "0.49043438", "0.48982808", "0.4895997", "0.4894289", "0.4891534" ]
0.79434186
0
To6 takes a prefix and IPv4 address and returns an IPv6 address according to RFC 6052.
func To6(prefix *net.IPNet, addr net.IP) (net.IP, error) { addr = addr.To4() if addr == nil { return nil, errors.New("Not a valid IPv4 address") } n, _ := prefix.Mask.Size() // assumes prefix has been validated during setup v6 := make([]byte, 16) i, j := 0, 0 for ; i < n/8; i++ { v6[i] = prefix.IP[i] } for ; i < 8; i, j = i+1, j+1 { v6[i] = addr[j] } if i == 8 { i++ } for ; j < 4; i, j = i+1, j+1 { v6[i] = addr[j] } return v6, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ToIpv6(ip string) string {\n\t// ip := \"2001:b28:f23d:f001::a\"\n\t// log.Println(strings.Split(ip, \"::\"))\n\t// log.Println(strings.Split(strings.Split(ip, \"::\")[0], \":\"))\n\t// log.Println(strings.Split(strings.Split(ip, \"::\")[1], \":\"))\n\t// log.Println(8 - len(strings.Split(strings.Split(ip, \"::\")[0], \":\")) - len(strings.Split(strings.Split(ip, \"::\")[1], \":\")))\n\tif !strings.Contains(ip, \"::\") {\n\t\treturn ip\n\t}\n\tfirstSub := strings.Split(ip, \"::\")\n\tipv6b1 := firstSub[0]\n\tipv6b2 := firstSub[1]\n\tb1, b2 := len(ipv6b1), len(ipv6b2)\n\tneedZero := 0\n\tif b1 == 0 {\n\t\tneedZero = 8 - len(strings.Split(ipv6b2, \":\"))\n\t} else {\n\t\tneedZero = 8 - len(strings.Split(ipv6b1, \":\")) -\n\t\t\tlen(strings.Split(ipv6b2, \":\"))\n\t}\n\t// log.Println(ipv6b1, \"--\", ipv6b2, \"--\", needZero, len(strings.Split(ipv6b1, \":\")), len(strings.Split(ipv6b2, \":\")))\n\tfor i := 0; i < needZero; i++ {\n\t\tif b1 == 0 {\n\t\t\tipv6b1 = ipv6b1 + \"0:\"\n\t\t\tif i == needZero-1 {\n\t\t\t\tipv6b1 = ipv6b1 + ipv6b2\n\t\t\t}\n\t\t} else if b2 == 0 {\n\t\t\tipv6b1 = ipv6b1 + \":0\"\n\t\t\tif i == needZero-1 {\n\t\t\t\tipv6b1 = ipv6b1 + \":0\"\n\t\t\t}\n\t\t} else {\n\t\t\tipv6b1 = ipv6b1 + \":0\"\n\t\t\tif i == needZero-1 {\n\t\t\t\tipv6b1 = ipv6b1 + \":\" + ipv6b2\n\t\t\t}\n\t\t}\n\t\t// log.Println(ipv6b1)\n\t}\n\treturn ipv6b1\n}", "func GenerateIPv6ULAPrefix() (string, error) {\n\tulaAddr := []byte{0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\t_, err := cryptorand.Read(ulaAddr[1:6])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tipNet := net.IPNet{\n\t\tIP: net.IP(ulaAddr),\n\t\tMask: net.CIDRMask(48, 128),\n\t}\n\treturn ipNet.String(), nil\n}", "func extractIPv6(reverseName string) (string, error) {\n\tsegments := ReverseArray(strings.Split(reverseName, \".\"))\n\n\t// IPv6nibbleCount is the expected number of nibbles in IPv6 PTR record as defined in rfc3596\n\tconst ipv6nibbleCount = 32\n\n\tif len(segments) != 
ipv6nibbleCount {\n\t\treturn \"\", fmt.Errorf(\"incorrect number of segments in IPv6 PTR: %v\", len(segments))\n\t}\n\n\tvar slice6 []string\n\tfor i := 0; i < len(segments); i += 4 {\n\t\tslice6 = append(slice6, strings.Join(segments[i:i+4], \"\"))\n\t}\n\n\tip := net.ParseIP(strings.Join(slice6, \":\")).To16()\n\tif ip == nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse IPv6 segments: %v\", slice6)\n\t}\n\treturn ip.String(), nil\n}", "func ip6stringToIP(ip string) net.IP {\n var chunks = strings.Split(ip, \":\")\n var rxIp4 = regexp.MustCompile(\n \"([0-9]{1,3}).([0-9]{1,3}).([0-9]{1,3}).([0-9]{1,3})\")\n\n for i, chunk := range chunks {\n if rxIp4.MatchString(chunk) {\n var octet1, _ = strconv.Atoi(rxIp4.FindStringSubmatch(chunk)[1])\n var octet2, _ = strconv.Atoi(rxIp4.FindStringSubmatch(chunk)[2])\n var octet3, _ = strconv.Atoi(rxIp4.FindStringSubmatch(chunk)[3])\n var octet4, _ = strconv.Atoi(rxIp4.FindStringSubmatch(chunk)[4])\n\n var slot7 = fmt.Sprintf(\"%x\", (octet1 << 8) + octet2)\n var slot8 = fmt.Sprintf(\"%x\", (octet3 << 8) + octet4)\n chunks[i] = slot7\n chunks = append(chunks, slot8)\n break\n }\n }\n var ip6 = strings.Join(chunks, \":\")\n var ipobj = net.ParseIP(ip6)\n\n return ipobj\n}", "func Ipv6HasPrefix(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldIpv6), v))\n\t})\n}", "func reverse6(slice []string) string {\n\tfor i := 0; i < len(slice)/2; i++ {\n\t\tj := len(slice) - i - 1\n\t\tslice[i], slice[j] = slice[j], slice[i]\n\t}\n\tslice6 := []string{}\n\tfor i := 0; i < len(slice)/4; i++ {\n\t\tslice6 = append(slice6, strings.Join(slice[i*4:i*4+4], \"\"))\n\t}\n\tip := net.ParseIP(strings.Join(slice6, \":\")).To16()\n\tif ip == nil {\n\t\treturn \"\"\n\t}\n\treturn ip.String()\n}", "func parseIPv6Zone(s string) (net.IP, string) {\n\ts, zone := splitHostZone(s)\n\treturn net.ParseIP(s), zone\n}", "func ipv6Slice(addr []byte) IP {\n\treturn IP{\n\t\thi: 
binary.BigEndian.Uint64(addr[:8]),\n\t\tlo: binary.BigEndian.Uint64(addr[8:]),\n\t\tz: z6noz,\n\t}\n}", "func Ipv6(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIpv6), v))\n\t})\n}", "func (internet *Internet) IPv6Address() string {\n\tvar parts []string\n\n\tfor i := 0; i < 8; i++ {\n\t\tparts = append(parts, fmt.Sprintf(\"%x\", internet.faker.random.Intn(65536)))\n\t}\n\n\treturn strings.Join(parts, \":\")\n}", "func (i Internet) Ipv6() string {\n\tips := make([]string, 0, 8)\n\n\tfor j := 0; j < 8; j++ {\n\t\tblock := \"\"\n\t\tfor w := 0; w < 4; w++ {\n\t\t\tblock = block + strconv.Itoa(i.Faker.RandomDigitNotNull())\n\t\t}\n\n\t\tips = append(ips, block)\n\t}\n\n\treturn strings.Join(ips, \":\")\n}", "func PublicIpv6HasPrefix(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldPublicIpv6), v))\n\t})\n}", "func ntoIPv6(sip []string) string {\n\tif len(sip) != 3 {\n\t\treturn \"\"\n\t}\n\tip := make(net.IP, net.IPv6len)\n\tsum := decode([]byte(sip[0]))\n\tip[0] = byte((sum >> 40) & 0xFF)\n\tip[1] = byte((sum >> 32) & 0xFF)\n\tip[2] = byte((sum >> 24) & 0xFF)\n\tip[3] = byte((sum >> 16) & 0xFF)\n\tip[4] = byte((sum >> 8) & 0xFF)\n\tip[5] = byte(sum & 0xFF)\n\n\tsum = decode([]byte(sip[1]))\n\tip[6] = byte((sum >> 40) & 0xFF)\n\tip[7] = byte((sum >> 32) & 0xFF)\n\tip[8] = byte((sum >> 24) & 0xFF)\n\tip[9] = byte((sum >> 16) & 0xFF)\n\tip[10] = byte((sum >> 8) & 0xFF)\n\tip[11] = byte(sum & 0xFF)\n\n\tsum = decode([]byte(sip[2]))\n\tip[12] = byte((sum >> 24) & 0xFF)\n\tip[13] = byte((sum >> 16) & 0xFF)\n\tip[14] = byte((sum >> 8) & 0xFF)\n\tip[15] = byte(sum & 0xFF)\n\n\treturn ip.String()\n}", "func ipv6AtoN(ip net.IP) (sip string) {\n\tip = ip.To16()\n\tif ip == nil {\n\t\treturn\n\t}\n\tsum := uint64(ip[0]) << 40\n\tsum += uint64(ip[1]) << 32\n\tsum += uint64(ip[2]) << 24\n\tsum += uint64(ip[3]) << 16\n\tsum += uint64(ip[4]) << 8\n\tsum += 
uint64(ip[5])\n\tsip = encode(sum)\n\tsum = uint64(ip[6]) << 40\n\tsum += uint64(ip[7]) << 32\n\tsum += uint64(ip[8]) << 24\n\tsum += uint64(ip[9]) << 16\n\tsum += uint64(ip[10]) << 8\n\tsum += uint64(ip[11])\n\tsip = sip + \":\" + encode(sum)\n\tsum = uint64(ip[12]) << 24\n\tsum += uint64(ip[13]) << 16\n\tsum += uint64(ip[14]) << 8\n\tsum += uint64(ip[15])\n\tsip = sip + \":\" + encode(sum)\n\tfmt.Println(sip, \"len:\", len(sip))\n\treturn\n}", "func parseIPv6Host(input string, start int) (*utils.NetAddr, int, error) {\n\thostStr := input[start:]\n\t// if there is only one ':' in the entire input, the host isn't\n\t// an IPv6 address\n\tif strings.Count(hostStr, \":\") == 1 {\n\t\treturn nil, 0, trace.BadParameter(\"%q has an invalid host, host cannot contain '[' unless it is an IPv6 address\", input)\n\t}\n\t// if there's no closing ']', this isn't a valid IPv6 address\n\trbraceIdx := strings.Index(hostStr, \"]\")\n\tif rbraceIdx == -1 {\n\t\treturn nil, 0, trace.BadParameter(\"%q has an invalid host, host cannot contain '[' or ':' unless it is an IPv6 address\", input)\n\t}\n\t// if there's nothing after ']' then the path is missing\n\tif len(hostStr) <= rbraceIdx+2 {\n\t\treturn nil, 0, trace.BadParameter(\"%q is missing a path, use form [user@]host:[path]\", input)\n\t}\n\n\tmaybeAddr := hostStr[:rbraceIdx+1]\n\thost, err := utils.ParseAddr(maybeAddr)\n\tif err != nil {\n\t\treturn nil, 0, trace.Wrap(err)\n\t}\n\n\t// the host ends after the login + the IPv6 address\n\t// (including the trailing ']') and a ':'\n\treturn host, start + rbraceIdx + 1 + 1, nil\n}", "func PublicIpv6HasSuffix(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldPublicIpv6), v))\n\t})\n}", "func UintsToIPv6(network, host uint64) net.IP {\n\tvar ip = make(net.IP, net.IPv6len)\n\tip[0] = byte(network >> 56)\n\tip[1] = byte(network >> 48)\n\tip[2] = byte(network >> 40)\n\tip[3] = byte(network >> 32)\n\tip[4] = byte(network >> 
24)\n\tip[5] = byte(network >> 16)\n\tip[6] = byte(network >> 8)\n\tip[7] = byte(network)\n\tip[8] = byte(host >> 56)\n\tip[9] = byte(host >> 48)\n\tip[10] = byte(host >> 40)\n\tip[11] = byte(host >> 32)\n\tip[12] = byte(host >> 24)\n\tip[13] = byte(host >> 16)\n\tip[14] = byte(host >> 8)\n\tip[15] = byte(host)\n\treturn ip\n}", "func trimIPv6(host string) (string, error) {\n\t// `missing ']' in host` error is already handled in `SplitHostPort`\n\tif host[len(host)-1] == ']' {\n\t\tif host[0] != '[' {\n\t\t\treturn \"\", errors.New(\"missing '[' in host\")\n\t\t}\n\t\treturn host[1:][:len(host)-2], nil\n\t}\n\treturn host, nil\n}", "func IPv6(opts ...options.OptionFunc) string {\n\treturn singleFakeData(IPV6Tag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\treturn i.ipv6()\n\t}, opts...).(string)\n}", "func Ping6(ifname, addr string) Node {\n\treturn &ping6{\n\t\tifname: ifname,\n\t\taddr: addr,\n\t}\n}", "func IPv6(str string) bool {\n\tip := net.ParseIP(str)\n\treturn ip != nil && strings.Contains(str, \":\")\n}", "func (o NodeBalancerOutput) Ipv6() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NodeBalancer) pulumi.StringOutput { return v.Ipv6 }).(pulumi.StringOutput)\n}", "func Ipv6HasSuffix(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldIpv6), v))\n\t})\n}", "func parseIPv6(s string) (ip ipOctets, cc int) {\n\tip = make(ipOctets, net.IPv6len/2)\n\n\tfor i := 0; i < net.IPv6len/2; i++ {\n\t\tip[i] = make([]ipOctet, 0)\n\t}\n\n\tellipsis := -1 // position of ellipsis in ip\n\n\t// Might have leading ellipsis\n\tif len(s) >= 2 && s[0] == ':' && s[1] == ':' {\n\t\tellipsis = 0\n\t\ts = s[2:]\n\t\tcc += 2\n\n\t\t// Might be only ellipsis\n\t\tif len(s) == 0 || s[0] == '_' || s[0] == '/' {\n\t\t\tfor i := 0; i < net.IPv6len/2; i++ {\n\t\t\t\tip.push(i, 0, 0)\n\t\t\t}\n\t\t\treturn ip, cc\n\t\t}\n\t}\n\n\tvar bb [2]uint16 // octet bounds\n\ti 
:= 0 // octet idx\n\tk := 0 // bound idx: 0 - lo, 1 - hi\n\n\t// Loop, parsing hex numbers followed by colon.\nloop:\n\tfor i < net.IPv6len/2 {\n\t\t// Hex number.\n\t\tn, c, ok := xtoi(s)\n\t\tif !ok || n > 0xFFFF {\n\t\t\treturn nil, cc\n\t\t}\n\n\t\t// If followed by dot, might be in trailing net.IPv4.\n\t\tif c < len(s) && s[c] == '.' {\n\t\t\tip, n := parseIPv4(s)\n\n\t\t\treturn ip, cc + n\n\t\t}\n\n\t\t// Save this 16-bit chunk.\n\t\tbb[k] = uint16(n)\n\n\t\t// Stop at max of string.\n\t\ts = s[c:]\n\t\tcc += c\n\t\tif len(s) == 0 {\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\ti++\n\t\t\tbreak\n\t\t}\n\n\t\tswitch s[0] {\n\t\tcase ':':\n\t\t\tfallthrough\n\t\tcase ',':\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\tbb[1] = 0\n\t\t\tk = 0\n\t\tcase '-':\n\t\t\tif k == 1 {\n\t\t\t\t// To many dashes in one octet.\n\t\t\t\treturn nil, cc\n\t\t\t}\n\t\t\tk++\n\t\tdefault:\n\t\t\tip.push(i, bb[0], bb[1])\n\t\t\tbreak loop\n\t\t}\n\n\t\tif s[0] == ':' {\n\t\t\ti++\n\t\t}\n\n\t\ts = s[1:]\n\t\tcc++\n\n\t\t// Look for ellipsis.\n\t\tif s[0] == ':' {\n\t\t\tif ellipsis >= 0 { // already have one\n\t\t\t\treturn nil, cc\n\t\t\t}\n\t\t\tellipsis = i\n\t\t\ts = s[1:]\n\t\t\tcc++\n\t\t\tif len(s) == 0 || s[0] == '_' || s[0] == '/' { // can be at end\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// If didn't parse enough, expand ellipsis.\n\tif i < net.IPv6len/2 {\n\t\tif ellipsis < 0 {\n\t\t\treturn nil, cc\n\t\t}\n\t\tn := net.IPv6len/2 - i\n\t\tfor j := i - 1; j >= ellipsis; j-- {\n\t\t\tip[j+n] = ip[j]\n\t\t}\n\t\tfor j := ellipsis + n - 1; j >= ellipsis; j-- {\n\t\t\tip[j] = []ipOctet{{0, 0}}\n\t\t}\n\t} else if ellipsis >= 0 {\n\t\t// Ellipsis must represent at least one 0 group.\n\t\treturn nil, cc\n\t}\n\n\treturn ip, cc\n}", "func BigToIPv6(i *big.Int) net.IP {\n\tn, h := bigToUint64s(i)\n\treturn UintsToIPv6(n, h)\n}", "func configureIPv6Address(ipv6 bool) error {\n\tif !ipv6 {\n\t\treturn nil\n\t}\n\tlink, err := netlink.LinkByName(\"lo\")\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"failed to find 'lo' link: %v\", err)\n\t}\n\t// Equivalent to `ip -6 addr add \"::6/128\" dev lo`\n\taddress := &net.IPNet{IP: net.ParseIP(\"::6\"), Mask: net.CIDRMask(128, 128)}\n\taddr := &netlink.Addr{IPNet: address}\n\n\terr = netlink.AddrAdd(link, addr)\n\tif ignoreExists(err) != nil {\n\t\treturn fmt.Errorf(\"failed to add IPv6 inbound address: %v\", err)\n\t}\n\treturn nil\n}", "func (internet Internet) IPv6(v reflect.Value) (interface{}, error) {\n\treturn internet.ipv6(), nil\n}", "func Ipv6AddrToInt(ipAddr string) string {\n\tbits := strings.Split(ipAddr, \":\")\n\tb0, _ := strconv.ParseInt(bits[0], 16, 64)\n\tb1, _ := strconv.ParseInt(bits[1], 16, 64)\n\tb2, _ := strconv.ParseInt(bits[2], 16, 64)\n\tb3, _ := strconv.ParseInt(bits[3], 16, 64)\n\tb4, _ := strconv.ParseInt(bits[4], 16, 64)\n\tb5, _ := strconv.ParseInt(bits[5], 16, 64)\n\tb6, _ := strconv.ParseInt(bits[6], 16, 64)\n\tb7, _ := strconv.ParseInt(bits[7], 16, 64)\n\tvar sum1, sum2, sum3, sum4 int64\n\tvar sum1S, sum2S, sum3S, sum4S string\n\n\tif b0 == 0 {\n\t\tsum1 += b0 + 1<<16\n\t\tsum1 += b1\n\t\tsum1S = strconv.FormatInt(sum1, 2)[1:]\n\t\tnowLong := 32 - len(sum1S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum1S = \"0\" + sum1S\n\t\t}\n\t} else {\n\t\tsum1 += b0 << 16\n\t\tsum1 += b1\n\t\tsum1S = strconv.FormatInt(sum1, 2)\n\t\tlog.Println(sum1S)\n\t\tnowLong := 32 - len(sum1S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum1S = \"0\" + sum1S\n\t\t}\n\t}\n\n\tif b0 == 0 {\n\t\tsum2 += b2 + 1<<16\n\t\tsum2 += b3\n\t\tsum2S = strconv.FormatInt(sum2, 2)[1:]\n\t\tnowLong := 32 - len(sum2S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum2S = \"0\" + sum2S\n\t\t}\n\t} else {\n\t\tsum2 += b2 << 16\n\t\tsum2 += b3\n\t\tsum2S = strconv.FormatInt(sum2, 2)\n\t\tnowLong := 32 - len(sum2S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum2S = \"0\" + sum2S\n\t\t}\n\t}\n\n\tif b0 == 0 {\n\t\tsum3 += b4 + 1<<16\n\t\tsum3 += b5\n\t\tsum3S = strconv.FormatInt(sum3, 2)[1:]\n\t\tnowLong := 32 - 
len(sum3S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum3S = \"0\" + sum3S\n\t\t}\n\t} else {\n\t\tsum3 += b4 << 16\n\t\tsum3 += b5\n\t\tsum3S = strconv.FormatInt(sum3, 2)\n\t\tnowLong := 32 - len(sum3S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum3S = \"0\" + sum3S\n\t\t}\n\t}\n\n\tif b0 == 0 {\n\t\tsum4 += b6 + 1<<16\n\t\tsum4 += b7\n\t\tsum4S = strconv.FormatInt(sum4, 2)[1:]\n\t\tnowLong := 32 - len(sum4S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum4S = \"0\" + sum4S\n\t\t}\n\t} else {\n\t\tsum4 += b6 << 16\n\t\tsum4 += b7\n\t\tsum4S = strconv.FormatInt(sum4, 2)\n\t\tnowLong := 32 - len(sum4S)\n\t\tfor i := 0; i < nowLong; i++ {\n\t\t\tsum4S = \"0\" + sum4S\n\t\t}\n\t}\n\t// log.Println(sum1S, len(sum1S))\n\t// log.Println(sum2S, len(sum2S))\n\t// log.Println(sum3S, len(sum3S))\n\t// log.Println(sum4S, len(sum4S))\n\n\treturn sum1S + sum2S + sum3S + sum4S\n}", "func HWAddrToIPv6LLA(hwaddr net.HardwareAddr) net.IP {\n\treturn net.IP{\n\t\t0xfe,\n\t\t0x80,\n\t\t0x00,\n\t\t0x00,\n\t\t0x00,\n\t\t0x00,\n\t\t0x00,\n\t\t0x00,\n\t\t(hwaddr[0] ^ 0x02),\n\t\thwaddr[1],\n\t\thwaddr[2],\n\t\t0xff,\n\t\t0xfe,\n\t\thwaddr[3],\n\t\thwaddr[4],\n\t\thwaddr[5],\n\t}\n}", "func Ipv6EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldIpv6), v))\n\t})\n}", "func (o NetworkInterfaceOutput) Ipv6PrefixCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *NetworkInterface) pulumi.IntPtrOutput { return v.Ipv6PrefixCount }).(pulumi.IntPtrOutput)\n}", "func (a *Inet6Addr) Family() int { return syscall.AF_INET6 }", "func IsAddressIPv6(addr string) bool {\n\treturn govalidator.IsIPv6(addr)\n}", "func IpV6Address() string {\n\tvar ip net.IP\n\tfor i := 0; i < net.IPv6len; i++ {\n\t\tnumber := uint8(seedAndReturnRandom(255))\n\t\tip = append(ip, number)\n\t}\n\treturn ip.String()\n}", "func PublicIpv6(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) 
{\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv6), v))\n\t})\n}", "func IPv6Raw(addr [16]byte) IP {\n\treturn IP{\n\t\thi: binary.BigEndian.Uint64(addr[:8]),\n\t\tlo: binary.BigEndian.Uint64(addr[8:]),\n\t\tz: z6noz,\n\t}\n}", "func (o NetworkInterfaceOutput) Ipv6Prefixes() NetworkInterfaceIpv6PrefixSpecificationArrayOutput {\n\treturn o.ApplyT(func(v *NetworkInterface) NetworkInterfaceIpv6PrefixSpecificationArrayOutput { return v.Ipv6Prefixes }).(NetworkInterfaceIpv6PrefixSpecificationArrayOutput)\n}", "func IsIPv6(value string) bool {\n\tip := net.ParseIP(value)\n\tif ip == nil {\n\t\treturn false\n\t}\n\treturn ip.To16() != nil\n}", "func network4(addr uint32, prefix uint) uint32 {\n\treturn addr & netmask(prefix)\n}", "func getIPprefix(s string) string {\n\tip4Prefixes := strings.Split(s, \".\")\n\tif len(ip4Prefixes) == 4 {\n\t\treturn ip4Prefixes[0]\n\t}\n\tip6Prefix := strings.Split(s, \":\")[0][1:]\n\treturn ip6Prefix\n}", "func ConvertAddressToUser(prefix []byte, addr interfaces.IAddress) []byte {\n\tdat := prefix\n\tdat = append(dat, addr.Bytes()...)\n\tsha256d := Sha(Sha(dat).Bytes()).Bytes()\n\tuserd := prefix\n\tuserd = append(userd, addr.Bytes()...)\n\tuserd = append(userd, sha256d[:4]...)\n\treturn userd\n}", "func PublicIpv6EqualFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldPublicIpv6), v))\n\t})\n}", "func Configure6(iface netlink.Link, packet *dhcpv6.Message) error {\n\tp := NewPacket6(iface, packet)\n\treturn p.Configure()\n}", "func Ipv6ContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldIpv6), v))\n\t})\n}", "func IPv6ToUInts(ip net.IP) (network, host uint64) {\n\tn := []byte(ip.To16())\n\tnetwork = (uint64(n[0]) << 56) | (uint64(n[1]) << 48) | (uint64(n[2]) << 40) | (uint64(n[3]) << 32) | (uint64(n[4]) << 24) | (uint64(n[5]) << 16) | (uint64(n[6]) << 8) | uint64(n[7])\n\thost = (uint64(n[8]) << 56) | 
(uint64(n[9]) << 48) | (uint64(n[10]) << 40) | (uint64(n[11]) << 32) | (uint64(n[12]) << 24) | (uint64(n[13]) << 16) | (uint64(n[14]) << 8) | uint64(n[15])\n\treturn network, host\n}", "func IsIPv6(s string) bool {\n\tip := net.ParseIP(s)\n\treturn ip != nil && ip.To4() == nil\n}", "func ipv6EllipsisIPv4(s string) (n int, ok bool) {\n\tsplit := strings.Split(s, \"::\")\n\tleft := strings.Split(split[0], \":\")\n\tnleft := len(left)\n\tif split[0] == \"\" {\n\t\tnleft = 0\n\t}\n\tnoipv4 := strings.TrimSuffix(strings.TrimRight(split[1], \".0123456789\"), \":\")\n\tright := strings.Split(noipv4, \":\")\n\tnright := len(right)\n\tif noipv4 == \"\" {\n\t\tnright = 0\n\t}\n\n\tif nleft+nright <= 5 {\n\t\treturn len(s), true\n\t}\n\tif nleft+nright <= 7 {\n\t\treturn len(split[0]) + 2 + len(noipv4), true\n\t}\n\tright = right[:7-len(left)]\n\trightlen := len(strings.Join(right, \":\"))\n\treturn len(split[0]) + 2 + rightlen, true\n}", "func (a *L3n4Addr) IsIPv6() bool {\n\treturn a.AddrCluster.Is6()\n}", "func (svc *Service) IPv6AddressAnnotation(cloud *gce.Cloud) (string, error) {\n\treturn ipAddressFromAnnotation(svc, cloud, IPv6Version)\n}", "func PublicIpv6ContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldPublicIpv6), v))\n\t})\n}", "func SockaddrToIPAndZone(sa Sockaddr) (net.IP, string) {\n\tswitch sa := sa.(type) {\n\tcase *SockaddrInet4:\n\t\tip := make([]byte, 16)\n\t\t// V4InV6Prefix\n\t\tip[10] = 0xff\n\t\tip[11] = 0xff\n\t\tcopy(ip[12:16], sa.Addr[:])\n\t\treturn ip, \"\"\n\n\tcase *SockaddrInet6:\n\t\tip := make([]byte, 16)\n\t\tcopy(ip, sa.Addr[:])\n\t\treturn ip, IP6ZoneToString(int(sa.ZoneId))\n\t}\n\treturn nil, \"\"\n}", "func parseIPv6Argument(argv string) (ipconf []types.BaseCustomizationIpV6Generator, err error) {\n\tfor _, substring := range strings.Split(argv, \",\") {\n\t\t// remove leading and trailing white space\n\t\tsubstring = strings.TrimSpace(substring)\n\t\t// 
handle \"dhcp6\" and lists of static IPv6 addresses\n\t\tswitch substring {\n\t\tcase \"dhcp6\":\n\t\t\tipconf = append(\n\t\t\t\tipconf,\n\t\t\t\t&types.CustomizationDhcpIpV6Generator{},\n\t\t\t)\n\t\tdefault:\n\t\t\t// check if subnet mask was specified\n\t\t\tswitch strings.Count(substring, \"/\") {\n\t\t\t// no mask, set default\n\t\t\tcase 0:\n\t\t\t\tipconf = append(ipconf, &types.CustomizationFixedIpV6{\n\t\t\t\t\tIpAddress: substring,\n\t\t\t\t\tSubnetMask: 64,\n\t\t\t\t})\n\t\t\t// a single forward slash was found: parse and use subnet mask\n\t\t\tcase 1:\n\t\t\t\tparts := strings.Split(substring, \"/\")\n\t\t\t\tmask, err := strconv.Atoi(parts[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unable to convert subnet mask to int: %w\", err)\n\t\t\t\t}\n\t\t\t\tipconf = append(ipconf, &types.CustomizationFixedIpV6{\n\t\t\t\t\tIpAddress: parts[0],\n\t\t\t\t\tSubnetMask: int32(mask),\n\t\t\t\t})\n\t\t\t// too many forward slashes; return error\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"unable to parse IPv6 address (too many subnet separators): %s\", substring)\n\t\t\t}\n\t\t}\n\t}\n\treturn ipconf, nil\n}", "func (o *IPPrefixesSynthetics) GetPrefixesIpv6() []string {\n\tif o == nil || o.PrefixesIpv6 == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.PrefixesIpv6\n}", "func DoHV6(base string) (ip netip.Addr, ok bool) {\n\tpopulateOnce.Do(populate)\n\tfor _, ip := range dohIPsOfBase[base] {\n\t\tif ip.Is6() {\n\t\t\treturn ip, true\n\t\t}\n\t}\n\treturn ip, false\n}", "func appendIPv6(data []byte, ip net.IP) ([]byte, error) {\n\tip = ip.To16()\n\tif ip == nil {\n\t\treturn nil, &ColumnConverterError{\n\t\t\tOp: \"Append\",\n\t\t\tTo: \"IPv6\",\n\t\t\tHint: \"invalid IP version\",\n\t\t}\n\t}\n\treturn append(data, ip[:]...), nil\n}", "func (o NetworkInterfaceResponseOutput) Ipv6Address() pulumi.StringOutput {\n\treturn o.ApplyT(func(v NetworkInterfaceResponse) string { return v.Ipv6Address }).(pulumi.StringOutput)\n}", "func 
IsIPv6(s string) bool {\n\tip := net.ParseIP(s)\n\treturn ip != nil && strings.Contains(s, \":\") // && ip.To6() == nil\n}", "func (o *IPPrefixesSynthetics) SetPrefixesIpv6(v []string) {\n\to.PrefixesIpv6 = v\n}", "func Ipv6EQ(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldIpv6), v))\n\t})\n}", "func (o *DhcpLease6DataData) GetScope6StartAddress6Addr() string {\n\tif o == nil || o.Scope6StartAddress6Addr == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Scope6StartAddress6Addr\n}", "func (o *DhcpLease6DataData) SetScope6Prefix(v string) {\n\to.Scope6Prefix = &v\n}", "func cleanIPV6(l []string) []string {\n\tvar r []string\n\tfor _, i := range l {\n\t\tmatched, _ := regexp.MatchString(`\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}`, i)\n\t\tif matched {\n\t\t\tr = append(r, i)\n\t\t}\n\t}\n\tsort.Strings(r)\n\treturn (r)\n}", "func (o *DhcpLease6DataData) GetScope6Prefix() string {\n\tif o == nil || o.Scope6Prefix == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Scope6Prefix\n}", "func (o *DhcpLease6DataData) GetRange6StartAddress6Addr() string {\n\tif o == nil || o.Range6StartAddress6Addr == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Range6StartAddress6Addr\n}", "func (opdns OpenDNSProvider) GetIPv6() (string, error) {\n\t// Lookup OpenDNS IPv6 Addr\n\topdnsRes, err := opdns.doQuery(dns.Fqdn(opendnsResolver), dns.TypeAAAA, opendnsResolver+\":53\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\topdnsRecord, ok := opdnsRes.Answer[0].(*dns.AAAA)\n\tif !ok {\n\t\treturn \"\", errors.New(\"OpenDNS failed to return a valid IPv6 address for itself\")\n\t}\n\n\t// Query OpenDNS for clients public ip\n\tres, err := opdns.doQuery(opendnsMyIP, dns.TypeAAAA, \"[\"+opdnsRecord.AAAA.String()+\"]:53\")\n\tif erry, ok := err.(*net.OpError); ok && erry.Err.Error() == \"connect: no route to host\" {\n\t\treturn \"\", errors.New(\"No route to OpenDNS IPv6. 
Does your connection support IPv6?\")\n\t} else if err != nil {\n\t\treturn \"\", err\n\t}\n\n\trecord, ok := res.Answer[0].(*dns.AAAA)\n\tif !ok {\n\t\treturn \"\", errors.New(\"OpenDNS failed to return a valid AAAA record\")\n\t}\n\n\treturn record.AAAA.String(), nil\n}", "func (o *DhcpLease6DataData) GetLease6Address6Addr() string {\n\tif o == nil || o.Lease6Address6Addr == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Lease6Address6Addr\n}", "func (ip6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {\n\tpayload := b.Bytes()\n\tif ip6.HopByHop != nil {\n\t\treturn fmt.Errorf(\"unable to serialize hopbyhop for now\")\n\t}\n\tbytes, err := b.PrependBytes(40)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)\n\tbytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)\n\tbinary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))\n\tif opts.FixLengths {\n\t\tip6.Length = uint16(len(payload))\n\t}\n\tbinary.BigEndian.PutUint16(bytes[4:], ip6.Length)\n\tbytes[6] = byte(ip6.NextHeader)\n\tbytes[7] = byte(ip6.HopLimit)\n\tif len(ip6.SrcIP) != 16 {\n\t\treturn fmt.Errorf(\"invalid src ip %v\", ip6.SrcIP)\n\t}\n\tif len(ip6.DstIP) != 16 {\n\t\treturn fmt.Errorf(\"invalid dst ip %v\", ip6.DstIP)\n\t}\n\tcopy(bytes[8:], ip6.SrcIP)\n\tcopy(bytes[24:], ip6.DstIP)\n\treturn nil\n}", "func containIPv6Addr(host string) bool {\n\t// the shortest IPv6 address is ::\n\treturn len(strings.Split(host, \":\")) > 2\n}", "func isIPv6(fl FieldLevel) bool {\n\tip := net.ParseIP(fl.Field().String())\n\n\treturn ip != nil && ip.To4() == nil\n}", "func (o SubnetOutput) AssignIpv6AddressOnCreation() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Subnet) pulumi.BoolPtrOutput { return v.AssignIpv6AddressOnCreation }).(pulumi.BoolPtrOutput)\n}", "func (l *L3n4AddrID) IsIPv6() bool {\n\treturn l.L3n4Addr.IsIPv6()\n}", "func Ipv6GTE(v string) predicate.Agent {\n\treturn 
predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldIpv6), v))\n\t})\n}", "func newIPv6ParserOption(prefix string, index, count int) parserOption {\n\treturn parserOption{prefix, index, count}\n}", "func IsIPv6(ip string) bool {\n\treturn strings.Index(ip, \":\") >= 0\n}", "func PublicIpv6EQ(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPublicIpv6), v))\n\t})\n}", "func IsIPv6(ipAddress string) bool {\n\tip := net.ParseIP(ipAddress)\n\treturn ip != nil && strings.Count(ipAddress, \":\") >= 2\n}", "func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {\n\toptionLength := 0\n\tfor _, opt := range i.Options {\n\t\tl, err := opt.serializeTo(b, opts.FixLengths)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toptionLength += l\n\t}\n\tbytes, err := b.PrependBytes(2)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytes[0] = uint8(i.NextHeader)\n\tif opts.FixLengths {\n\t\ti.HeaderLength = uint8((optionLength + 2) / 8)\n\t}\n\tbytes[1] = i.HeaderLength\n\treturn nil\n}", "func IsIPv6(ip string) bool {\n\tparsed := net.ParseIP(ip)\n\treturn parsed != nil && parsed.To4() == nil\n}", "func appendIPv6Str(data []byte, strIp string) ([]byte, error) {\n\tip := net.ParseIP(strIp)\n\tif ip == nil {\n\t\treturn nil, &ColumnConverterError{\n\t\t\tOp: \"Append\",\n\t\t\tTo: \"IPv6\",\n\t\t\tHint: \"invalid IP format\",\n\t\t}\n\t}\n\treturn appendIPv6(data, ip)\n}", "func (t *Interface) IPv6Forwarding(ctrl bool) error {\n\tk := boolToByte(ctrl)\n\treturn ioutil.WriteFile(\"/proc/sys/net/ipv6/conf/\"+t.Name()+\"/forwarding\", []byte{k}, 0)\n}", "func IPv6NetStartEnd(ip net.IP, mask int) (sNet, sHost, eNet, eHost uint64) {\n\n\tn, h := IPv6ToUInts(ip)\n\n\tvar subj, cMask uint64\n\tswitch {\n\tcase mask < 0:\n\t\treturn minIPv6, maxIPv6, minIPv6, maxIPv6\n\tcase mask > 128:\n\t\treturn maxIPv6, maxIPv6, maxIPv6, maxIPv6\n\tcase mask <= 64:\n\t\tsubj = 
n\n\t\tcMask = 64 - uint64(mask)\n\tcase mask > 64 && mask <= 128:\n\t\tsubj = h\n\t\tcMask = uint64(mask) - 64\n\t}\n\n\tsm := uint64(maxIPv6 << cMask)\n\tem := ^sm\n\n\tstart := subj & sm\n\tend := subj | em\n\n\tswitch {\n\tcase mask >= 0 && mask <= 64:\n\t\tsNet = start\n\t\tsHost = minIPv6\n\t\teNet = end\n\t\teHost = maxIPv6\n\tcase mask > 64 && mask <= 128:\n\t\tsNet = n\n\t\tsHost = start\n\t\teNet = n\n\t\teHost = end\n\t}\n\treturn sNet, sHost, eNet, eHost\n}", "func ParseDevAddrPrefix(prefixString string) (prefix DevAddrPrefix, err error) {\n\tpattern := regexp.MustCompile(\"([[:xdigit:]]{8})/([[:digit:]]+)\")\n\tmatches := pattern.FindStringSubmatch(prefixString)\n\tif len(matches) != 3 {\n\t\terr = errors.New(\"Invalid Prefix\")\n\t\treturn\n\t}\n\taddr, _ := ParseDevAddr(matches[1]) // errors handled in regexp\n\tprefix.Length, _ = strconv.Atoi(matches[2]) // errors handled in regexp\n\tprefix.DevAddr = addr.Mask(prefix.Length)\n\treturn\n}", "func Ipv6Contains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldIpv6), v))\n\t})\n}", "func nextDNSv6Gen(ip netip.Addr, id []byte) netip.Addr {\n\tif len(id) > 12 {\n\t\treturn netip.Addr{}\n\t}\n\ta := ip.As16()\n\tcopy(a[16-len(id):], id)\n\treturn netip.AddrFrom16(a)\n}", "func (af AddressFamily) IsIPv6() bool {\n\treturn af == AddressFamilyIPv6\n}", "func IPAndZoneToSockaddr(ip net.IP, zone string) Sockaddr {\n\t// Unspecified?\n\tif ip == nil {\n\t\tif zone != \"\" {\n\t\t\treturn &SockaddrInet6{ZoneId: uint32(IP6ZoneToInt(zone))}\n\t\t}\n\t\treturn new(SockaddrInet4)\n\t}\n\n\t// Valid IPv4?\n\tif ip4 := ip.To4(); ip4 != nil && zone == \"\" {\n\t\tvar buf [4]byte\n\t\tcopy(buf[:], ip4) // last 4 bytes\n\t\treturn &SockaddrInet4{Addr: buf}\n\t}\n\n\t// Valid IPv6 address?\n\tif ip6 := ip.To16(); ip6 != nil {\n\t\tvar buf [16]byte\n\t\tcopy(buf[:], ip6)\n\t\treturn &SockaddrInet6{Addr: buf, ZoneId: uint32(IP6ZoneToInt(zone))}\n\t}\n\n\treturn 
nil\n}", "func IPv6Unspecified() IP { return IP{z: z6noz} }", "func IPv6FragmentHash(h header.IPv6, id uint32) uint32 {\n\tt := h.SourceAddress().As16()\n\ty := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24\n\tt = h.DestinationAddress().As16()\n\tz := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24\n\treturn Hash3Words(id, y, z, hashIV)\n}", "func (o LoadBalancerZoneMappingOutput) Ipv6Address() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LoadBalancerZoneMapping) *string { return v.Ipv6Address }).(pulumi.StringPtrOutput)\n}", "func (_class PIFClass) GetIPv6(sessionID SessionRef, self PIFRef) (_retval []string, _err error) {\n\t_method := \"PIF.get_IPv6\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertStringSetToGo(_method + \" -> \", _result.Value)\n\treturn\n}", "func IPv6LinkLocalAllNodes() IP { return IPv6Raw([16]byte{0: 0xff, 1: 0x02, 15: 0x01}) }", "func (o AccessConfigResponseOutput) ExternalIpv6PrefixLength() pulumi.IntOutput {\n\treturn o.ApplyT(func(v AccessConfigResponse) int { return v.ExternalIpv6PrefixLength }).(pulumi.IntOutput)\n}", "func TestDial_IPV6(t *testing.T) {\n\tif c, err := amqp.Dial(\"amqp://localhost\"); err != nil {\n\t\tt.Skip(\"can't connect to local AMQP server\")\n\t} else {\n\t\tc.Close()\n\t}\n\tl, err := net.Listen(\"tcp6\", \"[::]:0\")\n\tif err != nil {\n\t\tt.Skip(\"ipv6 not supported\")\n\t}\n\tl.Close()\n\n\tfor _, u := range []string{\"amqp://[::]:5672\", \"amqp://[::]\"} {\n\t\tu := u // Don't use range variable in func literal.\n\t\tt.Run(u, func(t *testing.T) {\n\t\t\tc, err := amqp.Dial(u)\n\t\t\tif err != nil 
{\n\t\t\t\tt.Errorf(\"%q: %v\", u, err)\n\t\t\t} else {\n\t\t\t\tc.Close()\n\t\t\t}\n\t\t})\n\t}\n}", "func (b IPv6Fragment) DestinationAddress() tcpip.Address {\n\tpanic(\"not supported\")\n}", "func PublicIpv6Contains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldPublicIpv6), v))\n\t})\n}", "func isIP6AddrResolvable(fl FieldLevel) bool {\n\tif !isIPv6(fl) {\n\t\treturn false\n\t}\n\n\t_, err := net.ResolveIPAddr(\"ip6\", fl.Field().String())\n\n\treturn err == nil\n}", "func PublicIpv6GTE(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPublicIpv6), v))\n\t})\n}", "func (rrl *RRL) addrPrefix(addr string) string {\n\ti := strings.LastIndex(addr, \":\")\n\tip := net.ParseIP(addr[:i])\n\tif ip.To4() != nil {\n\t\tip = ip.Mask(net.CIDRMask(rrl.ipv4PrefixLength, 32))\n\t\treturn ip.String()\n\t}\n\tip = net.ParseIP(addr[1 : i-1]) // strip brackets from ipv6 e.g. [2001:db8::1]\n\tip = ip.Mask(net.CIDRMask(rrl.ipv6PrefixLength, 128))\n\n\treturn ip.String()\n}", "func broadcast4(addr uint32, prefix uint) uint32 {\n\treturn addr | ^netmask(prefix)\n}", "func (addr DevAddr) WithPrefix(prefix DevAddrPrefix) (prefixed DevAddr) {\n\tk := uint(prefix.Length)\n\tfor i := 0; i < 4; i++ {\n\t\tif k >= 8 {\n\t\t\tprefixed[i] = prefix.DevAddr[i] & 0xff\n\t\t\tk -= 8\n\t\t\tcontinue\n\t\t}\n\t\tprefixed[i] = (prefix.DevAddr[i] & ^byte(0xff>>k)) | (addr[i] & byte(0xff>>k))\n\t\tk = 0\n\t}\n\treturn\n}" ]
[ "0.69586384", "0.6730306", "0.6644067", "0.6531961", "0.6518947", "0.64932704", "0.64857215", "0.6434441", "0.6371735", "0.63117236", "0.6310245", "0.6301997", "0.62453604", "0.61817735", "0.60887283", "0.6050287", "0.6015003", "0.6001845", "0.5973858", "0.597152", "0.5947329", "0.5940137", "0.5892646", "0.5817638", "0.5788113", "0.57785696", "0.57729006", "0.5756924", "0.57452846", "0.5736699", "0.57089823", "0.5690883", "0.56815577", "0.56371224", "0.5634544", "0.56276745", "0.5621503", "0.5618626", "0.56154823", "0.56062615", "0.5595028", "0.5577922", "0.5575641", "0.557129", "0.55558866", "0.5539811", "0.5536012", "0.5528186", "0.551043", "0.5506158", "0.5493032", "0.548858", "0.54712605", "0.5468239", "0.54674584", "0.54645395", "0.545999", "0.54579675", "0.54563093", "0.5455191", "0.54505676", "0.54489386", "0.54326546", "0.5419938", "0.54186505", "0.5414079", "0.5387049", "0.5371214", "0.5361961", "0.53528744", "0.5339052", "0.53389686", "0.53265417", "0.5325977", "0.5323748", "0.5317387", "0.53112775", "0.52994835", "0.5295068", "0.52888477", "0.5286891", "0.5282848", "0.52827704", "0.5271754", "0.52677834", "0.5257357", "0.5254576", "0.5254141", "0.52406335", "0.5231699", "0.5231691", "0.5228596", "0.52273893", "0.5215936", "0.5204472", "0.51998544", "0.51897025", "0.5185492", "0.51816833", "0.51705396" ]
0.87126887
0
hasAAAA checks if AAAA records exists in dns.Msg
func hasAAAA(res *dns.Msg) bool { for _, a := range res.Answer { if a.Header().Rrtype == dns.TypeAAAA { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func hasIPAns(m *dns.Msg) (ok bool) {\n\tfor _, rr := range m.Answer {\n\t\tif t := rr.Header().Rrtype; t == dns.TypeA || t == dns.TypeAAAA {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (s stdlib) LookupAAAA(name string) ([][16]byte, error) {\n\t/* Get only IPv4 IPs */\n\tips, err := s.lookupIPFilter(\n\t\tname,\n\t\tfunc(i net.IP) net.IP {\n\t\t\t/* Make sure it's not an IPv4 address */\n\t\t\tif nil != i.To4() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn i.To16()\n\t\t},\n\t)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\t/* Convert to byte slices */\n\tret := make([][16]byte, len(ips))\n\tfor i, ip := range ips {\n\t\tcopy(ret[i][:], ip)\n\t}\n\n\treturn ret, nil\n}", "func (data *DNSData) addAAAA(name string, ip net.IP) {\n\tdata.v6Addresses[name] = dns.AAAA{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: name,\n\t\t\tRrtype: dns.TypeAAAA,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: data.DefaultTTL,\n\t\t},\n\t\tAAAA: ip,\n\t}\n}", "func (e ENS) handleEthLinkAAAA(name string, domain string) ([]dns.RR, error) {\n\tresults := make([]dns.RR, 0)\n\tif name == domain {\n\t\tfor i := range e.IPFSGatewayAAAAs {\n\t\t\tresult, err := dns.NewRR(fmt.Sprintf(\"%s 3600 IN AAAA %s\", domain, e.IPFSGatewayAAAAs[i]))\n\t\t\tif err != nil {\n\t\t\t\treturn results, err\n\t\t\t}\n\t\t\tresults = append(results, result)\n\t\t}\n\t\treturn results, nil\n\t}\n\n\t// We want to return a default A rrset if the .eth resolver has a content\n\t// We want to return a default AAAA rrset if the .eth resolver has a content\n\t// hash but not an AAAA rrset\n\taaaaRRSet, err := e.obtainAAAARRSet(name, domain)\n\tif err == nil && len(aaaaRRSet) != 0 {\n\t\t// We have an AAAA rrset; use it\n\t\toffset := 0\n\t\tfor offset < len(aaaaRRSet) {\n\t\t\tvar result dns.RR\n\t\t\tresult, offset, err = dns.UnpackRR(aaaaRRSet, offset)\n\t\t\tif err == nil {\n\t\t\t\tresults = append(results, result)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif len(e.IPFSGatewayAAAAs) > 0 {\n\t\t\tcontenthash, err := 
e.obtainContenthash(name, domain)\n\t\t\tif err == nil && len(contenthash) != 0 {\n\t\t\t\t// We have a content hash but no AAAA record; use the default\n\t\t\t\tfor i := range e.IPFSGatewayAAAAs {\n\t\t\t\t\tresult, err := dns.NewRR(fmt.Sprintf(\"%s 3600 IN AAAA %s\", name, e.IPFSGatewayAAAAs[i]))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Warnf(\"error creating %s AAAA RR: %v\", name, err)\n\t\t\t\t\t}\n\t\t\t\t\tresults = append(results, result)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn results, nil\n}", "func AAAA(hdr dns.RR_Header, ip net.IP) *dns.AAAA {\n\treturn &dns.AAAA{\n\t\tHdr: hdr,\n\t\tAAAA: ip.To16(),\n\t}\n}", "func (data *DNSData) FindAAAA(name string) *dns.AAAA {\n\tfqdn := dns.Fqdn(name)\n\n\trecord, ok := data.v6Addresses[fqdn]\n\tif ok {\n\t\treturn &record\n\t}\n\n\treturn nil\n}", "func (e ENS) HasRecords(domain string, name string) (bool, error) {\n\tresolver, err := ens.NewDNSResolver(e.Client, strings.TrimSuffix(domain, \".\"))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn resolver.HasRecords(name)\n}", "func DNSARecordsVerification(t *testing.T, hostNames []string) bool {\n\tt.Logf(\"Verifying DNS A Records...\")\n\tFQDNList := lib.FetchDNSARecordsFQDN(t, dnsVSUUID, AviClients[0])\n\tdiffString := DiffOfLists(FQDNList, hostNames)\n\tif len(diffString) == initialNumOfFQDN {\n\t\treturn true\n\t}\n\tnewSharedVSFQDN := DiffOfLists(diffString, initialFQDNList)\n\tvar val int\n\tfor _, fqdn := range newSharedVSFQDN {\n\t\tif strings.HasPrefix(fqdn, clusterName+\"--shared\") == true {\n\t\t\tval++\n\t\t}\n\t}\n\tif (len(newSharedVSFQDN) - val) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func ContainsAddress(addrs []Address, a Address) bool {\n\treturn IndexOfAddress(addrs, a) != -1\n}", "func NewAAAA(name string, ip net.IP, ttl uint32) *dns.AAAA {\n\treturn &dns.AAAA{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl}, AAAA: ip}\n}", "func (f *wsClientFilter) existsAddress(a btcutil.Address) 
bool {\n\tswitch a := a.(type) {\n\tcase *btcutil.AddressPubKeyHash:\n\t\t_, ok := f.pubKeyHashes[*a.Hash160()]\n\t\treturn ok\n\tcase *btcutil.AddressScriptHash:\n\t\t_, ok := f.scriptHashes[*a.Hash160()]\n\t\treturn ok\n\tcase *btcutil.AddressPubKey:\n\t\tserializedPubKey := a.ScriptAddress()\n\t\tswitch len(serializedPubKey) {\n\t\tcase 33: // compressed\n\t\t\tvar compressedPubKey [33]byte\n\t\t\tcopy(compressedPubKey[:], serializedPubKey)\n\t\t\t_, ok := f.compressedPubKeys[compressedPubKey]\n\t\t\tif !ok {\n\t\t\t\t_, ok = f.pubKeyHashes[*a.AddressPubKeyHash().Hash160()]\n\t\t\t}\n\t\t\treturn ok\n\t\tcase 65: // uncompressed\n\t\t\tvar uncompressedPubKey [65]byte\n\t\t\tcopy(uncompressedPubKey[:], serializedPubKey)\n\t\t\t_, ok := f.uncompressedPubKeys[uncompressedPubKey]\n\t\t\tif !ok {\n\t\t\t\t_, ok = f.pubKeyHashes[*a.AddressPubKeyHash().Hash160()]\n\t\t\t}\n\t\t\treturn ok\n\t\t}\n\t}\n\n\t_, ok := f.otherAddresses[a.EncodeAddress()]\n\treturn ok\n}", "func (a Authenticate) HasAddress(ctx weave.Context, addr weave.Address) bool {\n\tfor _, s := range a.GetConditions(ctx) {\n\t\tif addr.Equals(s.Address()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func matchesAudience(as, bs []string) bool {\n\tif len(bs) == 0 || len(as) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, b := range bs {\n\t\tfor _, a := range as {\n\t\t\tif b == a || stripPort(a) == stripPort(b) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (_Contract *ContractCaller) HasDNSRecords(opts *bind.CallOpts, node [32]byte, name [32]byte) (bool, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"hasDNSRecords\", node, name)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (data *DNSData) addA(name string, ip net.IP) {\n\tdata.v4Addresses[name] = dns.A{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: name,\n\t\t\tRrtype: dns.TypeA,\n\t\t\tClass: 
dns.ClassINET,\n\t\t\tTtl: data.DefaultTTL,\n\t\t},\n\t\tA: ip,\n\t}\n}", "func recordExists(needle *endpoint.Endpoint, haystack []*endpoint.Endpoint) (*endpoint.Endpoint, bool) {\n\tfor _, record := range haystack {\n\t\tif record.DNSName == needle.DNSName {\n\t\t\treturn record, true\n\t\t}\n\t}\n\n\treturn nil, false\n}", "func (_Contract *ContractSession) HasDNSRecords(node [32]byte, name [32]byte) (bool, error) {\n\treturn _Contract.Contract.HasDNSRecords(&_Contract.CallOpts, node, name)\n}", "func (r *ResponseWriter) WriteMsg(res *dns.Msg) error {\n\tstate := request.Request{W: r, Req: res}\n\n\t// only respond with this when the request came in over IPv6.\n\tif state.Family() == 1 { // if it came in over v4, don't do anything.\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// do not modify if query is not AAAA or not of class IN.\n\tif state.QType() != dns.TypeAAAA || state.QClass() != dns.ClassINET {\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// do not modify if there are AAAA records or NameError. continue if NoData or any other error.\n\tty, _ := response.Typify(res, time.Now().UTC())\n\tif ty == response.NoError || ty == response.NameError {\n\t\tif hasAAAA(res) && ! 
r.translateAll {\n\t\t\treturn r.ResponseWriter.WriteMsg(res)\n\t\t}\n\t}\n\n\t// perform request to upstream.\n\tres2, err := r.Proxy.Lookup(state, state.Name(), dns.TypeA)\n\tif err != nil {\n\t\tlog.Warningf(\"[WARNING] Unable to query upstream DNS: %v\", err)\n\t\tres.MsgHdr.Rcode = dns.RcodeServerFailure\n\t\treturn r.ResponseWriter.WriteMsg(res)\n\t}\n\n\t// modify response.\n\tres.MsgHdr.Rcode = dns.RcodeSuccess\n\tnsTtl := uint32(600)\n\tfor i := 0; i < len(res.Ns); i++ {\n\t\tif res.Ns[i].Header().Rrtype == dns.TypeSOA {\n\t\t\tnsTtl = res.Ns[i].Header().Ttl\n\t\t}\n\t}\n\tres.Answer = res2.Answer\n\tfor i := 0; i < len(res.Answer); i++ {\n\t\tans := res.Answer[i]\n\t\thdr := ans.Header()\n\t\tif hdr.Rrtype == dns.TypeA {\n\t\t\taaaa, _ := To6(r.Prefix, ans.(*dns.A).A)\n\t\t\tttl := nsTtl\n\t\t\tif ans.Header().Ttl < ttl {\n\t\t\t\tttl = ans.Header().Ttl\n\t\t\t}\n\t\t\tres.Answer[i] = &dns.AAAA{\n\t\t\t\tHdr: dns.RR_Header{\n\t\t\t\t\tName: hdr.Name,\n\t\t\t\t\tRrtype: dns.TypeAAAA,\n\t\t\t\t\tClass: hdr.Class,\n\t\t\t\t\tTtl: ttl,\n\t\t\t\t},\n\t\t\t\tAAAA: aaaa,\n\t\t\t}\n\t\t}\n\t}\n\tres.Ns = []dns.RR{}\n\n\treturn r.ResponseWriter.WriteMsg(res)\n}", "func (w *Wallet) HasAddress(s *aklib.DBConfig, out *tx.MultiSigOut) (bool, error) {\n\tfor _, mout := range out.Addresses {\n\t\tfor a := range w.AddressPublic {\n\t\t\tadrstr, err := address.Address58(s.Config, mout)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif a == adrstr {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn false, nil\n}", "func (i *Instance) AAAA(ip net.IP) *dns.AAAA {\n\treturn &dns.AAAA{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: i.TargetHost.Qualify(i.Domain).String(),\n\t\t\tRrtype: dns.TypeAAAA,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: i.TTLInSeconds(),\n\t\t},\n\t\tAAAA: ip,\n\t}\n}", "func (pfx Prefix) Contains(x Prefix) bool {\n\tif x.pfxlen <= pfx.pfxlen {\n\t\treturn false\n\t}\n\n\tmask := (uint32(1) << (32 - pfx.pfxlen))\n\treturn (pfx.addr & mask) 
== (x.addr & mask)\n}", "func (this *SIPMessage) HasHeader(headerName string) bool {\n\t_, present := this.nameTable[strings.ToLower(headerName)]\n\treturn present\n}", "func IsAddress(a string) bool {\n\tif len(a) > 0 && a[:3] == string(binary.PrefixAccountPubkey) {\n\t\treturn true\n\t}\n\treturn false\n}", "func ContainsChunkWithAddress(chunks []Chunk, a Address) bool {\n\treturn IndexOfChunkWithAddress(chunks, a) != -1\n}", "func (_Contract *ContractCallerSession) HasDNSRecords(node [32]byte, name [32]byte) (bool, error) {\n\treturn _Contract.Contract.HasDNSRecords(&_Contract.CallOpts, node, name)\n}", "func HasPrefix(m *disgord.Message) (bool, string) {\n\tprefixes := append(NamePrefixes(), \"1.\", \"dig\", \"whois\")\n\tfor _, prefix := range prefixes {\n\t\tif strings.HasPrefix(m.Content, prefix) {\n\t\t\treturn true, prefix\n\t\t}\n\t}\n\treturn false, \"\"\n}", "func isCacheableSucceded(m *dns.Msg) (ok bool) {\n\tqType := m.Question[0].Qtype\n\n\treturn (qType != dns.TypeA && qType != dns.TypeAAAA) || hasIPAns(m) || isCacheableNegative(m)\n}", "func (r Dns_Domain) CreateAaaaRecord(host *string, data *string, ttl *int) (resp datatypes.Dns_Domain_ResourceRecord_AaaaType, err error) {\n\tparams := []interface{}{\n\t\thost,\n\t\tdata,\n\t\tttl,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain\", \"createAaaaRecord\", params, &r.Options, &resp)\n\treturn\n}", "func (o SubnetOutput) EnableResourceNameDnsAaaaRecordOnLaunch() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Subnet) pulumi.BoolPtrOutput { return v.EnableResourceNameDnsAaaaRecordOnLaunch }).(pulumi.BoolPtrOutput)\n}", "func (o *NSQProducer) HasBroadcastAddress() bool {\n\tif o != nil && o.BroadcastAddress != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Member) HasAddr() bool {\n\tif o != nil && o.Addr != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageSasExpander) HasSasAddress() bool {\n\tif o != nil && o.SasAddress != nil {\n\t\treturn 
true\n\t}\n\n\treturn false\n}", "func (o *NetworkElementSummaryAllOf) HasIpv4Address() bool {\n\tif o != nil && o.Ipv4Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func SOA(hdr dns.RR_Header, ns, mbox string, minttl uint32) *dns.SOA {\n\treturn &dns.SOA{\n\t\tHdr: hdr,\n\t\tNs: ns,\n\t\tMbox: mbox,\n\t\tMinttl: minttl,\n\t\tRefresh: 60,\n\t\tRetry: 600,\n\t\tExpire: 86400,\n\t}\n}", "func (as *accountSet) contains(addr common.Address) bool {\n\t_, exist := as.accounts[addr]\n\treturn exist\n}", "func (idx *ExistsAddrIndex) existsAddress(bucket internalBucket, k [addrKeySize]byte) bool {\n\tif bucket.Get(k[:]) != nil {\n\t\treturn true\n\t}\n\n\tidx.unconfirmedLock.RLock()\n\t_, exists := idx.mpExistsAddr[k]\n\tidx.unconfirmedLock.RUnlock()\n\n\treturn exists\n}", "func (f *FooBarStruct) IncludeA(v string) bool {\n\treturn f.IndexA(v) > -1\n}", "func (o *LastBidAsk) HasA() bool {\n\tif o != nil && o.A != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ApplianceAllOfNetworkingIpv4Dhcp) HasDns() bool {\n\tif o != nil && o.Dns != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ExistsMsgSend(tx sdk.Tx) bool {\n\tfor _, msg := range tx.GetMsgs() {\n\t\tif msg.Route() == \"htdfservice\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (h *dnsHeader) isAQuery() bool {\n\treturn h.flags&dnsQR != dnsQR\n}", "func (o *V0037DiagRpcm) HasAveTime() bool {\n\tif o != nil && o.AveTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *IpamNetworkDataData) HasNetworkStartAddressAddr() bool {\n\tif o != nil && o.NetworkStartAddressAddr != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *headerMatcher) matchHeader(exists []byte, bs []byte) bool {\n\tl := len(exists) + len(bs)\n\tif l >= headerLength || l < headerPrefixLength {\n\t\treturn false\n\t}\n\th := append(exists, bs...)\n\treturn bytes.HasPrefix(h, headerStdoutPrefix) ||\n\t\tbytes.HasPrefix(h, headerStderrPrefix)\n}", "func (o *GroupReplaceRequest) 
HasAddress() bool {\n\tif o != nil && o.Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ss *SNSServer) DnsReady() (e error) {\n\n\t// if an SOA provider isn't given, we're done\n\tif ss.SOAProvider == \"\" {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif ss.waitForDns > 0 {\n\t\tctx, cancel = context.WithTimeout(context.Background(), ss.waitForDns)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel()\n\n\t// Creating the dns client for our query\n\tclient := dns.Client{\n\t\tNet: \"tcp\", // tcp to connect to the SOA provider? or udp (default)?\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: ss.waitForDns,\n\t\t},\n\t}\n\t// the message contains what we are looking for - the SOA record of the host\n\tmsg := dns.Msg{}\n\tmsg.SetQuestion(strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\", dns.TypeANY)\n\n\tdefer cancel()\n\n\tvar check = func() <-chan struct{} {\n\t\tvar channel = make(chan struct{})\n\n\t\tgo func(c chan struct{}) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tresponse *dns.Msg\n\t\t\t)\n\n\t\t\tfor {\n\t\t\t\t// sending the dns query to the soa provider\n\t\t\t\tresponse, _, err = client.Exchange(&msg, ss.SOAProvider)\n\t\t\t\t// if we found a record, then we are done\n\t\t\t\tif err == nil && response != nil && response.Rcode == dns.RcodeSuccess && len(response.Answer) > 0 {\n\t\t\t\t\tc <- struct{}{}\n\t\t\t\t\tss.metrics.DnsReady.Add(1.0)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// otherwise, we keep trying\n\t\t\t\tss.metrics.DnsReadyQueryCount.Add(1.0)\n\t\t\t\tss.logger.Info(\"checking if server's DNS is ready\",\n\t\t\t\t\tzap.String(\"endpoint\", strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\"), zap.Error(err), zap.Any(\"response\", response))\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}(channel)\n\n\t\treturn channel\n\t}\n\n\tselect {\n\tcase <-check():\n\tcase <-ctx.Done():\n\t\te = ctx.Err()\n\t}\n\n\treturn\n}", "func (n 
IPv4Network) Contains(addr string) bool {\n\tip := net.ParseIP(addr)\n\tipnet := n.GetNetwork()\n\treturn ipnet.Contains(ip)\n}", "func Contains(s []string, e string) bool {\n\tfor _, a := range s {\n\t\tif Peer(a, e) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (as Addresses) Contains(addr net.Addr) bool {\n\tfor _, one := range as {\n\t\t// TODO: support port wildcard\n\t\tif one.String() == addr.String() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func Contains(a []string, s string) bool {\n\tif len(a) == 0 {\n\t\treturn false\n\t}\n\treturn Index(a, s) >= 0\n}", "func (o *DnsZoneDataData) HasServerAddr() bool {\n\tif o != nil && o.ServerAddr != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (l *listeners) exists(uuid string) (ok bool) {\n\t_, ok = l.m[uuid]\n\treturn\n}", "func (ua *UserAddress) Exists() bool { //user_address\n\treturn ua._exists\n}", "func (r *Route) HasAlias(a string) bool {\n\tif r.IsDefault() {\n\t\treturn false\n\t}\n\ta = strings.ToLower(a)\n\tfor _, alias := range r.aliases {\n\t\tif strings.ToLower(alias) == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Contains(aa []string, s string) bool {\n\tfor _, v := range aa {\n\t\tif s == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TAttlistOtherIDSource) IsNasa() bool { return me.String() == \"NASA\" }", "func (me TxsdInvoiceType) IsAa() bool { return me.String() == \"AA\" }", "func (v *Resolver) Exists(tr *trace.Trace, addr string) (string, bool) {\n\ttr = tr.NewChild(\"Alias.Exists\", addr)\n\tdefer tr.Finish()\n\n\taddr = v.cleanIfLocal(addr)\n\n\trcpts, _ := v.lookup(addr, tr)\n\tif len(rcpts) > 0 {\n\t\treturn addr, true\n\t}\n\n\tdomain := envelope.DomainOf(addr)\n\tcatchAll, _ := v.lookup(\"*@\"+domain, tr)\n\tif len(catchAll) > 0 {\n\t\treturn addr, true\n\t}\n\n\treturn addr, false\n}", "func analyzeDns(w io.Writer, server, hostname string, samples, waitMillis int) {\n\tm := new(dns.Msg)\n\tm.Id = 
dns.Id()\n\tm.RecursionDesired = true\n\tm.Question = make([]dns.Question, 1)\n\tm.Question[0] = dns.Question{Name: dns.Fqdn(hostname), Qtype: dns.TypeA, Qclass: dns.ClassINET}\n\twait := time.Duration(waitMillis) * time.Millisecond\n\n\tc := new(dns.Client)\n\n\tfmt.Printf(\"QUERY %v (@%v): %v data bytes\\n\", hostname, server, m.Len())\n\n\trtts := make(DurationSlice, samples, samples)\n\tfor i := 0; i < samples; i++ {\n\t\tin, rtt, err := c.Exchange(m, server+\":53\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\trtts[i] = rtt\n\t\tfmt.Fprintf(w, \"%v bytes from %v: ttl=%v time=%v\\n\", in.Len(), server, time.Second*6, rtt)\n\t\ttime.Sleep(wait)\n\t}\n\n\t// NOTE: Potentially Eating Performance for Pretties\n\tvar min, max, avg, stddev time.Duration\n\tmin = rtts.Min()\n\tmax = rtts.Max()\n\tavg = rtts.Avg()\n\tstddev = rtts.Std()\n\n\tfmt.Fprintf(w, \"round-trip min/avg/max/stddev = %v/%v/%v/%v\\n\", min, avg, max, stddev)\n}", "func (m DelegateMap) Contains(name CandName) bool {\n\t_, ok := m[name]\n\treturn ok\n}", "func hasAddrsplosion(addrs []ma.Multiaddr) bool {\n\taset := make(map[string]int)\n\n\tfor _, a := range addrs {\n\t\tkey, port := addrKeyAndPort(a)\n\t\txport, ok := aset[key]\n\t\tif ok && port != xport {\n\t\t\treturn true\n\t\t}\n\t\taset[key] = port\n\t}\n\n\treturn false\n}", "func (o *IamIpAddress) HasAddress() bool {\n\tif o != nil && o.Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func hasFoundry(state kv.KVStoreReader, agentID isc.AgentID, sn uint32) bool {\n\treturn accountFoundriesMapR(state, agentID).HasAt(codec.EncodeUint32(sn))\n}", "func isFollowup(subj string) bool {\n\tfor _, prefix := range _BAD_PREFIXES {\n\t\tif strings.HasPrefix(strings.ToLower(subj), prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func hasPrefix(buf []byte, prefix []byte) bool {\n\ttrim := bytes.TrimLeftFunc(buf, unicode.IsSpace)\n\treturn bytes.HasPrefix(trim, prefix)\n}", "func (o 
*DnsZoneDataData) HasZoneAlsoNotify() bool {\n\tif o != nil && o.ZoneAlsoNotify != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *DnsZoneDataData) HasZoneNotify() bool {\n\tif o != nil && o.ZoneNotify != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func hasPrefix(s, prefix string) bool {\n\treturn len(prefix) <= len(s) && s[:len(prefix)] == prefix\n}", "func (c *AddressCacheBook) Add(address string) (bool, error) {\n\tc.addrs[strings.ToUpper(address)] = true\n\treturn true, nil\n}", "func (o *MicrosoftGraphEducationSchool) HasAddress() bool {\n\tif o != nil && o.Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (oss *OrderedStringSet) Contains(aString string) bool {\n\t_, ok := oss.members[aString]\n\treturn ok\n}", "func hasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}", "func StreetHasSuffix(v string) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldStreet), v))\n\t})\n}", "func getASPrefixes(as int, ipv4Slice *[]string, ipv6Slice *[]string) (int, int, error) {\n ann4 := 0; ann6 := 0\n url := fmt.Sprintf(\"https://stat.ripe.net//data/announced-prefixes/data.json?resource=AS%d\", as);\n res, err := http.Get(url);\n if err == nil {\n bytes, err := ioutil.ReadAll(res.Body)\n res.Body.Close()\n if err == nil {\n var data map[string]interface{}\n if err := json.Unmarshal(bytes, &data); err != nil {\n err := errors.New(\"JSON parsing failed\")\n return 0, 0, err\n }\n if data[\"status\"] == \"ok\" {\n prefixes := data[\"data\"].(map[string]interface{})[\"prefixes\"].([]interface{})\n for j := 0; j < len(prefixes); j++ {\n prefix := prefixes[j].(map[string]interface{})[\"prefix\"].(string)\n if strings.ContainsRune(prefix, ':') {\n //fmt.Printf(\"# IPv6: %s\\n\", prefix)\n *ipv6Slice=append(*ipv6Slice, prefix);\n ann6++\n } else {\n //fmt.Printf(\"# IPv4: %s\\n\", prefix)\n *ipv4Slice=append(*ipv4Slice, prefix);\n ann4++\n 
}\n }\n }\n } else {\n return 0, 0, errors.New(\"Reading document body failed\")\n }\n } else {\n return 0, 0, errors.New(\"HTTP request failed\")\n }\n return ann4, ann6, nil\n}", "func (o *InlineResponse20027Person) HasAddress() bool {\n\tif o != nil && o.Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WafTrafficPolicy) HasSpamAndAbuse() bool {\n\tif o != nil && o.SpamAndAbuse != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsDefaultARecord(record *DomainRecord) bool {\n\treturn record.Name == Ptr && record.Type == AType && record.TTL == DefaultTTL\n}", "func (o *V0037Node) HasAddress() bool {\n\tif o != nil && o.Address != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a *Audience) HasEntered() bool {\n\tsession := GetMongo()\n\tdefer session.Close()\n\n\tc := bson.M{AudienceColumns.BroadcastID: a.BroadcastID, AudienceColumns.UserID: a.UserID}\n\terr := session.DB(DBName).C(ColNameAudience).Find(c).One(&a)\n\tif err != nil {\n\t\tif err == mgo.ErrNotFound {\n\t\t\treturn false\n\t\t}\n\t}\n\ta.LeaveTime = time.Time{}\n\treturn true\n}", "func (b *bot) hasSNExternalID(externalID string) bool {\n\tvar exists string\n\terr := b.queryRow(\"SELECT 1 FROM supernytt WHERE external_id = $1\", externalID).Scan(&exists)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (s *DNSSeeder) addNa(nNa *wire.NetAddress) bool {\n\n\tif len(s.nodes) > s.maxSize {\n\t\treturn false\n\t}\n\n\tif nNa.Port != s.Port {\n\t\treturn false\n\t}\n\n\t// generate the key and add to nodes\n\tk := net.JoinHostPort(nNa.IP.String(), strconv.Itoa(int(nNa.Port)))\n\n\tif _, dup := s.nodes[k]; dup == true {\n\t\treturn false\n\t}\n\n\t// if the reported timestamp suggests the netaddress has not been seen in the last 24 hours\n\t// then ignore this netaddress\n\tif (time.Now().Add(-(time.Hour * 24))).After(nNa.Timestamp) {\n\t\treturn false\n\t}\n\n\tnt := node{\n\t\tna: nNa,\n\t\tstatus: statusRG,\n\t\tdnsType: dns.TypeA,\n\t}\n\n\t// 
select the dns type based on the remote address type and port\n\tif x := nt.na.IP.To4(); x == nil {\n\t\tnt.dnsType = dns.TypeAAAA\n\t}\n\n\t// add the new node details to nodes\n\ts.nodes[k] = &nt\n\n\treturn true\n}", "func (me TAttlistKeywordListOwner) IsNasa() bool { return me.String() == \"NASA\" }", "func (x *fastReflection_AddressStringToBytesResponse) Has(fd protoreflect.FieldDescriptor) bool {\n\tswitch fd.FullName() {\n\tcase \"cosmos.auth.v1beta1.AddressStringToBytesResponse.address_bytes\":\n\t\treturn len(x.AddressBytes) != 0\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.auth.v1beta1.AddressStringToBytesResponse\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.auth.v1beta1.AddressStringToBytesResponse does not contain field %s\", fd.FullName()))\n\t}\n}", "func (o *IPPrefixesSynthetics) HasPrefixesIpv4ByLocation() bool {\n\tif o != nil && o.PrefixesIpv4ByLocation != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PaymentInitiationRecipient) HasAddress() bool {\n\tif o != nil && o.Address.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *PSlice) exists(addr swarm.Address) (bool, int) {\n\tif len(s.peers) == 0 {\n\t\treturn false, 0\n\t}\n\tfor i, a := range s.peers {\n\t\tif a.Equal(addr) {\n\t\t\treturn true, i\n\t\t}\n\t}\n\treturn false, 0\n}", "func contains(slice []string, sa string) bool {\n\tfor _, sb := range slice {\n\t\tif sa == sb {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func checkRecord(s string) bool {\n\n}", "func (o *DnsZoneDataData) HasZoneDelayedCreateTime() bool {\n\tif o != nil && o.ZoneDelayedCreateTime != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Contains(s, substr string) bool {\n\tfor i := range s {\n\t\tif HasPrefix(s[i:], substr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (i *IE) HasASSOSI() bool {\n\tif i.Type != UserPlaneIPResourceInformation {\n\t\treturn false\n\t}\n\tif 
len(i.Payload) < 1 {\n\t\treturn false\n\t}\n\n\treturn has7thBit(i.Payload[0])\n}", "func (s stdlib) LookupAAAAC(string) ([]string, error) {\n\treturn nil, ErrNotImplemented\n}", "func (p partition) HasPartitionAs(t *testing.T, B partition, incarnation int64, status string) {\n\tfor _, a := range p {\n\t\tfor _, b := range B {\n\t\t\tmem, ok := a.node.memberlist.Member(b.node.Address())\n\t\t\tassert.True(t, ok, \"expected members to contain member\")\n\t\t\tassert.Equal(t, incarnation, mem.Incarnation, \"expected correct incarnation number\")\n\t\t\tassert.Equal(t, status, mem.Status, \"expected correct status\")\n\t\t}\n\t}\n}", "func (o *DnsZoneDataData) HasZoneForward() bool {\n\tif o != nil && o.ZoneForward != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (arr *SRArr) IsDidExist(did string) bool {\n\n\tfindPos := sort.Search(len(*arr), func(i int) bool {\n\t\treturn (*arr)[i].Did >= did\n\t})\n\n\t//find the first ClientIsJoin == false session after small did\n\tif findPos < len(*arr) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (b *blacklist) Has(r string) bool {\n\tif r == \"\" {\n\t\treturn false\n\t}\n\n\tscanner := bufio.NewScanner(bytes.NewReader(b.data))\n\tdomain := \"\"\n\n\tfor scanner.Scan() {\n\t\tdomain = scanner.Text()\n\t\tif strings.HasSuffix(r, domain) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *Capitalization) HasATT_NAME() bool {\n\tif o != nil && o.ATT_NAME != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (as AddrRanges) Contains(addr Address) bool {\n\tfor _, a := range as {\n\t\tif a.Contains(addr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func checkOverlaps(a, ap Span) bool {\n\tif len(a.EndKey) == 0 {\n\t\treturn bytes.Compare(ap.EndKey, a.StartKey) > 0\n\t}\n\treturn bytes.Compare(a.StartKey, ap.EndKey) < 0 && bytes.Compare(ap.StartKey, a.EndKey) < 0\n}", "func (o *NetworkElementSummaryAllOf) HasInbandIpAddress() bool {\n\tif o != nil && o.InbandIpAddress != nil 
{\n\t\treturn true\n\t}\n\n\treturn false\n}" ]
[ "0.6227902", "0.61794496", "0.59920263", "0.58651966", "0.5799353", "0.5728175", "0.5353992", "0.5345535", "0.5342247", "0.5326436", "0.5300598", "0.5182696", "0.5155952", "0.51235956", "0.5079019", "0.5046965", "0.50436896", "0.5026131", "0.50196564", "0.49746326", "0.49627617", "0.49198365", "0.4902515", "0.48898768", "0.48870423", "0.48758468", "0.48463595", "0.48426107", "0.47984856", "0.47552082", "0.47302994", "0.47213468", "0.47109467", "0.4706835", "0.47039565", "0.47016278", "0.46905595", "0.46880463", "0.46833113", "0.4678841", "0.4673133", "0.46679947", "0.46669617", "0.46634108", "0.46566224", "0.46119964", "0.46117562", "0.4609815", "0.46034032", "0.45931977", "0.45925307", "0.45899588", "0.45801488", "0.45784047", "0.4576492", "0.4563262", "0.45605493", "0.45581424", "0.45494393", "0.45465335", "0.4538864", "0.45322877", "0.45320255", "0.45274663", "0.45255932", "0.45213157", "0.45186704", "0.45150733", "0.45147473", "0.45136932", "0.45122686", "0.45119423", "0.45102593", "0.4507953", "0.45058686", "0.4503775", "0.44932762", "0.44928896", "0.44923508", "0.44867116", "0.448528", "0.44834095", "0.4463965", "0.44499716", "0.44475722", "0.4447084", "0.44432795", "0.44358176", "0.44347456", "0.44339618", "0.44329423", "0.44322976", "0.44192067", "0.44184053", "0.44180527", "0.44176897", "0.44164217", "0.44132334", "0.44101354", "0.4406022" ]
0.84228426
0
SetupTest sets test environment variables.
func (s AWSTestSuite) SetupTest() { os.Clearenv() os.Setenv(env.EnvAWSAccessKeyID, "ID") os.Setenv(env.EnvAWSBucket, "test.example.com") os.Setenv(env.EnvAWSPath, "/backup/database") os.Setenv(env.EnvAWSSecretAccessKey, "secret") os.Setenv(env.EnvAWSRegion, "us-east-1") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (env testEnvironment) SetupTestEnvironment() error {\n\n\terr := env.API.CreateProject(env.EventData.Project, env.shipyard)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create a project in keptn: %w\", err)\n\t}\n\n\t// Create a service in Keptn\n\terr = env.API.CreateService(env.EventData.Project, env.EventData.Service)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create a service in keptn: %w\", err)\n\t}\n\n\terr = env.API.CreateJobConfig(env.EventData.Project, env.EventData.Stage, env.EventData.Service, env.jobConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func SetupTestCase(t *testing.T) {\r\n\t// setup code in here.\r\n\terr := LoadEnvFile(t)\r\n\tif err != nil {\r\n\t\t// even if we couldn't load the env file we won't fail the test\r\n\t\t// but if proper env variables aren't set, tests will fail on test data validation\r\n\t\t//t.Fatal(err)\r\n\t}\r\n}", "func setupTestEnv(t *testing.T) testEnv {\n\tdirPath, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get current working directory for testing - %s\", err.Error())\n\t}\n\n\t_, err = os.Stat(path.Join(dirPath, \"go.mod\"))\n\tif err != nil {\n\t\tt.Fatalf(\"current working directory is not repo - %s\", err.Error())\n\t}\n\n\ttestDataDir := path.Join(dirPath, \".testdata\")\n\n\terr = os.MkdirAll(testDataDir, 0700)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create test data directory - %s\", err.Error())\n\t}\n\n\tname := randStringBytesRmndr(10)\n\tresource := path.Join(testDataDir, name)\n\tif runtime.GOOS == \"windows\" {\n\t\tresource = name\n\t}\n\n\treturn testEnv{\n\t\tmutexConfig: MutexConfig{\n\t\t\tResource: resource,\n\t\t},\n\t\tdataDirPath: testDataDir,\n\t\tharnessSrcPath: path.Join(dirPath, \"cmd/testharness/main.go\"),\n\t}\n}", "func setupTestEnvironment() {\n\tapiRoot = testApiRoot\n\n\t// Get test api key from balanced\n\tkey := ApiKey{}\n\terr := post(apiKeyUri, nil, &key)\n\tif err != nil {\n\t\tlog.Println(\"Unable to 
generate test key\")\n\t\tos.Exit(1)\n\t}\n\n\tapiKey = key.Secret\n\n\t// Get test marketplace from balanced\n\tmarketplace := Marketplace{}\n\terr = post(marketplaceUri, nil, &marketplace)\n\tif err != nil {\n\t\tlog.Println(\"Unable to generate test marketplace\")\n\t\tos.Exit(1)\n\t}\n\n\tmarketplaceId = marketplace.Id\n}", "func TestSetupEnvironment(t *testing.T) {\n\t// Parse CloudFormation templates\n\terr := cfn.DecodeTemplateVariables()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// The tests only work in us-east-2, so change the region if the region is not correct\n\tif *sess.Config.Region != correctRegion {\n\t\tsess.Config.Region = aws.String(correctRegion)\n\t\tc.Svc = cloudformation.New(sess)\n\t}\n\n\t_, _, instanceId, _, err = c.CreateStackAndGetResources(nil, aws.String(testStackName),\n\t\tcfn.E2eConnectTestCloudformationTemplate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for a while after stack creation so that the instance can properly initialize\n\ttime.Sleep(cfn.PostCreationWait)\n}", "func TestSetup(t *testing.T) {\n\ttestServer, testData = setup(t) // Initialize all test data for this suite.\n\tdataClient = data.NewClient()\n}", "func SetupTestEnvironment() (*dbconn.DBConn, sqlmock.Sqlmock, *Buffer, *Buffer, *Buffer) {\n\tconnectionPool, mock, testStdout, testStderr, testLogfile := testhelper.SetupTestEnvironment()\n\n\t// Default if not set is GPDB version `5.1.0`\n\tenvTestGpdbVersion := os.Getenv(\"TEST_GPDB_VERSION\")\n\tif envTestGpdbVersion != \"\" {\n\t\ttesthelper.SetDBVersion(connectionPool, envTestGpdbVersion)\n\t}\n\n\tSetupTestCluster()\n\tbackup.SetVersion(\"0.1.0\")\n\treturn connectionPool, mock, testStdout, testStderr, testLogfile\n}", "func TestSetup() {\n\tviper.Reset()\n\tviper.SetConfigType(\"yaml\")\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\tviper.AutomaticEnv()\n\tos.Setenv(\"RESTIC_PASSWORD\", ResticPassword)\n}", "func (suite *baseReplicationTestSuite) SetupTest() 
{\n\tsuite.mu.Lock()\n\tdefer suite.mu.Unlock()\n\n\tsuite.clientStateDir = suite.T().TempDir()\n\n\tif suite.srvProvider == nil {\n\t\tsuite.srvProvider = &inProcessTestServerProvider{}\n\t}\n}", "func SetupTestEnv(t *testing.T, opts ...TestOption) *TestSetup {\n\t// Hack to get the path of this file in order to find the crd path no matter\n\t// where this is called from.\n\t_, thisFileName, _, _ := runtime.Caller(0)\n\tcrdPath := filepath.Join(filepath.Dir(thisFileName), \"..\", \"..\", \"..\", \"config\", \"crd\", \"bases\")\n\ttestEnv := &envtest.Environment{\n\t\tCRDDirectoryPaths: []string{crdPath},\n\t\tErrorIfCRDPathMissing: true,\n\t}\n\n\tcfg, err := testEnv.Start()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, cfg)\n\n\tk8sClient, err := kclient.New(cfg, kclient.Options{Scheme: scheme})\n\trequire.NoError(t, err)\n\trequire.NotNil(t, k8sClient)\n\n\tns := createNamespaceForTest(t, k8sClient)\n\n\tt.Cleanup(func() {\n\t\tdeleteNamespaceForTest(t, k8sClient, ns)\n\t\terr = testEnv.Stop()\n\t\trequire.NoError(t, err)\n\t})\n\n\tsetup := &TestSetup{\n\t\tK8sClient: k8sClient,\n\t\tNamespace: ns,\n\t\tK8sRestConfig: cfg,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(setup)\n\t}\n\n\tsetupTeleportClient(t, setup)\n\n\t// Create and start the Kubernetes operator\n\tsetup.StartKubernetesOperator(t)\n\n\tt.Cleanup(func() {\n\t\tsetup.StopKubernetesOperator()\n\t})\n\n\treturn setup\n}", "func SetupTestEnv() *TestEnv {\n\tctx := context.Background()\n\tmux := http.NewServeMux()\n\tserver := httptest.NewServer(mux)\n\n\ttestEnv := &TestEnv{\n\t\tMux: mux,\n\t\tServer: server,\n\t\tContext: ctx,\n\t}\n\n\terr := testEnv.TokenMock()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create mock for token info request, err: %v\", err)\n\t}\n\n\treturn testEnv\n}", "func TestSetup(t *testing.T) {\n Log(\"Setting up\")\n\n Debug(\"login: %s\", *login)\n Debug(\"password: %s\", *password)\n Debug(\"port: %d\", *port)\n Debug(\"verbose: %t\", *verbose)\n\n dir, err := 
ioutil.TempDir(Dir, Prefix)\n if err != nil {\n t.Errorf(\"Unable to create test directory in %s: %s\", Dir, err)\n return\n }\n\n TmpDir = dir\n Debug(\"Temp Dir: %s\", TmpDir)\n\n file, err := ioutil.TempFile(TmpDir, Prefix)\n if err != nil {\n t.Errorf(\"Unable to create temp file in %s: %s\", TmpDir, err)\n return\n }\n\n TmpFile = file\n Debug(\"Temp File: %s\", TmpFile.Name())\n}", "func (suite *OauthTestSuite) SetupTest() {\n\t//\n}", "func (suite *SavingsRewardsTestSuite) SetupTest() {\n\tconfig := sdk.GetConfig()\n\tapp.SetBech32AddressPrefixes(config)\n\n\t_, allAddrs := app.GeneratePrivKeyAddressPairs(10)\n\tsuite.addrs = allAddrs[:5]\n\tsuite.genesisTime = time.Date(2020, 12, 15, 14, 0, 0, 0, time.UTC)\n}", "func SetupTestConfig() {\n\tflag.Parse()\n\n\t// Now set the configuration file\n\tviper.SetEnvPrefix(\"CORE\")\n\tviper.AutomaticEnv()\n\treplacer := strings.NewReplacer(\".\", \"_\")\n\tviper.SetEnvKeyReplacer(replacer)\n\tviper.SetConfigName(\"core\") // name of config file (without extension)\n\tconfigtest.AddDevConfigPath(nil)\n\n\terr := viper.ReadInConfig() // Find and read the config file\n\tif err != nil { // Handle errors reading the config file\n\t\tpanic(fmt.Errorf(\"Fatal error config file: %s \\n\", err))\n\t}\n\n\t// Init the BCCSP\n\tvar bccspConfig *factory.FactoryOpts\n\terr = viper.UnmarshalKey(\"peer.BCCSP\", &bccspConfig)\n\tif err != nil {\n\t\tbccspConfig = nil\n\t}\n\n\ttmpKeyStore, err := ioutil.TempDir(\"/tmp\", \"msp-keystore\")\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not create temporary directory: %s\\n\", tmpKeyStore))\n\t}\n\n\tmsp.SetupBCCSPKeystoreConfig(bccspConfig, tmpKeyStore)\n\n\terr = factory.InitFactories(bccspConfig)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Could not initialize BCCSP Factories [%s]\", err))\n\t}\n}", "func (t *SubprocessTest) SetUp(ti *ogletest.TestInfo) {\n\terr := t.initialize(ti.Ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (suite *TestSuite) SetupTest() {\n\thostname 
:= \"localhost\"\n\tusername := \"bleh\"\n\tpassword := \"bleh\"\n\tsuite.client = NewClient(hostname, username, password, false, true)\n}", "func setupTest() {\n}", "func TestSetup(t *imagetest.TestWorkflow) error {\n\tvm, err := t.CreateTestVM(vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn vm.ResizeDiskAndReboot(resizeDiskSize)\n}", "func (suite *DetectorTestSuite) SetupTest() {\n\tdefaultCatalog = make(Catalog)\n\tcollectorPriorities = make(map[string]CollectorPriority)\n}", "func (s *Suite) Setup() {\n\t// We manipuate the Args to set them up for the testcases\n\t// after this test we restore the initial args\n\toldArgs := os.Args\n\tdefer func() { os.Args = oldArgs }()\n\tcases := []struct {\n\t\tName string\n\t\tArgs []string\n\t}{\n\t\t{\"flags set\", []string{\"--config\", \"../../internal/config/config.yaml\", \"--environment\", \"test\"}},\n\t}\n\tfor _, tc := range cases {\n\t\t// this call is required because otherwise flags panics, if args are set between flag.Parse calls\n\t\tflag.CommandLine = flag.NewFlagSet(tc.Name, flag.ExitOnError)\n\t\t// we need a value to set Args[0] to, cause flag begins parsing at Args[1]\n\t\tos.Args = append([]string{tc.Name}, tc.Args...)\n\t}\n\n\tconfig.Init()\n\n\ts.ent = enttest.Open(s.T(), dialect.SQLite,\n\t\tfmt.Sprintf(\"file:%s-%d?mode=memory&cache=shared&_fk=1\",\n\t\t\ts.T().Name(), time.Now().UnixNano(),\n\t\t),\n\t\tenttest.WithMigrateOptions(migrate.WithGlobalUniqueID(true)),\n\t)\n\n\tsrv := handler.New(resolver.NewSchema(s.ent))\n\tsrv.AddTransport(transport.POST{})\n\tsrv.Use(entgql.Transactioner{TxOpener: s.ent})\n\n\tm := auth.Middleware(s.ent)\n\ts.Client = client.New(m(srv))\n\n\tinitdb.InitData(context.Background(), s.ent, config.AppConfig)\n\tInitUser = s.LoginWithInitUser()\n}", "func (tc *testContext) sshSetup() error {\n\tif err := tc.ensureTestRunnerSA(); err != nil {\n\t\treturn fmt.Errorf(\"error ensuring SA created: %w\", err)\n\t}\n\tif err := tc.ensureTestRunnerRole(); err != nil 
{\n\t\treturn fmt.Errorf(\"error ensuring Role created: %w\", err)\n\t}\n\tif err := tc.ensureTestRunnerRoleBinding(); err != nil {\n\t\treturn fmt.Errorf(\"error ensuring RoleBinding created: %w\", err)\n\t}\n\treturn nil\n}", "func (s *EcdhTestSuite) SetupTest() {\n\ts.e1 = NewEcdh(nil)\n\ts.e2 = NewEcdh(elliptic.P224())\n\ts.e3 = &Ecdh{\n\t\tcurve: elliptic.P256(),\n\t}\n\ts.sharedkey1 = []byte(nil)\n\ts.sharedkey2 = []byte{0, 2, 4}\n\n}", "func SetupEnvironment(root, key, marketId string) {\n\tapiRoot = root\n\tapiKey = key\n\tmarketplaceId = marketId\n}", "func (suite *KeeperTestSuite) SetupTest() {\n\ttApp := app.NewTestApp()\n\n\tctx := tApp.NewContext(true, tmprototypes.Header{Height: 1, Time: tmtime.Now()})\n\ttApp.InitializeFromGenesisStates()\n\t_, addrs := app.GeneratePrivKeyAddressPairs(5)\n\tvar strAddrs []string\n\tfor _, addr := range addrs {\n\t\tacc := tApp.GetAccountKeeper().NewAccountWithAddress(ctx, addr)\n\t\ttApp.GetAccountKeeper().SetAccount(ctx, acc)\n\t\tstrAddrs = append(strAddrs, addr.String())\n\t}\n\n\tkeeper := tApp.GetIssuanceKeeper()\n\tmodAccount, err := sdk.AccAddressFromBech32(\"kava1cj7njkw2g9fqx4e768zc75dp9sks8u9znxrf0w\")\n\tsuite.Require().NoError(err)\n\n\tsuite.tApp = tApp\n\tsuite.ctx = ctx\n\tsuite.keeper = keeper\n\tsuite.addrs = strAddrs\n\tsuite.modAccount = modAccount\n}", "func (suite *TrackvisitedTestSuite) SetupTest() {}", "func (suite *APIContainerInspectSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, environment.IsLinux)\n}", "func (suite *HandlerTestSuite) SetupTest() {\n\tsuite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)\n\tsuite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))\n\tsuite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))\n}", "func (s *statsComputerSuite) SetupTest() {\n\t// Have to define our overridden assertions in the test setup. 
If we did it earlier, s.T() will return nil\n\ts.Assertions = require.New(s.T())\n\ts.sc = &statsComputer{}\n}", "func setupTestEnv(t *testing.T, method string, endpoint string, bodystr string, ghUsername string) (*httptest.ResponseRecorder, *http.Request, *Env) {\n\trec := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, endpoint, strings.NewReader(bodystr))\n\tif err != nil {\n\t\tt.Fatalf(\"got non-nil error: %v\", err)\n\t}\n\n\tenv := getTestEnv()\n\treq = loginWithTestUser(t, req, env, ghUsername)\n\n\treturn rec, req, env\n}", "func SetupEnvironment(trelloKey, trelloToken, trelloNextActionsListID, trelloProjectsListID string) {\n\tos.Setenv(\"TRELLO_KEY\", trelloKey)\n\tos.Setenv(\"TRELLO_TOKEN\", trelloToken)\n\tos.Setenv(\"TRELLO_NEXT_ACTIONS_LIST_ID\", trelloNextActionsListID)\n\tos.Setenv(\"TRELLO_PROJECTS_LIST_ID\", trelloProjectsListID)\n}", "func (suite *testSuite) SetupTest() {\n\tvar err error\n\tsuite.testID = xid.New().String()\n\n\tsuite.builder, err = NewBuilder(suite.logger, nil)\n\tif err != nil {\n\t\tsuite.Fail(\"Instantiating Builder failed:\", err)\n\t}\n\n\tcreateFunctionOptions := &platform.CreateFunctionOptions{\n\t\tLogger: suite.logger,\n\t\tFunctionConfig: *functionconfig.NewConfig(),\n\t}\n\n\tcreateFunctionBuildOptions := &platform.CreateFunctionBuildOptions{\n\t\tLogger: createFunctionOptions.Logger,\n\t\tFunctionConfig: createFunctionOptions.FunctionConfig,\n\t}\n\n\tsuite.builder.options = createFunctionBuildOptions\n}", "func (s *MarketplaceTestSuite) SetupTest() {\n\t// call parent's method.\n\ts.AppTestSuite.SetupTest()\n\n\tconn, err := NewGRPCClient(s.App.Creds())\n\ts.Require().NoError(err, \"cannot get grpc client\")\n\n\ts.conn = conn\n\ts.client = pb.NewMarketClient(conn)\n}", "func Setup() {\n\n\t// first merge in defaults\n\tmerge(&Data, defaultData)\n\n\tmode := os.Getenv(\"GO_ENV\")\n\n\t// merge in env specific configs\n\tswitch mode {\n\tcase \"dev\", \"\": // \"\" means default is 
dev\n\t\tmerge(&Data, devData)\n\tcase \"test\":\n\t\tmerge(&Data, testData)\n\tcase \"prod\":\n\t\tmerge(&Data, prodData)\n\t}\n\n\t// try to overwrite from env variables\n\tfor _, f := range structs.New(&Data).Fields() {\n\t\tnameCamel := toSnakeCase(f.Name())\n\t\tnameUpper := strings.ToUpper(nameCamel)\n\t\tenvVal, ok := os.LookupEnv(nameUpper)\n\t\tif ok { // ok means env variable exists, even if its value is blank\n\t\t\tf.Set(envVal)\n\t\t}\n\t}\n\n}", "func (s *TankBoardTestSuite) SetupTest() {\n\n\t// Return the right type for drivers\n\ts.adaptor.SetValueReadState(\"isRebooted\", false)\n\ts.adaptor.SetValueReadState(\"distance\", float64(0))\n\n\t// Config\n\ts.board.config = &models.TankConfig{\n\t\tDepth: 100,\n\t\tLiterPerCm: 1,\n\t\tSensorHeight: 0,\n\t}\n}", "func SetUpEnvironment() {\n\tif !environ.HasValue(\"BOT_NAME\") {\n\t\thostname, err := os.Hostname()\n\t\tif err != nil {\n\t\t\tlogs.Panicf(\"Failed to get hostname: %+v\\n\", err)\n\t\t}\n\n\t\tenviron.SetValue(\"BOT_NAME\", strings.ToLower(hostname))\n\t}\n\n\tbotDir := path.Join(RootDir(), \"bot\")\n\tinputsDir := path.Join(botDir, \"inputs\")\n\tlogDir := path.Join(botDir, \"logs\")\n\tcacheDir := path.Join(botDir, \"cache\")\n\n\tenviron.SetValue(\"CACHE_DIR\", cacheDir)\n\tenviron.SetValue(\"LOG_DIR\", logDir)\n\tenviron.SetValue(\"FUZZ_INPUTS\", path.Join(inputsDir, \"fuzzer-testcases\"))\n\tenviron.SetValue(\"FUZZ_INPUTS_DISK\", path.Join(inputsDir, \"fuzzer-testcases-disk\"))\n}", "func (suite *APIContainerAttachSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, environment.IsLinux)\n}", "func setupVars() {\n\tvar err error\n\n\t// setup logging and other general configurations\n\tlogger, err = util.MakeLogger(\"debug\", \"json\")\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"could not create test logger: %v\\n\", err)\n\t\tos.Exit(10)\n\t}\n\n\t// setup kubernetes related configurations\n\tfakeK8sClient = fake.NewClientBuilder().Build()\n\n\t// setup kong proxy related 
configurations\n\tfakeKongAdminAPI, err = kongt.NewFakeAdminAPIServer()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"could not setup Kong Proxy testing environment: %v\\n\", err)\n\t\tos.Exit(10)\n\t}\n\tfakeKongConfig.Client = fakeKongAdminAPI.KongClient\n}", "func (s *BaseTest) SetUpTest(c *check.C) {\n\ts.cleanupHandlers = nil\n}", "func (s *SignSuite) SetUpTest(c *C) {\n}", "func SetupForTesting(t *testing.T) {\n\tpools := lib.FetchPools(t, AviClients[0])\n\tinitialNumOfPools = len(pools)\n\tVSes := lib.FetchVirtualServices(t, AviClients[0])\n\tinitialVSesList = []string{}\n\tfor _, vs := range VSes {\n\t\tinitialVSesList = append(initialVSesList, *vs.Name)\n\t}\n\tinitialNumOfVSes = len(initialVSesList)\n\tinitialFQDNList = lib.FetchDNSARecordsFQDN(t, dnsVSUUID, AviClients[0])\n\tinitialNumOfFQDN = len(initialFQDNList)\n\tingressHostNames = []string{}\n\tingressSecureHostNames = []string{}\n\tingressInsecureHostNames = []string{}\n\tingressesCreated = []string{}\n\tingressesDeleted = []string{}\n\tingressesUpdated = []string{}\n}", "func Setup() *CoreTest {\n\tct := new(CoreTest)\n\tct.DB = LoadDatabase(ct.Core.Log)\n\tct.Core, ct.Test = Services(ct.DB)\n\tct.Request = NewRequest()\n\treturn ct\n}", "func (suite *HandlerTestSuite) SetupTest() {\n\tsuite.repo = repositoryForTest()\n\tsuite.publisher = publisherForTest()\n\tsuite.handler = routerForTest(suite.repo, suite.publisher, loggerForTest())\n}", "func (suite *ServiceTestSuite) SetupTest() {\n\tsuite.mockCtrl = gomock.NewController(suite.T())\n\tsuite.resmgrClient = resource_mocks.NewMockResourceManagerServiceYARPCClient(suite.mockCtrl)\n\tsuite.hostMgrClient = host_mocks.NewMockInternalHostServiceYARPCClient(suite.mockCtrl)\n\tsuite.metrics = metrics.NewMetrics(tally.NoopScope)\n\tsuite.hostService = NewService(suite.hostMgrClient, suite.resmgrClient, suite.metrics)\n}", "func (envManager *TestEnvManager) StartUp() (err error) {\n\tif err = envManager.testEnv.Bringup(); err != nil 
{\n\t\tlog.Printf(\"Failed to bring up environment\")\n\t\treturn\n\t}\n\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\tif err := comp.Start(); err != nil {\n\t\t\tlog.Printf(\"Failed to setup component: %s\", comp.GetName())\n\t\t\treturn err\n\t\t}\n\t}\n\tif ready, err := envManager.WaitUntilReady(); err != nil || !ready {\n\t\terr = fmt.Errorf(\"failed to get env ready: %s\", err)\n\t\treturn err\n\t}\n\tlog.Printf(\"Successfully started environment %s\", envManager.testEnv.GetName())\n\treturn\n}", "func (b *DatabaseTestSuiteBase) SetupTest() {\n\tassert := require.New(b.T())\n\n\ttx, err := storage.DB().Beginx()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb.tx = tx\n\n\tstorage.RedisClient().FlushAll(context.Background())\n\tassert.NoError(storage.MigrateDown(storage.DB().DB))\n\tassert.NoError(storage.MigrateUp(storage.DB().DB))\n}", "func TestENV(t *testing.T) {\n\tif confy.GetEnvironment() != \"test\" {\n\t\tt.Skipf(\"skipping test. Env should be test when running `go test`, instead env is %v\", confy.GetEnvironment())\n\t}\n\n\tos.Setenv(\"CONFY_ENV\", \"production\")\n\tdefer os.Setenv(\"CONFY_ENV\", \"\")\n\tconfy.DefaultConfy = confy.NewConfy()\n\tif confy.GetEnvironment() != \"production\" {\n\t\tt.Errorf(\"Env should be production when set it with CONFY_ENV\")\n\t}\n}", "func init() {\n\ttestEnv.Init()\n}", "func (suite *HandlersTestSuite) SetupTest() {\n\terr := os.Setenv(\"JWT_SECRET\", td.JwtSecret)\n\tsuite.NoError(err)\n\n\tmockDB := &db.MockService{}\n\terr = mockDB.ParseDB(os.Getenv(\"DATABASE_URL\"))\n\tsuite.NoError(err)\n\terr = mockDB.Init()\n\tsuite.NoError(err)\n\n\t// Reset database data\n\tdb.MockDB = db.MockDatabase{\n\t\tUsers: []*schemas.AppUser{},\n\t\tCreditScores: []*schemas.CreditScore{},\n\t\tCustomers: []*schemas.Customer{},\n\t}\n\n\tmockRedis := cache.NewMockRedis()\n\th = NewHandler(mockDB, mockRedis)\n\th.UserId = td.UserUUID\n}", "func (ctx *TestContext) SetupTestContext(pickle *messages.Pickle) 
{\n\tlog.WithField(\"type\", \"test\").Infof(\"Starting test scenario: %s\", pickle.Name)\n\n\tvar logHook *test.Hook\n\tlogHook, ctx.logsRestoreFunc = log.MockSharedLoggerHook()\n\tctx.logsHook = &loggingtest.Hook{Hook: logHook}\n\n\tctx.setupApp()\n\tctx.userID = 0 // not set\n\tctx.lastResponse = nil\n\tctx.lastResponseBody = \"\"\n\tctx.inScenario = true\n\tctx.requestHeaders = map[string][]string{}\n\tctx.dbTableData = make(map[string]*messages.PickleStepArgument_PickleTable)\n\tctx.templateSet = ctx.constructTemplateSet()\n\tctx.identifierReferences = make(map[string]int64)\n\tctx.dbTables = make(map[string]map[string]map[string]interface{})\n\tctx.needPopulateDatabase = false\n\n\t// reset the seed to get predictable results on PRNG for tests\n\trand.Seed(1)\n\n\terr := ctx.initDB()\n\tif err != nil {\n\t\tfmt.Println(\"Unable to empty db\")\n\t\tpanic(err)\n\t}\n}", "func (t *TestFramework) Setup() {\n\t// Global initialization for the whole framework goes in here\n\n\t// Set up the actual test suite\n\tgomega.Expect(t.setup(t)).To(gomega.Succeed())\n}", "func (suite *PouchAPIHelpSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, IsLinux)\n}", "func (suite *PostRootSuite) SetupTest() {\n\t// Setup router\n\treturn\n}", "func CommonSetup(d Defaults, t *testing.T) *TestEnv {\n\tenvLock.Lock()\n\tdefer envLock.Unlock()\n\tif baseEnv.Vars.Kubeconfig != \"\" {\n\t\tt.Logf(\"Environment already initialized\")\n\t} else {\n\t\tbaseEnv = TestEnv{\n\t\t\tVars: resolveEnvVars(d, t),\n\t\t}\n\t\tSubstituteOverlayVars(d.OperatorKustomizeBase, baseEnv.Vars, t)\n\t\tif t.Failed() {\n\t\t\treturn nil\n\t\t}\n\t\tbaseEnv.InstallCrds(d, t)\n\t\tSubstituteOverlayVars(\"testdata/spinnaker/base\", baseEnv.Vars, t)\n\t}\n\treturn &TestEnv{\n\t\tVars: baseEnv.Vars,\n\t}\n}", "func (ts *SubscriptionTestSuite) SetupTest() {\n\tconfig, err := conf.LoadTestConfig(\"../config_test.json\")\n\trequire.NoError(ts.T(), err)\n\tconn:= CreateMockedConnection()\n\tstripe:= 
CreateMockedStripe()\n\tapi := Create(config, conn, stripe)\n\tts.API = api\n}", "func (suite *PouchDaemonSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, environment.IsLinux)\n}", "func (suite *PouchDaemonSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, environment.IsLinux)\n}", "func SetupTestContext(t *testing.T) func() {\n\t_, done := SetupTestContextWithContext(t)\n\treturn done\n}", "func (as *Action) SetupTest() {\n\tas.App.SessionStore = newSessionStore()\n\ts, _ := as.App.SessionStore.New(nil, as.App.SessionName)\n\tas.Session = &buffalo.Session{\n\t\tSession: s,\n\t}\n\n\tif as.Model != nil {\n\t\tas.Model.SetupTest()\n\t}\n\tas.csrf = csrf.New\n\tcsrf.New = func(next buffalo.Handler) buffalo.Handler {\n\t\treturn func(c buffalo.Context) error {\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func (suite *Suite[Env]) SetupSuite() {\n\tskipDelete, _ := runner.GetProfile().ParamStore().GetBoolWithDefault(parameters.SkipDeleteOnFailure, false)\n\tif skipDelete {\n\t\tsuite.params.SkipDeleteOnFailure = true\n\t}\n\n\tsuite.Require().NotEmptyf(suite.params.StackName, \"The stack name is empty. 
You must define it with WithName\")\n\t// Check if the Env type is correct otherwise raises an error before creating the env.\n\terr := client.CheckEnvStructValid[Env]()\n\tsuite.Require().NoError(err)\n}", "func (f *VRFTest) Setup() error {\n\tchainlinkClients, err := environment.GetChainlinkClients(f.Environment)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnodeAddresses, err := actions.ChainlinkNodeAddresses(chainlinkClients)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadapter, err := environment.GetExternalAdapter(f.Environment)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.chainlinkClients = chainlinkClients\n\tf.nodeAddresses = nodeAddresses\n\tf.adapter = adapter\n\treturn f.deployContracts()\n}", "func TestSet(t *testing.T) {\n\ttests := []struct {\n\t\tkey string\n\t\tvalue string\n\t}{\n\t\t{\"KEY_0\", \"Abc\"},\n\t\t{\"KEY_1\", \"Def\"},\n\t}\n\n\t// Test the method.\n\tos.Clearenv()\n\tfor _, test := range tests {\n\t\tif err := Set(test.key, test.value); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\t// Call methods from os module to get test\n\t// variables from the environment.\n\tfor _, test := range tests {\n\t\tif v := os.Getenv(test.key); v != test.value {\n\t\t\tt.Errorf(\"expected `%s` but `%s`\", test.value, v)\n\t\t}\n\t}\n}", "func (s *ReformSuite) SetupTest() {\n\tif testing.Short() {\n\t\ts.T().Skip(\"skipping in short mode\")\n\t}\n\n\tpl := reform.NewPrintfLogger(s.T().Logf)\n\tpl.LogTypes = true\n\tDB.Logger = pl\n\tDB.Querier = DB.WithTag(\"test:%s\", s.T().Name())\n\n\tcheckForeignKeys(s.T(), DB.Querier)\n\n\ttx, err := DB.Begin()\n\ts.Require().NoError(err)\n\ts.tx = tx\n\ts.q = tx.Querier\n}", "func (suite *BaseSuite) SetupSuite() {\n\tif err := godotenv.Load(); err != nil {\n\t\tsuite.T().Log(err)\n\t}\n\n\tsetFromEnv := func(key string, target *string) {\n\t\tv := os.Getenv(key)\n\t\tif v == \"\" {\n\t\t\tsuite.FailNowf(\"missing environment variable\", \"%q required for integration tests.\", key)\n\t\t}\n\n\t\t*target = 
v\n\t}\n\n\tsetFromEnv(\"AZURE_TENANT_ID\", &suite.TenantID)\n\tsetFromEnv(\"AZURE_SUBSCRIPTION_ID\", &suite.SubscriptionID)\n\tsetFromEnv(\"AZURE_CLIENT_ID\", &suite.ClientID)\n\tsetFromEnv(\"AZURE_CLIENT_SECRET\", &suite.ClientSecret)\n\tsetFromEnv(\"SERVICEBUS_CONNECTION_STRING\", &suite.ConnStr)\n\tsetFromEnv(\"TEST_SERVICEBUS_RESOURCE_GROUP\", &suite.ResourceGroup)\n\n\t// TODO: automatically infer the location from the resource group, if it's not specified.\n\t// https://github.com/Azure/azure-service-bus-go/issues/40\n\tsetFromEnv(\"TEST_SERVICEBUS_LOCATION\", &suite.Location)\n\n\tparsed, err := conn.ParsedConnectionFromStr(suite.ConnStr)\n\tif !suite.NoError(err) {\n\t\tsuite.FailNowf(\"connection string could not be parsed\", \"Connection String: %q\", suite.ConnStr)\n\t}\n\tsuite.Namespace = parsed.Namespace\n\tsuite.Token = suite.servicePrincipalToken()\n\tsuite.Environment = azure.PublicCloud\n\tsuite.TagID = randomString(\"tag\", 10)\n\n\tif !suite.NoError(suite.ensureProvisioned(sbmgmt.SkuTierStandard)) {\n\t\tsuite.FailNow(\"failed to ensure provisioned\")\n\t}\n}", "func TestEnviron(t *testing.T) {\n\ttests := map[string]string{\n\t\t\"KEY_0\": \"Abc\",\n\t\t\"KEY_1\": \"Def\",\n\t}\n\n\t// Set test data.\n\tos.Clearenv()\n\tfor key, value := range tests {\n\t\tif err := os.Setenv(key, value); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\t// Test function.\n\tfor i, str := range Environ() {\n\t\ttmp := strings.Split(str, \"=\")\n\t\tkey, value := tmp[0], tmp[1]\n\t\tif v, ok := tests[key]; v != value || !ok {\n\t\t\tif !ok {\n\t\t\t\tt.Errorf(\"test %v. extra key`%v`\", i, key)\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"test %v. 
expected `%v` but `%v`\", i, v, value)\n\t\t\t}\n\t\t}\n\t}\n}", "func (suite *TenantTestSuite) SetupTest() {\n\n\tlog.SetOutput(ioutil.Discard)\n\n\t// Connect to mongo testdb\n\tsession, _ := mongo.OpenSession(suite.cfg.MongoDB)\n\n\t// Add authentication token to mongo testdb\n\tseedAuth := bson.M{\"api_key\": \"S3CR3T\"}\n\t_ = mongo.Insert(session, suite.cfg.MongoDB.Db, \"authentication\", seedAuth)\n\n\t// seed mongo\n\tsession, err := mgo.Dial(suite.cfg.MongoDB.Host)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\tc := session.DB(suite.cfg.MongoDB.Db).C(\"roles\")\n\tc.Insert(\n\t\tbson.M{\n\t\t\t\"resource\": \"tenants.list\",\n\t\t\t\"roles\": []string{\"super_admin\"},\n\t\t})\n\tc.Insert(\n\t\tbson.M{\n\t\t\t\"resource\": \"tenants.get\",\n\t\t\t\"roles\": []string{\"super_admin\"},\n\t\t})\n\tc.Insert(\n\t\tbson.M{\n\t\t\t\"resource\": \"tenants.create\",\n\t\t\t\"roles\": []string{\"super_admin\"},\n\t\t})\n\tc.Insert(\n\t\tbson.M{\n\t\t\t\"resource\": \"tenants.delete\",\n\t\t\t\"roles\": []string{\"super_admin\"},\n\t\t})\n\tc.Insert(\n\t\tbson.M{\n\t\t\t\"resource\": \"tenants.update\",\n\t\t\t\"roles\": []string{\"super_admin\"},\n\t\t})\n\n\t// seed first tenant\n\tc = session.DB(suite.cfg.MongoDB.Db).C(\"tenants\")\n\tc.Insert(bson.M{\n\t\t\"id\": \"6ac7d684-1f8e-4a02-a502-720e8f11e50b\",\n\t\t\"info\": bson.M{\n\t\t\t\"name\": \"AVENGERS\",\n\t\t\t\"email\": \"email@something\",\n\t\t\t\"website\": \"www.avengers.com\",\n\t\t\t\"created\": \"2015-10-20 02:08:04\",\n\t\t\t\"updated\": \"2015-10-20 02:08:04\"},\n\t\t\"db_conf\": []bson.M{\n\t\t\tbson.M{\n\t\t\t\t\"store\": \"ar\",\n\t\t\t\t\"server\": \"a.mongodb.org\",\n\t\t\t\t\"port\": 27017,\n\t\t\t\t\"database\": \"ar_db\",\n\t\t\t\t\"username\": \"admin\",\n\t\t\t\t\"password\": \"3NCRYPT3D\"},\n\t\t\tbson.M{\n\t\t\t\t\"store\": \"status\",\n\t\t\t\t\"server\": \"b.mongodb.org\",\n\t\t\t\t\"port\": 27017,\n\t\t\t\t\"database\": \"status_db\",\n\t\t\t\t\"username\": 
\"admin\",\n\t\t\t\t\"password\": \"3NCRYPT3D\"},\n\t\t},\n\t\t\"users\": []bson.M{\n\t\t\tbson.M{\n\t\t\t\t\"name\": \"cap\",\n\t\t\t\t\"email\": \"[email protected]\",\n\t\t\t\t\"api_key\": \"C4PK3Y\"},\n\t\t\tbson.M{\n\t\t\t\t\"name\": \"thor\",\n\t\t\t\t\"email\": \"[email protected]\",\n\t\t\t\t\"api_key\": \"TH0RK3Y\"},\n\t\t}})\n\n\t// seed second tenant\n\tc.Insert(bson.M{\n\t\t\"id\": \"6ac7d684-1f8e-4a02-a502-720e8f11e50c\",\n\t\t\"info\": bson.M{\n\t\t\t\"name\": \"GUARDIANS\",\n\t\t\t\"email\": \"email@something2\",\n\t\t\t\"website\": \"www.gotg.com\",\n\t\t\t\"created\": \"2015-10-20 02:08:04\",\n\t\t\t\"updated\": \"2015-10-20 02:08:04\"},\n\t\t\"db_conf\": []bson.M{\n\t\t\tbson.M{\n\t\t\t\t\"store\": \"ar\",\n\t\t\t\t\"server\": \"a.mongodb.org\",\n\t\t\t\t\"port\": 27017,\n\t\t\t\t\"database\": \"ar_db\",\n\t\t\t\t\"username\": \"admin\",\n\t\t\t\t\"password\": \"3NCRYPT3D\"},\n\t\t\tbson.M{\n\t\t\t\t\"store\": \"status\",\n\t\t\t\t\"server\": \"b.mongodb.org\",\n\t\t\t\t\"port\": 27017,\n\t\t\t\t\"database\": \"status_db\",\n\t\t\t\t\"username\": \"admin\",\n\t\t\t\t\"password\": \"3NCRYPT3D\"},\n\t\t},\n\t\t\"users\": []bson.M{\n\t\t\tbson.M{\n\t\t\t\t\"name\": \"groot\",\n\t\t\t\t\"email\": \"[email protected]\",\n\t\t\t\t\"api_key\": \"GR00TK3Y\"},\n\t\t\tbson.M{\n\t\t\t\t\"name\": \"starlord\",\n\t\t\t\t\"email\": \"[email protected]\",\n\t\t\t\t\"api_key\": \"ST4RL0RDK3Y\"},\n\t\t}})\n}", "func (suite *HealthSuite) SetupTest() {\n\tsuite.Response = httptest.NewRecorder()\n}", "func (s *TrackerSuite) SetupTest() {\n\ts.service = NewTracker()\n\tassert.NotEqual(s.T(), nil, s.service)\n}", "func (suite *PouchHelpSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, IsLinux)\n}", "func (s *OssDownloadSuite) SetUpTest(c *C) {\n\terr := removeTempFiles(\"../oss\", \".jpg\")\n\tc.Assert(err, IsNil)\n}", "func SetUp() TestComponents {\n\tcfg, err := config.LoadTest()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to load config: %v\\n\", err)\n\t}\n\n\tdb, 
err := database.SetUp(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to connect to database: %v\\n\", err)\n\t}\n\n\tjwtManager, err := auth.NewJWTManager(cfg.SecretKey, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create JWT manager: %v\\n\", err)\n\t}\n\n\ts := services.SetUp(db, jwtManager)\n\tr := router.SetUp(s, cfg)\n\n\treturn TestComponents{\n\t\tDB: db,\n\t\tServices: s,\n\t\tRouter: r,\n\t}\n}", "func SetupTest(content TestContent) (tmpDir string, cleanup func(), err error) {\n\t// Create a temporary dir with a go package to test.\n\ttmpDir, err = ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn\n\t}\n\tcleanup = func() {\n\t\tutil.RemoveAll(tmpDir)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tcleanup()\n\t\t\tcleanup = nil\n\t\t}\n\t}()\n\tpkgPath := filepath.Join(tmpDir, \"go\", \"test2json_test\")\n\terr = os.MkdirAll(pkgPath, os.ModePerm)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(filepath.Join(pkgPath, \"test2json_test.go\"), []byte(content), os.ModePerm)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Make go modules happy.\n\tctx := context.Background()\n\tvar goBin string\n\tgoBin, err = golang.FindGo()\n\tif err != nil {\n\t\terr = skerr.Wrap(err)\n\t\treturn\n\t}\n\t_, err = exec.RunCwd(ctx, tmpDir, goBin, \"mod\", \"init\", \"fake.com/test2json_test\")\n\treturn\n}", "func (suite *TestManagerSuite) SetupTest() {\n\trp := &scan.Report{\n\t\tDigest: \"d1000\",\n\t\tRegistrationUUID: \"ruuid\",\n\t\tMimeType: v1.MimeTypeNativeReport,\n\t\tTrackID: \"tid001\",\n\t}\n\n\tuuid, err := suite.m.Create(rp)\n\trequire.NoError(suite.T(), err)\n\trequire.NotEmpty(suite.T(), uuid)\n\tsuite.rpUUID = uuid\n}", "func setupEnv(t *testing.T, cwd string) func() {\n\tt.Helper()\n\n\tdir, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create a temp dir: %s\", err)\n\t}\n\tl.Printf(\"setupEnv: created a temp dir %s\\n\", dir)\n\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"failed 
to get current dir: %s\", err)\n\t}\n\n\terr = fileutil.Copy(filepath.Join(dir, deptfile.FileName), filepath.Join(cwd, deptfile.FileName))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open and read testdata/gotool.mod: %s\", err)\n\t}\n\terr = fileutil.Copy(filepath.Join(dir, deptfile.FileSumName), filepath.Join(cwd, deptfile.FileSumName))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open and read testdata/gotool.mod: %s\", err)\n\t}\n\n\t_ = os.Chdir(dir)\n\treturn func() {\n\t\t_ = os.Chdir(pwd)\n\t\tos.RemoveAll(dir)\n\t}\n}", "func init() {\n\tsetUpConfig()\n\tsetUpUsingEnv()\n}", "func PrepareTests() (*pop.Connection, Env, error) {\n\n\tvar err error\n\tvar db *pop.Connection\n\tvar migrator pop.FileMigrator\n\n\tenv := make(map[string]string)\n\tenv[\"migrations\"] = os.Getenv(\"migrations_path\")\n\tenv[\"targetdb\"] = os.Getenv(\"database\")\n\tenv[\"pwd\"], err = os.Getwd()\n\t//env[\"new_key\"] = \"\" for every new env to be taken into consideration\n\n\tif err != nil {\n\t\treturn nil, env, err\n\t}\n\n\tdb, err = pop.Connect(env[\"targetdb\"])\n\n\tmigrator, err = pop.NewFileMigrator(env[\"migrations\"], db)\n\tif err != nil {\n\t\treturn nil, env, err\n\t}\n\n\terr = migrator.Reset()\n\tif err != nil {\n\t\treturn nil, env, err\n\t}\n\n\terr = migrator.Status()\n\tif err != nil {\n\t\treturn nil, env, err\n\t}\n\n\treturn db, env, err\n}", "func (suite *KeeperTestSuite) SetupTest() {\n\tsuite.coordinator = tibctesting.NewCoordinator(suite.T(), 2)\n\tsuite.chainA = suite.coordinator.GetChain(tibctesting.GetChainID(0))\n\tsuite.chainB = suite.coordinator.GetChain(tibctesting.GetChainID(1))\n\t// commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)\n\tsuite.coordinator.CommitNBlocks(suite.chainA, 2)\n\tsuite.coordinator.CommitNBlocks(suite.chainB, 2)\n}", "func (suite *EventStoreTestSuite) SetupTest() {\n\tawsConfig := &aws.Config{\n\t\tRegion: aws.String(\"us-west-2\"),\n\t\tEndpoint: 
aws.String(\"http://localhost:8000\"),\n\t\t// Endpoint: aws.String(os.Getenv(\"DYNAMODB_HOST\")),\n\t}\n\n\tawsSession, err := session.NewSession(awsConfig)\n\tassert.Nil(suite.T(), err, \"there should be no error\")\n\n\tsuite.store, err = NewEventStore(\n\t\t\"test\",\n\t\tWithDynamoDB(awsSession),\n\t)\n\tassert.Nil(suite.T(), err, \"there should be no error\")\n\tassert.NotNil(suite.T(), suite.store, \"there should be a store\")\n\n\tassert.Nil(suite.T(), suite.store.CreateTable(context.Background()), \"could not create table\")\n\n\tsuite.ctx = eh.NewContextWithNamespace(context.Background(), \"ns\")\n\tassert.Nil(suite.T(), suite.store.CreateTable(suite.ctx), \"could not create table\")\n}", "func TestMain(m *testing.M) {\n\tefiVarDir, cleanup, err := vartest.SetupVarZip(\"../testdata/sys_fw_efi_vars.zip\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer cleanup()\n\tuefivars.EfiVarDir = efiVarDir\n\tos.Exit(m.Run())\n}", "func (suite *KeeperTestSuite) SetupTest() {\n\tsuite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)\n\tsuite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))\n\tsuite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))\n\t// commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)\n\tsuite.coordinator.CommitNBlocks(suite.chainA, 2)\n\tsuite.coordinator.CommitNBlocks(suite.chainB, 2)\n}", "func TestEnvironment(t *testing.T) {\n\tvariables := []string{\n\t\t\"MONGO_URI\",\n\t\t\"SECRET_KEY\",\n\t\t\"MAGIC_LINK\",\n\t\t\"ROOT_LINK\",\n\t\t\"GITHUB_API\",\n\t\t\"GITHUB_OAUTH\",\n\t\t\"GITHUB_ORG\",\n\t\t\"DEBRICKED_API\",\n\t\t\"DEBRICKED_USER\",\n\t\t\"DEBRICKED_PASS\",\n\t}\n\tfor e := range variables {\n\t\tif _, present := os.LookupEnv(variables[e]); !present {\n\t\t\tt.Errorf(\"Expected environment variable %s to be set\", variables[e])\n\t\t}\n\t}\n}", "func (suite *ControllerTestSuite) SetupTest() {\n\tsuite.mMgr = &scannertesting.Manager{}\n\tsuite.mMeta = 
&metadatatesting.Manager{}\n\n\tm := &v1.ScannerAdapterMetadata{\n\t\tScanner: &v1.Scanner{\n\t\t\tName: \"Trivy\",\n\t\t\tVendor: \"Harbor\",\n\t\t\tVersion: \"0.1.0\",\n\t\t},\n\t\tCapabilities: []*v1.ScannerCapability{{\n\t\t\tConsumesMimeTypes: []string{\n\t\t\t\tv1.MimeTypeOCIArtifact,\n\t\t\t\tv1.MimeTypeDockerArtifact,\n\t\t\t},\n\t\t\tProducesMimeTypes: []string{\n\t\t\t\tv1.MimeTypeNativeReport,\n\t\t\t\tv1.MimeTypeRawReport,\n\t\t\t\tv1.MimeTypeGenericVulnerabilityReport,\n\t\t\t},\n\t\t}},\n\t\tProperties: v1.ScannerProperties{\n\t\t\t\"extra\": \"testing\",\n\t\t},\n\t}\n\n\tsuite.sample = &scanner.Registration{\n\t\tName: \"forUT\",\n\t\tDescription: \"sample registration\",\n\t\tURL: \"https://sample.scanner.com\",\n\t}\n\n\tmc := &v1testing.Client{}\n\tmc.On(\"GetMetadata\").Return(m, nil)\n\n\tmcp := &v1testing.ClientPool{}\n\tmocktesting.OnAnything(mcp, \"Get\").Return(mc, nil)\n\tsuite.c = &basicController{\n\t\tmanager: suite.mMgr,\n\t\tproMetaMgr: suite.mMeta,\n\t\tclientPool: mcp,\n\t}\n}", "func initEnv() {\n\terr := os.RemoveAll(TestDataDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func setEnvironmentVariables(t *testing.T, vars map[string]string) {\n\tfor k, v := range vars {\n\t\tt.Setenv(k, v)\n\t}\n}", "func (c *Config) setupEnvironment() {\n\tos.Setenv(constants.BundleDirEnvVar, c.BundleDir)\n\n\tos.Setenv(\"JAVA_HOME\", filepath.Join(c.BundleDir, \"jre\"))\n\t// set the environment variables for gopsutil based on configured values\n\tos.Setenv(hostfs.HostProcVar, c.ProcPath)\n\tos.Setenv(hostfs.HostEtcVar, c.EtcPath)\n\tos.Setenv(hostfs.HostVarVar, c.VarPath)\n\tos.Setenv(hostfs.HostRunVar, c.RunPath)\n\tos.Setenv(hostfs.HostSysVar, c.SysPath)\n}", "func (a *AWSTestSuite) SetupTest() {\n\tapi := testlib.NewAWSMockedAPI(a.ctrl)\n\ta.Mocks = &Mocks{\n\t\tAPI: api,\n\t\tAWS: &Client{\n\t\t\tservice: &Service{\n\t\t\t\trds: api.RDS,\n\t\t\t\tec2: api.EC2,\n\t\t\t\tiam: api.IAM,\n\t\t\t\tacm: api.ACM,\n\t\t\t\ts3: 
api.S3,\n\t\t\t\troute53: api.Route53,\n\t\t\t\tsecretsManager: api.SecretsManager,\n\t\t\t\tresourceGroupsTagging: api.ResourceGroupsTagging,\n\t\t\t\tkms: api.KMS,\n\t\t\t\tsts: api.STS,\n\t\t\t\tdynamodb: api.DynamoDB,\n\t\t\t},\n\t\t\tcache: newClientDummyCache(),\n\t\t\tconfig: &aws.Config{},\n\t\t\tmux: &sync.Mutex{},\n\t\t},\n\t\tLog: testlib.NewMockedFieldLogger(a.ctrl),\n\t\tModel: testlib.NewModelMockedAPI(a.ctrl),\n\t}\n}", "func (suite *ScannerAPITestSuite) SetupTest() {\n\tsuite.originC = sc.DefaultController\n\tm := &MockScannerAPIController{}\n\tsc.DefaultController = m\n\n\tsuite.mockC = m\n}", "func (s *OneTestOneDBSuite) SetupSuite() {\n\ts.Helper = NewHelper(s.TT, s.DBName, s.DropDBBeforeCreate)\n\ts.Env = biz.NewEnv(s.Modules...)\n\ts.Env.Boot()\n}", "func (s *backendSuite) setUpTest(c *C) {\n\ts.controller = gomock.NewController(c)\n\ts.mockBackend = mock.NewMockBackend(s.controller)\n\ts.backend = kv.MakeBackend(s.mockBackend)\n}", "func (s *TestSuite) SetupSuite() {\n\t// for randomness\n\tgofakeit.Seed(time.Now().UnixNano())\n\tportPrefix := os.Getenv(\"E2E_TEST_PORT_PREFIX\")\n\n\tif portPrefix == \"\" {\n\t\tfmt.Println(\"E2E_TEST_PORT_PREFIX is not set. 
default to 40\")\n\t\tportPrefix = \"40\"\n\t}\n\n\tos.Setenv(\"DB_HOST\", \"localhost\")\n\tos.Setenv(\"DB_PORT\", fmt.Sprintf(\"%s32\", portPrefix))\n\tos.Setenv(\"DB_USERNAME\", \"clipo\")\n\tos.Setenv(\"DB_PASSWORD\", \"s3cr3t_1\")\n\tos.Setenv(\"DB_NAME\", \"appdb\")\n\tos.Setenv(\"DB_SSLMODE\", \"disable\")\n\n\tos.Setenv(\"SMTP_HOST\", \"localhost\")\n\tos.Setenv(\"SMTP_PORT\", fmt.Sprintf(\"%s13\", portPrefix))\n\tos.Setenv(\"SMTP_FROM\", \"test@localhost\")\n\n\tdbURL := os.Getenv(\"DB_URL\")\n\n\tif dbURL == \"\" {\n\t\tdbURL = fmt.Sprintf(\"postgres://clipo:s3cr3t_1@localhost:%s32/appdb?sslmode=disable\", portPrefix)\n\t}\n\tdb, err := sql.Open(\"postgres\", dbURL)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to connect to DB %v\", err)\n\t\tpanic(err)\n\t}\n\n\ts.db = db\n\n\tapp, err := cmd.BuildApp()\n\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to create server %w\", err))\n\t}\n\tserver := httptest.NewServer(app.Handler)\n\n\tclipoURL := server.URL\n\tmailURL := fmt.Sprintf(\"http://localhost:%s12\", portPrefix)\n\n\ts.app = app\n\ts.server = server\n\ts.ClipoURL = clipoURL\n\ts.EmailClient = testutil.NewEmailClient(mailURL)\n}", "func TestSetEnv(t *testing.T, key string, value string) func() {\n\tt.Helper()\n\toriginalValue := os.Getenv(key)\n\tos.Setenv(key, value)\n\treturn func() { os.Setenv(key, originalValue) }\n}", "func (amqpSuite *AmqpSuite) SetupTest() {\n\tamqpSuite.T().Cleanup(func() {\n\t\t// Flushing Stdout may help with race conditions at the end of a test.\n\t\tos.Stdout.Sync()\n\t})\n}", "func (suite *APIContainerExecStartSuite) SetUpTest(c *check.C) {\n\tSkipIfFalse(c, environment.IsLinux)\n\tPullImage(c, busyboxImage)\n}", "func SetupTestOSContext(t *testing.T) func() {\n\tc := SetupTestOSContextEx(t)\n\treturn func() { c.Cleanup(t) }\n}", "func TestMain(t *testing.M) {\n\tapiKey := flag.String(\"api-key\", \"\", \"The API key you want to serve\")\n\tflag.Parse()\n\tif *apiKey == \"\" {\n\t\tfmt.Println(\"No API key. 
Aborting...\")\n\t\tos.Exit(1)\n\t}\n\tos.Setenv(\"APIKEY\", *apiKey)\n\tos.Exit(t.Run())\n}", "func setUp(ctx context.Context, env *TestEnv, isGuest bool) error {\n\t// The directories names that are created during the test and deleted at the end of the test.\n\tenv.CreatedDirectories = append(env.CreatedDirectories, \"/var/cache/shill\", \"/run/shill\", \"/run/state/logged-in\", \"/run/dhcpcd\", \"/var/lib/dhcpcd\")\n\n\t// Stop shill temporarily.\n\tif err := upstart.StopJob(ctx, \"shill\"); err != nil {\n\t\treturn errors.Wrap(err, \"failed stopping shill\")\n\t}\n\n\tvar user, userType string\n\tif isGuest {\n\t\tuser = cryptohome.GuestUser\n\t\tuserType = \"guest\"\n\t} else {\n\t\tuser = FakeUser\n\t\tuserType = \"fake\"\n\t}\n\n\tuserHash, err := cryptohome.UserHash(ctx, user)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed getting the user hash for the %s user\", userType)\n\t}\n\n\tenv.ShillUserProfileDir = filepath.Join(DaemonStoreBase, userHash)\n\n\tif err := eraseState(ctx, env); err != nil {\n\t\ttesting.ContextLog(ctx, errors.Wrap(err, \"failed erasing the system state\"))\n\t}\n\n\tenv.ShillUserProfile = filepath.Join(env.ShillUserProfileDir, \"shill.profile\")\n\n\treturn nil\n}", "func (h *H) Setup() {\n\tvar err error\n\th.restConfig, err = clientcmd.RESTConfigFromKubeConfig(h.Kubeconfig.Contents)\n\tExpect(err).ShouldNot(HaveOccurred(), \"failed to configure client\")\n\n\tif len(h.InstalledWorkloads) < 1 {\n\t\th.InstalledWorkloads = make(map[string]string)\n\t}\n\n\t// setup project to run tests\n\tsuffix := randomStr(5)\n\tproj, err := h.createProject(suffix)\n\tExpect(err).ShouldNot(HaveOccurred(), \"failed to create project\")\n\tExpect(proj).ShouldNot(BeNil())\n\n\th.proj = proj\n}", "func (suite *LegacyTestSuite) SetupTest() {\n\tsuite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)\n\tsuite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))\n\tsuite.chainB = 
suite.coordinator.GetChain(ibctesting.GetChainID(1))\n\t// commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)\n\tsuite.coordinator.CommitNBlocks(suite.chainA, 2)\n\tsuite.coordinator.CommitNBlocks(suite.chainB, 2)\n}", "func TestEnvironmentSet(t *testing.T) {\n\tport := make(chan int, 1)\n\tdefer createTestServer(port, t).Close()\n\taddr := <-port\n\tresp, err := http.Get(fmt.Sprintf(\"http://localhost:%d/env/set\", addr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"Wrong env.set status code. Expected '200' but got '%d'\", resp.StatusCode)\n\t}\n}", "func setupEnv(args *execdriver.InitArgs) error {\n\t// Get env\n\tvar env []string\n\tcontent, err := ioutil.ReadFile(\".dockerenv\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to load environment variables: %v\", err)\n\t}\n\tif err := json.Unmarshal(content, &env); err != nil {\n\t\treturn fmt.Errorf(\"Unable to unmarshal environment variables: %v\", err)\n\t}\n\t// Propagate the plugin-specific container env variable\n\tenv = append(env, \"container=\"+os.Getenv(\"container\"))\n\n\targs.Env = env\n\n\tos.Clearenv()\n\tfor _, kv := range args.Env {\n\t\tparts := strings.SplitN(kv, \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = append(parts, \"\")\n\t\t}\n\t\tos.Setenv(parts[0], parts[1])\n\t}\n\n\treturn nil\n}" ]
[ "0.7796102", "0.7715769", "0.73810494", "0.73787904", "0.73571074", "0.73106945", "0.7277659", "0.7103508", "0.70789695", "0.7066052", "0.7039628", "0.7036749", "0.69876665", "0.6954473", "0.6899937", "0.68667054", "0.68244296", "0.67213666", "0.670484", "0.66903955", "0.6676785", "0.6662372", "0.6654746", "0.66488844", "0.66325647", "0.66272056", "0.66071004", "0.6594", "0.6560299", "0.6555187", "0.6530923", "0.65255374", "0.65088946", "0.64933187", "0.6472237", "0.6470322", "0.6465111", "0.6462629", "0.6446634", "0.64392", "0.6429688", "0.6426231", "0.64257944", "0.64168626", "0.6405859", "0.6402499", "0.6400709", "0.6398718", "0.6378131", "0.6367372", "0.6356647", "0.63520235", "0.63502973", "0.6349851", "0.6326264", "0.63212365", "0.63212365", "0.6286975", "0.62822646", "0.6269936", "0.6268553", "0.62597007", "0.62592334", "0.6250285", "0.6242573", "0.62389266", "0.62329626", "0.6226685", "0.6222841", "0.6208331", "0.6198961", "0.6197376", "0.61748725", "0.6174028", "0.61713624", "0.61519736", "0.61468714", "0.6140761", "0.6131387", "0.6130768", "0.6117929", "0.61095566", "0.6107154", "0.61068803", "0.6105786", "0.61015695", "0.6097973", "0.6092471", "0.6089388", "0.6082225", "0.6074948", "0.60733855", "0.6071951", "0.6050532", "0.6044805", "0.6043378", "0.6041025", "0.60384417", "0.60377", "0.60332596" ]
0.79195976
0
TearDownTest clears all environment variables.
func (s AWSTestSuite) TearDownTest() { os.Clearenv() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (testEnv *TestEnv) TearDownTestEnv() {\n\ttestEnv.Server.Close()\n\ttestEnv.Server = nil\n\ttestEnv.Mux = nil\n\ttestEnv.Client = nil\n}", "func (envManager *TestEnvManager) TearDown() {\n\tif envManager.skipCleanup {\n\t\tlog.Println(\"Dev mode (--skip_cleanup), skipping cleanup\")\n\t\treturn\n\t}\n\n\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\tif alive, err := comp.IsAlive(); err != nil {\n\t\t\tlog.Printf(\"Failed to check if componment %s is alive: %s\", comp.GetName(), err)\n\t\t} else if alive {\n\t\t\tif err = comp.Stop(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to stop componment %s: %s\", comp.GetName(), err)\n\t\t\t}\n\t\t}\n\t}\n\t_ = envManager.testEnv.Cleanup()\n}", "func (env *LocalTestEnv) TearDown() error {\n\treturn os.RemoveAll(env.TmpPath)\n}", "func (atb *adminXLTestBed) TearDown() {\n\tos.RemoveAll(atb.configPath)\n\tremoveRoots(atb.xlDirs)\n\tresetTestGlobals()\n}", "func (env *TestEnv) Cleanup() {\n\tenv.Txmgr.Shutdown()\n\tenv.DBEnv.Cleanup()\n\tenv.TStoreEnv.Cleanup()\n}", "func (setup *SimpleTestSetup) TearDown() {\n\tsetup.harnessPool.DisposeAll()\n\tsetup.harnessWalletPool.DisposeAll()\n\t//setup.nodeGoBuilder.Dispose()\n\tsetup.WorkingDir.Dispose()\n}", "func initEnv() {\n\terr := os.RemoveAll(TestDataDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (s *UnitTestSuite) TearDownSuite() {\n\t// summon the GC!\n\ts.config = nil\n\ts.Application = nil\n\ts.FakeUserSignupClient = nil\n\ts.FakeMasterUserRecordClient = nil\n\ts.FakeBannedUserClient = nil\n\ts.FakeToolchainStatusClient = nil\n}", "func (suite *TestManagerSuite) TearDownTest() {\n\t// No delete method defined in manager as no requirement,\n\t// so, to clear env, call dao method here\n\terr := scan.DeleteReport(suite.rpUUID)\n\trequire.NoError(suite.T(), err)\n}", "func (t *SubprocessTest) TearDown() {\n\terr := t.destroy()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (ts *TestSetup) Cleanup() {\n\tif ts.Server != nil 
{\n\t\tts.Server.Stop()\n\t}\n\tif ts.NC != nil {\n\t\tts.NC.Close()\n\t}\n\tif ts.GNATSD != nil {\n\t\tts.GNATSD.Shutdown()\n\t}\n\n\tif ts.SystemUserCredsFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n\n\tif ts.SystemAccountJWTFile != \"\" {\n\t\tos.Remove(ts.SystemAccountJWTFile)\n\t}\n\n\tif ts.OperatorJWTFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n}", "func (suite *OauthTestSuite) TearDownTest() {\n\t// Scopes are static, populated from fixtures,\n\t// so there is no need to clear them after running a test\n\tsuite.db.Unscoped().Delete(new(AuthorizationCode))\n\tsuite.db.Unscoped().Delete(new(RefreshToken))\n\tsuite.db.Unscoped().Delete(new(AccessToken))\n\tsuite.db.Unscoped().Not(\"id\", []int64{1, 2}).Delete(new(User))\n\tsuite.db.Unscoped().Not(\"id\", []int64{1, 2, 3}).Delete(new(Client))\n}", "func TeardownEnvironment() {\n\tos.Setenv(\"TRELLO_KEY\", \"\")\n\tos.Setenv(\"TRELLO_TOKEN\", \"\")\n\tos.Setenv(\"TRELLO_NEXT_ACTIONS_LIST_ID\", \"\")\n\tos.Setenv(\"TRELLO_PROJECTS_LIST_ID\", \"\")\n}", "func (s *CliHttpTestSuite) TearDownSuite() {\n\tcmd := exec.Command(\"rm\", \"-f\", testFileName)\n\terr := cmd.Run()\n\ts.NoError(err)\n}", "func (s *OssDownloadSuite) TearDownTest(c *C) {\n\terr := removeTempFiles(\"../oss\", \".jpg\")\n\tc.Assert(err, IsNil)\n\n\terr = removeTempFiles(\"../oss\", \".temp\")\n\tc.Assert(err, IsNil)\n}", "func (t *TestFramework) Teardown() {\n\t// Global deinitialization for the whole framework goes in here\n\n\t// Teardown the actual test suite\n\tgomega.Expect(t.teardown(t)).To(gomega.Succeed())\n}", "func (suite *PouchStartSuite) TearDownTest(c *check.C) {\n\tc.Assert(environment.PruneAllContainers(apiClient), check.IsNil)\n}", "func (it *integTestSuite) TearDownSuite(c *C) {\n\t// stop all data nodes and brokers\n\tit.stopAllNodes(c, \"TearDownSuite\")\n\tit.dnodes = nil\n\tit.brokers = nil\n\n\ttime.Sleep(time.Millisecond * 100)\n\n\t// cleanup directories\n\tfor i := 0; i < it.numNodes; i++ 
{\n\t\tos.RemoveAll(fmt.Sprintf(dbpathFormat, i))\n\t}\n\n\tmeta.DestroyClusterState(it.cfg, meta.ClusterTypeTstore)\n\tmeta.DestroyClusterState(it.cfg, meta.ClusterTypeKstore)\n}", "func (s *OneTestOneDBSuite) TearDownSuite() {\n\ts.Env.Close()\n\ts.Helper.Close(s.TT, s.DropDBAfterTest)\n}", "func TearDownSuite(suiteCtx *types.SuiteContext) {\n\tBy(\"tearing down the test environment\")\n\n\tselenium.RemoveSeleniumIfNeeded(suiteCtx)\n\n\terr := suiteCtx.TestEnv.Stop()\n\tExpect(err).ToNot(HaveOccurred())\n}", "func TestTearDown(t *testing.T) {\n\t// remove user in order to not change initial state\n\tdb := NewDBDriver()\n\tdefer db.Close()\n\t//db.Delete(user)\n\tres := db.Exec(\"delete from users where email = ?\", email).Error\n\tassert.Nil(t, res)\n}", "func (test *Test) Teardown() {\n\ttest.cleanup()\n}", "func (suite *iamTestSuite) TearDownSuite() {\n\tvar err error\n\terr = os.Remove(testKeyFile)\n\tsuite.Truef(err == nil || os.IsNotExist(err), \"failed to remove file %s: %v\", testKeyFile, err)\n\terr = os.Remove(testBadKeyFile)\n\tsuite.Truef(err == nil || os.IsNotExist(err), \"failed to remove file %s: %v\", testBadKeyFile, err)\n}", "func (env testEnvironment) Cleanup() error {\n\tif err := env.DeleteProject(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func tearDown(ctx context.Context, env *TestEnv) {\n\t// Stop any shill instances started during testing.\n\tif err := upstart.StopJob(ctx, \"shill\"); err != nil {\n\t\ttesting.ContextLog(ctx, errors.Wrap(err, \"failed stopping shill\"))\n\t}\n\n\tif err := eraseState(ctx, env); err != nil {\n\t\ttesting.ContextLog(ctx, errors.Wrap(err, \"failed erasing the system state\"))\n\t}\n\n\tif err := upstart.RestartJob(ctx, \"shill\"); err != nil {\n\t\ttesting.ContextLog(ctx, errors.Wrap(err, \"failed restarting shill\"))\n\t}\n}", "func (suite *ScannerAPITestSuite) TearDownTest() {\n\t// Restore\n\tsc.DefaultController = suite.originC\n}", "func (tf *TestFixture) TearDown(ctx context.Context, s 
*testing.FixtState) {\n\tif tf.fcm != nil {\n\t\tif err := tf.fcm.Close(ctx); err != nil {\n\t\t\ts.Error(\"Failed to close tunnel to CallboxManager: \", err)\n\t\t}\n\t}\n\n\tif _, err := tf.RemoteCellularClient.TearDown(ctx, &empty.Empty{}); err != nil {\n\t\ts.Error(\"Failed to tear down cellular remote service: \", err)\n\t}\n\n\tif err := tf.rpcClient.Close(ctx); err != nil {\n\t\ts.Error(\"Failed to close DUT RPC client: \", err)\n\t}\n}", "func (suite *DetectorTestSuite) TearDownSuite() {\n\tdefaultCatalog = suite.originalCatalog\n\tcollectorPriorities = suite.originalPriorities\n}", "func (s *BaseTest) TearDownTest(c *check.C) {\n\t// run cleanup handlers and clear the slice\n\tfor _, f := range s.cleanupHandlers {\n\t\tf()\n\t}\n\ts.cleanupHandlers = nil\n}", "func (t *tInfo) teardown() {\n\tt.recorders.close()\n\n\tif t.apiClient != nil {\n\t\tt.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})\n\t\tt.apiClient.Close()\n\t\tt.apiClient = nil\n\t}\n\n\tif t.esClient != nil {\n\t\tt.esClient.Close()\n\t}\n\n\ttestutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)\n\n\tif t.mockCitadelQueryServer != nil {\n\t\tt.mockCitadelQueryServer.Stop()\n\t\tt.mockCitadelQueryServer = nil\n\t}\n\n\tif t.evtsMgr != nil {\n\t\tt.evtsMgr.Stop()\n\t\tt.evtsMgr = nil\n\t}\n\n\tt.evtProxyServices.Stop()\n\n\tif t.apiServer != nil {\n\t\tt.apiServer.Stop()\n\t\tt.apiServer = nil\n\t}\n\n\t// stop certificate server\n\ttestutils.CleanupIntegTLSProvider()\n\n\tif t.mockResolver != nil {\n\t\tt.mockResolver.Stop()\n\t\tt.mockResolver = nil\n\t}\n\n\t// remove the local persistent events store\n\tt.logger.Infof(\"removing events store %s\", t.storeConfig.Dir)\n\tos.RemoveAll(t.storeConfig.Dir)\n\n\tt.logger.Infof(\"completed test\")\n}", "func (s *WriterSuite) TearDownTest() {\n\terr := os.Remove(s.playbook.Location)\n\ts.Nil(err)\n}", "func tearDownGridFSTestData() error {\n\tsessionProvider, err := 
db.NewSessionProvider(*toolOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession, err := sessionProvider.GetSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = session.Database(testDB).Drop(context.Background()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (e *Environment) Teardown() {\n\tif os.Getenv(\"NO_TEARDOWN\") != \"\" {\n\t\tlog.Printf(\"Skipping teardown\")\n\t\treturn\n\t}\n\tlog.Println(\"Tearing down...\")\n\n\tfor name, c := range e.clusters {\n\t\tif err := c.kind.Delete(c.genName, \"\"); err != nil {\n\t\t\te.t.Errorf(\"Delete cluster %q (%q): %s\", name, c.genName, err)\n\t\t} else {\n\t\t\tlog.Printf(\"Deleted cluster %q (%q)\", name, c.genName)\n\t\t}\n\t\tif err := os.Remove(c.kubeConfigPath); err != nil {\n\t\t\te.t.Errorf(\"Failed to delete %q: %s\", c.kubeConfigPath, err)\n\t\t}\n\t}\n}", "func (suite *TenantTestSuite) TearDownTest() {\n\n\tsession, err := mgo.Dial(suite.cfg.MongoDB.Host)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmainDB := session.DB(suite.cfg.MongoDB.Db)\n\n\tcols, err := mainDB.CollectionNames()\n\tfor _, col := range cols {\n\t\tmainDB.C(col).RemoveAll(nil)\n\t}\n\n}", "func (tr *TestRecorder) CleanUp() {\n\ttr.Recorder.CleanUp()\n\ttests.ResetHelpers()\n}", "func teardown(dbPaths []string) int {\n\tfor _, v := range dbPaths {\n\t\terr := os.RemoveAll(v)\n\t\tif err != nil {\n\t\t\terrors.New(\"Remove test db file error\")\n\t\t}\n\t}\n\treturn 0\n}", "func (s *ReformSuite) TearDownTest() {\n\tif s.tx == nil {\n\t\tpanic(s.T().Name() + \": tx is nil\")\n\t}\n\tif s.q == nil {\n\t\tpanic(s.T().Name() + \": q is nil\")\n\t}\n\n\tcheckForeignKeys(s.T(), s.q)\n\ts.Require().NoError(s.tx.Rollback())\n\n\tDB.Logger = nil\n\tDB.Querier = DB.WithTag(\"\")\n}", "func (h *Harness) tearDown() (e error) {\n\tif h.Node != nil {\n\t\th.Node.Shutdown()\n\t}\n\tif e := h.node.shutdown(); E.Chk(e) {\n\t\treturn e\n\t}\n\tif e := os.RemoveAll(h.testNodeDir); E.Chk(e) {\n\t\treturn 
e\n\t}\n\tdelete(testInstances, h.testNodeDir)\n\treturn nil\n}", "func (suite *PouchRunDeviceSuite) TearDownTest(c *check.C) {\n}", "func (suite *baseReplicationTestSuite) TearDownTest() {\n\tsuite.mu.Lock()\n\tdefer suite.mu.Unlock()\n\n\t// stop replicas\n\tfor i, srv := range suite.replicas {\n\t\tif suite.replicasRunning[i] {\n\t\t\tsrv.Shutdown(suite.T())\n\t\t}\n\t}\n\tsuite.replicas = []TestServer{}\n\n\t// stop primary\n\tif suite.primary != nil {\n\t\tsuite.primary.Shutdown(suite.T())\n\t\tsuite.primary = nil\n\t}\n\tsuite.primary = nil\n}", "func (suite *OauthTestSuite) TearDownSuite() {\n\t//\n}", "func (s *HTTPTestSuite) TearDownSuite() {\n\thttpmock.DeactivateAndReset()\n}", "func (s *TestSuite) TearDownSuite() {\n\ts.server.Close()\n\tcleanDB(s.db)\n}", "func (test *Test) TearDown() {\r\n\tif test.schStarted {\r\n\t\ttest.Scheduler.Stop(test.Context)\r\n\t}\r\n\tif test.MasterDB != nil {\r\n\t\ttest.MasterDB.Close()\r\n\t}\r\n}", "func (suite *Suite[Env]) TearDownSuite() {\n\tif runner.GetProfile().AllowDevMode() && suite.params.DevMode {\n\t\treturn\n\t}\n\n\tif suite.firstFailTest != \"\" && suite.params.SkipDeleteOnFailure {\n\t\tsuite.Require().FailNow(fmt.Sprintf(\"%v failed. As SkipDeleteOnFailure feature is enabled the tests after %v were skipped. 
\"+\n\t\t\t\"The environment of %v was kept.\", suite.firstFailTest, suite.firstFailTest, suite.firstFailTest))\n\t\treturn\n\t}\n\n\t// TODO: Implement retry on delete\n\tctx, cancel := context.WithTimeout(context.Background(), deleteTimeout)\n\tdefer cancel()\n\terr := infra.GetStackManager().DeleteStack(ctx, suite.params.StackName)\n\tif err != nil {\n\t\tsuite.T().Errorf(\"unable to delete stack: %s, err :%v\", suite.params.StackName, err)\n\t\tsuite.T().Fail()\n\t}\n}", "func (suite *MetricsTestSuite) TearDownTest() {\n\t//\n}", "func TestClean(t *testing.T) {\n\te := newTestEnvr()\n\t// set env vars to new values\n\tnewValues := []string{\"CAT\", \"DOG\", \"BIRD\"}\n\tfor i, v := range testVars {\n\t\tif err := os.Setenv(v, newValues[i]); err != nil {\n\t\t\tt.Fatalf(\"os.Setenv(%q, %q) err = %s\", v, newValues[i], err)\n\t\t}\n\t}\n\t// Verify values\n\tfor i, v := range testVars {\n\t\twant := newValues[i]\n\t\tgot := os.Getenv(v)\n\t\tif got != want {\n\t\t\tt.Fatalf(\"os.GetEnv(%q) = %q, want %q\", v, got, want)\n\t\t}\n\t}\n\t// Set with .Clean() which should overwrite the values\n\te.Clean()\n\t// Check them again, should be changed\n\tfor i, v := range testVars {\n\t\tif os.Getenv(v) == newValues[i] {\n\t\t\tt.Errorf(\"os.GetEnv(%q) = %q, should have changed after .Clean() to old value in .env\", v, os.Getenv(v))\n\t\t}\n\t}\n}", "func (s *SignSuite) TearDownTest(c *C) {\n}", "func (f *fixture) TearDown(ctx context.Context, s *testing.FixtState) {\n\tif f.startChrome && f.cr != nil {\n\t\tif err := UnmountAllSmbMounts(ctx, f.cr); err != nil {\n\t\t\ts.Error(\"Failed to unmount all SMB mounts: \", err)\n\t\t}\n\t}\n\tf.cr = nil\n\tif err := f.server.Stop(ctx); err != nil {\n\t\ts.Error(\"Failed to stop smbd: \", err)\n\t}\n\tf.server = nil\n\tif err := os.RemoveAll(f.tempDir); err != nil {\n\t\ts.Error(\"Failed to remove temporary guest share: \", err)\n\t}\n\tf.tempDir = \"\"\n}", "func (ct *CoreTest) Teardown() {\n\tTeardownDatabase(ct.DB)\n}", "func 
(s *JobApiTestSuite) cleanUp() {\n\ttest.DeleteAllRuns(s.runClient, s.resourceNamespace, s.T())\n\ttest.DeleteAllJobs(s.jobClient, s.resourceNamespace, s.T())\n\ttest.DeleteAllPipelines(s.pipelineClient, s.T())\n\ttest.DeleteAllExperiments(s.experimentClient, s.resourceNamespace, s.T())\n}", "func (tc *testContext) cleanup() {\n\ttc.osdkTestCtx.Cleanup()\n}", "func (suite *PouchRestartSuite) TearDownTest(c *check.C) {\n}", "func (suite *PouchRestartSuite) TearDownTest(c *check.C) {\n}", "func TestUnset(t *testing.T) {\n\ttests := []struct {\n\t\tkey string\n\t\tvalue string\n\t}{\n\t\t{\"KEY_0\", \"Abc\"},\n\t\t{\"KEY_1\", \"Def\"},\n\t}\n\n\t// Set test data.\n\tos.Clearenv()\n\tfor _, test := range tests {\n\t\tif err := os.Setenv(test.key, test.value); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tif v := os.Getenv(test.key); v != test.value {\n\t\t\tt.Errorf(\"expected `%s` but `%s`\", test.value, v)\n\t\t}\n\t}\n\n\t// Erase the data and check the function.\n\tfor _, test := range tests {\n\t\tif err := Unset(test.key); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tif v := os.Getenv(test.key); v != \"\" {\n\t\t\tt.Errorf(\"must be cleaned but `%s`\", v)\n\t\t}\n\t}\n}", "func (suite *PopTestSuite) TearDown() {\n\t// disconnect from the package DB connections\n\tif suite.lowPrivConn != nil {\n\t\tif err := suite.lowPrivConn.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\tif suite.highPrivConn != nil && suite.highPrivConn != suite.lowPrivConn {\n\t\tif err := suite.highPrivConn.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\n\t// Remove the package DB if this isn't a per test transaction\n\tif !suite.usePerTestTransaction {\n\t\tif err := dropDB(suite.pgConn, (*suite.lowPrivConnDetails).Database); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\terr := suite.pgConn.Close()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n}", "func (p *pgTester) teardown() error {\n\tvar err error\n\tif err = p.testDbConn.Close(); err != nil {\n\t\treturn 
err\n\t}\n\tp.testDbConn = nil\n\tif err = p.dropDB(p.TestDBName); err != nil {\n\t\treturn err\n\t}\n\tif p.liveDbConn != nil {\n\t\tif err = p.liveDbConn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.liveDbConn = nil\n\t}\n\tif p.liveTestDbConn != nil {\n\t\tif err = p.liveTestDbConn.Close(); err != nil {\n\n\t\t\treturn err\n\t\t}\n\t\tp.liveTestDbConn = nil\n\t\tif err = p.dropDB(p.LiveTestDBName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn os.Remove(p.pgPassFile)\n}", "func (s *MysqlSuite) TearDownSuite() {\n\ts.DBConn.Close()\n}", "func CleanupSuite() {\n\t// Run on all Ginkgo nodes\n}", "func (p *PodmanTestIntegration) Cleanup() {\n\tp.StopVarlink()\n\t// TODO\n\t// Stop all containers\n\t// Rm all containers\n\n\tif err := os.RemoveAll(p.TempDir); err != nil {\n\t\tfmt.Printf(\"%q\\n\", err)\n\t}\n\n\t// Clean up the registries configuration file ENV variable set in Create\n\tresetRegistriesConfigEnv()\n}", "func newTestEnvr() *Envr {\n\tfor _, v := range testVars {\n\t\tif err := os.Unsetenv(v); err != nil {\n\t\t\tlog.Fatalf(\"os.Unsetenv() err = %s\", err)\n\t\t}\n\t}\n\treturn New(envName, testVars)\n}", "func (env *TestVDBEnv) Cleanup() {\n\tenv.t.Logf(\"Cleaningup TestVDBEnv\")\n\tenv.DBProvider.Close()\n\tos.RemoveAll(env.dbPath)\n}", "func TestClear(t *testing.T) {\n\ttests := []struct {\n\t\tkey string\n\t\tvalue string\n\t}{\n\t\t{\"KEY_0\", \"Abc\"},\n\t\t{\"KEY_1\", \"Def\"},\n\t}\n\n\t// Set test data.\n\tos.Clearenv()\n\tfor _, test := range tests {\n\t\tif err := os.Setenv(test.key, test.value); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tif v := os.Getenv(test.key); v != test.value {\n\t\t\tt.Errorf(\"expected `%s` but `%s`\", test.value, v)\n\t\t}\n\t}\n\n\t// Erase the data and check the function.\n\tClear()\n\tfor _, test := range tests {\n\t\tif v := os.Getenv(test.key); v != \"\" {\n\t\t\tt.Errorf(\"must be cleaned but `%s`\", v)\n\t\t}\n\t}\n}", "func tearDownTest() {\n\n}", "func (s *ApiIntegrationTestSuite) 
TearDownSuite(c *C) {\n\tc.Log(\"Tearing down api integration test suite\")\n\n\ts.testServer.Close()\n\tc.Log(\"Test server closed\")\n}", "func (suite *RouterTestSuite) TearDownSuite() {\n\tsuite.server.Close()\n}", "func (te *TestEnvironment) Clean() {\n\tuserIDs, err := te.UserDAO.ListAllUserIDs()\n\tif err != nil {\n\t\tte.Logger.Error(fmt.Sprintf(\"ListAllUserIDs returns err: %v\", err))\n\t}\n\n\tfor _, uid := range userIDs {\n\t\tuser, err := te.UserDAO.GetUserByID(uid)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"GetUserByID(%s) returns err: %v\", uid, err))\n\t\t}\n\n\t\tgDrive, err := te.GDriveFactory.New(user)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"Error initializing gdrive client for user %q\", user.Id))\n\t\t\treturn\n\t\t}\n\t\tfileIDs, err := gDrive.ListFileIDs(googledrive.AllMP4s)\n\t\tif err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"Error listing all file IDs for user %q\", user.Id))\n\t\t}\n\t\tfor _, fid := range fileIDs {\n\t\t\tfor _, prefix := range googledrive.FilePrefixes {\n\t\t\t\tif err := gDrive.MarkFileByID(fid, prefix, true); err != nil {\n\t\t\t\t\tte.Logger.Error(fmt.Sprintf(\"gDrive.MarkFileByID(%s, %s, true) for user %q returns err: %v\", fid, prefix, user.Id, err))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := data.DeleteAllUserData(uid, false, te.sqlService, te.SimpleStorage, te.Logger); err != nil {\n\t\t\tte.Logger.Error(fmt.Sprintf(\"DeleteAllUserDataInDB(%s, %t, _) returns err: %v\", uid, false, err))\n\t\t}\n\t}\n}", "func clear() {\n\tos.RemoveAll(TestDir)\n}", "func (f *fixture) TearDown(ctx context.Context, s *testing.FixtState) {\n\tchrome.Unlock()\n\tf.cleanUp(ctx, s)\n}", "func (suite *PopTestSuite) TearDownTest() {\n\tsuite.tearDownTxnTest()\n}", "func TearDown(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tcontainer := di.Get()\n\n\tif err := container.TaskRepository.DeleteAll(ctx); err != nil {\n\t\thttp.Error(w, \"taskRepository.DeleteAll error: 
\"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := container.UserRepository.DeleteAll(ctx); err != nil {\n\t\thttp.Error(w, \"userRepository.DeleteAll error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"done\")\n}", "func (b *DatabaseTestSuiteBase) TearDownTest() {\n\tif err := b.tx.Rollback(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func TestCleanUp(t *testing.T) {\n\tvar dataSource DataSourceName\n\tdataSource.Init()\n\tdb := GetDBConnection(dataSource.DSNString(), logger.Silent)\n\tDropTables(db)\n}", "func (suite *TenantTestSuite) TearDownSuite() {\n\n\tsession, err := mgo.Dial(suite.cfg.MongoDB.Host)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsession.DB(suite.cfg.MongoDB.Db).DropDatabase()\n}", "func Teardown() error {\n\t// TODO: wait for all cr deleted.\n\tGlobal = nil\n\treturn nil\n}", "func CleanupTests(driver, dsn string, verbose bool) error {\n\tclient, err := kivik.New(driver, dsn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcount, err := doCleanup(client, verbose)\n\tif verbose {\n\t\tfmt.Printf(\"Deleted %d test databases\\n\", count)\n\t}\n\treturn err\n}", "func (s *BaseTest) SetUpTest(c *check.C) {\n\ts.cleanupHandlers = nil\n}", "func ClearTestTables() (err error) {\n\terr = dropTable(\"users\")\n\treturn\n}", "func (suite *SignupTestSuite) TearDownSuite() {\n\tsuite.Equal(numAsserts, suite.Asserts, fmt.Sprintf(\"we should have %d asserts\", numAsserts))\n}", "func (tr *TestRunner) CleanUp() error {\n\tfor imsi := range tr.imsis {\n\t\terr := deleteSubscribersFromHSS(imsi)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, instance := range tr.activePCRFs {\n\t\terr := clearSubscribersFromPCRFPerInstance(instance)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, instance := range tr.activeOCSs {\n\t\terr := clearSubscribersFromOCSPerInstance(instance)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (suite *EnqueuerTestSuite) 
TearDownSuite() {\n\tsuite.cancel()\n\n\tconn := suite.pool.Get()\n\tdefer func() {\n\t\t_ = conn.Close()\n\t}()\n\n\t_ = tests.ClearAll(suite.namespace, conn)\n}", "func Teardown() {\n\tfmt.Println(\"====== Clean kubernetes testing pod ======\")\n\tres, err := kubeclient.ExecKubectl(\"kubectl delete pod -l app=nsenter\")\n\tfmt.Println(res)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \" + err.Error())\n\t}\n}", "func (amqpSuite *AmqpSuite) TearDownSuite() {\n\tif amqpSuite.connConsume != nil {\n\t\tdefer amqpSuite.connConsume.Close()\n\t}\n\n\tif amqpSuite.connPublish != nil {\n\t\tdefer amqpSuite.connPublish.Close()\n\t}\n\n\tif amqpSuite.channelConsume != nil {\n\t\tdefer amqpSuite.channelConsume.Close()\n\t}\n\tif amqpSuite.channelPublish != nil {\n\t\tdefer amqpSuite.channelPublish.Close()\n\t}\n\n\tif tearDownSuite, ok := amqpSuite.InnerSuite.(suite.TearDownAllSuite); ok {\n\t\ttearDownSuite.TearDownSuite()\n\t}\n}", "func (suite *SingletonFlushTestSuite) AfterTest(suiteName, testName string) {\n\tdownErr := StopProxy()\n\trequire.Nilf(suite.T(), downErr, \"Error stopping proxy: %v\", downErr)\n\tfmt.Println(\"Cleaning 3scale backend state\")\n\tflushErr := suite.backend.Flush()\n\tif flushErr != nil {\n\t\tfmt.Printf(\"Error flushing the backend state: %v\", flushErr)\n\t\tsuite.backend.states = suite.backend.states[:0]\n\t}\n\tdeleteErr := os.Remove(\"./temp.yaml\")\n\trequire.Nilf(suite.T(), deleteErr, \"Error deleting temporary envoy.yaml\")\n}", "func CleanEnv() {\n\t// apparently os.Unsetenv doesn't exist in the version of go I'm using\n\t_ = os.Setenv(\"BM_CONFIG_DIR\", \"\")\n\t_ = os.Setenv(\"BM_USER\", \"\")\n\t_ = os.Setenv(\"BM_ACCOUNT\", \"\")\n\t_ = os.Setenv(\"BM_ENDPOINT\", \"\")\n\t_ = os.Setenv(\"BM_AUTH_ENDPOINT\", \"\")\n\t_ = os.Setenv(\"BM_DEBUG_LEVEL\", \"\")\n}", "func (s *BaseHelixTestSuite) TearDownSuite() {\n\tif s.Admin.zkClient.IsConnected() {\n\t\ts.Admin.zkClient.Disconnect()\n\t}\n}", "func CleanupTestHarness() 
{\n\tcleanupCerts()\n}", "func teardown() {\n\t// if kubeTunnel != nil {\n\t// \tlog.Debugf(\"Tearing down tunnel connection to server...\")\n\t// \tkubeTunnel.Close()\n\t// }\n}", "func teardown() {\n\tts.Close()\n\tdb.Close()\n}", "func (t *NvidiaGPUUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) {\n\t// rely on the namespace deletion to clean up everything\n}", "func AfterTest() {\n\thttpmock.DeactivateAndReset()\n}", "func AfterSuiteCleanup() {\n\tlogf.Log.Info(\"AfterSuiteCleanup\")\n}", "func (suite *MetricsTestSuite) TearDownSuite() {\n\t//\n}", "func (s *OssDownloadSuite) TearDownSuite(c *C) {\n\t// Delete Part\n\tlmur, err := s.bucket.ListMultipartUploads()\n\tc.Assert(err, IsNil)\n\n\tfor _, upload := range lmur.Uploads {\n\t\tvar imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,\n\t\t\tKey: upload.Key, UploadID: upload.UploadID}\n\t\terr = s.bucket.AbortMultipartUpload(imur)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\t// Delete Objects\n\tlor, err := s.bucket.ListObjects()\n\tc.Assert(err, IsNil)\n\n\tfor _, object := range lor.Objects {\n\t\terr = s.bucket.DeleteObject(object.Key)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\ttestLogger.Println(\"test download completed\")\n}", "func afterServeHTTPTest(t *testing.T, webroot string) {\n\tif !strings.Contains(webroot, testDirPrefix) {\n\t\tt.Fatalf(\"Cannot clean up after test because webroot is: %s\", webroot)\n\t}\n\t// cleans up everything under the test dir. No need to clean the individual files.\n\terr := os.RemoveAll(webroot)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to clean up test dir %s. 
Error was: %v\", webroot, err)\n\t}\n}", "func (suite *EventStoreTestSuite) TearDownTest() {\n\tassert.Nil(suite.T(), suite.store.DeleteTable(context.Background()), \"could not delete table\")\n\tassert.Nil(suite.T(), suite.store.DeleteTable(suite.ctx), \"could not delete table\")\n}", "func (Tests) Reset(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"down\")\n\treturn Exec(ComposeBin, arg...)\n}", "func (e *Environment) Cleanup() {\n\tif e.serveCancel != nil {\n\t\te.serveCancel()\n\t\t_ = e.serveG.Wait()\n\t}\n\tif appdPath, _ := exec.LookPath(e.Appd()); appdPath != \"\" {\n\t\tos.Remove(appdPath)\n\t}\n\tif appcliPath, _ := exec.LookPath(e.Appcli()); appcliPath != \"\" {\n\t\tos.Remove(appcliPath)\n\t}\n\thome, _ := os.UserHomeDir()\n\tos.RemoveAll(filepath.Join(home, \".\"+e.Appcli()))\n\tos.RemoveAll(filepath.Join(home, \".\"+e.Appd()))\n}", "func (env *DisconnectEnv) Teardown() {\n\tif env.fioPodName != \"\" {\n\t\tfmt.Printf(\"removing fio pod\\n\")\n\t\terr := common.DeletePod(env.fioPodName)\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tenv.fioPodName = \"\"\n\t}\n\tif env.volToDelete != \"\" {\n\t\tcommon.RmPVC(env.volToDelete, env.storageClass)\n\t\tenv.volToDelete = \"\"\n\t}\n}", "func (s *SignSuite) TearDownSuite(c *C) {\n}" ]
[ "0.80028594", "0.7915147", "0.76951486", "0.7650105", "0.7488591", "0.7420243", "0.7291392", "0.71527064", "0.71319944", "0.71099573", "0.7091775", "0.70676035", "0.70446503", "0.69684124", "0.6963401", "0.690389", "0.68975645", "0.688101", "0.6853538", "0.6825987", "0.6799223", "0.6792707", "0.6789565", "0.67824537", "0.6782124", "0.67552567", "0.67467976", "0.67164904", "0.67138815", "0.6688633", "0.66835517", "0.6659954", "0.66538864", "0.6647757", "0.6645357", "0.6608608", "0.6585065", "0.6572165", "0.65567255", "0.65490294", "0.6543925", "0.654368", "0.65317196", "0.65264916", "0.65162", "0.6506149", "0.65038574", "0.6501391", "0.64902", "0.6474942", "0.64317715", "0.641601", "0.64064765", "0.64064765", "0.63917136", "0.6381136", "0.63723", "0.6367968", "0.6367161", "0.6365895", "0.6357739", "0.6349213", "0.63463056", "0.63438886", "0.6328767", "0.6317601", "0.6313031", "0.6309342", "0.63032293", "0.6294676", "0.6289112", "0.6283955", "0.6281191", "0.6242031", "0.62388164", "0.62179846", "0.62141794", "0.6190799", "0.61784035", "0.6175766", "0.61530757", "0.6151911", "0.6147175", "0.614577", "0.61445373", "0.6137433", "0.61302775", "0.61133796", "0.61096805", "0.6099855", "0.6091564", "0.608319", "0.6076603", "0.60679334", "0.6063882", "0.6062634", "0.60607725", "0.6054552", "0.60505295", "0.6026629" ]
0.71729386
7
Test check behavior of GetAWS() functions.
func (s AWSTestSuite) TestGetAWS() { s.Equal("ID", env.GetAWSAccessKeyID()) s.Equal("test.example.com", env.GetAWSBucket()) s.Equal("/backup/database", env.GetAWSPath()) s.Equal("secret", env.GetAWSSecretAccessKey()) s.Equal("us-east-1", env.GetAWSRegion()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestAWS(t *testing.T) {\n\tlogger = runner.NewLogger(\"aws_test\")\n\n\tlogger.Info(\"TestAWS completed\")\n}", "func TestTerraformAwsS3Example(t *testing.T) {\n\tt.Parallel()\n\n\tconst (\n\t\tawsRegion = \"eu-west-1\"\n\t\texpectedBucketName = \"paloth-test-bucket.\"\n\t\texpectedStatus = \"Enabled\"\n\t)\n\n\t// Construct the terraform options with default retryable errors to handle the most common retryable errors in\n\t// terraform testing.\n\tterraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{\n\t\tTerraformDir: \"../..\",\n\t})\n\n\tdefer terraform.Destroy(t, terraformOptions)\n\n\tterraform.InitAndApply(t, terraformOptions)\n\n\tbucketID := terraform.Output(t, terraformOptions, \"bucket_name\")\n\n\tassert.Equal(t, expectedBucketName, bucketID)\n\n\tactualStatus := aws.GetS3BucketVersioning(t, awsRegion, bucketID)\n\n\tassert.Equal(t, expectedStatus, actualStatus)\n}", "func TestGetObject(t *testing.T) {\n\tmock := mockS3Impl{\n\t\tInMemoryStore: make(map[string]string),\n\t}\n\n\tmock.InMemoryStore[\"foo\"] = \"bar\"\n\n\tmyservice := mockS3.Myservice{\n\t\tS3Client: mock,\n\t}\n\n\tstr, err := myservice.GetObjectAsString(\"foo\")\n\tif err != nil {\n\t\tt.Fail()\n\t}\n\n\tif str != \"bar\" {\n\t\tt.Fail()\n\t}\n}", "func TestTerraformAwsS3Example(t *testing.T) {\n\tt.Parallel()\n\n\t// Give this S3 Bucket a unique ID for a name tag so we can distinguish it from any other Buckets provisioned\n\t// in your AWS account\n\texpectedName := fmt.Sprintf(\"terratest-aws-s3-example-%s\", strings.ToLower(random.UniqueId()))\n\n\t// Give this S3 Bucket an environment to operate as a part of for the purposes of resource tagging\n\texpectedEnvironment := \"Automated Testing\"\n\n\t// Pick a random AWS region to test in. 
This helps ensure your code works in all regions.\n\tawsRegion := aws.GetRandomStableRegion(t, nil, nil)\n\n\t// Construct the terraform options with default retryable errors to handle the most common retryable errors in\n\t// terraform testing.\n\tterraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{\n\t\t// The path to where our Terraform code is located\n\t\tTerraformDir: \"../examples/terraform-aws-s3-example\",\n\n\t\t// Variables to pass to our Terraform code using -var options\n\t\tVars: map[string]interface{}{\n\t\t\t\"tag_bucket_name\": expectedName,\n\t\t\t\"tag_bucket_environment\": expectedEnvironment,\n\t\t\t\"with_policy\": \"true\",\n\t\t\t\"region\": awsRegion,\n\t\t},\n\t})\n\n\t// At the end of the test, run `terraform destroy` to clean up any resources that were created\n\tdefer terraform.Destroy(t, terraformOptions)\n\n\t// This will run `terraform init` and `terraform apply` and fail the test if there are any errors\n\tterraform.InitAndApply(t, terraformOptions)\n\n\t// Run `terraform output` to get the value of an output variable\n\tbucketID := terraform.Output(t, terraformOptions, \"bucket_id\")\n\n\t// Verify that our Bucket has versioning enabled\n\tactualStatus := aws.GetS3BucketVersioning(t, awsRegion, bucketID)\n\texpectedStatus := \"Enabled\"\n\tassert.Equal(t, expectedStatus, actualStatus)\n\n\t// Verify that our Bucket has a policy attached\n\taws.AssertS3BucketPolicyExists(t, awsRegion, bucketID)\n\n\t// Verify that our bucket has server access logging TargetBucket set to what's expected\n\tloggingTargetBucket := aws.GetS3BucketLoggingTarget(t, awsRegion, bucketID)\n\texpectedLogsTargetBucket := fmt.Sprintf(\"%s-logs\", bucketID)\n\tloggingObjectTargetPrefix := aws.GetS3BucketLoggingTargetPrefix(t, awsRegion, bucketID)\n\texpectedLogsTargetPrefix := \"TFStateLogs/\"\n\n\tassert.Equal(t, expectedLogsTargetBucket, loggingTargetBucket)\n\tassert.Equal(t, expectedLogsTargetPrefix, loggingObjectTargetPrefix)\n}", 
"func testSSES3EncryptionPutGet() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"PutEncryptedObject(bucketName, objectName, reader, sse)\"\n\targs := map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t\t\"objectName\": \"\",\n\t\t\"sse\": \"\",\n\t}\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer cleanupBucket(bucketName, c)\n\n\ttestCases := []struct {\n\t\tbuf []byte\n\t}{\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 15)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 16)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 17)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 31)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 32)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 33)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024*2)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024*1024)},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\t// Generate a random object 
name\n\t\tobjectName := randString(60, rand.NewSource(time.Now().UnixNano()), \"\")\n\t\targs[\"objectName\"] = objectName\n\n\t\t// Secured object\n\t\tsse := encrypt.NewSSE()\n\t\targs[\"sse\"] = sse\n\n\t\t// Put encrypted data\n\t\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})\n\t\tif err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"PutEncryptedObject failed\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Read the data back without any encryption headers\n\t\tr, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})\n\t\tif err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"GetEncryptedObject failed\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer r.Close()\n\n\t\t// Compare the sent object with the received one\n\t\trecvBuffer := bytes.NewBuffer([]byte{})\n\t\tif _, err = io.Copy(recvBuffer, r); err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", error: \"+err.Error(), err)\n\t\t\treturn\n\t\t}\n\t\tif recvBuffer.Len() != len(testCase.buf) {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", Number of bytes of received object does not match, expected \"+string(len(testCase.buf))+\", got \"+string(recvBuffer.Len()), err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", Encrypted sent is not equal to decrypted, expected \"+string(testCase.buf)+\", got \"+string(recvBuffer.Bytes()), err)\n\t\t\treturn\n\t\t}\n\n\t\tsuccessLogger(testName, function, args, startTime).Info()\n\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func testSSECEncryptionPutGet() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := 
\"PutEncryptedObject(bucketName, objectName, reader, sse)\"\n\targs := map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t\t\"objectName\": \"\",\n\t\t\"sse\": \"\",\n\t}\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer cleanupBucket(bucketName, c)\n\n\ttestCases := []struct {\n\t\tbuf []byte\n\t}{\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 15)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 16)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 17)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 31)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 32)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 33)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024*2)},\n\t\t{buf: bytes.Repeat([]byte(\"F\"), 1024*1024)},\n\t}\n\n\tconst password = \"correct horse battery staple\" // https://xkcd.com/936/\n\n\tfor i, testCase := range testCases {\n\t\t// Generate a random object name\n\t\tobjectName := randString(60, 
rand.NewSource(time.Now().UnixNano()), \"\")\n\t\targs[\"objectName\"] = objectName\n\n\t\t// Secured object\n\t\tsse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName))\n\t\targs[\"sse\"] = sse\n\n\t\t// Put encrypted data\n\t\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse})\n\t\tif err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"PutEncryptedObject failed\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Read the data back\n\t\tr, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse})\n\t\tif err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"GetEncryptedObject failed\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer r.Close()\n\n\t\t// Compare the sent object with the received one\n\t\trecvBuffer := bytes.NewBuffer([]byte{})\n\t\tif _, err = io.Copy(recvBuffer, r); err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", error: \"+err.Error(), err)\n\t\t\treturn\n\t\t}\n\t\tif recvBuffer.Len() != len(testCase.buf) {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", Number of bytes of received object does not match, expected \"+string(len(testCase.buf))+\", got \"+string(recvBuffer.Len()), err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Test \"+string(i+1)+\", Encrypted sent is not equal to decrypted, expected \"+string(testCase.buf)+\", got \"+string(recvBuffer.Bytes()), err)\n\t\t\treturn\n\t\t}\n\n\t\tsuccessLogger(testName, function, args, startTime).Info()\n\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func TestAWSMetadata(t *testing.T) {\n\t// Configure RDS API mock.\n\trds := &mocks.RDSMock{\n\t\tDBInstances: 
[]*rds.DBInstance{\n\t\t\t// Standalone RDS instance.\n\t\t\t{\n\t\t\t\tDBInstanceArn: aws.String(\"arn:aws:rds:us-west-1:123456789012:db:postgres-rds\"),\n\t\t\t\tDBInstanceIdentifier: aws.String(\"postgres-rds\"),\n\t\t\t\tDbiResourceId: aws.String(\"db-xyz\"),\n\t\t\t\tIAMDatabaseAuthenticationEnabled: aws.Bool(true),\n\t\t\t},\n\t\t\t// Instance that is a part of an Aurora cluster.\n\t\t\t{\n\t\t\t\tDBInstanceArn: aws.String(\"arn:aws:rds:us-east-1:123456789012:db:postgres-aurora-1\"),\n\t\t\t\tDBInstanceIdentifier: aws.String(\"postgres-aurora-1\"),\n\t\t\t\tDBClusterIdentifier: aws.String(\"postgres-aurora\"),\n\t\t\t},\n\t\t},\n\t\tDBClusters: []*rds.DBCluster{\n\t\t\t// Aurora cluster.\n\t\t\t{\n\t\t\t\tDBClusterArn: aws.String(\"arn:aws:rds:us-east-1:123456789012:cluster:postgres-aurora\"),\n\t\t\t\tDBClusterIdentifier: aws.String(\"postgres-aurora\"),\n\t\t\t\tDbClusterResourceId: aws.String(\"cluster-xyz\"),\n\t\t\t},\n\t\t},\n\t\tDBProxies: []*rds.DBProxy{\n\t\t\t{\n\t\t\t\tDBProxyArn: aws.String(\"arn:aws:rds:us-east-1:123456789012:db-proxy:prx-resource-id\"),\n\t\t\t\tDBProxyName: aws.String(\"rds-proxy\"),\n\t\t\t},\n\t\t},\n\t\tDBProxyEndpoints: []*rds.DBProxyEndpoint{\n\t\t\t{\n\t\t\t\tDBProxyEndpointName: aws.String(\"rds-proxy-endpoint\"),\n\t\t\t\tDBProxyName: aws.String(\"rds-proxy\"),\n\t\t\t},\n\t\t},\n\t}\n\n\t// Configure Redshift API mock.\n\tredshift := &mocks.RedshiftMock{\n\t\tClusters: []*redshift.Cluster{\n\t\t\t{\n\t\t\t\tClusterNamespaceArn: aws.String(\"arn:aws:redshift:us-west-1:123456789012:namespace:namespace-id\"),\n\t\t\t\tClusterIdentifier: aws.String(\"redshift-cluster-1\"),\n\t\t\t},\n\t\t\t{\n\t\t\t\tClusterNamespaceArn: aws.String(\"arn:aws:redshift:us-east-2:210987654321:namespace:namespace-id\"),\n\t\t\t\tClusterIdentifier: aws.String(\"redshift-cluster-2\"),\n\t\t\t},\n\t\t},\n\t}\n\n\t// Configure ElastiCache API mock.\n\telasticache := &mocks.ElastiCacheMock{\n\t\tReplicationGroups: 
[]*elasticache.ReplicationGroup{\n\t\t\t{\n\t\t\t\tARN: aws.String(\"arn:aws:elasticache:us-west-1:123456789012:replicationgroup:my-redis\"),\n\t\t\t\tReplicationGroupId: aws.String(\"my-redis\"),\n\t\t\t\tClusterEnabled: aws.Bool(true),\n\t\t\t\tTransitEncryptionEnabled: aws.Bool(true),\n\t\t\t\tUserGroupIds: []*string{aws.String(\"my-user-group\")},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Configure MemoryDB API mock.\n\tmemorydb := &mocks.MemoryDBMock{\n\t\tClusters: []*memorydb.Cluster{\n\t\t\t{\n\t\t\t\tARN: aws.String(\"arn:aws:memorydb:us-west-1:123456789012:cluster:my-cluster\"),\n\t\t\t\tName: aws.String(\"my-cluster\"),\n\t\t\t\tTLSEnabled: aws.Bool(true),\n\t\t\t\tACLName: aws.String(\"my-user-group\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tstsMock := &mocks.STSMock{}\n\n\t// Configure Redshift Serverless API mock.\n\tredshiftServerlessWorkgroup := mocks.RedshiftServerlessWorkgroup(\"my-workgroup\", \"us-west-1\")\n\tredshiftServerlessEndpoint := mocks.RedshiftServerlessEndpointAccess(redshiftServerlessWorkgroup, \"my-endpoint\", \"us-west-1\")\n\tredshiftServerless := &mocks.RedshiftServerlessMock{\n\t\tWorkgroups: []*redshiftserverless.Workgroup{redshiftServerlessWorkgroup},\n\t\tEndpoints: []*redshiftserverless.EndpointAccess{redshiftServerlessEndpoint},\n\t}\n\n\t// Create metadata fetcher.\n\tmetadata, err := NewMetadata(MetadataConfig{\n\t\tClients: &cloud.TestCloudClients{\n\t\t\tRDS: rds,\n\t\t\tRedshift: redshift,\n\t\t\tElastiCache: elasticache,\n\t\t\tMemoryDB: memorydb,\n\t\t\tRedshiftServerless: redshiftServerless,\n\t\t\tSTS: stsMock,\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname string\n\t\tinAWS types.AWS\n\t\toutAWS types.AWS\n\t}{\n\t\t{\n\t\t\tname: \"RDS instance\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tInstanceID: \"postgres-rds\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: 
types.AWS{\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tInstanceID: \"postgres-rds\",\n\t\t\t\t\tResourceID: \"db-xyz\",\n\t\t\t\t\tIAMAuth: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Aurora cluster\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tInstanceID: \"postgres-aurora\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tClusterID: \"postgres-aurora\",\n\t\t\t\t\tResourceID: \"cluster-xyz\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS instance, part of Aurora cluster\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tInstanceID: \"postgres-aurora-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tClusterID: \"postgres-aurora\",\n\t\t\t\t\tResourceID: \"cluster-xyz\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift cluster 1\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshift: types.Redshift{\n\t\t\t\t\tClusterID: \"redshift-cluster-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: 
\"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshift: types.Redshift{\n\t\t\t\t\tClusterID: \"redshift-cluster-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift cluster 2\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshift: types.Redshift{\n\t\t\t\t\tClusterID: \"redshift-cluster-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"210987654321\",\n\t\t\t\tRegion: \"us-east-2\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshift: types.Redshift{\n\t\t\t\t\tClusterID: \"redshift-cluster-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ElastiCache\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tElastiCache: types.ElastiCache{\n\t\t\t\t\tReplicationGroupID: \"my-redis\",\n\t\t\t\t\tEndpointType: \"configuration\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tElastiCache: types.ElastiCache{\n\t\t\t\t\tReplicationGroupID: \"my-redis\",\n\t\t\t\t\tUserGroupIDs: []string{\"my-user-group\"},\n\t\t\t\t\tTransitEncryptionEnabled: true,\n\t\t\t\t\tEndpointType: \"configuration\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"MemoryDB\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tMemoryDB: types.MemoryDB{\n\t\t\t\t\tClusterName: \"my-cluster\",\n\t\t\t\t\tEndpointType: \"cluster\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: 
\"123456789012\",\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tMemoryDB: types.MemoryDB{\n\t\t\t\t\tClusterName: \"my-cluster\",\n\t\t\t\t\tACLName: \"my-user-group\",\n\t\t\t\t\tTLSEnabled: true,\n\t\t\t\t\tEndpointType: \"cluster\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS Proxy\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tName: \"rds-proxy\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tName: \"rds-proxy\",\n\t\t\t\t\tResourceID: \"prx-resource-id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS Proxy custom endpoint\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tCustomEndpointName: \"rds-proxy-endpoint\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tName: \"rds-proxy\",\n\t\t\t\t\tCustomEndpointName: \"rds-proxy-endpoint\",\n\t\t\t\t\tResourceID: \"prx-resource-id\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift Serverless workgroup\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: 
\"externalID123\",\n\t\t\t\tRedshiftServerless: types.RedshiftServerless{\n\t\t\t\t\tWorkgroupName: \"my-workgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshiftServerless: types.RedshiftServerless{\n\t\t\t\t\tWorkgroupName: \"my-workgroup\",\n\t\t\t\t\tWorkgroupID: \"some-uuid-for-my-workgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift Serverless VPC endpoint\",\n\t\t\tinAWS: types.AWS{\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshiftServerless: types.RedshiftServerless{\n\t\t\t\t\tEndpointName: \"my-endpoint\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutAWS: types.AWS{\n\t\t\t\tAccountID: \"123456789012\",\n\t\t\t\tRegion: \"us-west-1\",\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshiftServerless: types.RedshiftServerless{\n\t\t\t\t\tWorkgroupName: \"my-workgroup\",\n\t\t\t\t\tEndpointName: \"my-endpoint\",\n\t\t\t\t\tWorkgroupID: \"some-uuid-for-my-workgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tctx := context.Background()\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\t\t\tName: \"test\",\n\t\t\t}, types.DatabaseSpecV3{\n\t\t\t\tProtocol: defaults.ProtocolPostgres,\n\t\t\t\tURI: \"localhost\",\n\t\t\t\tAWS: test.inAWS,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = metadata.Update(ctx, database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, test.outAWS, database.GetAWS())\n\t\t\trequire.Equal(t, []string{test.inAWS.AssumeRoleARN}, stsMock.GetAssumedRoleARNs())\n\t\t\trequire.Equal(t, []string{test.inAWS.ExternalID}, 
stsMock.GetAssumedRoleExternalIDs())\n\t\t\tstsMock.ResetAssumeRoleHistory()\n\t\t})\n\t}\n}", "func TestAWSMetadataNoPermissions(t *testing.T) {\n\t// Create unauthorized mocks.\n\trds := &mocks.RDSMockUnauth{}\n\tredshift := &mocks.RedshiftMockUnauth{}\n\n\tstsMock := &mocks.STSMock{}\n\n\t// Create metadata fetcher.\n\tmetadata, err := NewMetadata(MetadataConfig{\n\t\tClients: &cloud.TestCloudClients{\n\t\t\tRDS: rds,\n\t\t\tRedshift: redshift,\n\t\t\tSTS: stsMock,\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname string\n\t\tmeta types.AWS\n\t}{\n\t\t{\n\t\t\tname: \"RDS instance\",\n\t\t\tmeta: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDS: types.RDS{\n\t\t\t\t\tInstanceID: \"postgres-rds\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS proxy\",\n\t\t\tmeta: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tName: \"rds-proxy\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS proxy endpoint\",\n\t\t\tmeta: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRDSProxy: types.RDSProxy{\n\t\t\t\t\tCustomEndpointName: \"rds-proxy-endpoint\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift cluster\",\n\t\t\tmeta: types.AWS{\n\t\t\t\tAssumeRoleARN: \"arn:aws:iam::123456789012:role/DBDiscoverer\",\n\t\t\t\tExternalID: \"externalID123\",\n\t\t\t\tRedshift: types.Redshift{\n\t\t\t\t\tClusterID: \"redshift-cluster-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tctx := context.Background()\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\t\t\tName: \"test\",\n\t\t\t}, types.DatabaseSpecV3{\n\t\t\t\tProtocol: 
defaults.ProtocolPostgres,\n\t\t\t\tURI: \"localhost\",\n\t\t\t\tAWS: test.meta,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Verify there's no error and metadata stayed the same.\n\t\t\terr = metadata.Update(ctx, database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, test.meta, database.GetAWS())\n\t\t\trequire.Equal(t, []string{test.meta.AssumeRoleARN}, stsMock.GetAssumedRoleARNs())\n\t\t\trequire.Equal(t, []string{test.meta.ExternalID}, stsMock.GetAssumedRoleExternalIDs())\n\t\t\tstsMock.ResetAssumeRoleHistory()\n\t\t})\n\t}\n}", "func verifyAws() {\n\t_, err := aws.EnvAuth()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func testAwsClient() *awsClient {\n\tclient := awsClient{}\n\tclient.EC2 = teststubs.CreateTestEC2InstanceMock()\n\tclient.AutoScaling = teststubs.CreateTestAutoScalingMock()\n\tclient.Route53 = teststubs.CreateTestRoute53Mock()\n\n\treturn &client\n}", "func checkHaveSsse3() bool", "func setupAWS(ctx *context.Context, config *ConfigParams) (*blob.Bucket, error) {\n\tc := &aws.Config{\n\t\t// Either hard-code the region or use AWS_REGION.\n\t\tRegion: aws.String(config.Region),\n\t\t// credentials.NewEnvCredentials assumes two environment variables are\n\t\t// present:\n\t\t// 1. AWS_ACCESS_KEY_ID, and\n\t\t// 2. 
AWS_SECRET_ACCESS_KEY.\n\t\t// Credentials: credentials.NewEnvCredentials(),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\tconfig.AccessKey,\n\t\t\tconfig.SecretKey,\n\t\t\tconfig.Token,\n\t\t),\n\t}\n\ts := session.Must(session.NewSession(c))\n\treturn s3blob.OpenBucket(*ctx, config.Bucket, s, nil)\n}", "func (ad *AWSData) getAWSInfo() error {\n\tlog.Trace(\"GetInfo Start\")\n\tchange := sets.NewSet()\n\tfor _, zid := range ad.zones {\n\t\tdata, err := GetAWSZoneInfo(ad.r53, ad.dnsfilters, zid)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Problems reading from zone:{}, {}\", zid, err)\n\t\t\treturn err\n\t\t} else {\n\t\t\tfor host, s := range data {\n\t\t\t\tar := khostdns.CreateArecord(host, s)\n\t\t\t\tif cips, ok := ad.awsHosts.Load(host); ok {\n\t\t\t\t\tcipsl := cips.(khostdns.Arecord).GetIps()\n\t\t\t\t\tif !cmp.Equal(cipsl, s) {\n\t\t\t\t\t\tlog.Trace(\"Found host change:{} ips:{}\", host, s)\n\t\t\t\t\t\tchange.Add(ar)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Trace(\"Found new host:{} ips:{}\", host, s)\n\t\t\t\t\tchange.Add(ar)\n\t\t\t\t}\n\t\t\t\tad.awsHosts.Store(host, ar)\n\t\t\t}\n\t\t}\n\t}\n\tif change.Cardinality() > 0 {\n\t\tchange.Each(func(i interface{}) bool {\n\t\t\tar2 := i.(khostdns.Arecord)\n\t\t\tlog.Trace(\"notify changes:{} ips:{}\", ar2.GetHostname(), ar2.GetIps())\n\t\t\tad.notifyChannel <- ar2\n\t\t\treturn false\n\t\t})\n\t}\n\tlog.Trace(\"GetInfo End\")\n\treturn nil\n}", "func TestAWSIAMNoPermissions(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\t// Create unauthorized mocks for AWS services.\n\tstsClient := &mocks.STSMock{\n\t\tARN: \"arn:aws:iam::123456789012:role/test-role\",\n\t}\n\t// Make configurator.\n\tconfigurator, err := NewIAM(ctx, IAMConfig{\n\t\tAccessPoint: &mockAccessPoint{},\n\t\tClients: &clients.TestCloudClients{}, // placeholder,\n\t\tHostID: \"host-id\",\n\t})\n\trequire.NoError(t, err)\n\n\ttests := []struct {\n\t\tname string\n\t\tmeta 
types.AWS\n\t\tclients clients.Clients\n\t}{\n\t\t{\n\t\t\tname: \"RDS database\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{InstanceID: \"postgres-rds\", ResourceID: \"postgres-rds-resource-id\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Aurora cluster\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{ClusterID: \"postgres-aurora\", ResourceID: \"postgres-aurora-resource-id\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"RDS database missing metadata\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", RDS: types.RDS{ClusterID: \"postgres-aurora\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRDS: &mocks.RDSMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Redshift cluster\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", Redshift: types.Redshift{ClusterID: \"redshift-cluster-1\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRedshift: &mocks.RedshiftMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ElastiCache\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", ElastiCache: types.ElastiCache{ReplicationGroupID: \"some-group\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\t// As of writing this API won't be called by the configurator 
anyway,\n\t\t\t\t// but might as well provide it in case that changes.\n\t\t\t\tElastiCache: &mocks.ElastiCacheMock{Unauth: true},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: trace.AccessDenied(\"unauthorized\"),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"IAM UnmodifiableEntityException\",\n\t\t\tmeta: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", Redshift: types.Redshift{ClusterID: \"redshift-cluster-1\"}},\n\t\t\tclients: &clients.TestCloudClients{\n\t\t\t\tRedshift: &mocks.RedshiftMockUnauth{},\n\t\t\t\tIAM: &mocks.IAMErrorMock{\n\t\t\t\t\tError: awserr.New(iam.ErrCodeUnmodifiableEntityException, \"unauthorized\", fmt.Errorf(\"unauthorized\")),\n\t\t\t\t},\n\t\t\t\tSTS: stsClient,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t// Update cloud clients.\n\t\t\tconfigurator.cfg.Clients = test.clients\n\n\t\t\tdatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\t\t\tName: \"test\",\n\t\t\t}, types.DatabaseSpecV3{\n\t\t\t\tProtocol: defaults.ProtocolPostgres,\n\t\t\t\tURI: \"localhost\",\n\t\t\t\tAWS: test.meta,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Make sure there're no errors trying to setup/destroy IAM.\n\t\t\terr = configurator.processTask(ctx, iamTask{\n\t\t\t\tisSetup: true,\n\t\t\t\tdatabase: database,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_FAILED, database.GetAWS().IAMPolicyStatus, \"must be invalid because of perm issues\")\n\n\t\t\terr = configurator.processTask(ctx, iamTask{\n\t\t\t\tisSetup: false,\n\t\t\t\tdatabase: database,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_UNSPECIFIED, database.GetAWS().IAMPolicyStatus, \"must be 
unspecified, task is tearing down\")\n\t\t})\n\t}\n}", "func setupAWS(ctx context.Context, bucket string) (*blob.Bucket, error) {\n\tc := &aws.Config{\n\t\t// Either hard-code the region or use AWS_REGION.\n\t\tRegion: aws.String(\"us-east-2\"),\n\t\t// credentials.NewEnvCredentials assumes two environment variables are\n\t\t// present:\n\t\t// 1. AWS_ACCESS_KEY_ID, and\n\t\t// 2. AWS_SECRET_ACCESS_KEY.\n\t\tCredentials: credentials.NewEnvCredentials(),\n\t}\n\ts := session.Must(session.NewSession(c))\n\treturn s3blob.OpenBucket(ctx, s, bucket)\n}", "func (m *MockMappedResource) Aws() aws.Resource {\n\tret := m.ctrl.Call(m, \"Aws\")\n\tret0, _ := ret[0].(aws.Resource)\n\treturn ret0\n}", "func NewAWS(ctx arn.Ctx, r ...Router) *AWS {\n\troot := append(ChainRouter{STSRouter{}}, r...)\n\tw := &AWS{\n\t\tCtxRouter: CtxRouter{arn.Ctx{}: &root},\n\t\tCtx: ctx,\n\t}\n\tw.Cfg = awsmock.Config(func(q *aws.Request) {\n\t\tw.mu.Lock()\n\t\tdefer w.mu.Unlock()\n\t\tif q := w.newRequest(q); !w.Route(q) {\n\t\t\tpanic(\"mock: \" + q.Name() + \" not handled\")\n\t\t}\n\t})\n\tw.Cfg.Region = ctx.Region\n\tw.Cfg.Credentials = w.UserCreds(\"\", \"alice\")\n\treturn w\n}", "func (m *MockMappedResource) HasAws() bool {\n\tret := m.ctrl.Call(m, \"HasAws\")\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func getAWSKeys() (keys s3gof3r.Keys, err error) {\n\n\tkeys, err = s3gof3r.EnvKeys()\n\tif err == nil {\n\t\treturn\n\t}\n\tkeys, err = s3gof3r.InstanceKeys()\n\tif err == nil {\n\t\treturn\n\t}\n\terr = errors.New(\"no AWS keys found\")\n\treturn\n}", "func checkAWSCLI(profile string) error {\n\tcmd := exec.Command(\"aws\", \"configure\", \"list\", \"--profile\", profile)\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"the AWS CLI is either not installed or the configured profile %q is not stored in the credentials; error: %w\", profile, err)\n\t}\n\treturn nil\n}", "func NewAWS(aws config.AWSConfig, bucket config.S3BucketConfig, noLocks, noVersioning bool) *AWS 
{\n\tif bucket.Bucket == \"\" {\n\t\treturn nil\n\t}\n\n\tsess := session.Must(session.NewSession())\n\tawsConfig := aws_sdk.NewConfig()\n\tvar creds *credentials.Credentials\n\tif len(aws.APPRoleArn) > 0 {\n\t\tlog.Debugf(\"Using %s role\", aws.APPRoleArn)\n\t\tcreds = stscreds.NewCredentials(sess, aws.APPRoleArn, func(p *stscreds.AssumeRoleProvider) {\n\t\t\tif aws.ExternalID != \"\" {\n\t\t\t\tp.ExternalID = aws_sdk.String(aws.ExternalID)\n\t\t\t}\n\t\t})\n\t} else {\n\t\tif aws.AccessKey == \"\" || aws.SecretAccessKey == \"\" {\n\t\t\tlog.Fatal(\"Missing AccessKey or SecretAccessKey for AWS provider. Please check your configuration and retry\")\n\t\t}\n\t\tcreds = credentials.NewStaticCredentials(aws.AccessKey, aws.SecretAccessKey, aws.SessionToken)\n\t}\n\tawsConfig.WithCredentials(creds)\n\n\tif e := aws.Endpoint; e != \"\" {\n\t\tawsConfig.WithEndpoint(e)\n\t}\n\tif e := aws.Region; e != \"\" {\n\t\tawsConfig.WithRegion(e)\n\t}\n\tawsConfig.S3ForcePathStyle = &bucket.ForcePathStyle\n\n\treturn &AWS{\n\t\tsvc: s3.New(sess, awsConfig),\n\t\tbucket: bucket.Bucket,\n\t\tkeyPrefix: bucket.KeyPrefix,\n\t\tfileExtension: bucket.FileExtension,\n\t\tdynamoSvc: dynamodbiface.DynamoDBAPI(dynamodb.New(sess, awsConfig)),\n\t\tdynamoTable: aws.DynamoDBTable,\n\t\tnoLocks: noLocks,\n\t\tnoVersioning: noVersioning,\n\t}\n}", "func TestAWSIAM(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\t// Setup AWS database objects.\n\trdsInstance := &rds.DBInstance{\n\t\tDBInstanceArn: aws.String(\"arn:aws:rds:us-west-1:123456789012:db:postgres-rds\"),\n\t\tDBInstanceIdentifier: aws.String(\"postgres-rds\"),\n\t\tDbiResourceId: aws.String(\"db-xyz\"),\n\t}\n\n\tauroraCluster := &rds.DBCluster{\n\t\tDBClusterArn: aws.String(\"arn:aws:rds:us-east-1:123456789012:cluster:postgres-aurora\"),\n\t\tDBClusterIdentifier: aws.String(\"postgres-aurora\"),\n\t\tDbClusterResourceId: aws.String(\"cluster-xyz\"),\n\t}\n\n\tredshiftCluster := 
&redshift.Cluster{\n\t\tClusterNamespaceArn: aws.String(\"arn:aws:redshift:us-east-2:123456789012:namespace:namespace-xyz\"),\n\t\tClusterIdentifier: aws.String(\"redshift-cluster-1\"),\n\t}\n\n\t// Configure mocks.\n\tstsClient := &mocks.STSMock{\n\t\tARN: \"arn:aws:iam::123456789012:role/test-role\",\n\t}\n\n\trdsClient := &mocks.RDSMock{\n\t\tDBInstances: []*rds.DBInstance{rdsInstance},\n\t\tDBClusters: []*rds.DBCluster{auroraCluster},\n\t}\n\n\tredshiftClient := &mocks.RedshiftMock{\n\t\tClusters: []*redshift.Cluster{redshiftCluster},\n\t}\n\n\tiamClient := &mocks.IAMMock{}\n\n\t// Setup database resources.\n\trdsDatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\tName: \"postgres-rds\",\n\t}, types.DatabaseSpecV3{\n\t\tProtocol: defaults.ProtocolPostgres,\n\t\tURI: \"localhost\",\n\t\tAWS: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{InstanceID: \"postgres-rds\", ResourceID: \"postgres-rds-resource-id\"}},\n\t})\n\trequire.NoError(t, err)\n\n\tauroraDatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\tName: \"postgres-aurora\",\n\t}, types.DatabaseSpecV3{\n\t\tProtocol: defaults.ProtocolPostgres,\n\t\tURI: \"localhost\",\n\t\tAWS: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDS: types.RDS{ClusterID: \"postgres-aurora\", ResourceID: \"postgres-aurora-resource-id\"}},\n\t})\n\trequire.NoError(t, err)\n\n\trdsProxy, err := types.NewDatabaseV3(types.Metadata{\n\t\tName: \"rds-proxy\",\n\t}, types.DatabaseSpecV3{\n\t\tProtocol: defaults.ProtocolPostgres,\n\t\tURI: \"localhost\",\n\t\tAWS: types.AWS{Region: \"localhost\", AccountID: \"123456789012\", RDSProxy: types.RDSProxy{Name: \"rds-proxy\", ResourceID: \"rds-proxy-resource-id\"}},\n\t})\n\trequire.NoError(t, err)\n\n\tredshiftDatabase, err := types.NewDatabaseV3(types.Metadata{\n\t\tName: \"redshift\",\n\t}, types.DatabaseSpecV3{\n\t\tProtocol: defaults.ProtocolPostgres,\n\t\tURI: \"localhost\",\n\t\tAWS: types.AWS{Region: \"localhost\", AccountID: 
\"123456789012\", Redshift: types.Redshift{ClusterID: \"redshift-cluster-1\"}},\n\t})\n\trequire.NoError(t, err)\n\n\telasticache, err := types.NewDatabaseV3(types.Metadata{\n\t\tName: \"aws-elasticache\",\n\t}, types.DatabaseSpecV3{\n\t\tProtocol: \"redis\",\n\t\tURI: \"clustercfg.my-redis-cluster.xxxxxx.cac1.cache.amazonaws.com:6379\",\n\t\tAWS: types.AWS{\n\t\t\tAccountID: \"123456789012\",\n\t\t\tElastiCache: types.ElastiCache{\n\t\t\t\tReplicationGroupID: \"some-group\",\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\t// Make configurator.\n\ttaskChan := make(chan struct{})\n\twaitForTaskProcessed := func(t *testing.T) {\n\t\tselect {\n\t\tcase <-taskChan:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\trequire.Fail(t, \"Failed to wait for task is processed\")\n\t\t}\n\t}\n\tassumedRole := types.AssumeRole{\n\t\tRoleARN: \"arn:aws:iam::123456789012:role/role-to-assume\",\n\t\tExternalID: \"externalid123\",\n\t}\n\tconfigurator, err := NewIAM(ctx, IAMConfig{\n\t\tAccessPoint: &mockAccessPoint{},\n\t\tClients: &clients.TestCloudClients{\n\t\t\tRDS: rdsClient,\n\t\t\tRedshift: redshiftClient,\n\t\t\tSTS: stsClient,\n\t\t\tIAM: iamClient,\n\t\t},\n\t\tHostID: \"host-id\",\n\t\tonProcessedTask: func(iamTask, error) {\n\t\t\ttaskChan <- struct{}{}\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\trequire.NoError(t, configurator.Start(ctx))\n\n\tpolicyName, err := configurator.getPolicyName()\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tdatabase types.Database\n\t\twantPolicyContains string\n\t\tgetIAMAuthEnabled func() bool\n\t}{\n\t\t\"RDS\": {\n\t\t\tdatabase: rdsDatabase,\n\t\t\twantPolicyContains: rdsDatabase.GetAWS().RDS.ResourceID,\n\t\t\tgetIAMAuthEnabled: func() bool {\n\t\t\t\tout := aws.BoolValue(rdsInstance.IAMDatabaseAuthenticationEnabled)\n\t\t\t\t// reset it\n\t\t\t\trdsInstance.IAMDatabaseAuthenticationEnabled = aws.Bool(false)\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t\"Aurora\": {\n\t\t\tdatabase: 
auroraDatabase,\n\t\t\twantPolicyContains: auroraDatabase.GetAWS().RDS.ResourceID,\n\t\t\tgetIAMAuthEnabled: func() bool {\n\t\t\t\tout := aws.BoolValue(auroraCluster.IAMDatabaseAuthenticationEnabled)\n\t\t\t\t// reset it\n\t\t\t\tauroraCluster.IAMDatabaseAuthenticationEnabled = aws.Bool(false)\n\t\t\t\treturn out\n\t\t\t},\n\t\t},\n\t\t\"RDS Proxy\": {\n\t\t\tdatabase: rdsProxy,\n\t\t\twantPolicyContains: rdsProxy.GetAWS().RDSProxy.ResourceID,\n\t\t\tgetIAMAuthEnabled: func() bool {\n\t\t\t\treturn true // it always is for rds proxy.\n\t\t\t},\n\t\t},\n\t\t\"Redshift\": {\n\t\t\tdatabase: redshiftDatabase,\n\t\t\twantPolicyContains: redshiftDatabase.GetAWS().Redshift.ClusterID,\n\t\t\tgetIAMAuthEnabled: func() bool {\n\t\t\t\treturn true // it always is for redshift.\n\t\t\t},\n\t\t},\n\t\t\"ElastiCache\": {\n\t\t\tdatabase: elasticache,\n\t\t\twantPolicyContains: elasticache.GetAWS().ElastiCache.ReplicationGroupID,\n\t\t\tgetIAMAuthEnabled: func() bool {\n\t\t\t\treturn true // it always is for ElastiCache.\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tfor _, assumeRole := range []types.AssumeRole{{}, assumedRole} {\n\t\t\tgetRolePolicyInput := &iam.GetRolePolicyInput{\n\t\t\t\tRoleName: aws.String(\"test-role\"),\n\t\t\t\tPolicyName: aws.String(policyName),\n\t\t\t}\n\t\t\tdatabase := tt.database.Copy()\n\t\t\tif assumeRole.RoleARN != \"\" {\n\t\t\t\ttestName += \" with assumed role\"\n\t\t\t\tgetRolePolicyInput.RoleName = aws.String(\"role-to-assume\")\n\t\t\t\tmeta := database.GetAWS()\n\t\t\t\tmeta.AssumeRoleARN = assumeRole.RoleARN\n\t\t\t\tmeta.ExternalID = assumeRole.ExternalID\n\t\t\t\tdatabase.SetStatusAWS(meta)\n\t\t\t}\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\t// Initially unspecified since no tasks has ran yet.\n\t\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_UNSPECIFIED, database.GetAWS().IAMPolicyStatus)\n\n\t\t\t\t// Configure database and make sure IAM is enabled and policy was 
attached.\n\t\t\t\terr = configurator.Setup(ctx, database)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\twaitForTaskProcessed(t)\n\t\t\t\toutput, err := iamClient.GetRolePolicyWithContext(ctx, getRolePolicyInput)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.True(t, tt.getIAMAuthEnabled())\n\t\t\t\trequire.Contains(t, aws.StringValue(output.PolicyDocument), tt.wantPolicyContains)\n\n\t\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_SUCCESS, database.GetAWS().IAMPolicyStatus, \"must be success because iam policy was set up\")\n\n\t\t\t\t// Deconfigure database, policy should get detached.\n\t\t\t\terr = configurator.Teardown(ctx, database)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\twaitForTaskProcessed(t)\n\t\t\t\t_, err = iamClient.GetRolePolicyWithContext(ctx, getRolePolicyInput)\n\t\t\t\trequire.True(t, trace.IsNotFound(err))\n\t\t\t\tmeta := database.GetAWS()\n\t\t\t\tif meta.AssumeRoleARN != \"\" {\n\t\t\t\t\trequire.Equal(t, []string{meta.AssumeRoleARN}, stsClient.GetAssumedRoleARNs())\n\t\t\t\t\trequire.Equal(t, []string{meta.ExternalID}, stsClient.GetAssumedRoleExternalIDs())\n\t\t\t\t\tstsClient.ResetAssumeRoleHistory()\n\t\t\t\t}\n\n\t\t\t\terr = configurator.UpdateIAMStatus(database)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, types.IAMPolicyStatus_IAM_POLICY_STATUS_UNSPECIFIED, database.GetAWS().IAMPolicyStatus, \"must be unspecified because task is tearing down\")\n\t\t\t})\n\t\t}\n\t}\n}", "func initAWSSvc() *s3.S3 {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tsdkLoadConfig := os.Getenv(\"AWS_SDK_LOAD_CONFIG\")\n\tif sdkLoadConfig != \"true\" {\n\t\tlog.Fatal(`Env var \"AWS_SDK_LOAD_CONFIG\" needs to be true to read credentials.\\n\\n Run \"export AWS_SDK_LOAD_CONFIG=true\" to fix this. 
Aborting run.`)\n\t}\n\n\treturn s3.New(sess)\n}", "func initAwsClient() aws.Client {\n\treturn aws.NewClient(\"elastisys-billing-data\")\n}", "func (p *AWS) Initialize(config *types.ProviderConfig) error {\n\tp.Storage = &S3{}\n\n\tif config.Zone == \"\" {\n\t\treturn errors.New(\"zone missing\")\n\t}\n\n\terr := loadAWSCreds()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := session.NewSession(\n\t\t&aws.Config{\n\t\t\tRegion: aws.String(stripZone(config.Zone)),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.session = session\n\tp.dnsService = route53.New(session)\n\tp.ec2 = ec2.New(session)\n\tp.volumeService = ebs.New(session,\n\t\taws.NewConfig().\n\t\t\tWithRegion(stripZone(config.Zone)).\n\t\t\tWithMaxRetries(7))\n\n\t_, err = p.ec2.DescribeRegions(&ec2.DescribeRegionsInput{RegionNames: aws.StringSlice([]string{stripZone(config.Zone)})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"region with name %v is invalid\", config.Zone)\n\t}\n\n\treturn nil\n}", "func TestEndpointCase33(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff 
!= \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase91(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis-fips.us-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestS3BucketDoesNotExist(t *testing.T) {\n\tdefer leaktest.AfterTest(t)()\n\n\tq := make(url.Values)\n\texpect := map[string]string{\n\t\t\"AWS_S3_ENDPOINT\": cloudimpl.AWSEndpointParam,\n\t\t\"AWS_S3_ENDPOINT_KEY\": cloudimpl.AWSAccessKeyParam,\n\t\t\"AWS_S3_ENDPOINT_REGION\": cloudimpl.S3RegionParam,\n\t\t\"AWS_S3_ENDPOINT_SECRET\": cloudimpl.AWSSecretParam,\n\t}\n\tfor env, param := range expect {\n\t\tv := os.Getenv(env)\n\t\tif v == \"\" {\n\t\t\tskip.IgnoreLintf(t, \"%s env var must be set\", env)\n\t\t}\n\t\tq.Add(param, v)\n\t}\n\n\tbucket := \"invalid-bucket\"\n\tu := url.URL{\n\t\tScheme: \"s3\",\n\t\tHost: bucket,\n\t\tPath: \"backup-test\",\n\t\tRawQuery: 
q.Encode(),\n\t}\n\n\tctx := context.Background()\n\tuser := security.RootUserName()\n\n\tconf, err := cloudimpl.ExternalStorageConfFromURI(u.String(), user)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Setup a sink for the given args.\n\tclientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir)\n\ts, err := cloudimpl.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings,\n\t\tclientFactory, nil, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Close()\n\n\tif readConf := s.Conf(); readConf != conf {\n\t\tt.Fatalf(\"conf does not roundtrip: started with %+v, got back %+v\", conf, readConf)\n\t}\n\n\t_, err = s.ReadFile(ctx, \"\")\n\trequire.Error(t, err, \"\")\n\trequire.True(t, errors.Is(err, cloudimpl.ErrFileDoesNotExist))\n}", "func Check() error {\n\tfmt.Println(\"Checking S3...\")\n\tchecks := []CheckFn{\n\t\tCheckPublicAccessBlockingEnabled,\n\t}\n\n\tfor _, check := range checks {\n\t\terr := check()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func GetAmazonPrice(url string) (string, error) {\n\trand.Seed(time.Now().UnixNano())\n\n\tproxyURL, _ := url2.Parse(\"http://114.239.171.181:4216\")\n\tclient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn \"Error\", err\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.16; rv:79.0) Gecko/20100101 Firefox/79.0\")\n\treq.Header.Set(\"Cookie\", \"aws-ubid-main=656-2461571-6267250; aws-session-id=132-3199980-3265813; aws-session-id-time=2227936015l; csm-hit=tb:EZA90K41RH7TPYY2T8Y3+b-NSHGSSSE64MJER40A9CX|1597892738291&t:1597892738291&adb:adblk_no; 
aws-session-token=\\\"S1PRmUPtAXqce+Id2xIstoU7qf07Xsaf9V/2Nzy7snBw30aVYKO7iU824l4cdHSiray8NiC1vWNtGLnkeEuda8NzM4lENMTn8/QjhlBZ+M72XoWeMQ+zM9LuerXD+qpYLPQCvqV/yifXj+6JBTsbntx5x/2LOdBvuOhl1iMwXqghvJqxalZGvUNqLQGwzVAMMmCbTKJqiwYpTIZ9a8aUkdQM6NzOXA5ySoA6YltnC2s=\\\"; aws-x-main=\\\"895o@GGNHaCyn0ZrcAxZ17?rm2DktswOzt98aj6…t-main=\\\"EoMCkUfeFtJyk6Jc/Z8+ICmd3lmgZN9TcJFfWONpvNk=\\\"; sst-main=Sst1|PQGHZ9cjWrhIwxC6pG1jHbdMC1oqShS_0KZ2NPTVGVfLXHS8XsidXC_rh7Z1qeC9VAc09z_GXgoj5YbUES6MYUTN1cwlabhmo28JV7OGGBKIMsoXUFBXpue404SzcU9YMF5i8TXNqcgcVOvUaH5JksenLQIyXy4xh7UUkN7ThQwZ7dIc0MFzpi1FdcTD1CYI2X2XynxFdZjCenZCvDffFnrm2Tmgq4FG_t_ncllnswCRAOeT-sj1_ExVCYdjc-TXm3Q8lmsGd4nBWdy_YHiMHdjN8mNarq99-Y6O7FOqdWU-trL0XyKIIPOqIlcI2hAnW7Cbt1eVn8bVjSb7YzxhJgwHXw; lc-main=en_US; i18n-prefs=USD; lc-main-av=en_US; ubid-main-av=135-2193274-6489920; skin=noskin\")\n\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"Error\", err\n\t}\n\n\tif res.StatusCode > 500 {\n\t\tfmt.Println(\"So uh, there was a problem. We got blocked.\")\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(res.Body)\n\tif err != nil {\n\t\treturn \"Error\", err\n\t}\n\n\tfmt.Println(doc.Contents().Text())\n\n\tvar prices []string\n\tdoc.Find(\".a-offscreen\").Each(func(i int, s *goquery.Selection) {\n\t\tprices = append(prices, s.Text())\n\t})\n\n\treturn prices[1], err\n}", "func TestEndpointCase38(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, 
result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func New(cfg *eksconfig.Config) (*Tester, error) {\n\tif err := cfg.ValidateAndSetDefaults(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlcfg := logutil.AddOutputPaths(logutil.DefaultZapLoggerConfig, cfg.LogOutputs, cfg.LogOutputs)\n\tlcfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))\n\tlg, err := lcfg.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = fileutil.EnsureExecutable(cfg.AWSCLIPath); err != nil {\n\t\t// file may be already executable while the process does not own the file/directory\n\t\t// ref. https://github.com/aws/aws-k8s-tester/issues/66\n\t\tlg.Error(\"failed to ensure executable\", zap.Error(err))\n\t}\n\n\t// aws --version\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tvo, verr := exec.New().CommandContext(\n\t\tctx,\n\t\tcfg.AWSCLIPath,\n\t\t\"--version\",\n\t).CombinedOutput()\n\tcancel()\n\tif verr != nil {\n\t\treturn nil, fmt.Errorf(\"'aws --version' failed (output %q, error %v)\", string(vo), verr)\n\t}\n\tlg.Info(\n\t\t\"aws version\",\n\t\tzap.String(\"aws-cli-path\", cfg.AWSCLIPath),\n\t\tzap.String(\"aws-version\", string(vo)),\n\t)\n\n\tlg.Info(\"mkdir\", zap.String(\"kubectl-path-dir\", filepath.Dir(cfg.KubectlPath)))\n\tif err := os.MkdirAll(filepath.Dir(cfg.KubectlPath), 0700); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create %q (%v)\", filepath.Dir(cfg.KubectlPath), err)\n\t}\n\tlg.Info(\"downloading kubectl\", zap.String(\"kubectl-path\", cfg.KubectlPath))\n\tif err := os.RemoveAll(cfg.KubectlPath); err != nil {\n\t\treturn nil, 
err\n\t}\n\tf, err := os.Create(cfg.KubectlPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create %q (%v)\", cfg.KubectlPath, err)\n\t}\n\tcfg.KubectlPath = f.Name()\n\tcfg.KubectlPath, _ = filepath.Abs(cfg.KubectlPath)\n\tif err := httpDownloadFile(lg, cfg.KubectlDownloadURL, f); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to close kubectl %v\", err)\n\t}\n\tif err := fileutil.EnsureExecutable(cfg.KubectlPath); err != nil {\n\t\t// file may be already executable while the process does not own the file/directory\n\t\t// ref. https://github.com/aws/aws-k8s-tester/issues/66\n\t\tlg.Error(\"failed to ensure executable\", zap.Error(err))\n\t}\n\t// kubectl version --client=true\n\tctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)\n\tvo, verr = exec.New().CommandContext(\n\t\tctx,\n\t\tcfg.KubectlPath,\n\t\t\"version\",\n\t\t\"--client=true\",\n\t).CombinedOutput()\n\tcancel()\n\tif verr != nil {\n\t\treturn nil, fmt.Errorf(\"'kubectl version' failed (output %q, error %v)\", string(vo), verr)\n\t}\n\tlg.Info(\n\t\t\"kubectl version\",\n\t\tzap.String(\"kubectl-path\", cfg.KubectlPath),\n\t\tzap.String(\"kubectl-version\", string(vo)),\n\t)\n\n\tif cfg.AWSIAMAuthenticatorPath != \"\" && cfg.AWSIAMAuthenticatorDownloadURL != \"\" {\n\t\tlg.Info(\"mkdir\", zap.String(\"aws-iam-authenticator-path-dir\", filepath.Dir(cfg.AWSIAMAuthenticatorPath)))\n\t\tif err := os.MkdirAll(filepath.Dir(cfg.AWSIAMAuthenticatorPath), 0700); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not create %q (%v)\", filepath.Dir(cfg.AWSIAMAuthenticatorPath), err)\n\t\t}\n\t\tlg.Info(\"downloading aws-iam-authenticator\", zap.String(\"aws-iam-authenticator-path\", cfg.AWSIAMAuthenticatorPath))\n\t\tif err := os.RemoveAll(cfg.AWSIAMAuthenticatorPath); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tf, err := os.Create(cfg.AWSIAMAuthenticatorPath)\n\t\tif err != nil {\n\t\t\treturn nil, 
fmt.Errorf(\"failed to create %q (%v)\", cfg.AWSIAMAuthenticatorPath, err)\n\t\t}\n\t\tcfg.AWSIAMAuthenticatorPath = f.Name()\n\t\tcfg.AWSIAMAuthenticatorPath, _ = filepath.Abs(cfg.AWSIAMAuthenticatorPath)\n\t\tif err := httpDownloadFile(lg, cfg.AWSIAMAuthenticatorDownloadURL, f); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := f.Close(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to close aws-iam-authenticator %v\", err)\n\t\t}\n\t\tif err := fileutil.EnsureExecutable(cfg.AWSIAMAuthenticatorPath); err != nil {\n\t\t\t// file may be already executable while the process does not own the file/directory\n\t\t\t// ref. https://github.com/aws/aws-k8s-tester/issues/66\n\t\t\tlg.Error(\"failed to ensure executable\", zap.Error(err))\n\t\t}\n\t\t// aws-iam-authenticator version\n\t\tctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)\n\t\tvo, verr = exec.New().CommandContext(\n\t\t\tctx,\n\t\t\tcfg.AWSIAMAuthenticatorPath,\n\t\t\t\"version\",\n\t\t).CombinedOutput()\n\t\tcancel()\n\t\tif verr != nil {\n\t\t\treturn nil, fmt.Errorf(\"'aws-iam-authenticator version' failed (output %q, error %v)\", string(vo), verr)\n\t\t}\n\t\tlg.Info(\n\t\t\t\"aws-iam-authenticator version\",\n\t\t\tzap.String(\"aws-iam-authenticator-path\", cfg.AWSIAMAuthenticatorPath),\n\t\t\tzap.String(\"aws-iam-authenticator-version\", string(vo)),\n\t\t)\n\t}\n\n\tts := &Tester{\n\t\tstopCreationCh: make(chan struct{}),\n\t\tstopCreationChOnce: new(sync.Once),\n\t\tinterruptSig: make(chan os.Signal),\n\t\tlg: lg,\n\t\tcfg: cfg,\n\t\tdownMu: new(sync.Mutex),\n\t\tfetchLogsManagedNodeGroupMu: new(sync.RWMutex),\n\t}\n\tsignal.Notify(ts.interruptSig, syscall.SIGTERM, syscall.SIGINT)\n\n\tdefer ts.cfg.Sync()\n\n\tawsCfg := &awsapi.Config{\n\t\tLogger: ts.lg,\n\t\tDebugAPICalls: ts.cfg.LogLevel == \"debug\",\n\t\tRegion: ts.cfg.Region,\n\t}\n\tvar stsOutput *sts.GetCallerIdentityOutput\n\tts.awsSession, stsOutput, _, err = awsapi.New(awsCfg)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tts.cfg.Status.AWSAccountID = *stsOutput.Account\n\n\tts.iamAPI = iam.New(ts.awsSession)\n\tts.ssmAPI = ssm.New(ts.awsSession)\n\tts.cfnAPI = cloudformation.New(ts.awsSession)\n\tts.ec2API = ec2.New(ts.awsSession)\n\tts.asgAPI = autoscaling.New(ts.awsSession)\n\tts.elbAPI = elb.New(ts.awsSession)\n\n\t// create a separate session for EKS (for resolver endpoint)\n\tts.eksSession, _, ts.cfg.Status.AWSCredentialPath, err = awsapi.New(&awsapi.Config{\n\t\tLogger: ts.lg,\n\t\tDebugAPICalls: ts.cfg.LogLevel == \"debug\",\n\t\tRegion: ts.cfg.Region,\n\t\tResolverURL: ts.cfg.Parameters.ClusterResolverURL,\n\t\tSigningName: ts.cfg.Parameters.ClusterSigningName,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tts.eksAPI = awseks.New(ts.eksSession)\n\n\t// reuse existing role\n\tif ts.cfg.Parameters.ClusterRoleARN != \"\" {\n\t\tts.lg.Info(\"reuse existing IAM role\", zap.String(\"cluster-role-arn\", ts.cfg.Parameters.ClusterRoleARN))\n\t\tts.cfg.Status.ClusterRoleARN = ts.cfg.Parameters.ClusterRoleARN\n\t}\n\n\treturn ts, nil\n}", "func EnsureAWSRegion(region string) string {\n\tr := strings.ToLower(strings.Replace(string(region), \"_\", \"-\", -1))\n log.Printf(\"EnsureAWSRegion--- region:%s r:%s\", region,r)\n\treturn r\n}", "func TestEndpointCase34(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, 
a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase96(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-northwest-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis-fips.cn-northwest-1.api.amazonwebservices.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase87(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := 
NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func testSSECEncryptedGetObjectReadAtFunctional() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"GetObject(bucketName, objectName)\"\n\targs := map[string]interface{}{}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, 
minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer cleanupBucket(bucketName, c)\n\n\t// Generate 129MiB of data.\n\tbufSize := dataFileMap[\"datafile-129-MB\"]\n\treader := getDataReader(\"datafile-129-MB\")\n\tdefer reader.Close()\n\n\tobjectName := randString(60, rand.NewSource(time.Now().UnixNano()), \"\")\n\targs[\"objectName\"] = objectName\n\n\tbuf, err := io.ReadAll(reader)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAll failed\", err)\n\t\treturn\n\t}\n\n\t// Save the data\n\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{\n\t\tContentType: \"binary/octet-stream\",\n\t\tServerSideEncryption: encrypt.DefaultPBKDF([]byte(\"correct horse battery staple\"), []byte(bucketName+objectName)),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\n\t// read the data back\n\tr, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{\n\t\tServerSideEncryption: encrypt.DefaultPBKDF([]byte(\"correct horse battery staple\"), []byte(bucketName+objectName)),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\toffset := int64(2048)\n\n\t// read directly\n\tbuf1 := make([]byte, 512)\n\tbuf2 := make([]byte, 512)\n\tbuf3 := make([]byte, 512)\n\tbuf4 := make([]byte, 512)\n\n\t// Test readAt before stat is called such that objectInfo doesn't change.\n\tm, err := r.ReadAt(buf1, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf1) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, 
expected \"+string(len(buf1))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf1, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\n\tst, err := r.Stat()\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"Stat failed\", err)\n\t\treturn\n\t}\n\n\tif st.Size != int64(bufSize) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Number of bytes in stat does not match, expected \"+string(int64(bufSize))+\", got \"+string(st.Size), err)\n\t\treturn\n\t}\n\n\tm, err = r.ReadAt(buf2, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf2) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf2))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf2, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\tm, err = r.ReadAt(buf3, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf3) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf3))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf3, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\tm, err = r.ReadAt(buf4, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf4) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt 
read shorter bytes before reaching EOF, expected \"+string(len(buf4))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf4, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\n\tbuf5 := make([]byte, len(buf))\n\t// Read the whole object.\n\tm, err = r.ReadAt(buf5, 0)\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif m != len(buf5) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf5))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf, buf5) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect data read in GetObject, than what was previously uploaded\", err)\n\t\treturn\n\t}\n\n\tbuf6 := make([]byte, len(buf)+1)\n\t// Read the whole object and beyond.\n\t_, err = r.ReadAt(buf6, 0)\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func (be *s3) Test(t backend.Type, name string) (bool, error) {\n\tfound := false\n\tpath := be.s3path(t, name)\n\t_, err := be.client.StatObject(be.bucketname, path)\n\tif err == nil {\n\t\tfound = true\n\t}\n\n\t// If error, then not found\n\treturn found, nil\n}", "func TestTags(t *testing.T) {\n awsRegion := \"us-east-2\"\n tagName := \"Flugel-test\"\n tagOwner := \"InfraTeam-test\"\n\n terraformOpts := terraform.WithDefaultRetryableErrors(t, &terraform.Options{\n TerraformDir: \"../\",\n\n //Now i must map the tags.\n Vars: map[string]interface{}{\n \"tag_name\": tagName,\n \"tag_owner\": tagOwner,\n },\n\n //Then set the region to make the deploy in.\n EnvVars: map[string]string{\n \"AWS_DEFAULT_REGION\": awsRegion,\n 
},\n },\n )\n\n //After all the testing, the infra must be destroyed.\n defer terraform.Destroy(t, terraformOpts)\n\n //Now, let's run the deploy with all the parameters set.\n terraform.InitAndApply(t, terraformOpts)\n\n //I get the instance and bucket id's, and make first verifications.\n instanceID1 := terraform.Output(t, terraformOpts, \"instance_name_web1\")\n instanceTags1 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID1)\n testTag1, containsTag := instanceTags1[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag1)\n testTag2, containsTag := instanceTags1[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag2)\n\n instanceID2 := terraform.Output(t, terraformOpts, \"instance_name_web2\")\n instanceTags2 := aws.GetTagsForEc2Instance(t, awsRegion, instanceID2)\n testTag3, containsTag := instanceTags2[\"Name\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagName, testTag3)\n testTag4, containsTag := instanceTags2[\"Owner\"]\n assert.True(t, containsTag, \"True\")\n assert.Equal(t, tagOwner, testTag4)\n\n //It would be easier to simply parse plain text, but as i put myself into this let's ride with it.\n\n lburl := \"http://\" + terraform.Output(t, terraformOpts, \"load_balancer_url\") + \"/index.html\"\n maxRetries := 3\n timeBetweenRetries := 5 * time.Second\n\n http_helper.HttpGetWithRetryWithCustomValidation(t, lburl, nil, maxRetries, timeBetweenRetries, validate)\n\n // There's no module with \"get X bucket tags\", so i get the bucket id from TF, and separately i seek the bucket that contains\n // tags \"Name\" and \"Owner\" with the desired content, and make sure the id returned matches the previously deployed bucket. 
\n bucketID := terraform.Output(t, terraformOpts, \"bucket_id\")\n bucketwithTagN := aws.FindS3BucketWithTag (t, awsRegion, \"Name\", tagName)\n bucketwithTagO := aws.FindS3BucketWithTag (t, awsRegion, \"Owner\", tagOwner)\n assert.Equal(t, bucketwithTagN, bucketID)\n assert.Equal(t, bucketwithTagO, bucketID)\n\n}", "func TestEndpointCase99(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-isob-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-isob-east-1.sc2s.sgov.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func Test_BucketPolicySample_1(t *testing.T) {\n\tsc := NewS3()\n\terr := sc.PutObject(TEST_BUCKET, TEST_KEY, TEST_VALUE)\n\tif err != nil {\n\t\tt.Fatal(\"PutObject err:\", err)\n\t}\n\n\t//Anonymous to get\n\turl := \"http://\" + *sc.Client.Config.Endpoint + string(os.PathSeparator) + TEST_BUCKET + string(os.PathSeparator) + TEST_KEY\n\tstatusCode, _, err := HTTPRequestToGetObject(url)\n\tif err != nil 
{\n\t\tt.Fatal(\"GetObject err:\", err)\n\t}\n\t//StatusCode should be AccessDenied\n\tif statusCode != http.StatusForbidden {\n\t\tt.Fatal(\"StatusCode should be AccessDenied(403), but the code is:\", statusCode)\n\t}\n\n\terr = sc.PutBucketPolicy(TEST_BUCKET, GetObjectPolicy_1)\n\tif err != nil {\n\t\tt.Fatal(\"PutBucketPolicy err:\", err)\n\t}\n\n\tpolicy, err := sc.GetBucketPolicy(TEST_BUCKET)\n\tif err != nil {\n\t\tt.Fatal(\"GetBucketPolicy err:\", err)\n\t}\n\tt.Log(\"Bucket policy:\", Format(policy))\n\n\t// After set policy\n\tstatusCode, data, err := HTTPRequestToGetObject(url)\n\tif err != nil {\n\t\tt.Fatal(\"GetObject err:\", err)\n\t}\n\t//StatusCode should be STATUS_OK\n\tif statusCode != http.StatusOK {\n\t\tt.Fatal(\"StatusCode should be STATUS_OK(200), but the code is:\", statusCode)\n\t}\n\tt.Log(\"Get object value:\", string(data))\n\n\terr = sc.DeleteBucketPolicy(TEST_BUCKET)\n\tif err != nil {\n\t\tt.Fatal(\"DeleteBucketPolicy err:\", err)\n\t}\n\n\t//After delete policy\n\tstatusCode, _, err = HTTPRequestToGetObject(url)\n\tif err != nil {\n\t\tt.Fatal(\"GetObject err:\", err)\n\t}\n\t//StatusCode should be AccessDenied\n\tif statusCode != http.StatusForbidden {\n\t\tt.Fatal(\"StatusCode should be AccessDenied(403), but the code is:\", statusCode)\n\t}\n\n\terr = sc.DeleteObject(TEST_BUCKET, TEST_KEY)\n\tif err != nil {\n\t\tt.Fatal(\"DeleteObject err:\", err)\n\t}\n}", "func TestEndpointCase20(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: 
http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase97(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-iso-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-iso-east-1.c2s.ic.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func testMakeBucketRegions() {\n\tregion := \"eu-central-1\"\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"MakeBucket(bucketName, region)\"\n\t// initialize logging params\n\targs := 
map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t\t\"region\": region,\n\t}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket in 'eu-central-1'.\n\tif err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\t// Delete all objects and buckets\n\tif err = cleanupBucket(bucketName, c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed\", err)\n\t\treturn\n\t}\n\n\t// Make a new bucket with '.' in its name, in 'us-west-2'. 
This\n\t// request is internally staged into a path style instead of\n\t// virtual host style.\n\tregion = \"us-west-2\"\n\targs[\"region\"] = region\n\tif err = c.MakeBucket(context.Background(), bucketName+\".withperiod\", minio.MakeBucketOptions{Region: region}); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\t// Delete all objects and buckets\n\tif err = cleanupBucket(bucketName+\".withperiod\", c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed\", err)\n\t\treturn\n\t}\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func TestEndpointCase86(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t\tEndpoint: ptr.String(\"https://example.com\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://example.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func (k cdgWithStaticTestIV) isAWSFixture() bool {\n\treturn true\n}", 
"func (a *Amazon) VerifyAWSCredentials() error {\n\tsvc, err := a.EC2()\n\tif err != nil {\n\t\treturn err\n\t}\n\tinput := &ec2.DescribeRegionsInput{}\n\n\t_, err = svc.DescribeRegions(input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"there was a problem with veryfing your AWS credentials: %s\", err)\n\t}\n\n\treturn nil\n}", "func TestEndpointCase37(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-gov-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase73(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-iso-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tStreamARN: ptr.String(\"arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := 
url.Parse(\"https://kinesis-fips.us-iso-east-1.c2s.ic.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase103(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/foobar\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123456789123:stream/foobar/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase74(t 
*testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-isob-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tStreamARN: ptr.String(\"arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-isob-east-1.sc2s.sgov.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEKSNetworking(t *testing.T){\n\n\ttfOptions := &terraform.Options{\n\t\tTerraformDir: \"./\",\n\t\tEnvVars: map[string]string{\n\t\t\t\"AWS_REGION\": \"ap-southeast-1\",\n\t\t},\n\t\tVars: map[string]interface{}{\n\t\t\t\"vpc_name\" : \"eks_vpc\",\n\t\t\t\"cluster_name\": \"eks_poc\",\n\t\t\t\"vpc_cidr_block\": \"10.0.0.0/16\",\n\t\t\t\"workers_cidr_block\": []string{\n\t\t\t\t\"10.0.0.0/20\", \"10.0.16.0/20\", \"10.0.32.0/20\",\n\t\t\t},\n\t\t\t\"nat_cidr_block\": \"10.0.48.0/20\",\n\t\t},\n\t}\n\n\tdefer terraform.Destroy(t, tfOptions)\n\n\tterraform.InitAndApply(t, tfOptions)\n\n\toutput := terraform.OutputAll(t, tfOptions)\n\n\tt.Logf(\"%+v\", output)\n\n}", "func TestEndpointCase59(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: 
ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream\"),\n\t\tEndpoint: ptr.String(\"https://example.com\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://example.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func AWSScale() {\n\tSetClusterName()\n\t// Scale the AWS infrastructure\n\tfmt.Printf(\"\\t\\t===============Starting AWS Scaling====================\\n\\n\")\n\tsshUser, osLabel := distSelect()\n\tprepareConfigFiles(osLabel)\n\tprovisioner.ExecuteTerraform(\"apply\", \"./inventory/\"+common.Name+\"/provisioner/\")\n\tmvHost := exec.Command(\"mv\", \"./inventory/hosts\", \"./inventory/\"+common.Name+\"/provisioner/hosts\")\n\tmvHost.Run()\n\tmvHost.Wait()\n\t// waiting for Infrastructure\n\ttime.Sleep(30)\n\t// Scale the Kubernetes cluster\n\tfmt.Printf(\"\\n\\n\\t\\t===============Starting Kubernetes Scaling====================\\n\\n\")\n\t_, err := os.Stat(\"./inventory/\" + common.Name + \"/provisioner/hosts\")\n\tcommon.ErrorCheck(\"No host file found.\", err)\n\tcpHost := exec.Command(\"cp\", 
\"./inventory/\"+common.Name+\"/provisioner/hosts\", \"./inventory/\"+common.Name+\"/installer/hosts\")\n\tcpHost.Run()\n\tcpHost.Wait()\n\tinstaller.RunPlaybook(\"./inventory/\"+common.Name+\"/installer/\", \"scale.yml\", sshUser, osLabel)\n\n\treturn\n}", "func testSSECEncryptedGetObjectReadSeekFunctional() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"GetObject(bucketName, objectName)\"\n\targs := map[string]interface{}{}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\t// Delete all objects and buckets\n\t\tif err = cleanupBucket(bucketName, c); err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t// Generate 129MiB of data.\n\tbufSize := dataFileMap[\"datafile-129-MB\"]\n\treader := getDataReader(\"datafile-129-MB\")\n\tdefer reader.Close()\n\n\tobjectName := randString(60, 
rand.NewSource(time.Now().UnixNano()), \"\")\n\targs[\"objectName\"] = objectName\n\n\tbuf, err := io.ReadAll(reader)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAll failed\", err)\n\t\treturn\n\t}\n\n\t// Save the data\n\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{\n\t\tContentType: \"binary/octet-stream\",\n\t\tServerSideEncryption: encrypt.DefaultPBKDF([]byte(\"correct horse battery staple\"), []byte(bucketName+objectName)),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\n\t// Read the data back\n\tr, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{\n\t\tServerSideEncryption: encrypt.DefaultPBKDF([]byte(\"correct horse battery staple\"), []byte(bucketName+objectName)),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"GetObject failed\", err)\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\tst, err := r.Stat()\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"Stat object failed\", err)\n\t\treturn\n\t}\n\n\tif st.Size != int64(bufSize) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Number of bytes does not match, expected \"+string(int64(bufSize))+\", got \"+string(st.Size), err)\n\t\treturn\n\t}\n\n\t// This following function helps us to compare data from the reader after seek\n\t// with the data from the original buffer\n\tcmpData := func(r io.Reader, start, end int) {\n\t\tif end-start == 0 {\n\t\t\treturn\n\t\t}\n\t\tbuffer := bytes.NewBuffer([]byte{})\n\t\tif _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlogError(testName, function, args, startTime, \"\", \"CopyN failed\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif !bytes.Equal(buf[start:end], buffer.Bytes()) {\n\t\t\tlogError(testName, function, 
args, startTime, \"\", \"Incorrect read bytes v/s original buffer\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\toffset int64\n\t\twhence int\n\t\tpos int64\n\t\terr error\n\t\tshouldCmp bool\n\t\tstart int\n\t\tend int\n\t}{\n\t\t// Start from offset 0, fetch data and compare\n\t\t{0, 0, 0, nil, true, 0, 0},\n\t\t// Start from offset 2048, fetch data and compare\n\t\t{2048, 0, 2048, nil, true, 2048, bufSize},\n\t\t// Start from offset larger than possible\n\t\t{int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},\n\t\t// Move to offset 0 without comparing\n\t\t{0, 0, 0, nil, false, 0, 0},\n\t\t// Move one step forward and compare\n\t\t{1, 1, 1, nil, true, 1, bufSize},\n\t\t// Move larger than possible\n\t\t{int64(bufSize), 1, 0, io.EOF, false, 0, 0},\n\t\t// Provide negative offset with CUR_SEEK\n\t\t{int64(-1), 1, 0, fmt.Errorf(\"Negative position not allowed for 1\"), false, 0, 0},\n\t\t// Test with whence SEEK_END and with positive offset\n\t\t{1024, 2, 0, io.EOF, false, 0, 0},\n\t\t// Test with whence SEEK_END and with negative offset\n\t\t{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},\n\t\t// Test with whence SEEK_END and with large negative offset\n\t\t{-int64(bufSize) * 2, 2, 0, fmt.Errorf(\"Seeking at negative offset not allowed for 2\"), false, 0, 0},\n\t\t// Test with invalid whence\n\t\t{0, 3, 0, fmt.Errorf(\"Invalid whence 3\"), false, 0, 0},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\t// Perform seek operation\n\t\tn, err := r.Seek(testCase.offset, testCase.whence)\n\t\tif err != nil && testCase.err == nil {\n\t\t\t// We expected success.\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test %d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\treturn\n\t\t}\n\t\tif err == nil && testCase.err != nil {\n\t\t\t// We expected failure, but got success.\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test 
%d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\treturn\n\t\t}\n\t\tif err != nil && testCase.err != nil {\n\t\t\tif err.Error() != testCase.err.Error() {\n\t\t\t\t// We expect a specific error\n\t\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\t\tfmt.Sprintf(\"Test %d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// Check the returned seek pos\n\t\tif n != testCase.pos {\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test %d, number of bytes seeked does not match, expected %d, got %d\", i+1, testCase.pos, n), err)\n\t\t\treturn\n\t\t}\n\t\t// Compare only if shouldCmp is activated\n\t\tif testCase.shouldCmp {\n\t\t\tcmpData(r, testCase.start, testCase.end)\n\t\t}\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func AllAWSResources(c *awsup.MockAWSCloud) map[string]interface{} {\n\tall := make(map[string]interface{})\n\tfor k, v := range c.MockEC2.(*mockec2.MockEC2).All() {\n\t\tall[k] = v\n\t}\n\treturn all\n}", "func TestExistingSG(t *testing.T) {\n\tnewIntegrationTest(\"existingsg.example.com\", \"existing_sg\").withZones(3).\n\t\twithAddons(\n\t\t\tawsEBSCSIAddon,\n\t\t\tdnsControllerAddon,\n\t\t\tawsCCMAddon,\n\t\t).\n\t\trunTestTerraformAWS(t)\n}", "func CheckAWSEnvVars(t *testing.T) {\n\tCheckEnvVars(t,\n\t\t\"AWS_ACCESS_KEY\",\n\t\t\"AWS_SECRET_ACCESS_KEY\",\n\t\t\"AWS_ROLE\",\n\t\t\"AWS_REGION\",\n\t)\n}", "func InitAWS() error {\n\tsession, err := session.NewSession(&aws.Config{\n\t\tRegion: aws.String(config.GetConfig().GetString(\"aws.s3_region\")),\n\t\tCredentials: credentials.NewStaticCredentials(\n\t\t\tconfig.GetConfig().GetString(\"aws.access_key_id\"),\n\t\t\tconfig.GetConfig().GetString(\"aws.secret_access_key\"),\n\t\t\t\"\"),\n\t})\n\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to create aws session, error: %v\", err)\n\t\treturn err\n\t}\n\n\tawsSession = 
session\n\n\treturn nil\n}", "func TestEndpointCase27(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase30(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-north-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.cn-north-1.api.amazonwebservices.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := 
cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func (d *driver) getSVC() (*s3.S3, error) {\n\tif s3Service != nil {\n\t\treturn s3Service, nil\n\t}\n\n\tcfg, err := clusterconfig.GetAWSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: credentials.NewStaticCredentials(cfg.Storage.S3.AccessKey, cfg.Storage.S3.SecretKey, \"\"),\n\t\tRegion: &cfg.Storage.S3.Region,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts3Service := s3.New(sess)\n\n\treturn s3Service, nil\n\n}", "func TestEndpointCase35(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestAPIS(t *testing.T) {\n\tmus, sigmas := APISFamily(NewSampler(UniDistAB{-10, 10}, Seed()), 10)\n\n\t// Can a Gaussian estimate a Gaussian?\n\tnorm := NormalDist{0, 1}\n\tapis := APIS{\n\t\tfunc(x float64) float64 { 
return x * x }, norm.Prob,\n\t\t64, 32,\n\t\tmus, sigmas,\n\t\tNoise(10),\n\t}\n\n\tI, Z := apis.Estimate()\n\tif math.Abs(I-1) > 1e-2 {\n\t\tt.Error(fmt.Sprintf(\"I = %v should be 1\", I))\n\t}\n\tif math.Abs(Z-1) > 1e-2 {\n\t\tt.Error(fmt.Sprintf(\"Z = %v should be 1\", Z))\n\t}\n\n\t// A slightly more interesting problem\n\tapis.Function = func(x float64) float64 {\n\t\tif x > 0 {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\t}\n\n\tI, Z = apis.Estimate()\n\tif math.Abs(I-0.5) > 1e-2 {\n\t\tt.Error(fmt.Sprintf(\"I = %v should be 0.5\", I))\n\t}\n\tif math.Abs(Z-1) > 1e-2 {\n\t\tt.Error(fmt.Sprintf(\"Z = %v should be 1\", Z))\n\t}\n\n\t// An even more interesting problem\n\tlambda := 2.345\n\tapis.Function = func(x float64) float64 {\n\t\treturn x\n\t}\n\t// Un-normalized exponential distribution\n\tapis.Pi = func(x float64) float64 {\n\t\tif x < 0 {\n\t\t\treturn 0\n\t\t}\n\t\treturn math.Exp(-lambda * x)\n\t}\n\n\tI, Z = apis.Estimate()\n\tif math.Abs(I-1/lambda) > 1e-1 {\n\t\tt.Error(fmt.Sprintf(\"I = %v should be %v\", I, 1/lambda))\n\t}\n\tif math.Abs(Z-1/lambda) > 1e-1 {\n\t\tt.Error(fmt.Sprintf(\"Z = %v should be %v\", I, 1/lambda))\n\t}\n}", "func testSSES3EncryptedGetObjectReadAtFunctional() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"GetObject(bucketName, objectName)\"\n\targs := map[string]interface{}{}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user 
agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer cleanupBucket(bucketName, c)\n\n\t// Generate 129MiB of data.\n\tbufSize := dataFileMap[\"datafile-129-MB\"]\n\treader := getDataReader(\"datafile-129-MB\")\n\tdefer reader.Close()\n\n\tobjectName := randString(60, rand.NewSource(time.Now().UnixNano()), \"\")\n\targs[\"objectName\"] = objectName\n\n\tbuf, err := io.ReadAll(reader)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAll failed\", err)\n\t\treturn\n\t}\n\n\t// Save the data\n\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{\n\t\tContentType: \"binary/octet-stream\",\n\t\tServerSideEncryption: encrypt.NewSSE(),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\n\t// read the data back\n\tr, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\toffset := int64(2048)\n\n\t// read directly\n\tbuf1 := make([]byte, 512)\n\tbuf2 := make([]byte, 512)\n\tbuf3 := make([]byte, 512)\n\tbuf4 := make([]byte, 512)\n\n\t// Test readAt before stat is called such that objectInfo doesn't change.\n\tm, err := r.ReadAt(buf1, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf1) 
{\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf1))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf1, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\n\tst, err := r.Stat()\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"Stat failed\", err)\n\t\treturn\n\t}\n\n\tif st.Size != int64(bufSize) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Number of bytes in stat does not match, expected \"+string(int64(bufSize))+\", got \"+string(st.Size), err)\n\t\treturn\n\t}\n\n\tm, err = r.ReadAt(buf2, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf2) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf2))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf2, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\tm, err = r.ReadAt(buf3, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\treturn\n\t}\n\tif m != len(buf3) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf3))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf3, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\toffset += 512\n\tm, err = r.ReadAt(buf4, offset)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", 
err)\n\t\treturn\n\t}\n\tif m != len(buf4) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf4))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf4, buf[offset:offset+512]) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read between two ReadAt from same offset\", err)\n\t\treturn\n\t}\n\n\tbuf5 := make([]byte, len(buf))\n\t// Read the whole object.\n\tm, err = r.ReadAt(buf5, 0)\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif m != len(buf5) {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt read shorter bytes before reaching EOF, expected \"+string(len(buf5))+\", got \"+string(m), err)\n\t\treturn\n\t}\n\tif !bytes.Equal(buf, buf5) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect data read in GetObject, than what was previously uploaded\", err)\n\t\treturn\n\t}\n\n\tbuf6 := make([]byte, len(buf)+1)\n\t// Read the whole object and beyond.\n\t_, err = r.ReadAt(buf6, 0)\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"ReadAt failed\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func TestEndpointCase101(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-isob-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := 
url.Parse(\"https://kinesis-fips.us-isob-east-1.sc2s.sgov.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase72(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-isob-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tStreamARN: ptr.String(\"arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-isob-east-1.sc2s.sgov.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase18(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: 
ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func testDescribeImagesOutput() *ec2.DescribeImagesOutput {\n\treturn &ec2.DescribeImagesOutput{\n\t\tImages: []*ec2.Image{\n\t\t\t&ec2.Image{\n\t\t\t\tArchitecture: aws.String(\"x86_64\"),\n\t\t\t\tBlockDeviceMappings: []*ec2.BlockDeviceMapping{\n\t\t\t\t\t&ec2.BlockDeviceMapping{\n\t\t\t\t\t\tDeviceName: aws.String(\"/dev/xvda\"),\n\t\t\t\t\t\tEbs: &ec2.EbsBlockDevice{\n\t\t\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t\t\t\tEncrypted: aws.Bool(false),\n\t\t\t\t\t\t\tSnapshotId: aws.String(\"snap-6d465049\"),\n\t\t\t\t\t\t\tVolumeSize: aws.Int64(8),\n\t\t\t\t\t\t\tVolumeType: aws.String(\"gp2\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCreationDate: aws.String(\"2014-06-22T09:19:44.000Z\"),\n\t\t\t\tDescription: aws.String(\"Amazon Linux AMI 2014.03.3 x86_64 HVM GP2\"),\n\t\t\t\tHypervisor: aws.String(\"xen\"),\n\t\t\t\tImageId: aws.String(\"ami-8172b616\"),\n\t\t\t\tImageLocation: aws.String(\"amazon/amzn-ami-hvm-2014.03.3.x86_64-gp2\"),\n\t\t\t\tImageOwnerAlias: 
aws.String(\"amazon\"),\n\t\t\t\tImageType: aws.String(\"machine\"),\n\t\t\t\tName: aws.String(\"amzn-ami-hvm-2014.03.3.x86_64-gp2\"),\n\t\t\t\tOwnerId: aws.String(\"137112412989\"),\n\t\t\t\tPublic: aws.Bool(true),\n\t\t\t\tRootDeviceName: aws.String(\"/dev/xvda\"),\n\t\t\t\tRootDeviceType: aws.String(\"ebs\"),\n\t\t\t\tSriovNetSupport: aws.String(\"simple\"),\n\t\t\t\tState: aws.String(\"available\"),\n\t\t\t\tVirtualizationType: aws.String(\"hvm\"),\n\t\t\t},\n\t\t\t&ec2.Image{\n\t\t\t\tArchitecture: aws.String(\"x86_64\"),\n\t\t\t\tBlockDeviceMappings: []*ec2.BlockDeviceMapping{\n\t\t\t\t\t&ec2.BlockDeviceMapping{\n\t\t\t\t\t\tDeviceName: aws.String(\"/dev/xvda\"),\n\t\t\t\t\t\tEbs: &ec2.EbsBlockDevice{\n\t\t\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t\t\t\tEncrypted: aws.Bool(false),\n\t\t\t\t\t\t\tSnapshotId: aws.String(\"snap-d465048a\"),\n\t\t\t\t\t\t\tVolumeSize: aws.Int64(8),\n\t\t\t\t\t\t\tVolumeType: aws.String(\"gp2\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCreationDate: aws.String(\"2016-06-22T09:19:44.000Z\"),\n\t\t\t\tDescription: aws.String(\"Amazon Linux AMI 2016.03.3 x86_64 HVM GP2\"),\n\t\t\t\tHypervisor: aws.String(\"xen\"),\n\t\t\t\tImageId: aws.String(\"ami-7172b611\"),\n\t\t\t\tImageLocation: aws.String(\"amazon/amzn-ami-hvm-2016.03.3.x86_64-gp2\"),\n\t\t\t\tImageOwnerAlias: aws.String(\"amazon\"),\n\t\t\t\tImageType: aws.String(\"machine\"),\n\t\t\t\tName: aws.String(\"amzn-ami-hvm-2016.03.3.x86_64-gp2\"),\n\t\t\t\tOwnerId: aws.String(\"137112412989\"),\n\t\t\t\tPublic: aws.Bool(true),\n\t\t\t\tRootDeviceName: aws.String(\"/dev/xvda\"),\n\t\t\t\tRootDeviceType: aws.String(\"ebs\"),\n\t\t\t\tSriovNetSupport: aws.String(\"simple\"),\n\t\t\t\tState: aws.String(\"available\"),\n\t\t\t\tVirtualizationType: aws.String(\"hvm\"),\n\t\t\t},\n\t\t\t&ec2.Image{\n\t\t\t\tArchitecture: aws.String(\"x86_64\"),\n\t\t\t\tBlockDeviceMappings: []*ec2.BlockDeviceMapping{\n\t\t\t\t\t&ec2.BlockDeviceMapping{\n\t\t\t\t\t\tDeviceName: 
aws.String(\"/dev/xvda\"),\n\t\t\t\t\t\tEbs: &ec2.EbsBlockDevice{\n\t\t\t\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\t\t\t\tEncrypted: aws.Bool(false),\n\t\t\t\t\t\t\tSnapshotId: aws.String(\"snap-5d465048\"),\n\t\t\t\t\t\t\tVolumeSize: aws.Int64(8),\n\t\t\t\t\t\t\tVolumeType: aws.String(\"gp2\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCreationDate: aws.String(\"2015-06-22T09:19:44.000Z\"),\n\t\t\t\tDescription: aws.String(\"Amazon Linux AMI 2015.03.3 x86_64 HVM GP2\"),\n\t\t\t\tHypervisor: aws.String(\"xen\"),\n\t\t\t\tImageId: aws.String(\"ami-7172b612\"),\n\t\t\t\tImageLocation: aws.String(\"amazon/amzn-ami-hvm-2015.03.3.x86_64-gp2\"),\n\t\t\t\tImageOwnerAlias: aws.String(\"amazon\"),\n\t\t\t\tImageType: aws.String(\"machine\"),\n\t\t\t\tName: aws.String(\"amzn-ami-hvm-2015.03.3.x86_64-gp2\"),\n\t\t\t\tOwnerId: aws.String(\"137112412989\"),\n\t\t\t\tPublic: aws.Bool(true),\n\t\t\t\tRootDeviceName: aws.String(\"/dev/xvda\"),\n\t\t\t\tRootDeviceType: aws.String(\"ebs\"),\n\t\t\t\tSriovNetSupport: aws.String(\"simple\"),\n\t\t\t\tState: aws.String(\"available\"),\n\t\t\t\tVirtualizationType: aws.String(\"hvm\"),\n\t\t\t},\n\t\t},\n\t}\n}", "func TestEndpointCase95(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-northwest-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis-fips.cn-northwest-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a 
:= expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase39(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-iso-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-iso-east-1.c2s.ic.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestTerraformAwsSpotScheduler(t *testing.T) {\n\tt.Parallel()\n\n\t// Pick aws region Ireland\n\tawsRegion := \"eu-west-1\"\n\n\t// Give this Spot Instance a unique ID for a name tag so we can distinguish it from any other EC2 Instance running\n\tterratestTag := fmt.Sprintf(\"terratest-tag-%s\", random.UniqueId())\n\n\tterraformOptions := &terraform.Options{\n\t\t// The path to where our Terraform code is located\n\t\tTerraformDir: 
\"../../examples/spot-schedule\",\n\n\t\t// Variables to pass to our Terraform code using -var options\n\t\tVars: map[string]interface{}{\n\t\t\t\"random_tag\": terratestTag,\n\t\t},\n\n\t\t// Environment variables to set when running Terraform\n\t\tEnvVars: map[string]string{\n\t\t\t\"AWS_DEFAULT_REGION\": awsRegion,\n\t\t},\n\t}\n\n\t// At the end of the test, run `terraform destroy` to clean up any resources that were created\n\tdefer terraform.Destroy(t, terraformOptions)\n\n\t// This will run `terraform init` and `terraform apply` and fail the test if there are any errors\n\tterraform.InitAndApply(t, terraformOptions)\n\n\t// Run `terraform output` to get the value of an output variables\n\tlambdaStopName := terraform.Output(t, terraformOptions, \"lambda_stop_name\")\n\n\t// Get all ec2 spot IDs with the tag \"topstop:true\" and the state running\n\tfiltersSpotToTerminateRunning := map[string][]string{\n\t\t\"instance-state-name\": {\"running\"},\n\t\t\"tag:tostop\": {\"true\"},\n\t\t\"tag:terratest_tag\": {terratestTag},\n\t\t\"instance-lifecycle\": {\"spot\"},\n\t}\n\tSpotIDsToStopRunning := aws.GetEc2InstanceIdsByFilters(t, awsRegion, filtersSpotToTerminateRunning)\n\n\t// Get all ec2 spot IDs with the tag \"topstop:false\" and the state running\n\tfiltersSpotsNoTerminateRunning := map[string][]string{\n\t\t\"instance-state-name\": {\"running\"},\n\t\t\"tag:tostop\": {\"false\"},\n\t\t\"tag:terratest_tag\": {terratestTag},\n\t\t\"instance-lifecycle\": {\"spot\"},\n\t}\n\tSpotIDsNoTerminateRunning := aws.GetEc2InstanceIdsByFilters(t, awsRegion, filtersSpotsNoTerminateRunning)\n\n\t// Invoke lambda function to terminate all spot instances with the tag:value `tostop:true`\n\tL.RunAwslambda(awsRegion, lambdaStopName)\n\n\t// Wait for scheduler exectuion\n\ttime.Sleep(160 * time.Second)\n\n\t// Get all spot instances IDs with the tag \"topstop:true\" and the state terminate\n\tfiltersSpotToStopTerminate := map[string][]string{\n\t\t\"instance-state-name\": 
{\"terminated\"},\n\t\t\"tag:tostop\": {\"true\"},\n\t\t\"tag:terratest_tag\": {terratestTag},\n\t\t\"instance-lifecycle\": {\"spot\"},\n\t}\n\tSpotIDsToStopTerminate := aws.GetEc2InstanceIdsByFilters(t, awsRegion, filtersSpotToStopTerminate)\n\n\t// Get all ec2 instances IDs with the tag \"topstop:false\" and the state running\n\tfiltersSpotNoStopTerminate := map[string][]string{\n\t\t\"instance-state-name\": {\"running\"},\n\t\t\"tag:tostop\": {\"false\"},\n\t\t\"tag:terratest_tag\": {terratestTag},\n\t\t\"instance-lifecycle\": {\"spot\"},\n\t}\n\tSpotIDsNoTerminate := aws.GetEc2InstanceIdsByFilters(t, awsRegion, filtersSpotNoStopTerminate)\n\n\t// Verify the instances trigger by scheduler terminate-spot with tag \"topstop:true\" should be terminated\n\tassert.Equal(t, SpotIDsToStopRunning, SpotIDsToStopTerminate)\n\n\t// Verify the instances trigger by scheduler terminate-spot with tag \"topstop:false\" should be running\n\tassert.Equal(t, SpotIDsNoTerminateRunning, SpotIDsNoTerminate)\n}", "func (j *jit) ReadAWS() (<-chan read, <-chan error) {\n\trdc := make(chan read)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\tdefer close(rdc)\n\t\tdefer close(errc)\n\n\t\texists, err := j.ac.Open(file.BananasCfgName, &j.cfgFile)\n\t\tif err != nil {\n\t\t\terrc <- util.Err(err)\n\t\t\trdc <- false\n\t\t\treturn\n\t\t}\n\t\tif !exists {\n\t\t\tj.cfgFile.LastLA = util.LANow().Add(-24 * time.Hour)\n\t\t}\n\n\t\terr = j.ac.OpenDir(dir.BananasMonName, j.monDir)\n\t\tif err != nil {\n\t\t\terrc <- util.Err(err)\n\t\t\trdc <- false\n\t\t\treturn\n\t\t}\n\t\terrc <- nil\n\t\trdc <- true\n\t}()\n\n\treturn rdc, errc\n}", "func getAWSRegion(s3Bucket string, config *aws.Config, settings map[string]string) (string, error) {\n\tif region, ok := settings[RegionSetting]; ok {\n\t\treturn region, nil\n\t}\n\tif config.Endpoint == nil ||\n\t\t*config.Endpoint == \"\" ||\n\t\tstrings.HasSuffix(*config.Endpoint, \".amazonaws.com\") {\n\t\tregion, err := findBucketRegion(s3Bucket, 
config)\n\t\treturn region, errors.Wrapf(err, \"%s is not set and s3:GetBucketLocation failed\", RegionSetting)\n\t} else {\n\t\t// For S3 compatible services like Minio, Ceph etc. use `us-east-1` as region\n\t\t// ref: https://github.com/minio/cookbook/blob/master/docs/aws-sdk-for-go-with-minio.md\n\t\treturn \"us-east-1\", nil\n\t}\n}", "func test_getGasPrice(t *testing.T) {\n\t//services.RunOnTestNet()\n\tt.Skip(nil)\n\t// get the suggested gas price\n\tgasPrice, err := eth_gateway.EthWrapper.GetGasPrice()\n\tif err != nil {\n\t\tt.Fatalf(\"error retrieving gas price: %v\\n\", err)\n\t}\n\tif gasPrice.IsUint64() && gasPrice.Uint64() > 0 {\n\t\tt.Logf(\"gas price verified: %v\\n\", gasPrice)\n\t} else {\n\t\tt.Fatalf(\"gas price less than zero: %v\\n\", gasPrice)\n\t}\n\tt.Logf(\"current network gas price :%v\", gasPrice.String())\n}", "func TestEndpointCase29(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-northwest-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.cn-northwest-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func isAwsStatusEqual(aws *corev1.Secret, bkp *v1alpha1.Backup) 
bool {\n\treturn aws.Name != bkp.Status.AWSSecretName || aws.Namespace != bkp.Status.AwsCredentialsSecretNamespace\n}", "func TestEndpointCase21(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-east-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase28(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-north-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.cn-north-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" 
{\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase17(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"sa-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.sa-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase36(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, 
result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase19(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase3(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-northeast-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-northeast-2.amazonaws.com\")\n\n\texpectEndpoint := 
smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase90(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis-fips.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func NewAWSClient(config *types.Configuration, stats *types.Statistics, statsdClient, dogstatsdClient *statsd.Client) (*Client, error) {\r\n\r\n\tif config.AWS.AccessKeyID != \"\" && 
config.AWS.SecretAccessKey != \"\" && config.AWS.Region != \"\" {\r\n\t\tos.Setenv(\"AWS_ACCESS_KEY_ID\", config.AWS.AccessKeyID)\r\n\t\tos.Setenv(\"AWS_SECRET_ACCESS_KEY\", config.AWS.SecretAccessKey)\r\n\t\tos.Setenv(\"AWS_DEFAULT_REGION\", config.AWS.Region)\r\n\t}\r\n\r\n\tsess, err := session.NewSession(&aws.Config{\r\n\t\tRegion: aws.String(config.AWS.Region)},\r\n\t)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"[ERROR] : AWS - %v\\n\", \"Error while creating AWS Session\")\r\n\t\treturn nil, errors.New(\"Error while creating AWS Session\")\r\n\t}\r\n\r\n\t_, err = sts.New(session.New()).GetCallerIdentity(&sts.GetCallerIdentityInput{})\r\n\tif err != nil {\r\n\t\tlog.Printf(\"[ERROR] : AWS - %v\\n\", \"Error while getting AWS Token\")\r\n\t\treturn nil, errors.New(\"Error while getting AWS Token\")\r\n\t}\r\n\r\n\tvar endpointURL *url.URL\r\n\tendpointURL, err = url.Parse(config.AWS.SQS.URL)\r\n\tif err != nil {\r\n\t\tlog.Printf(\"[ERROR] : AWS SQS - %v\\n\", err.Error())\r\n\t\treturn nil, ErrClientCreation\r\n\t}\r\n\r\n\treturn &Client{\r\n\t\tOutputType: \"AWS\",\r\n\t\tEndpointURL: endpointURL,\r\n\t\tConfig: config,\r\n\t\tAWSSession: sess,\r\n\t\tStats: stats,\r\n\t\tStatsdClient: statsdClient,\r\n\t\tDogstatsdClient: dogstatsdClient,\r\n\t}, nil\r\n}", "func TestEndpointCase88(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: 
*uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestEndpointCase65(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-west-1:123:stream/test-stream\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis.us-west-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func TestMain(t *testing.T) {\n\n\tos.Setenv(\"AWS_ACCESS_KEY_ID\", \"XXX\")\n\n\t_, err := session.NewSession()\n\n\tif err == nil {\n\t\tt.Logf(\"Credentials found [expected]\")\n\t} else {\n\t\tt.Logf(\"NO Credentials found [expected]\")\n\t}\n\n}", "func 
OpenAWS(dataFolder string, ttl time.Duration) (*awsStorage, error) {\n\n\tvar prefix string\n\tparts := strings.SplitN(dataFolder, \"/\", 2)\n\tbucket := parts[0]\n\tif len(parts) > 1 {\n\t\tprefix = parts[1]\n\t\tif !strings.HasSuffix(prefix, \"/\") {\n\t\t\tprefix += \"/\"\n\t\t}\n\t}\n\n\tcfg, err := external.LoadDefaultAWSConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb := dynamodb.New(cfg)\n\n\t_, err = db.DescribeTableRequest(&dynamodb.DescribeTableInput{\n\t\tTableName: aws.String(bucket),\n\t}).Send(context.TODO())\n\n\tif err != nil {\n\t\treturn nil, err // table does not exist ?\n\t}\n\n\tstore := s3.New(cfg)\n\n\t_, err = store.GetBucketLocationRequest(&s3.GetBucketLocationInput{\n\t\tBucket: aws.String(prefix),\n\t}).Send(context.TODO())\n\n\tif err != nil {\n\t\treturn nil, err // table does not exist ?\n\t}\n\n\treturn &awsStorage{db: db, store: store, bucket: bucket, prefix: prefix, ttl: ttl}, nil\n}", "func main() {\n\tif len(os.Args) != 3 {\n\t\texitErrorf(\"AMI ID and Instance Type are required\"+\n\t\t\t\"\\nUsage: %s image_id instance_type\", os.Args[0])\n\t}\n\n\n\t//Initialize the session that the SDK uses to load credentials from the shared credentials file ~/.aws/credentials\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(\"eu-west-1\")}, )\n\tsvc := ec2.New(sess)\n\tif err != nil {\n\t\texitErrorf(\"Error creating session, %v\", err)\n\t}\n\n}", "func TestAWSCloudValidation(t *testing.T) {\n\tos.Setenv(\"CLOUD_NAME\", \"CLOUD_AWS\")\n\tutils.SetCloudName(\"CLOUD_AWS\")\n\tos.Setenv(\"SERVICE_TYPE\", \"NodePort\")\n\tos.Setenv(\"VIP_NETWORK_LIST\", `[]`)\n\n\taddConfigMap(t)\n\n\tif !ctrl.DisableSync {\n\t\tt.Fatalf(\"CLOUD_AWS should not be allowed if VIP_NETWORK_LIST is empty\")\n\t}\n\tDeleteConfigMap(t)\n\tos.Setenv(\"VIP_NETWORK_LIST\", `[{\"networkName\":\"net123\"}]`)\n}", "func TestEndpointCase93(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-1\"),\n\t\tUseFIPS: 
ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-west-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis.us-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func ProvidersAWS(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\tif err := json.NewEncoder(w).Encode(awsResources); err != nil {\n\t\tpanic(err)\n\t}\n}", "func awsError(err error) {\n\tif aerr, ok := err.(awserr.Error); ok {\n\t\tlog.Fatalf(\"%s\\n\", aerr.Error())\n\t} else {\n\t\tlog.Fatalf(\"%s\\n\", err.Error())\n\t}\n}", "func testSSES3EncryptedGetObjectReadSeekFunctional() {\n\t// initialize logging params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"GetObject(bucketName, objectName)\"\n\targs := map[string]interface{}{}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: 
credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket.\n\terr = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"us-east-1\"})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\t// Delete all objects and buckets\n\t\tif err = cleanupBucket(bucketName, c); err != nil {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t// Generate 129MiB of data.\n\tbufSize := dataFileMap[\"datafile-129-MB\"]\n\treader := getDataReader(\"datafile-129-MB\")\n\tdefer reader.Close()\n\n\tobjectName := randString(60, rand.NewSource(time.Now().UnixNano()), \"\")\n\targs[\"objectName\"] = objectName\n\n\tbuf, err := io.ReadAll(reader)\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"ReadAll failed\", err)\n\t\treturn\n\t}\n\n\t// Save the data\n\t_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{\n\t\tContentType: \"binary/octet-stream\",\n\t\tServerSideEncryption: encrypt.NewSSE(),\n\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"PutObject failed\", err)\n\t\treturn\n\t}\n\n\t// Read the data back\n\tr, err := c.GetObject(context.Background(), bucketName, objectName, 
minio.GetObjectOptions{})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"GetObject failed\", err)\n\t\treturn\n\t}\n\tdefer r.Close()\n\n\tst, err := r.Stat()\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"Stat object failed\", err)\n\t\treturn\n\t}\n\n\tif st.Size != int64(bufSize) {\n\t\tlogError(testName, function, args, startTime, \"\", \"Number of bytes does not match, expected \"+string(int64(bufSize))+\", got \"+string(st.Size), err)\n\t\treturn\n\t}\n\n\t// This following function helps us to compare data from the reader after seek\n\t// with the data from the original buffer\n\tcmpData := func(r io.Reader, start, end int) {\n\t\tif end-start == 0 {\n\t\t\treturn\n\t\t}\n\t\tbuffer := bytes.NewBuffer([]byte{})\n\t\tif _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlogError(testName, function, args, startTime, \"\", \"CopyN failed\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif !bytes.Equal(buf[start:end], buffer.Bytes()) {\n\t\t\tlogError(testName, function, args, startTime, \"\", \"Incorrect read bytes v/s original buffer\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\toffset int64\n\t\twhence int\n\t\tpos int64\n\t\terr error\n\t\tshouldCmp bool\n\t\tstart int\n\t\tend int\n\t}{\n\t\t// Start from offset 0, fetch data and compare\n\t\t{0, 0, 0, nil, true, 0, 0},\n\t\t// Start from offset 2048, fetch data and compare\n\t\t{2048, 0, 2048, nil, true, 2048, bufSize},\n\t\t// Start from offset larger than possible\n\t\t{int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0},\n\t\t// Move to offset 0 without comparing\n\t\t{0, 0, 0, nil, false, 0, 0},\n\t\t// Move one step forward and compare\n\t\t{1, 1, 1, nil, true, 1, bufSize},\n\t\t// Move larger than possible\n\t\t{int64(bufSize), 1, 0, io.EOF, false, 0, 0},\n\t\t// Provide negative offset with CUR_SEEK\n\t\t{int64(-1), 1, 0, fmt.Errorf(\"Negative position not allowed for 1\"), 
false, 0, 0},\n\t\t// Test with whence SEEK_END and with positive offset\n\t\t{1024, 2, 0, io.EOF, false, 0, 0},\n\t\t// Test with whence SEEK_END and with negative offset\n\t\t{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},\n\t\t// Test with whence SEEK_END and with large negative offset\n\t\t{-int64(bufSize) * 2, 2, 0, fmt.Errorf(\"Seeking at negative offset not allowed for 2\"), false, 0, 0},\n\t\t// Test with invalid whence\n\t\t{0, 3, 0, fmt.Errorf(\"Invalid whence 3\"), false, 0, 0},\n\t}\n\n\tfor i, testCase := range testCases {\n\t\t// Perform seek operation\n\t\tn, err := r.Seek(testCase.offset, testCase.whence)\n\t\tif err != nil && testCase.err == nil {\n\t\t\t// We expected success.\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test %d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\treturn\n\t\t}\n\t\tif err == nil && testCase.err != nil {\n\t\t\t// We expected failure, but got success.\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test %d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\treturn\n\t\t}\n\t\tif err != nil && testCase.err != nil {\n\t\t\tif err.Error() != testCase.err.Error() {\n\t\t\t\t// We expect a specific error\n\t\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\t\tfmt.Sprintf(\"Test %d, unexpected err value: expected: %s, found: %s\", i+1, testCase.err, err), err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// Check the returned seek pos\n\t\tif n != testCase.pos {\n\t\t\tlogError(testName, function, args, startTime, \"\",\n\t\t\t\tfmt.Sprintf(\"Test %d, number of bytes seeked does not match, expected %d, got %d\", i+1, testCase.pos, n), err)\n\t\t\treturn\n\t\t}\n\t\t// Compare only if shouldCmp is activated\n\t\tif testCase.shouldCmp {\n\t\t\tcmpData(r, testCase.start, testCase.end)\n\t\t}\n\t}\n\n\tsuccessLogger(testName, function, args, 
startTime).Info()\n}", "func TestEndpointCase10(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-central-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-central-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}", "func getAWSClient(ctx context.Context, conf *config.Config, sess *session.Session, region config.Region) (*cziAWS.Client, error) {\n\t// for things meant to be run as a user\n\tuserConf := &aws.Config{\n\t\tRegion: aws.String(region.AWSRegion),\n\t}\n\n\tlambdaConf := userConf\n\tif conf.LambdaConfig.RoleARN != nil {\n\t\t// for things meant to be run as an assumed role\n\t\tlambdaConf = &aws.Config{\n\t\t\tRegion: aws.String(region.AWSRegion),\n\t\t\tCredentials: stscreds.NewCredentials(\n\t\t\t\tsess,\n\t\t\t\t*conf.LambdaConfig.RoleARN, func(p *stscreds.AssumeRoleProvider) {\n\t\t\t\t\tp.TokenProvider = stscreds.StdinTokenProvider\n\t\t\t\t},\n\t\t\t),\n\t\t}\n\t}\n\tawsClient := cziAWS.New(sess).\n\t\tWithIAM(userConf).\n\t\tWithKMS(userConf).\n\t\tWithSTS(userConf).\n\t\tWithLambda(lambdaConf)\n\treturn awsClient, nil\n}", "func testMakeBucketRegionsV2() {\n\t// initialize logging 
params\n\tstartTime := time.Now()\n\ttestName := getFuncName()\n\tfunction := \"MakeBucket(bucketName, region)\"\n\targs := map[string]interface{}{\n\t\t\"bucketName\": \"\",\n\t\t\"region\": \"eu-west-1\",\n\t}\n\n\t// Seed random based on current time.\n\trand.Seed(time.Now().Unix())\n\n\t// Instantiate new minio client object.\n\tc, err := minio.New(os.Getenv(serverEndpoint),\n\t\t&minio.Options{\n\t\t\tCreds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), \"\"),\n\t\t\tSecure: mustParseBool(os.Getenv(enableHTTPS)),\n\t\t})\n\tif err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MinIO v2 client object creation failed\", err)\n\t\treturn\n\t}\n\n\t// Enable tracing, write to stderr.\n\t// c.TraceOn(os.Stderr)\n\n\t// Set user agent.\n\tc.SetAppInfo(\"MinIO-go-FunctionalTest\", \"0.1.0\")\n\n\t// Generate a new random bucket name.\n\tbucketName := randString(60, rand.NewSource(time.Now().UnixNano()), \"minio-go-test-\")\n\targs[\"bucketName\"] = bucketName\n\n\t// Make a new bucket in 'eu-central-1'.\n\tif err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: \"eu-west-1\"}); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket failed\", err)\n\t\treturn\n\t}\n\n\tif err = cleanupBucket(bucketName, c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed while removing bucket recursively\", err)\n\t\treturn\n\t}\n\n\t// Make a new bucket with '.' in its name, in 'us-west-2'. 
This\n\t// request is internally staged into a path style instead of\n\t// virtual host style.\n\tif err = c.MakeBucket(context.Background(), bucketName+\".withperiod\", minio.MakeBucketOptions{Region: \"us-west-2\"}); err != nil {\n\t\targs[\"bucketName\"] = bucketName + \".withperiod\"\n\t\targs[\"region\"] = \"us-west-2\"\n\t\tlogError(testName, function, args, startTime, \"\", \"MakeBucket test with a bucket name with period, '.', failed\", err)\n\t\treturn\n\t}\n\n\t// Delete all objects and buckets\n\tif err = cleanupBucket(bucketName+\".withperiod\", c); err != nil {\n\t\tlogError(testName, function, args, startTime, \"\", \"CleanupBucket failed while removing bucket recursively\", err)\n\t\treturn\n\t}\n\n\tsuccessLogger(testName, function, args, startTime).Info()\n}", "func getS3Client() *http.Client {\n\t// return aws.RetryingClient\n\treturn http.DefaultClient\n}", "func (si *ServiceInformation) HasAWSInfo() bool {\n\tif si.RequiredArn == nil {\n\t\treturn false\n\t}\n\treturn len(si.RequiredArn) > 0\n}" ]
[ "0.7175263", "0.6250028", "0.61728734", "0.60939246", "0.6013488", "0.59578866", "0.5909698", "0.58715963", "0.5866461", "0.5859541", "0.57206744", "0.5703349", "0.57020265", "0.5686423", "0.5682464", "0.5666682", "0.5651969", "0.56266624", "0.56212664", "0.56192636", "0.5612565", "0.55685836", "0.55604", "0.54983175", "0.5462202", "0.54528755", "0.54428124", "0.5439501", "0.54340154", "0.54149973", "0.5408374", "0.54071766", "0.5385062", "0.53722006", "0.53676283", "0.5358479", "0.53552926", "0.53525186", "0.5348112", "0.5341149", "0.5333581", "0.5332834", "0.5320247", "0.5305619", "0.53021735", "0.52950525", "0.5291718", "0.5291363", "0.52908516", "0.52810663", "0.5280028", "0.52746433", "0.5270093", "0.52697563", "0.526735", "0.52605563", "0.52570856", "0.52477896", "0.52440447", "0.52437866", "0.5240136", "0.5232401", "0.52293724", "0.5223588", "0.5220526", "0.5215154", "0.52144593", "0.52090466", "0.52082497", "0.52081877", "0.52061206", "0.5194922", "0.51825356", "0.5177358", "0.5174895", "0.51717174", "0.5171435", "0.5159949", "0.5155179", "0.51537573", "0.51526546", "0.5152528", "0.51508254", "0.5148227", "0.51461375", "0.5145724", "0.5143481", "0.514205", "0.5141185", "0.51410365", "0.51386255", "0.5134058", "0.5134008", "0.51284546", "0.512326", "0.51199883", "0.511858", "0.5115539", "0.51152456", "0.51151764" ]
0.758776
0
Initialize a new cluster
func Initialize(config config.Cluster, clusterStateChannel chan state.ClusterState, loadBalancerChannel chan state.LoadBalancerChange) *Cluster { obj := Cluster{ config: config, ingressEvents: make(chan state.IngressChange, 2), backendEvents: make(chan state.BackendChange, 2), clusterStateChannel: clusterStateChannel, loadBalancerChannel: loadBalancerChannel, readinessChannel: make(chan bool, 2), clearChannel: make(chan bool, 2), aggregatorStopChannel: make(chan bool, 2), shallExit: false, knownIngresses: map[string]state.K8RouterIngress{}, knownPods: map[string]state.K8RouterBackend{}, isFirstConnectionAttempt: true, } obj.currentClusterState.Name = config.Name return &obj }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Orchestrator) initCluster(readTx store.ReadTx) error {\n\tclusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(clusters) != 1 {\n\t\t// we'll just pick it when it is created.\n\t\treturn nil\n\t}\n\n\tr.cluster = clusters[0]\n\treturn nil\n}", "func NewCluster(name string, newGroup NewGroup, raftBind, raftDir string) *Cluster {\n\tslots := make(map[int]*Slot, SlotNum)\n\tfor i := 0; i < SlotNum; i++ {\n\t\tslots[i] = NewSlot(i, SlotStateOffline, nil, nil)\n\t}\n\treturn &Cluster{\n\t\tname: name,\n\t\tslots: slots,\n\t\tnewGroup: newGroup,\n\t\tgroups: make(map[int]Group),\n\t\traftBind: raftBind,\n\t\traftDir: raftDir,\n\t}\n}", "func NewCluster(MyCluster []Barebone) Cluster {\n\tvar retCluster Cluster\n\tretCluster.Machines = &MyCluster\n\treturn retCluster\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"applyImmediately\"] = nil\n\t\tinputs[\"availabilityZones\"] = nil\n\t\tinputs[\"backupRetentionPeriod\"] = nil\n\t\tinputs[\"clusterIdentifier\"] = nil\n\t\tinputs[\"clusterIdentifierPrefix\"] = nil\n\t\tinputs[\"engine\"] = nil\n\t\tinputs[\"engineVersion\"] = nil\n\t\tinputs[\"finalSnapshotIdentifier\"] = nil\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = nil\n\t\tinputs[\"iamRoles\"] = nil\n\t\tinputs[\"kmsKeyArn\"] = nil\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = nil\n\t\tinputs[\"neptuneSubnetGroupName\"] = nil\n\t\tinputs[\"port\"] = nil\n\t\tinputs[\"preferredBackupWindow\"] = nil\n\t\tinputs[\"preferredMaintenanceWindow\"] = nil\n\t\tinputs[\"replicationSourceIdentifier\"] = nil\n\t\tinputs[\"skipFinalSnapshot\"] = nil\n\t\tinputs[\"snapshotIdentifier\"] = nil\n\t\tinputs[\"storageEncrypted\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t\tinputs[\"vpcSecurityGroupIds\"] = nil\n\t} else 
{\n\t\tinputs[\"applyImmediately\"] = args.ApplyImmediately\n\t\tinputs[\"availabilityZones\"] = args.AvailabilityZones\n\t\tinputs[\"backupRetentionPeriod\"] = args.BackupRetentionPeriod\n\t\tinputs[\"clusterIdentifier\"] = args.ClusterIdentifier\n\t\tinputs[\"clusterIdentifierPrefix\"] = args.ClusterIdentifierPrefix\n\t\tinputs[\"engine\"] = args.Engine\n\t\tinputs[\"engineVersion\"] = args.EngineVersion\n\t\tinputs[\"finalSnapshotIdentifier\"] = args.FinalSnapshotIdentifier\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = args.IamDatabaseAuthenticationEnabled\n\t\tinputs[\"iamRoles\"] = args.IamRoles\n\t\tinputs[\"kmsKeyArn\"] = args.KmsKeyArn\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = args.NeptuneClusterParameterGroupName\n\t\tinputs[\"neptuneSubnetGroupName\"] = args.NeptuneSubnetGroupName\n\t\tinputs[\"port\"] = args.Port\n\t\tinputs[\"preferredBackupWindow\"] = args.PreferredBackupWindow\n\t\tinputs[\"preferredMaintenanceWindow\"] = args.PreferredMaintenanceWindow\n\t\tinputs[\"replicationSourceIdentifier\"] = args.ReplicationSourceIdentifier\n\t\tinputs[\"skipFinalSnapshot\"] = args.SkipFinalSnapshot\n\t\tinputs[\"snapshotIdentifier\"] = args.SnapshotIdentifier\n\t\tinputs[\"storageEncrypted\"] = args.StorageEncrypted\n\t\tinputs[\"tags\"] = args.Tags\n\t\tinputs[\"vpcSecurityGroupIds\"] = args.VpcSecurityGroupIds\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"clusterMembers\"] = nil\n\tinputs[\"clusterResourceId\"] = nil\n\tinputs[\"endpoint\"] = nil\n\tinputs[\"hostedZoneId\"] = nil\n\tinputs[\"readerEndpoint\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:neptune/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func initCluster() (_ *cluster, cbChan <-chan Callback) {\n cb := make(chan Callback)\n c := &cluster{\n nil,\n chan<- Callback(cb),\n make(map[Nid]*Node),\n make(map[Nid]*GossipNode),\n nil,\n sync.RWMutex{},\n }\n return c, (<-chan Callback)(cb)\n}", "func 
(cs Cluster) Init(ctx context.Context, out, errOut io.Writer) {\n\t// this retries until it succeeds, it won't return unless it does\n\te2e.Run(ctx, out, errOut, \"./cockroach\",\n\t\t\"init\",\n\t\t\"--insecure\",\n\t\t\"--host=\"+cs[0].Addr,\n\t)\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{proxies: make(map[string]*httputil.ReverseProxy)}\n}", "func NewCluster(closing chan bool) *Cluster {\n\tcluster := &Cluster{\n\t\tname: getLocalPeerName(),\n\t\tactions: make(chan func()),\n\t\tclosing: closing,\n\t\tstate: newSubscriptionState(),\n\t\tmembers: new(sync.Map),\n\t}\n\n\t// Get the cluster binding address\n\tlistenAddr, err := parseAddr(config.Conf.Broker.Cluster.ListenAddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Get the advertised address\n\tadvertiseAddr, err := parseAddr(config.Conf.Broker.Cluster.AdvertiseAddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create a new router\n\trouter, err := mesh.NewRouter(mesh.Config{\n\t\tHost: listenAddr.IP.String(),\n\t\tPort: listenAddr.Port,\n\t\tProtocolMinVersion: mesh.ProtocolMinVersion,\n\t\tPassword: []byte(config.Conf.Broker.Cluster.Passphrase),\n\t\tConnLimit: 128,\n\t\tPeerDiscovery: true,\n\t\tTrustedSubnets: []*net.IPNet{},\n\t}, cluster.name, advertiseAddr.String(), mesh.NullOverlay{}, logging.Discard)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create a new gossip layer\n\tgossip, err := router.NewGossip(\"cluster\", cluster)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//Store the gossip and the router\n\tcluster.gossip = gossip\n\tcluster.router = router\n\treturn cluster\n}", "func (af *flight) NewCluster(rconf *platform.RuntimeConfig) (platform.Cluster, error) {\n\tbc, err := platform.NewBaseCluster(af.BaseFlight, rconf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac := &cluster{\n\t\tBaseCluster: bc,\n\t\tflight: af,\n\t}\n\n\tif !rconf.NoSSHKeyInMetadata {\n\t\tac.sshKey = af.SSHKey\n\t}\n\n\tac.ResourceGroup, err = af.api.CreateResourceGroup(\"kola-cluster\")\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\tac.StorageAccount, err = af.api.CreateStorageAccount(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t_, err = af.api.PrepareNetworkResources(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\taf.AddCluster(ac)\n\n\treturn ac, nil\n}", "func NewCluster(config *Config, loggers []Logger) *Cluster {\n\tcluster := &Cluster{config: config, loggers: loggers}\n\treturn cluster\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func NewCluster() *Cluster {\n\tthis := Cluster{}\n\treturn &this\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tif args == nil || args.BrokerNodeGroupInfo == nil {\n\t\treturn nil, errors.New(\"missing required argument 'BrokerNodeGroupInfo'\")\n\t}\n\tif args == nil || args.ClusterName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ClusterName'\")\n\t}\n\tif args == nil || args.KafkaVersion == nil {\n\t\treturn nil, errors.New(\"missing required argument 'KafkaVersion'\")\n\t}\n\tif args == nil || args.NumberOfBrokerNodes == nil {\n\t\treturn nil, errors.New(\"missing required argument 'NumberOfBrokerNodes'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"brokerNodeGroupInfo\"] = nil\n\t\tinputs[\"clientAuthentication\"] = nil\n\t\tinputs[\"clusterName\"] = nil\n\t\tinputs[\"configurationInfo\"] = nil\n\t\tinputs[\"encryptionInfo\"] = nil\n\t\tinputs[\"enhancedMonitoring\"] = nil\n\t\tinputs[\"kafkaVersion\"] = nil\n\t\tinputs[\"numberOfBrokerNodes\"] = 
nil\n\t\tinputs[\"tags\"] = nil\n\t} else {\n\t\tinputs[\"brokerNodeGroupInfo\"] = args.BrokerNodeGroupInfo\n\t\tinputs[\"clientAuthentication\"] = args.ClientAuthentication\n\t\tinputs[\"clusterName\"] = args.ClusterName\n\t\tinputs[\"configurationInfo\"] = args.ConfigurationInfo\n\t\tinputs[\"encryptionInfo\"] = args.EncryptionInfo\n\t\tinputs[\"enhancedMonitoring\"] = args.EnhancedMonitoring\n\t\tinputs[\"kafkaVersion\"] = args.KafkaVersion\n\t\tinputs[\"numberOfBrokerNodes\"] = args.NumberOfBrokerNodes\n\t\tinputs[\"tags\"] = args.Tags\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"bootstrapBrokers\"] = nil\n\tinputs[\"bootstrapBrokersTls\"] = nil\n\tinputs[\"currentVersion\"] = nil\n\tinputs[\"zookeeperConnectString\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:msk/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func Init(localCluster, remoteCluster discoveryv1alpha1.ClusterIdentity, nodeName, nodeIP string) {\n\tLocalCluster = localCluster\n\tRemoteCluster = remoteCluster\n\n\tLiqoNodeName = nodeName\n\tLiqoNodeIP = nodeIP\n\tStartTime = time.Now().Truncate(time.Second)\n\n\t// The kubernetes service port is directly retrieved from the corresponding environment variable,\n\t// since it is the one used locally. 
In case it is not found, it is defaulted to 443.\n\tKubernetesServicePort = os.Getenv(\"KUBERNETES_SERVICE_PORT\")\n\tif KubernetesServicePort == \"\" {\n\t\tKubernetesServicePort = \"443\"\n\t}\n}", "func NewCluster(path string, n int) *Cluster {\n\tc := &Cluster{}\n\n\t// Construct a list of temporary peers.\n\tpeers := make([]string, n)\n\tfor i := range peers {\n\t\tpeers[i] = \"127.0.0.1:0\"\n\t}\n\n\t// Create new stores with temporary peers.\n\tfor i := 0; i < n; i++ {\n\t\tconfig := NewConfig(filepath.Join(path, strconv.Itoa(i)))\n\t\tconfig.Peers = peers\n\t\ts := NewStore(config)\n\t\tc.Stores = append(c.Stores, s)\n\t}\n\n\treturn c\n}", "func newCluster(t *testing.T, opt ...interface{}) *cluster.Cluster {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping cluster test in short mode\")\n\t}\n\n\tconfig := cluster.DefaultConfig()\n\n\tconfig.Goshimmer.Hostname = *goShimmerHostname\n\tconfig.Goshimmer.UseProvidedNode = *goShimmerUseProvidedNode\n\tif *goShimmerUseProvidedNode {\n\t\tconfig.Goshimmer.FaucetPoWTarget = -1\n\t}\n\tconfig.Goshimmer.TxStreamPort = *goShimmerPort\n\n\tnNodes := *numNodes\n\tif len(opt) > 0 {\n\t\tn, ok := opt[0].(int)\n\t\tif ok {\n\t\t\tnNodes = n\n\t\t}\n\t}\n\n\tif len(opt) > 1 {\n\t\tcustomConfig, ok := opt[1].(*cluster.ClusterConfig)\n\t\tif ok {\n\t\t\tconfig = customConfig\n\t\t}\n\t}\n\n\tvar modifyNodesConfig cluster.ModifyNodesConfigFn\n\n\tif len(opt) > 2 {\n\t\tfn, ok := opt[2].(cluster.ModifyNodesConfigFn)\n\t\tif ok {\n\t\t\tmodifyNodesConfig = fn\n\t\t}\n\t}\n\n\tconfig.Wasp.NumNodes = nNodes\n\n\tclu := cluster.New(t.Name(), config)\n\n\tdataPath := path.Join(os.TempDir(), \"wasp-cluster\")\n\terr := clu.InitDataPath(\".\", dataPath, true, modifyNodesConfig)\n\trequire.NoError(t, err)\n\n\terr = clu.Start(dataPath)\n\trequire.NoError(t, err)\n\n\tt.Cleanup(clu.Stop)\n\n\treturn clu\n}", "func newCluster(computeNames ...string) *clusteroperator.Cluster {\n\tcomputes := make([]clusteroperator.ClusterMachineSet, 
len(computeNames))\n\tfor i, computeName := range computeNames {\n\t\tcomputes[i] = clusteroperator.ClusterMachineSet{\n\t\t\tName: computeName,\n\t\t\tMachineSetConfig: clusteroperator.MachineSetConfig{\n\t\t\t\tSize: 1,\n\t\t\t\tNodeType: clusteroperator.NodeTypeCompute,\n\t\t\t},\n\t\t}\n\t}\n\treturn newClusterWithSizes(1, computes...)\n}", "func NewCluster(n int) Cluster {\n\tcs := make([]*Node, 0, n)\n\tpeers := make([]string, 0, n)\n\n\tport := 26257\n\thttp := 8080\n\tfor i := 0; i < n; i++ {\n\t\taddr := net.JoinHostPort(\"localhost\", strconv.Itoa(port+i))\n\t\tpeers = append(peers, addr)\n\t\tcs = append(cs, &Node{\n\t\t\tID: strconv.Itoa(i + 1),\n\t\t\tAddr: addr,\n\t\t\tHttpaddr: net.JoinHostPort(\"localhost\", strconv.Itoa(http+i)),\n\t\t})\n\t}\n\tfor i := range cs {\n\t\tcs[i].Peers = peers\n\t}\n\treturn cs\n}", "func New() *Cluster {\n\treturn &Cluster{\n\t\tmembers: make(map[string]time.Time),\n\t}\n}", "func NewCluster(sessionManager *SessionManager, channelManager *ChannelManager) *Cluster {\n\tc := &Cluster{\n\t\tsessionManager: sessionManager,\n\t\tchannelManager: channelManager,\n\t}\n\n\treturn c\n}", "func Start(ccfg Config) (clus *Cluster, err error) {\n\tif ccfg.Size > 7 {\n\t\treturn nil, fmt.Errorf(\"max cluster size is 7, got %d\", ccfg.Size)\n\t}\n\n\tlg.Infof(\"starting %d Members (root directory %q, root port :%d)\", ccfg.Size, ccfg.RootDir, ccfg.RootPort)\n\n\tdt := ccfg.DialTimeout\n\tif dt == time.Duration(0) {\n\t\tdt = defaultDialTimeout\n\t}\n\n\tclus = &Cluster{\n\t\tembeddedClient: ccfg.EmbeddedClient,\n\t\tStarted: time.Now(),\n\t\tsize: ccfg.Size,\n\t\tMembers: make([]*Member, ccfg.Size),\n\t\tclientHostToIndex: make(map[string]int, ccfg.Size),\n\t\tclientDialTimeout: dt,\n\t\tstopc: make(chan struct{}),\n\t\trootCtx: ccfg.RootCtx,\n\t\trootCancel: ccfg.RootCancel,\n\n\t\tbasePort: ccfg.RootPort,\n\t\trootDir: ccfg.RootDir,\n\t\tccfg: ccfg,\n\t}\n\n\tif !existFileOrDir(ccfg.RootDir) {\n\t\tlg.Infof(\"creating root 
directory %q\", ccfg.RootDir)\n\t\tif err = mkdirAll(ccfg.RootDir); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tlg.Infof(\"removing root directory %q\", ccfg.RootDir)\n\t\tos.RemoveAll(ccfg.RootDir)\n\t}\n\n\tlg.Infof(\"getting default host\")\n\tdhost, err := netutil.GetDefaultHost()\n\tif err != nil {\n\t\tlg.Warn(err)\n\t\tlg.Warn(\"overwriting default host with 'localhost\")\n\t\tdhost = \"localhost\"\n\t}\n\tlg.Infof(\"detected default host %q\", dhost)\n\n\tif !ccfg.PeerTLSInfo.Empty() && ccfg.PeerAutoTLS {\n\t\treturn nil, fmt.Errorf(\"choose either auto peer TLS or manual peer TLS\")\n\t}\n\tif !ccfg.ClientTLSInfo.Empty() && ccfg.ClientAutoTLS {\n\t\treturn nil, fmt.Errorf(\"choose either auto client TLS or manual client TLS\")\n\t}\n\n\tstartPort := ccfg.RootPort\n\tfor i := 0; i < ccfg.Size; i++ {\n\t\tcfg := embed.NewConfig()\n\n\t\tcfg.ClusterState = embed.ClusterStateFlagNew\n\n\t\tcfg.Name = fmt.Sprintf(\"node%d\", i+1)\n\t\tcfg.Dir = filepath.Join(ccfg.RootDir, cfg.Name+\".data-dir-etcd\")\n\t\tcfg.WalDir = filepath.Join(ccfg.RootDir, cfg.Name+\".data-dir-etcd\", \"wal\")\n\n\t\t// this is fresh cluster, so remove any conflicting data\n\t\tos.RemoveAll(cfg.Dir)\n\t\tlg.Infof(\"removed %q\", cfg.Dir)\n\t\tos.RemoveAll(cfg.WalDir)\n\t\tlg.Infof(\"removed %q\", cfg.WalDir)\n\n\t\tcurl := url.URL{Scheme: ccfg.ClientScheme(), Host: fmt.Sprintf(\"localhost:%d\", startPort)}\n\t\tcfg.ACUrls = []url.URL{curl}\n\t\tcfg.LCUrls = []url.URL{curl}\n\t\tif dhost != \"localhost\" {\n\t\t\t// expose default host to other machines in listen address (e.g. 
Prometheus dashboard)\n\t\t\tcurl2 := url.URL{Scheme: ccfg.ClientScheme(), Host: fmt.Sprintf(\"%s:%d\", dhost, startPort)}\n\t\t\tcfg.LCUrls = append(cfg.LCUrls, curl2)\n\t\t\tlg.Infof(\"%q is set up to listen on client url %q (default host)\", cfg.Name, curl2.String())\n\t\t}\n\t\tlg.Infof(\"%q is set up to listen on client url %q\", cfg.Name, curl.String())\n\n\t\tpurl := url.URL{Scheme: ccfg.PeerScheme(), Host: fmt.Sprintf(\"localhost:%d\", startPort+1)}\n\t\tcfg.APUrls = []url.URL{purl}\n\t\tcfg.LPUrls = []url.URL{purl}\n\t\tlg.Infof(\"%q is set up to listen on peer url %q\", cfg.Name, purl.String())\n\n\t\tcfg.ClientAutoTLS = ccfg.ClientAutoTLS\n\t\tcfg.ClientTLSInfo = ccfg.ClientTLSInfo\n\t\tcfg.PeerAutoTLS = ccfg.PeerAutoTLS\n\t\tcfg.PeerTLSInfo = ccfg.PeerTLSInfo\n\n\t\t// auto-compaction every hour\n\t\tcfg.AutoCompactionMode = embed.CompactorModePeriodic\n\t\tcfg.AutoCompactionRetention = \"1h\"\n\n\t\tcfg.Logger = \"zap\"\n\t\tcfg.LogOutputs = []string{embed.StdErrLogOutput}\n\n\t\tclus.Members[i] = &Member{\n\t\t\tclus: clus,\n\t\t\tcfg: cfg,\n\t\t\tstatus: clusterpb.MemberStatus{\n\t\t\t\tName: cfg.Name,\n\t\t\t\tEndpoint: curl.String(),\n\t\t\t\tIsLeader: false,\n\t\t\t\tState: clusterpb.StoppedMemberStatus,\n\t\t\t},\n\t\t}\n\n\t\tclus.clientHostToIndex[curl.Host] = i\n\n\t\tstartPort += 2\n\t}\n\tclus.basePort = startPort\n\n\tfor i := 0; i < clus.size; i++ {\n\t\tclus.Members[i].cfg.InitialCluster = clus.initialCluster()\n\t}\n\n\tvar g errgroup.Group\n\tfor i := 0; i < clus.size; i++ {\n\t\tidx := i\n\t\tg.Go(func() error { return clus.Members[idx].Start() })\n\t}\n\tif gerr := g.Wait(); gerr != nil {\n\t\treturn nil, gerr\n\t}\n\n\ttime.Sleep(time.Second)\n\n\treturn clus, clus.WaitForLeader()\n}", "func NewCluster(peers map[uint64]string) *Cluster {\n\treturn &Cluster{\n\t\tmembers: peers,\n\t}\n}", "func NewCluster(conf *ClusterConfig) *Cluster {\n\tc := &Cluster{\n\t\tDialTimeout: conf.DialTimeout,\n\t\tReadTimeout: 
conf.ReadTimeout,\n\t\tWriteTimeout: conf.WriteTimeout,\n\t\tAddressPicker: nil,\n\t\ttcpKeepAlive: defaultTCPKeepAlive,\n\t\ttcpKeepAlivePeriod: defaultTCPKeepAlivePeriod,\n\t\ttcpLinger: defaultTCPLinger,\n\t\ttcpNoDelay: defaultTCPNoDelay,\n\t}\n\tif conf.PoolConfig != nil {\n\t\tif conf.UseAsyncPool {\n\t\t\tc.connpool = NewAsyncConnPool(conf.PoolConfig)\n\t\t} else {\n\t\t\tc.connpool = NewSyncConnPool(conf.PoolConfig)\n\t\t}\n\t}\n\treturn c\n}", "func NewCluster(opts Options) *Cluster {\n\topts.setDefaults()\n\n\tq := &Cluster{\n\t\tweb: opts.Web,\n\t\tsetups: map[int]setup{},\n\t\tkeys: nil,\n\t\tcfg: opts.Config,\n\t\tcdnCfg: opts.CDNConfig,\n\t\tdomains: map[int]string{},\n\t\tready: tdsync.NewReady(),\n\t\tcommon: tgtest.NewDispatcher(),\n\t\tlog: opts.Logger,\n\t\trandom: opts.Random,\n\t\tprotocol: opts.Protocol,\n\t}\n\tconfig.NewService(&q.cfg, &q.cdnCfg).Register(q.common)\n\tq.common.Fallback(q.fallback())\n\n\treturn q\n}", "func NewCluster(mvccStore MVCCStore) *Cluster {\n\treturn &Cluster{\n\t\tstores: make(map[uint64]*Store),\n\t\tregions: make(map[uint64]*Region),\n\t\tdelayEvents: make(map[delayKey]time.Duration),\n\t\tmvccStore: mvccStore,\n\t}\n}", "func newClusterNetwork(c *Client) *clusterNetwork {\n\treturn &clusterNetwork{\n\t\tr: c,\n\t}\n}", "func newSwimCluster(size int) *swimCluster {\n\tvar nodes []*Node\n\tvar channels []*tchannel.Channel\n\tfor i := 0; i < size; i++ {\n\t\tch, err := tchannel.NewChannel(\"test\", nil)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif err := ch.ListenAndServe(\"127.0.0.1:0\"); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tchannels = append(channels, ch)\n\n\t\thostport := ch.PeerInfo().HostPort\n\t\tnode := NewNode(\"test\", hostport, ch.GetSubChannel(\"test\"), nil)\n\n\t\tnodes = append(nodes, node)\n\t}\n\treturn &swimCluster{nodes: nodes, channels: channels}\n}", "func NewCluster(path string, name string, numShards int, columns int, createTable string, idIndex int) error {\n\tif err := 
os.MkdirAll(path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"Directory already exists\")\n\t}\n\n\t// convert path to absolute\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := &ClusterMetadata{\n\t\tTableName: name,\n\t\tPath: path,\n\t\tNumShards: numShards,\n\t\tShards: make([]string, numShards),\n\t\tNumColumns: columns,\n\t\tIdIndex: idIndex,\n\t}\n\n\t// create each shard\n\tfor i := 0; i < numShards; i++ {\n\t\tdbName := \"shard\" + strconv.Itoa(i) + \".db\"\n\t\tdbPath := filepath.Join(path, dbName)\n\n\t\tdb, err := sql.Open(\"sqlite3\", dbPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdb.Exec(createTable)\n\t\tdb.Exec(metadataCreateTable)\n\t\tstmt, err := db.Prepare(metadataInsertInto)\n\t\tstmt.Exec(c.TableName, i)\n\n\t\tc.Shards[i] = dbName\n\t\tdb.Close()\n\t}\n\n\t// write config to JSON\n\tshardfilePath := filepath.Join(path, \"shardfile\")\n\n\tf, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(shardfilePath, f, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func newK8sCluster(c config.Config) (*k8sCluster, error) {\n\tvar kubeconfig *string\n\tif home := homedir.HomeDir(); home != \"\" {\n\t\tkubeconfig = flag.String(\"kubeconfig\", filepath.Join(home, \".kube\", \"config\"), \"(optional) absolue path to the kubeconfig file\")\n\t} else {\n\t\tkubeconfig = flag.String(\"kubeconfig\", \"\", \"absolue path to the kubeconfig file\")\n\t}\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &k8sCluster{\n\t\tconfig: c,\n\t\tmutex: sync.Mutex{},\n\t\tpods: make(map[string]string),\n\t\tclientset: clientset,\n\t}, nil\n}", "func NewCluster() *ClusterBuilder {\n\treturn &ClusterBuilder{}\n}", "func NewCluster(port int, knownMembers ...Member) Cluster 
{\n\treturn &NoopCluster{}\n}", "func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {\n\trpcClient, err := types.NewClient(driverName, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{\n\t\tDriver: rpcClient,\n\t\tDriverName: driverName,\n\t\tName: name,\n\t\tConfigGetter: configGetter,\n\t\tPersistStore: persistStore,\n\t}, nil\n}", "func createCluster(\n\tctx context.Context,\n\tc *cli.Context,\n\tcfgHelper *cmdutils.ConfigHelper,\n\thost host.Host,\n\tpubsub *pubsub.PubSub,\n\tdht *dual.DHT,\n\tstore ds.Datastore,\n\traftStaging bool,\n) (*ipfscluster.Cluster, error) {\n\n\tcfgs := cfgHelper.Configs()\n\tcfgMgr := cfgHelper.Manager()\n\tcfgBytes, err := cfgMgr.ToDisplayJSON()\n\tcheckErr(\"getting configuration string\", err)\n\tlogger.Debugf(\"Configuration:\\n%s\\n\", cfgBytes)\n\n\tctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))\n\tcheckErr(\"tag context with host id\", err)\n\n\terr = observations.SetupMetrics(cfgs.Metrics)\n\tcheckErr(\"setting up Metrics\", err)\n\n\ttracer, err := observations.SetupTracing(cfgs.Tracing)\n\tcheckErr(\"setting up Tracing\", err)\n\n\tvar apis []ipfscluster.API\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Restapi.ConfigKey()) {\n\t\tvar api *rest.API\n\t\t// Do NOT enable default Libp2p API endpoint on CRDT\n\t\t// clusters. 
Collaborative clusters are likely to share the\n\t\t// secret with untrusted peers, thus the API would be open for\n\t\t// anyone.\n\t\tif cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {\n\t\t\tapi, err = rest.NewAPIWithHost(ctx, cfgs.Restapi, host)\n\t\t} else {\n\t\t\tapi, err = rest.NewAPI(ctx, cfgs.Restapi)\n\t\t}\n\t\tcheckErr(\"creating REST API component\", err)\n\t\tapis = append(apis, api)\n\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Pinsvcapi.ConfigKey()) {\n\t\tpinsvcapi, err := pinsvcapi.NewAPI(ctx, cfgs.Pinsvcapi)\n\t\tcheckErr(\"creating Pinning Service API component\", err)\n\n\t\tapis = append(apis, pinsvcapi)\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.API, cfgs.Ipfsproxy.ConfigKey()) {\n\t\tproxy, err := ipfsproxy.New(cfgs.Ipfsproxy)\n\t\tcheckErr(\"creating IPFS Proxy component\", err)\n\n\t\tapis = append(apis, proxy)\n\t}\n\n\tconnector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)\n\tcheckErr(\"creating IPFS Connector component\", err)\n\n\tvar informers []ipfscluster.Informer\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.DiskInf.ConfigKey()) {\n\t\tdiskInf, err := disk.NewInformer(cfgs.DiskInf)\n\t\tcheckErr(\"creating disk informer\", err)\n\t\tinformers = append(informers, diskInf)\n\t}\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.TagsInf.ConfigKey()) {\n\t\ttagsInf, err := tags.New(cfgs.TagsInf)\n\t\tcheckErr(\"creating numpin informer\", err)\n\t\tinformers = append(informers, tagsInf)\n\t}\n\n\tif cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.PinQueueInf.ConfigKey()) {\n\t\tpinQueueInf, err := pinqueue.New(cfgs.PinQueueInf)\n\t\tcheckErr(\"creating pinqueue informer\", err)\n\t\tinformers = append(informers, pinQueueInf)\n\t}\n\n\t// For legacy compatibility we need to make the allocator\n\t// automatically compatible with informers that have been loaded. 
For\n\t// simplicity we assume that anyone that does not specify an allocator\n\t// configuration (legacy configs), will be using \"freespace\"\n\tif !cfgMgr.IsLoadedFromJSON(config.Allocator, cfgs.BalancedAlloc.ConfigKey()) {\n\t\tcfgs.BalancedAlloc.AllocateBy = []string{\"freespace\"}\n\t}\n\talloc, err := balanced.New(cfgs.BalancedAlloc)\n\tcheckErr(\"creating allocator\", err)\n\n\tcons, err := setupConsensus(\n\t\tcfgHelper,\n\t\thost,\n\t\tdht,\n\t\tpubsub,\n\t\tstore,\n\t\traftStaging,\n\t)\n\tif err != nil {\n\t\tstore.Close()\n\t\tcheckErr(\"setting up Consensus\", err)\n\t}\n\n\tvar peersF func(context.Context) ([]peer.ID, error)\n\tif cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {\n\t\tpeersF = cons.Peers\n\t}\n\n\ttracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, cons.State)\n\tlogger.Debug(\"stateless pintracker loaded\")\n\n\tmon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, peersF)\n\tif err != nil {\n\t\tstore.Close()\n\t\tcheckErr(\"setting up PeerMonitor\", err)\n\t}\n\n\treturn ipfscluster.NewCluster(\n\t\tctx,\n\t\thost,\n\t\tdht,\n\t\tcfgs.Cluster,\n\t\tstore,\n\t\tcons,\n\t\tapis,\n\t\tconnector,\n\t\ttracker,\n\t\tmon,\n\t\talloc,\n\t\tinformers,\n\t\ttracer,\n\t)\n}", "func NewCluster(config Config) (*Cluster, error) {\n\tlogger := logrus.New()\n\tlogger.Out = config.LogOutput\n\tlogger.Level = logrus.Level(config.LogLevel)\n\n\tif config.SerfConfig == nil {\n\t\treturn nil, fmt.Errorf(\"Config.SerfConfig cannot be nil\")\n\t}\n\tif config.SerfConfig.EventCh != nil {\n\t\treturn nil, fmt.Errorf(\"SerfConfig.EventCh must be nil (try using Config.SerfEvents instead)\")\n\t}\n\n\tmemberMap := make(map[string]*serf.Member)\n\n\tring := &ring{\n\t\tdistribution: config.PartitionDistribution,\n\t\tpartitionCount: config.Partitions,\n\t\tmembers: make([]*serf.Member, 0, 0),\n\t}\n\n\tserfEvents := make(chan serf.Event, 256)\n\tconfig.SerfConfig.EventCh = serfEvents\n\tnodeSerf, err := 
serf.Create(config.SerfConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to create serf: %v\", err)\n\t}\n\n\texit := make(chan bool)\n\n\tcluster := &Cluster{\n\t\texit: exit,\n\t\tconfig: config,\n\t\tmemberMap: memberMap,\n\t\tring: ring,\n\t\tserfEvents: serfEvents,\n\t\tSerf: nodeSerf,\n\t\tlogger: logger,\n\t}\n\n\treturn cluster, nil\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterRemoteComponentResource(\"eks:index:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewSuperCluster(t *testing.T) *SuperCluster {\n\tsc := &SuperCluster{}\n\tfor _, f := range configFiles {\n\t\tsc.Servers = append(sc.Servers, StartServer(t, f))\n\t}\n\tsc.setupClientsAndVerify(t)\n\treturn sc\n}", "func New(clusterDefinition *v1alpha1.Cassandra) (*Cluster, error) {\n\tcluster := &Cluster{}\n\tif err := CopyInto(cluster, clusterDefinition); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cluster, nil\n}", "func New(config *rest.Config) (*Cluster, error) {\n\tclientSet, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kubernetes.NewForConfig: %w\", err)\n\t}\n\treturn &Cluster{clientSet}, nil\n}", "func NewCluster(cfg config.StorageClusterConfig, dialer ConnectionDialer) (*Cluster, error) {\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverCount := int64(len(cfg.Servers))\n\tavailableServerCount := serverCount\n\n\tfor _, server := range cfg.Servers {\n\t\tif server.State == config.StorageServerStateOnline {\n\t\t\tcontinue\n\t\t}\n\t\tif server.State != config.StorageServerStateRIP {\n\t\t\treturn nil, ErrServerStateNotSupported\n\t\t}\n\t\tavailableServerCount--\n\t}\n\tif availableServerCount == 0 {\n\t\treturn nil, 
ErrNoServersAvailable\n\t}\n\n\tif dialer == nil {\n\t\tdialer = stdConnDialer\n\t}\n\n\treturn &Cluster{\n\t\tservers: cfg.Servers,\n\t\tserverCount: serverCount,\n\t\tavailableServerCount: availableServerCount,\n\t\tdialer: dialer,\n\t}, nil\n}", "func setupCluster(synkPath string, cluster *cluster) error {\n\tkindcfg := &kindconfig.Cluster{\n\t\tNodes: []kindconfig.Node{\n\t\t\t{\n\t\t\t\tRole: kindconfig.ControlPlaneRole,\n\t\t\t\tImage: kinddefaults.Image,\n\t\t\t}, {\n\t\t\t\tRole: kindconfig.WorkerRole,\n\t\t\t\tImage: kinddefaults.Image,\n\t\t\t},\n\t\t},\n\t}\n\tcluster.kind = kindcluster.NewProvider()\n\n\t// Create kubeconfig file for use by synk or the dev.\n\tkubeConfig, err := ioutil.TempFile(\"\", \"kubeconfig-\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create temp kubeconfig\")\n\t}\n\tcluster.kubeConfigPath = kubeConfig.Name()\n\tif err := kubeConfig.Close(); err != nil {\n\t\treturn errors.Wrap(err, \"close temp kubeconfig\")\n\t}\n\n\tif err := cluster.kind.Create(\n\t\tcluster.genName,\n\t\tkindcluster.CreateWithV1Alpha4Config(kindcfg),\n\t\tkindcluster.CreateWithKubeconfigPath(cluster.kubeConfigPath),\n\t); err != nil {\n\t\treturn errors.Wrapf(err, \"create cluster %q\", cluster.genName)\n\t}\n\tkubecfgRaw, err := ioutil.ReadFile(cluster.kubeConfigPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read kube config\")\n\t}\n\tkubecfg, err := clientcmd.NewClientConfigFromBytes(kubecfgRaw)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"decode kube config\")\n\t}\n\tcluster.restCfg, err = kubecfg.ClientConfig()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get rest config\")\n\t}\n\tlog.Printf(\"To use the cluster, run KUBECONFIG=%s kubectl cluster-info\", cluster.kubeConfigPath)\n\n\t// Setup permissive binding we also have in cloud and robot clusters.\n\tctx := context.Background()\n\n\tc, err := client.New(cluster.restCfg, client.Options{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create client\")\n\t}\n\tif err 
:= c.Create(ctx, &rbac.ClusterRoleBinding{\n\t\tObjectMeta: meta.ObjectMeta{\n\t\t\tName: \"permissive-binding\",\n\t\t},\n\t\tRoleRef: rbac.RoleRef{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: \"cluster-admin\",\n\t\t},\n\t\tSubjects: []rbac.Subject{{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"Group\",\n\t\t\tName: \"system:serviceaccounts\",\n\t\t}},\n\t}); err != nil {\n\t\treturn errors.Wrap(err, \"create permissive role binding\")\n\t}\n\n\t// Setup service account and create image pull secrets.\n\tif token := os.Getenv(\"ACCESS_TOKEN\"); token != \"\" {\n\t\t// Use the same secret name as the GCR credential refresher would\n\t\t// on robots.\n\t\t// This makes some testing of components easier, that assume this\n\t\t// secret to exist, e.g. ChartAssignment controller.\n\t\tsecret := &core.Secret{\n\t\t\tObjectMeta: meta.ObjectMeta{\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tName: gcr.SecretName,\n\t\t\t},\n\t\t\tType: core.SecretTypeDockercfg,\n\t\t\tData: map[string][]byte{\n\t\t\t\t\".dockercfg\": gcr.DockerCfgJSON(token),\n\t\t\t},\n\t\t}\n\t\tif err := c.Create(ctx, secret); err != nil {\n\t\t\treturn errors.Wrap(err, \"create pull secret\")\n\t\t}\n\t\tif err := backoff.Retry(\n\t\t\tfunc() error {\n\t\t\t\tvar sa core.ServiceAccount\n\t\t\t\terr := c.Get(ctx, client.ObjectKey{\"default\", \"default\"}, &sa)\n\t\t\t\tif k8serrors.IsNotFound(err) {\n\t\t\t\t\treturn errors.New(\"not found\")\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn backoff.Permanent(errors.Wrap(err, \"get service account\"))\n\t\t\t\t}\n\t\t\t\tsa.ImagePullSecrets = append(sa.ImagePullSecrets, core.LocalObjectReference{\n\t\t\t\t\tName: gcr.SecretName,\n\t\t\t\t})\n\t\t\t\tif err = c.Update(ctx, &sa); k8serrors.IsConflict(err) {\n\t\t\t\t\treturn fmt.Errorf(\"conflict\")\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn backoff.Permanent(errors.Wrap(err, \"update service account\"))\n\t\t\t\t}\n\t\t\t\treturn 
nil\n\t\t\t},\n\t\t\tbackoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 60),\n\t\t); err != nil {\n\t\t\treturn errors.Wrap(err, \"inject pull secret\")\n\t\t}\n\t}\n\n\t// Wait for a node to be ready, by checking for node taints (incl. NotReady)\n\t// (context: b/128660997)\n\tif err := backoff.Retry(\n\t\tfunc() error {\n\t\t\tvar nds core.NodeList\n\t\t\tif err := c.List(ctx, &nds); err != nil {\n\t\t\t\treturn backoff.Permanent(err)\n\t\t\t}\n\t\t\tfor _, n := range nds.Items {\n\t\t\t\tif len(n.Spec.Taints) == 0 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"taints not removed\")\n\t\t},\n\t\tbackoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 240),\n\t); err != nil {\n\t\treturn errors.Wrap(err, \"wait for node taints to be removed\")\n\t}\n\tcmd := exec.Command(\n\t\tsynkPath,\n\t\t\"init\",\n\t\t\"--kubeconfig\", cluster.kubeConfigPath,\n\t)\n\tif output, err := cmd.CombinedOutput(); err != nil {\n\t\treturn errors.Errorf(\"install Helm: %v; output:\\n%s\\n\", err, output)\n\t}\n\treturn nil\n}", "func (k *Kubeadm) CreateCluster() error {\n\n\tvar (\n\t\tjoinCommand string\n\t\terr error\n\t)\n\n\tif k.ClusterName == \"\" {\n\t\treturn errors.New(\"cluster name is not set\")\n\t}\n\n\terr = k.validateAndUpdateDefault()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstartTime := time.Now()\n\n\tlog.Println(\"total master - \" + fmt.Sprintf(\"%v\", len(k.MasterNodes)))\n\tlog.Println(\"total workers - \" + fmt.Sprintf(\"%v\", len(k.WorkerNodes)))\n\n\tif k.HaProxyNode != nil {\n\t\tlog.Println(\"total haproxy - \" + fmt.Sprintf(\"%v\", 1))\n\t}\n\n\tmasterCreationStartTime := time.Now()\n\tjoinCommand, err = k.setupMaster(k.determineSetup())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken to create masters = %v\", time.Since(masterCreationStartTime))\n\n\tworkerCreationTime := time.Now()\n\n\tif err := k.setupWorkers(joinCommand); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"time taken 
to create workers = %v\", time.Since(workerCreationTime))\n\n\tfor _, file := range k.ApplyFiles {\n\t\terr := k.MasterNodes[0].applyFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif k.Networking != nil {\n\t\tlog.Printf(\"installing networking plugin = %v\", k.Networking.Name)\n\t\terr := k.MasterNodes[0].applyFile(k.Networking.Manifests)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(\"no network plugin found\")\n\t}\n\n\tlog.Printf(\"Time taken to create cluster %v\\n\", time.Since(startTime).String())\n\n\treturn nil\n}", "func CreateCluster(c *cli.Context) error {\n\n\t// On Error delete the cluster. If there createCluster() encounter any error,\n\t// call this function to remove all resources allocated for the cluster so far\n\t// so that they don't linger around.\n\tdeleteCluster := func() {\n\t\tlog.Println(\"ERROR: Cluster creation failed, rolling back...\")\n\t\tif err := DeleteCluster(c); err != nil {\n\t\t\tlog.Printf(\"Error: Failed to delete cluster %s\", c.String(\"name\"))\n\t\t}\n\t}\n\n\t// validate --wait flag\n\tif c.IsSet(\"wait\") && c.Int(\"wait\") < 0 {\n\t\tlog.Fatalf(\"Negative value for '--wait' not allowed (set '%d')\", c.Int(\"wait\"))\n\t}\n\n\t/**********************\n\t *\t\t\t\t\t\t\t\t\t\t*\n\t *\t\tCONFIGURATION\t\t*\n\t * vvvvvvvvvvvvvvvvvv *\n\t **********************/\n\n\t/*\n\t * --name, -n\n\t * Name of the cluster\n\t */\n\n\t// ensure that it's a valid hostname, because it will be part of container names\n\tif err := CheckClusterName(c.String(\"name\")); err != nil {\n\t\treturn err\n\t}\n\n\t// check if the cluster name is already taken\n\tif cluster, err := getClusters(false, c.String(\"name\")); err != nil {\n\t\treturn err\n\t} else if len(cluster) != 0 {\n\t\t// A cluster exists with the same name. 
Return with an error.\n\t\treturn fmt.Errorf(\" Cluster %s already exists\", c.String(\"name\"))\n\t}\n\n\t/*\n\t * --image, -i\n\t * The k3s image used for the k3d node containers\n\t */\n\t// define image\n\timage := c.String(\"image\")\n\t// if no registry was provided, use the default docker.io\n\tif len(strings.Split(image, \"/\")) <= 2 {\n\t\timage = fmt.Sprintf(\"%s/%s\", DefaultRegistry, image)\n\t}\n\n\t/*\n\t * Cluster network\n\t * For proper communication, all k3d node containers have to be in the same docker network\n\t */\n\t// create cluster network\n\tnetworkID, err := createClusterNetwork(c.String(\"name\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Created cluster network with ID %s\", networkID)\n\n\t/*\n\t * --env, -e\n\t * Environment variables that will be passed into the k3d node containers\n\t */\n\t// environment variables\n\tenv := []string{\"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml\"}\n\tenv = append(env, c.StringSlice(\"env\")...)\n\tenv = append(env, fmt.Sprintf(\"K3S_CLUSTER_SECRET=%s\", GenerateRandomString(20)))\n\n\t/*\n\t * --label, -l\n\t * Docker container labels that will be added to the k3d node containers\n\t */\n\t// labels\n\tlabelmap, err := mapNodesToLabelSpecs(c.StringSlice(\"label\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Arguments passed on to the k3s server and agent, will be filled later\n\t */\n\tk3AgentArgs := []string{}\n\tk3sServerArgs := []string{}\n\n\t/*\n\t * --api-port, -a\n\t * The port that will be used by the k3s API-Server\n\t * It will be mapped to localhost or to another hist interface, if specified\n\t * If another host is chosen, we also add a tls-san argument for the server to allow connections\n\t */\n\tapiPort, err := parseAPIPort(c.String(\"api-port\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tk3sServerArgs = append(k3sServerArgs, \"--https-listen-port\", apiPort.Port)\n\n\t// 
When the 'host' is not provided by --api-port, try to fill it using Docker Machine's IP address.\n\tif apiPort.Host == \"\" {\n\t\tapiPort.Host, err = getDockerMachineIp()\n\t\t// IP address is the same as the host\n\t\tapiPort.HostIP = apiPort.Host\n\t\t// In case of error, Log a warning message, and continue on. Since it more likely caused by a miss configured\n\t\t// DOCKER_MACHINE_NAME environment variable.\n\t\tif err != nil {\n\t\t\tlog.Warning(\"Failed to get docker machine IP address, ignoring the DOCKER_MACHINE_NAME environment variable setting.\")\n\t\t}\n\t}\n\n\t// Add TLS SAN for non default host name\n\tif apiPort.Host != \"\" {\n\t\tlog.Printf(\"Add TLS SAN for %s\", apiPort.Host)\n\t\tk3sServerArgs = append(k3sServerArgs, \"--tls-san\", apiPort.Host)\n\t}\n\n\t/*\n\t * --server-arg, -x\n\t * Add user-supplied arguments for the k3s server\n\t */\n\tif c.IsSet(\"server-arg\") || c.IsSet(\"x\") {\n\t\tk3sServerArgs = append(k3sServerArgs, c.StringSlice(\"server-arg\")...)\n\t}\n\n\t/*\n\t * --agent-arg\n\t * Add user-supplied arguments for the k3s agent\n\t */\n\tif c.IsSet(\"agent-arg\") {\n\t\tif c.Int(\"workers\") < 1 {\n\t\t\tlog.Warnln(\"--agent-arg supplied, but --workers is 0, so no agents will be created\")\n\t\t}\n\t\tk3AgentArgs = append(k3AgentArgs, c.StringSlice(\"agent-arg\")...)\n\t}\n\n\t/*\n\t * --port, -p, --publish, --add-port\n\t * List of ports, that should be mapped from some or all k3d node containers to the host system (or other interface)\n\t */\n\t// new port map\n\tportmap, err := mapNodesToPortSpecs(c.StringSlice(\"port\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Image Volume\n\t * A docker volume that will be shared by every k3d node container in the cluster.\n\t * This volume will be used for the `import-image` command.\n\t * On it, all node containers can access the image tarball.\n\t */\n\t// create a docker volume for 
sharing image tarballs with the cluster\n\timageVolume, err := createImageVolume(c.String(\"name\"))\n\tlog.Println(\"Created docker volume \", imageVolume.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * --volume, -v\n\t * List of volumes: host directory mounts for some or all k3d node containers in the cluster\n\t */\n\tvolumes := c.StringSlice(\"volume\")\n\n\tvolumesSpec, err := NewVolumes(volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvolumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, fmt.Sprintf(\"%s:/images\", imageVolume.Name))\n\n\t/*\n\t * --registry-file\n\t * check if there is a registries file\n\t */\n\tregistriesFile := \"\"\n\tif c.IsSet(\"registries-file\") {\n\t\tregistriesFile = c.String(\"registries-file\")\n\t\tif !fileExists(registriesFile) {\n\t\t\tlog.Fatalf(\"registries-file %q does not exists\", registriesFile)\n\t\t}\n\t} else {\n\t\tregistriesFile, err = getGlobalRegistriesConfFilename()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !fileExists(registriesFile) {\n\t\t\t// if the default registries file does not exists, go ahead but do not try to load it\n\t\t\tregistriesFile = \"\"\n\t\t}\n\t}\n\n\t/*\n\t * clusterSpec\n\t * Defines, with which specifications, the cluster and the nodes inside should be created\n\t */\n\tclusterSpec := &ClusterSpec{\n\t\tAgentArgs: k3AgentArgs,\n\t\tAPIPort: *apiPort,\n\t\tAutoRestart: c.Bool(\"auto-restart\"),\n\t\tClusterName: c.String(\"name\"),\n\t\tEnv: env,\n\t\tNodeToLabelSpecMap: labelmap,\n\t\tImage: image,\n\t\tNodeToPortSpecMap: portmap,\n\t\tPortAutoOffset: c.Int(\"port-auto-offset\"),\n\t\tRegistriesFile: registriesFile,\n\t\tRegistryEnabled: c.Bool(\"enable-registry\"),\n\t\tRegistryCacheEnabled: c.Bool(\"enable-registry-cache\"),\n\t\tRegistryName: c.String(\"registry-name\"),\n\t\tRegistryPort: c.Int(\"registry-port\"),\n\t\tRegistryVolume: c.String(\"registry-volume\"),\n\t\tServerArgs: k3sServerArgs,\n\t\tVolumes: 
volumesSpec,\n\t}\n\n\t/******************\n\t *\t\t\t\t\t\t\t\t*\n\t *\t\tCREATION\t\t*\n\t * vvvvvvvvvvvvvv\t*\n\t ******************/\n\n\tlog.Printf(\"Creating cluster [%s]\", c.String(\"name\"))\n\n\t/*\n\t * Cluster Directory\n\t */\n\t// create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)\n\tcreateClusterDir(c.String(\"name\"))\n\n\t/* (1)\n\t * Registry (optional)\n\t * Create the (optional) registry container\n\t */\n\tvar registryNameExists *dnsNameCheck\n\tif clusterSpec.RegistryEnabled {\n\t\tregistryNameExists = newAsyncNameExists(clusterSpec.RegistryName, 1*time.Second)\n\t\tif _, err = createRegistry(*clusterSpec); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t/* (2)\n\t * Server\n\t * Create the server node container\n\t */\n\tserverContainerID, err := createServer(clusterSpec)\n\tif err != nil {\n\t\tdeleteCluster()\n\t\treturn err\n\t}\n\n\t/* (2.1)\n\t * Wait\n\t * Wait for k3s server to be done initializing, if wanted\n\t */\n\t// We're simply scanning the container logs for a line that tells us that everything's up and running\n\t// TODO: also wait for worker nodes\n\tif c.IsSet(\"wait\") {\n\t\tif err := waitForContainerLogMessage(serverContainerID, \"Wrote kubeconfig\", c.Int(\"wait\")); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn fmt.Errorf(\"ERROR: failed while waiting for server to come up\\n%+v\", err)\n\t\t}\n\t}\n\n\t/* (3)\n\t * Workers\n\t * Create the worker node containers\n\t */\n\t// TODO: do this concurrently in different goroutines\n\tif c.Int(\"workers\") > 0 {\n\t\tlog.Printf(\"Booting %s workers for cluster %s\", strconv.Itoa(c.Int(\"workers\")), c.String(\"name\"))\n\t\tfor i := 0; i < c.Int(\"workers\"); i++ {\n\t\t\tworkerID, err := createWorker(clusterSpec, i)\n\t\t\tif err != nil {\n\t\t\t\tdeleteCluster()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"Created worker with ID %s\\n\", workerID)\n\t\t}\n\t}\n\n\t/* (4)\n\t * Done\n\t * 
Finished creating resources.\n\t */\n\tlog.Printf(\"SUCCESS: created cluster [%s]\", c.String(\"name\"))\n\n\tif clusterSpec.RegistryEnabled {\n\t\tlog.Printf(\"A local registry has been started as %s:%d\", clusterSpec.RegistryName, clusterSpec.RegistryPort)\n\n\t\texists, err := registryNameExists.Exists()\n\t\tif !exists || err != nil {\n\t\t\tlog.Printf(\"Make sure you have an alias in your /etc/hosts file like '127.0.0.1 %s'\", clusterSpec.RegistryName)\n\t\t}\n\t}\n\n\tlog.Printf(`You can now use the cluster with:\n\nexport KUBECONFIG=\"$(%s get-kubeconfig --name='%s')\"\nkubectl cluster-info`, os.Args[0], c.String(\"name\"))\n\n\treturn nil\n}", "func (c *Cassandra) Init(_ context.Context, metadata state.Metadata) error {\n\tmeta, err := getCassandraMetadata(metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := c.createClusterConfig(meta)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating cluster config: %w\", err)\n\t}\n\tc.cluster = cluster\n\n\tsession, err := cluster.CreateSession()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating session: %w\", err)\n\t}\n\tc.session = session\n\n\terr = c.tryCreateKeyspace(meta.Keyspace, meta.ReplicationFactor)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating keyspace %s: %w\", meta.Keyspace, err)\n\t}\n\n\terr = c.tryCreateTable(meta.Table, meta.Keyspace)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating table %s: %w\", meta.Table, err)\n\t}\n\n\tc.table = meta.Keyspace + \".\" + meta.Table\n\n\treturn nil\n}", "func NewCluster(name string, nameSpaces, chartName, chartVersion, values string) (*Cluster, error) {\n\tvar spec MapStringInterface\n\terr := yaml.Unmarshal([]byte(values), &spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcluster := &Cluster{\n\t\tUuid: uuid.NewV4().String(),\n\t\tName: name,\n\t\tNameSpace: nameSpaces,\n\t\tRevision: 0,\n\t\tStatus: ClusterStatusPending,\n\t\tChartName: chartName,\n\t\tChartVersion: chartVersion,\n\t\tValues: 
values,\n\t\tSpec: spec,\n\t}\n\n\treturn cluster, nil\n}", "func NewCluster(ctx context.Context, o Options) (cluster *Cluster, err error) {\n\tif err := o.applyDefaults(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to apply defaults to options: %w\", err)\n\t}\n\n\tcontainer, err := k3s.RunContainer(ctx)\n\tdefer func() {\n\t\t// We don't want to leak the cluster here, and we can't really be sure how\n\t\t// many resources exist, even if ClusterRun fails. If we never set our\n\t\t// cluster return argument, we'll delete the k3s cluster. This also\n\t\t// gracefully handles panics.\n\t\tif cluster == nil && container != nil {\n\t\t\t_ = container.Terminate(ctx)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to run cluster: %w\", err)\n\t}\n\n\trawConfig, err := container.GetKubeConfig(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get kubeconfig: %w\", err)\n\t}\n\trestCfg, err := clientcmd.RESTConfigFromKubeConfig(rawConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse kubeconfig: %w\", err)\n\t}\n\n\tkubeClient, err := client.New(restCfg, client.Options{\n\t\tScheme: o.Scheme,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate client: %w\", err)\n\t}\n\n\treturn &Cluster{\n\t\tk3sContainer: container,\n\t\trestConfig: restCfg,\n\t\tkubeClient: kubeClient,\n\t}, nil\n}", "func (c *ClusterManager) Init(zl instances.ZoneLister, pp backends.ProbeProvider) {\n\tc.instancePool.Init(zl)\n\tc.backendPool.Init(pp)\n\t// TODO: Initialize other members as needed.\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:elasticache/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func BuildMinimalCluster(clusterName string) 
*kops.Cluster {\n\tc := &kops.Cluster{}\n\tc.ObjectMeta.Name = clusterName\n\tc.Spec.KubernetesVersion = \"1.23.2\"\n\tc.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{\n\t\t{Name: \"subnet-us-test-1a\", Zone: \"us-test-1a\", CIDR: \"172.20.1.0/24\", Type: kops.SubnetTypePrivate},\n\t}\n\n\tc.Spec.ContainerRuntime = \"containerd\"\n\tc.Spec.Containerd = &kops.ContainerdConfig{}\n\n\tc.Spec.API.PublicName = fmt.Sprintf(\"api.%v\", clusterName)\n\tc.Spec.API.Access = []string{\"0.0.0.0/0\"}\n\tc.Spec.SSHAccess = []string{\"0.0.0.0/0\"}\n\n\t// Default to public topology\n\tc.Spec.Networking.Topology = &kops.TopologySpec{\n\t\tDNS: kops.DNSTypePublic,\n\t}\n\n\tc.Spec.Networking.NetworkCIDR = \"172.20.0.0/16\"\n\tc.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{\n\t\t{Name: \"subnet-us-test-1a\", Zone: \"us-test-1a\", CIDR: \"172.20.1.0/24\", Type: kops.SubnetTypePublic},\n\t\t{Name: \"subnet-us-test-1b\", Zone: \"us-test-1b\", CIDR: \"172.20.2.0/24\", Type: kops.SubnetTypePublic},\n\t\t{Name: \"subnet-us-test-1c\", Zone: \"us-test-1c\", CIDR: \"172.20.3.0/24\", Type: kops.SubnetTypePublic},\n\t}\n\n\tc.Spec.Networking.NonMasqueradeCIDR = \"100.64.0.0/10\"\n\tc.Spec.CloudProvider.AWS = &kops.AWSSpec{}\n\n\tc.Spec.ConfigStore = kops.ConfigStoreSpec{\n\t\tBase: \"memfs://unittest-bucket/\" + clusterName,\n\t}\n\n\tc.Spec.DNSZone = \"test.com\"\n\n\tc.Spec.SSHKeyName = fi.PtrTo(\"test\")\n\n\taddEtcdClusters(c)\n\n\treturn c\n}", "func New(name, platformName, path, format string, parentUI *ui.UI, envConfig map[string]string) (*Kluster, error) {\n\tif len(format) == 0 {\n\t\tformat = DefaultFormat\n\t}\n\tif !validFormat(format) {\n\t\treturn nil, fmt.Errorf(\"invalid format %q for the kubernetes cluster config file\", format)\n\t}\n\tpath = filepath.Join(path, DefaultConfigFilename+\".\"+format)\n\n\tif _, err := os.Stat(path); os.IsExist(err) {\n\t\treturn nil, fmt.Errorf(\"the Kluster config file %q already exists\", path)\n\t}\n\n\tnewUI := 
parentUI.Copy()\n\n\tcluster := Kluster{\n\t\tVersion: Version,\n\t\tKind: \"cluster\",\n\t\tName: name,\n\t\tpath: path,\n\t\tui: newUI,\n\t}\n\n\t// // TODO: Improve this, all platforms are not needed\n\t// allPlatforms := provisioner.SupportedPlatforms(name, envConfig)\n\t// platform, ok := allPlatforms[platformName]\n\t// if !ok {\n\t// \treturn nil, fmt.Errorf(\"platform %q is not supported\", platformName)\n\t// }\n\n\tplatform, err := provisioner.New(name, platformName, envConfig, newUI, Version)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"platform %q is not supported. %s\", platformName, err)\n\t}\n\n\tlogPrefix := fmt.Sprintf(\"KubeKit [ %s@%s ]\", cluster.Name, platformName)\n\tcluster.ui.SetLogPrefix(logPrefix)\n\n\tcluster.Platforms = make(map[string]interface{}, 1)\n\tcluster.provisioner = make(map[string]provisioner.Provisioner, 1)\n\tcluster.State = make(map[string]*State, 1)\n\n\tcluster.Platforms[platformName] = platform.Config()\n\tcluster.provisioner[platformName] = platform\n\tcluster.State[platformName] = &State{\n\t\tStatus: AbsentStatus.String(),\n\t}\n\n\tcluster.Resources = resources.DefaultResourcesFor(platformName)\n\n\t// return if this is a platform with no configuration, such as EKS or AKS\n\tswitch platformName {\n\tcase \"eks\", \"aks\":\n\t\treturn &cluster, nil\n\t}\n\n\tcluster.Config, err = configurator.DefaultConfig(envConfig)\n\n\treturn &cluster, err\n}", "func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) {\n\tclus, err := read(lg, fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclus.agentConns = make([]*grpc.ClientConn, len(clus.Members))\n\tclus.agentClients = make([]rpcpb.TransportClient, len(clus.Members))\n\tclus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members))\n\tclus.agentRequests = make([]*rpcpb.Request, len(clus.Members))\n\tclus.cases = make([]Case, 0)\n\n\tfor i, ap := range clus.Members {\n\t\tvar err error\n\t\tclus.agentConns[i], err = grpc.Dial(ap.AgentAddr, 
dialOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i])\n\t\tclus.lg.Info(\"connected\", zap.String(\"agent-address\", ap.AgentAddr))\n\n\t\tclus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclus.lg.Info(\"created stream\", zap.String(\"agent-address\", ap.AgentAddr))\n\t}\n\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/metrics\", promhttp.Handler())\n\tif clus.Tester.EnablePprof {\n\t\tfor p, h := range debugutil.PProfHandlers() {\n\t\t\tmux.Handle(p, h)\n\t\t}\n\t}\n\tclus.testerHTTPServer = &http.Server{\n\t\tAddr: clus.Tester.Addr,\n\t\tHandler: mux,\n\t}\n\tgo clus.serveTesterServer()\n\n\tclus.updateCases()\n\n\tclus.rateLimiter = rate.NewLimiter(\n\t\trate.Limit(int(clus.Tester.StressQPS)),\n\t\tint(clus.Tester.StressQPS),\n\t)\n\n\tclus.setStresserChecker()\n\n\treturn clus, nil\n}", "func InitRedisCluster(addrs []string, password string) error {\n\tRedisCluster = redis.NewClusterClient(&redis.ClusterOptions{\n\t\tAddrs: addrs,\n\t\tPassword: password,\n\t})\n\t_, err := RedisCluster.Ping().Result()\n\treturn err\n}", "func init() {\n\tRootCmd.AddCommand(ClusterCommand)\n}", "func initCluster(apiRouter *mux.Router, context *Context) {\n\taddContext := func(handler contextHandlerFunc) *contextHandler {\n\t\treturn newContextHandler(context, handler)\n\t}\n\n\tclustersRouter := apiRouter.PathPrefix(\"/clusters\").Subrouter()\n\tclustersRouter.Handle(\"\", addContext(handleGetClusters)).Methods(\"GET\")\n\tclustersRouter.Handle(\"\", addContext(handleCreateCluster)).Methods(\"POST\")\n\n\tclusterRouter := apiRouter.PathPrefix(\"/cluster/{cluster:[A-Za-z0-9]{26}}\").Subrouter()\n\tclusterRouter.Handle(\"\", addContext(handleGetCluster)).Methods(\"GET\")\n\tclusterRouter.Handle(\"\", addContext(handleRetryCreateCluster)).Methods(\"POST\")\n\tclusterRouter.Handle(\"\", 
addContext(handleUpdateClusterConfiguration)).Methods(\"PUT\")\n\tclusterRouter.Handle(\"/provision\", addContext(handleProvisionCluster)).Methods(\"POST\")\n\tclusterRouter.Handle(\"/kubernetes\", addContext(handleUpgradeKubernetes)).Methods(\"PUT\")\n\tclusterRouter.Handle(\"/size\", addContext(handleResizeCluster)).Methods(\"PUT\")\n\tclusterRouter.Handle(\"/utilities\", addContext(handleGetAllUtilityMetadata)).Methods(\"GET\")\n\tclusterRouter.Handle(\"/annotations\", addContext(handleAddClusterAnnotations)).Methods(\"POST\")\n\tclusterRouter.Handle(\"/annotation/{annotation-name}\", addContext(handleDeleteClusterAnnotation)).Methods(\"DELETE\")\n\tclusterRouter.Handle(\"/nodegroups\", addContext(handleCreateNodegroups)).Methods(\"POST\")\n\tclusterRouter.Handle(\"/nodegroup/{nodegroup}\", addContext(handleDeleteNodegroup)).Methods(\"DELETE\")\n\tclusterRouter.Handle(\"\", addContext(handleDeleteCluster)).Methods(\"DELETE\")\n}", "func NewCluster(pointers ...Pointer) *Cluster {\n\tvar (\n\t\tsumX, sumY float64\n\t\tcount int\n\t)\n\n\tc := &Cluster{\n\t\tPointers: pointers,\n\t}\n\n\tif len(pointers) == 0 {\n\t\tc.Centroid = geo.NewPoint(0, 0)\n\t\treturn c\n\t}\n\n\tif len(pointers) == 1 {\n\t\tc.Centroid = pointers[0].CenterPoint().Clone()\n\t\treturn c\n\t}\n\n\t// find the center/centroid of multiple points\n\tfor _, pointer := range c.Pointers {\n\t\tcp := pointer.CenterPoint()\n\n\t\tsumX += cp.X()\n\t\tsumY += cp.Y()\n\t\tcount++\n\t}\n\tc.Centroid = geo.NewPoint(sumX/float64(count), sumY/float64(count))\n\n\treturn c\n}", "func New(createCRD bool, namespace string) *Cluster {\n\t\n\tclientset := utils.MustNewKubeClient(); \n\treturn &Cluster{\n\t\tlogger: logrus.WithField(\"pkg\", \"controller\"),\n\t\tnamespace: namespace,\n\t\tkubeClientset: clientset,\n\t\tcreateCustomResource: createCRD,\n\t}\n}", "func New(t *testing.T, cfg Config) *Environment {\n\te := &Environment{\n\t\thelmPath: \"../kubernetes_helm/helm\",\n\t\tsynkPath: 
\"src/go/cmd/synk/synk_/synk\",\n\t\tt: t,\n\t\tcfg: cfg,\n\t\tscheme: k8sruntime.NewScheme(),\n\t\tclusters: map[string]*cluster{},\n\t}\n\tif cfg.SchemeFunc != nil {\n\t\tcfg.SchemeFunc(e.scheme)\n\t}\n\tscheme.AddToScheme(e.scheme)\n\n\tvar g errgroup.Group\n\t// Setup cluster concurrently.\n\tfor _, cfg := range cfg.Clusters {\n\t\t// Make name unique to avoid collisions across parallel tests.\n\t\tuniqName := fmt.Sprintf(\"%s-%x\", cfg.Name, time.Now().UnixNano())\n\t\tt.Logf(\"Assigned unique name %q to cluster %q\", uniqName, cfg.Name)\n\n\t\tcluster := &cluster{\n\t\t\tgenName: uniqName,\n\t\t\tcfg: cfg,\n\t\t}\n\t\te.clusters[cfg.Name] = cluster\n\n\t\tg.Go(func() error {\n\t\t\tif err := setupCluster(e.synkPath, cluster); err != nil {\n\t\t\t\t// If cluster has already been created, delete it.\n\t\t\t\tif cluster.kind != nil && os.Getenv(\"NO_TEARDOWN\") == \"\" {\n\t\t\t\t\tcluster.kind.Delete(cfg.Name, \"\")\n\t\t\t\t\tif cluster.kubeConfigPath != \"\" {\n\t\t\t\t\t\tos.Remove(cluster.kubeConfigPath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn errors.Wrapf(err, \"Create cluster %q\", cfg.Name)\n\t\t\t}\n\t\t\tlog.Printf(\"Created cluster %q\", cfg.Name)\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn e\n}", "func (c *Cluster) initializeNodes() {\n\tfor i := 0; i < c.nodeCount; i++ {\n\t\tn := NewNode(strconv.Itoa(i))\n\t\tc.nodes = append(c.nodes, n)\n\t}\n}", "func initializeClusterOperator(co *configv1.ClusterOperator) {\n\tco.Status.Versions = []configv1.OperandVersion{\n\t\t{\n\t\t\tName: OperatorVersionName,\n\t\t\tVersion: UnknownVersionValue,\n\t\t},\n\t\t{\n\t\t\tName: CoreDNSVersionName,\n\t\t\tVersion: UnknownVersionValue,\n\t\t},\n\t\t{\n\t\t\tName: OpenshiftCLIVersionName,\n\t\t\tVersion: UnknownVersionValue,\n\t\t},\n\t}\n\tco.Status.Conditions = []configv1.ClusterOperatorStatusCondition{\n\t\t{\n\t\t\tType: configv1.OperatorDegraded,\n\t\t\tStatus: 
configv1.ConditionUnknown,\n\t\t},\n\t\t{\n\t\t\tType: configv1.OperatorProgressing,\n\t\t\tStatus: configv1.ConditionUnknown,\n\t\t},\n\t\t{\n\t\t\tType: configv1.OperatorAvailable,\n\t\t\tStatus: configv1.ConditionUnknown,\n\t\t},\n\t}\n}", "func New(cfg Config) (*Cluster, error) {\n\tcluster := &Cluster{\n\t\tID: cfg.ID,\n\t\tName: cfg.Name,\n\t}\n\n\tdiscoargs := buildPFlagSlice(cfg.DiscoveryFlagsByImpl[cfg.DiscoveryImpl])\n\n\tdisco, err := discovery.New(cfg.DiscoveryImpl, cluster.ToProto(), discoargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating discovery impl (%s): %w\", cfg.DiscoveryImpl, err)\n\t}\n\n\tcluster.Discovery = disco\n\n\tprotocluster := cluster.ToProto()\n\n\tvtsqlargs := buildPFlagSlice(cfg.VtSQLFlags)\n\n\tvtsqlCfg, err := vtsql.Parse(protocluster, disco, vtsqlargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating vtsql connection config: %w\", err)\n\t}\n\n\tvtctldargs := buildPFlagSlice(cfg.VtctldFlags)\n\n\tvtctldCfg, err := vtctldclient.Parse(protocluster, disco, vtctldargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating vtctldclient proxy config: %w\", err)\n\t}\n\n\tcluster.DB = vtsql.New(vtsqlCfg)\n\tcluster.Vtctld = vtctldclient.New(vtctldCfg)\n\n\tif cfg.TabletFQDNTmplStr != \"\" {\n\t\tcluster.TabletFQDNTmpl, err = template.New(cluster.ID + \"-tablet-fqdn\").Parse(cfg.TabletFQDNTmplStr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse tablet fqdn template %s: %w\", cfg.TabletFQDNTmplStr, err)\n\t\t}\n\t}\n\n\treturn cluster, nil\n}", "func (api *clusterAPI) Create(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Create(context.Background(), obj)\n\t\tif err != nil && strings.Contains(err.Error(), \"AlreadyExists\") {\n\t\t\t_, err = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\n\t\t}\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Created})\n\treturn nil\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:docdb/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func InitMasterClusterComm(_routeHandlerMap RouteHandlerMap) {\n\trouteHandlerMap = _routeHandlerMap\n\n\t// Setup router\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/KillAll\", killAllRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/CreateNode\", createNodeRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/Send\", sendRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/Receive\", receiveRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/ReceiveAll\", receiveAllRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/BeginSnapshot\", beginSnapshotRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/CollectState\", collectStateRoute).Methods(\"GET\")\n\trouter.HandleFunc(\"/PrintSnapshot\", printSnapshotRoute).Methods(\"GET\")\n\n\t// Starting Cluster Server\n\t// fmt.Println(\"\\nStarting Cluster Server @ http://localhost:8118\")\n\tif err := http.ListenAndServe(\":8118\", router); err != nil {\n\t\t// fmt.Printf(\"\\nmux server: %v\\n\", err)\n\t}\n}", "func NewCluster(options *ClusterOptions) (*Cluster, error) {\n\tc := &Cluster{}\n\n\tif options == nil || options.Reader == nil || options.Builder == nil {\n\t\treturn nil, errors.New(\"invalid options\")\n\t}\n\tshards := options.Reader.ReadNodes()\n\tif shards == nil 
{\n\t\treturn nil, ErrReadShard\n\t}\n\tring := options.Builder.BuildRing(shards)\n\tif ring == nil {\n\t\treturn nil, ErrBuildRing\n\t}\n\n\tif options.Poolsize > 0 {\n\t\tc.poolsize = options.Poolsize\n\t} else {\n\t\tc.poolsize = 4\n\t}\n\tc.shards = shards\n\tc.ring = ring\n\tc.pool = make(map[*Shard]*pool.Pool, len(c.shards))\n\tc.failover = options.Failover\n\n\tc.checker = options.Checker\n\tc.status = make(map[string]ShardStatus, len(c.shards))\n\n\tupdates := c.checker.Start(c.shards)\n\tgo c.statusUpdateReceiver(updates)\n\n\treturn c, nil\n}", "func (cfg Config) Cluster(ctx context.Context) (*Cluster, error) {\n\treturn New(ctx, cfg)\n}", "func (c *InitRainbondCluster) Run(ctx context.Context) {\n\tdefer c.rollback(\"Close\", \"\", \"\")\n\tc.rollback(\"Init\", \"\", \"start\")\n\t// create adaptor\n\tadaptor, err := factory.GetCloudFactory().GetRainbondClusterAdaptor(c.config.Provider, c.config.AccessKey, c.config.SecretKey)\n\tif err != nil {\n\t\tc.rollback(\"Init\", fmt.Sprintf(\"create cloud adaptor failure %s\", err.Error()), \"failure\")\n\t\treturn\n\t}\n\n\tc.rollback(\"Init\", \"cloud adaptor create success\", \"success\")\n\tc.rollback(\"CheckCluster\", \"\", \"start\")\n\t// get kubernetes cluster info\n\tcluster, err := adaptor.DescribeCluster(c.config.EnterpriseID, c.config.ClusterID)\n\tif err != nil {\n\t\tcluster, err = adaptor.DescribeCluster(c.config.EnterpriseID, c.config.ClusterID)\n\t\tif err != nil {\n\t\t\tc.rollback(\"CheckCluster\", err.Error(), \"failure\")\n\t\t\treturn\n\t\t}\n\t}\n\t// check cluster status\n\tif cluster.State != \"running\" {\n\t\tc.rollback(\"CheckCluster\", fmt.Sprintf(\"cluster status is %s,not support init rainbond\", cluster.State), \"failure\")\n\t\treturn\n\t}\n\t// check cluster version\n\tif !versionutil.CheckVersion(cluster.KubernetesVersion) {\n\t\tc.rollback(\"CheckCluster\", fmt.Sprintf(\"current cluster version is %s, init rainbond support kubernetes version is 1.16-1.19\", 
cluster.KubernetesVersion), \"failure\")\n\t\treturn\n\t}\n\t// check cluster connection status\n\tlogrus.Infof(\"init kubernetes url %s\", cluster.MasterURL)\n\tif cluster.MasterURL.APIServerEndpoint == \"\" {\n\t\tc.rollback(\"CheckCluster\", \"cluster api not open eip,not support init rainbond\", \"failure\")\n\t\treturn\n\t}\n\n\tkubeConfig, err := adaptor.GetKubeConfig(c.config.EnterpriseID, c.config.ClusterID)\n\tif err != nil {\n\t\tkubeConfig, err = adaptor.GetKubeConfig(c.config.EnterpriseID, c.config.ClusterID)\n\t\tif err != nil {\n\t\t\tc.rollback(\"CheckCluster\", fmt.Sprintf(\"get kube config failure %s\", err.Error()), \"failure\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check cluster not init rainbond\n\tcoreClient, _, err := kubeConfig.GetKubeClient()\n\tif err != nil {\n\t\tc.rollback(\"CheckCluster\", fmt.Sprintf(\"get kube config failure %s\", err.Error()), \"failure\")\n\t\treturn\n\t}\n\n\t// get cluster node lists\n\tgetctx, cancel := context.WithTimeout(ctx, time.Second*10)\n\tnodes, err := coreClient.CoreV1().Nodes().List(getctx, metav1.ListOptions{})\n\tif err != nil {\n\t\tnodes, err = coreClient.CoreV1().Nodes().List(getctx, metav1.ListOptions{})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"get kubernetes cluster node failure %s\", err.Error())\n\t\t\tc.rollback(\"CheckCluster\", \"cluster node list can not found, please check cluster public access and account authorization\", \"failure\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tcancel()\n\t}\n\tif len(nodes.Items) == 0 {\n\t\tc.rollback(\"CheckCluster\", \"node num is 0, can not init rainbond\", \"failure\")\n\t\treturn\n\t}\n\tc.rollback(\"CheckCluster\", c.config.ClusterID, \"success\")\n\n\t// select gateway and chaos node\n\tgatewayNodes, chaosNodes := c.GetRainbondGatewayNodeAndChaosNodes(nodes.Items)\n\tinitConfig := adaptor.GetRainbondInitConfig(c.config.EnterpriseID, cluster, gatewayNodes, chaosNodes, c.rollback)\n\tinitConfig.RainbondVersion = 
version.RainbondRegionVersion\n\t// init rainbond\n\tc.rollback(\"InitRainbondRegionOperator\", \"\", \"start\")\n\tif len(initConfig.EIPs) == 0 {\n\t\tc.rollback(\"InitRainbondRegionOperator\", \"can not select eip\", \"failure\")\n\t\treturn\n\t}\n\n\trri := operator.NewRainbondRegionInit(*kubeConfig, repo.NewRainbondClusterConfigRepo(datastore.GetGDB()))\n\tif err := rri.InitRainbondRegion(initConfig); err != nil {\n\t\tc.rollback(\"InitRainbondRegionOperator\", err.Error(), \"failure\")\n\t\treturn\n\t}\n\tticker := time.NewTicker(time.Second * 5)\n\ttimer := time.NewTimer(time.Minute * 60)\n\tdefer timer.Stop()\n\tdefer ticker.Stop()\n\tvar operatorMessage, imageHubMessage, packageMessage, apiReadyMessage bool\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tc.rollback(\"InitRainbondRegion\", \"context cancel\", \"failure\")\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\tcase <-timer.C:\n\t\t\tc.rollback(\"InitRainbondRegion\", \"waiting rainbond region ready timeout\", \"failure\")\n\t\t\treturn\n\t\t}\n\t\tstatus, err := rri.GetRainbondRegionStatus(initConfig.ClusterID)\n\t\tif err != nil {\n\t\t\tif k8sErrors.IsNotFound(err) {\n\t\t\t\tc.rollback(\"InitRainbondRegion\", err.Error(), \"failure\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogrus.Errorf(\"get rainbond region status failure %s\", err.Error())\n\t\t}\n\t\tif status == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif status.OperatorReady && !operatorMessage {\n\t\t\tc.rollback(\"InitRainbondRegionOperator\", \"\", \"success\")\n\t\t\tc.rollback(\"InitRainbondRegionImageHub\", \"\", \"start\")\n\t\t\toperatorMessage = true\n\t\t\tcontinue\n\t\t}\n\n\t\tif idx, condition := status.RainbondCluster.Status.GetCondition(rainbondv1alpha1.RainbondClusterConditionTypeImageRepository); !imageHubMessage && idx != -1 && condition.Status == v1.ConditionTrue {\n\t\t\tc.rollback(\"InitRainbondRegionImageHub\", \"\", \"success\")\n\t\t\tc.rollback(\"InitRainbondRegionPackage\", \"\", \"start\")\n\t\t\timageHubMessage = 
true\n\t\t\tcontinue\n\t\t}\n\t\tstatusStr := fmt.Sprintf(\"Push Images:%d/%d\\t\", len(status.RainbondPackage.Status.ImagesPushed), status.RainbondPackage.Status.ImagesNumber)\n\t\tfor _, con := range status.RainbondCluster.Status.Conditions {\n\t\t\tif con.Status == v1.ConditionTrue {\n\t\t\t\tstatusStr += fmt.Sprintf(\"%s=>%s;\\t\", con.Type, con.Status)\n\t\t\t} else {\n\t\t\t\tstatusStr += fmt.Sprintf(\"%s=>%s=>%s=>%s;\\t\", con.Type, con.Status, con.Reason, con.Message)\n\t\t\t}\n\t\t}\n\t\tlogrus.Infof(\"cluster %s states: %s\", cluster.Name, statusStr)\n\n\t\tfor _, con := range status.RainbondPackage.Status.Conditions {\n\t\t\tif con.Type == rainbondv1alpha1.Ready && con.Status == rainbondv1alpha1.Completed && !packageMessage {\n\t\t\t\tc.rollback(\"InitRainbondRegionPackage\", \"\", \"success\")\n\t\t\t\tc.rollback(\"InitRainbondRegionRegionConfig\", \"\", \"start\")\n\t\t\t\tpackageMessage = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tidx, condition := status.RainbondCluster.Status.GetCondition(rainbondv1alpha1.RainbondClusterConditionTypeRunning)\n\t\tif idx != -1 && condition.Status == v1.ConditionTrue && packageMessage && !apiReadyMessage {\n\t\t\tapiReadyMessage = true\n\t\t\tbreak\n\t\t}\n\t}\n\tc.rollback(\"InitRainbondRegion\", cluster.ClusterID, \"success\")\n}", "func CreateCluster(request *restful.Request, response *restful.Response) {\n\tstart := time.Now()\n\n\tform := CreateClusterForm{}\n\t_ = request.ReadEntity(&form)\n\n\terr := utils.Validate.Struct(&form)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t_ = response.WriteHeaderAndEntity(400, utils.FormatValidationError(err))\n\t\treturn\n\t}\n\n\tuser := auth.GetUser(request)\n\tcluster := &models.BcsCluster{\n\t\tID: form.ClusterID,\n\t\tCreatorId: user.ID,\n\t}\n\tswitch form.ClusterType {\n\tcase \"k8s\":\n\t\tcluster.ClusterType = BcsK8sCluster\n\tcase \"mesos\":\n\t\tcluster.ClusterType = 
BcsMesosCluster\n\tcase \"tke\":\n\t\tcluster.ClusterType = BcsTkeCluster\n\t\tif form.TkeClusterID == \"\" || form.TkeClusterRegion == \"\" {\n\t\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t\tblog.Warnf(\"create tke cluster failed, empty tke clusterid or region\")\n\t\t\tmessage := fmt.Sprintf(\"errcode: %d, create tke cluster failed, empty tke clusterid or region\", common.BcsErrApiBadRequest)\n\t\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\t\treturn\n\t\t}\n\t\tcluster.TkeClusterId = form.TkeClusterID\n\t\tcluster.TkeClusterRegion = form.TkeClusterRegion\n\tdefault:\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create failed, cluster type invalid\")\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create failed, cluster type invalid\", common.BcsErrApiBadRequest)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\tclusterInDb := sqlstore.GetCluster(cluster.ID)\n\tif clusterInDb != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create cluster failed, cluster [%s] already exist\", cluster.ID)\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster failed, cluster [%s] already exist\", common.BcsErrApiBadRequest, cluster.ID)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\terr = sqlstore.CreateCluster(cluster)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Errorf(\"failed to create cluster [%s]: %s\", cluster.ID, err.Error())\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster [%s] failed, error: %s\", common.BcsErrApiInternalDbError, cluster.ID, err.Error())\n\t\tutils.WriteServerError(response, common.BcsErrApiInternalDbError, 
message)\n\t\treturn\n\t}\n\n\tdata := utils.CreateResponseData(nil, \"success\", *cluster)\n\t_, _ = response.Write([]byte(data))\n\n\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.SucStatus, start)\n}", "func Create(req clusterapi.Request) (clusterapi.ClusterAPI, error) {\n\t// Validates parameters\n\tif req.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.Name: can't be empty\")\n\t}\n\tif req.CIDR == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.CIDR: can't be empty\")\n\t}\n\n\t// We need at first the Metadata container to be present\n\terr := utils.CreateMetadataContainer()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create Object Container: %s\\n\", err.Error())\n\t}\n\n\tvar network *pb.Network\n\tvar instance clusterapi.ClusterAPI\n\n\tlog.Printf(\"Creating infrastructure for cluster '%s'\", req.Name)\n\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creates network\n\tlog.Printf(\"Creating Network 'net-%s'\", req.Name)\n\treq.Name = strings.ToLower(req.Name)\n\tnetwork, err = utils.CreateNetwork(\"net-\"+req.Name, req.CIDR)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to create Network '%s': %s\", req.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tswitch req.Flavor {\n\tcase Flavor.DCOS:\n\t\treq.NetworkID = network.ID\n\t\treq.Tenant = tenant\n\t\tinstance, err = dcos.NewCluster(req)\n\t\tif err != nil {\n\t\t\t//utils.DeleteNetwork(network.ID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Printf(\"Cluster '%s' created and initialized successfully\", req.Name)\n\treturn instance, nil\n}", "func NewCluster(client ExtendedClient, applier Applier, sshKeyRing ssh.KeyRing, logger log.Logger, allowedNamespaces map[string]struct{}, imageIncluder cluster.Includer, resourceExcludeList []string) *Cluster {\n\tif imageIncluder == nil {\n\t\timageIncluder = cluster.AlwaysInclude\n\t}\n\n\tc := &Cluster{\n\t\tclient: client,\n\t\tapplier: 
applier,\n\t\tlogger: logger,\n\t\tsshKeyRing: sshKeyRing,\n\t\tallowedNamespaces: allowedNamespaces,\n\t\tloggedAllowedNS: map[string]bool{},\n\t\timageIncluder: imageIncluder,\n\t\tresourceExcludeList: resourceExcludeList,\n\t}\n\n\treturn c\n}", "func NewCluster(hosts []string, opts ...WrapOption) *ClusterConfig {\n\treturn &ClusterConfig{\n\t\tClusterConfig: gocql.NewCluster(hosts...),\n\t\thosts: hosts,\n\t\topts: opts,\n\t}\n}", "func newClusterWithSizes(masterSize int, computes ...clusteroperator.ClusterMachineSet) *clusteroperator.Cluster {\n\tcluster := &clusteroperator.Cluster{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tUID: testClusterUUID,\n\t\t\tName: testClusterName,\n\t\t\tNamespace: testNamespace,\n\t\t},\n\t\tSpec: clusteroperator.ClusterSpec{\n\t\t\tMachineSets: append(computes, clusteroperator.ClusterMachineSet{\n\t\t\t\tName: \"master\",\n\t\t\t\tMachineSetConfig: clusteroperator.MachineSetConfig{\n\t\t\t\t\tInfra: true,\n\t\t\t\t\tSize: masterSize,\n\t\t\t\t\tNodeType: clusteroperator.NodeTypeMaster,\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t\tStatus: clusteroperator.ClusterStatus{\n\t\t\tMasterMachineSetName: testClusterName + \"-master-random\",\n\t\t\tInfraMachineSetName: testClusterName + \"-master-random\",\n\t\t},\n\t}\n\treturn cluster\n}", "func CreateCluster(data []int) Cluster {\n\treturn Cluster{\n\t\tindices: append([]int(nil), data...),\n\t}\n}", "func newKrakenClusters(c *SamsungV1alpha1Client, namespace string) *krakenClusters {\n\treturn &krakenClusters{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewCluster(config string, channels ...string) (*client, error) {\n\t// parse the url provided\n\toptions, err := redis.ParseURL(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create the Redis client from failover options\n\tqueue := redis.NewFailoverClient(failoverFromOptions(options))\n\n\t// setup queue with proper configuration\n\terr = setupQueue(queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 
create the client object\n\tclient := &client{\n\t\tQueue: queue,\n\t\tOptions: options,\n\t}\n\n\treturn client, nil\n}", "func Cluster(context *cli.Context) error {\n\tregion := context.String(flags.RegionFlag)\n\tif err := fieldEmpty(region, flags.RegionFlag); err != nil {\n\t\treturn err\n\t}\n\tclusterProfileName := context.String(flags.ConfigNameFlag)\n\tif err := fieldEmpty(clusterProfileName, flags.ConfigNameFlag); err != nil {\n\t\treturn err\n\t}\n\tcluster := context.String(flags.ClusterFlag)\n\tif err := fieldEmpty(cluster, flags.ClusterFlag); err != nil {\n\t\treturn err\n\t}\n\n\tlaunchType := context.String(flags.DefaultLaunchTypeFlag)\n\tif err := config.ValidateLaunchType(launchType); err != nil {\n\t\treturn err\n\t}\n\n\tcfnStackName := context.String(flags.CFNStackNameFlag)\n\tcomposeServiceNamePrefix := context.String(flags.ComposeServiceNamePrefixFlag)\n\n\tclusterConfig := &config.Cluster{\n\t\tCluster: cluster,\n\t\tRegion: region,\n\t\tCFNStackName: cfnStackName,\n\t\tComposeServiceNamePrefix: composeServiceNamePrefix,\n\t\tDefaultLaunchType: launchType,\n\t}\n\n\trdwr, err := config.NewReadWriter()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error saving cluster configuration\")\n\t}\n\tif err = rdwr.SaveCluster(clusterProfileName, clusterConfig); err != nil {\n\t\treturn errors.Wrap(err, \"Error saving cluster configuration\")\n\t}\n\n\tlogrus.Infof(\"Saved ECS CLI cluster configuration %s.\", clusterProfileName)\n\treturn nil\n}", "func (c *AKSCluster) CreateCluster() error {\n\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagCreateCluster})\n\n\t// create profiles model for the request\n\tvar profiles []containerservice.AgentPoolProfile\n\tif nodePools := c.modelCluster.Azure.NodePools; nodePools != nil {\n\t\tfor _, np := range nodePools {\n\t\t\tif np != nil {\n\t\t\t\tcount := int32(np.Count)\n\t\t\t\tname := np.Name\n\t\t\t\tprofiles = append(profiles, containerservice.AgentPoolProfile{\n\t\t\t\t\tName: 
&name,\n\t\t\t\t\tCount: &count,\n\t\t\t\t\tVMSize: containerservice.VMSizeTypes(np.NodeInstanceType),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tr := azureCluster.CreateClusterRequest{\n\t\tName: c.modelCluster.Name,\n\t\tLocation: c.modelCluster.Location,\n\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\tProfiles: profiles,\n\t}\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// call creation\n\tcreatedCluster, err := azureClient.CreateUpdateCluster(client, &r)\n\tif err != nil {\n\t\t// creation failed\n\t\t// todo status code!??\n\t\treturn err\n\t}\n\t// creation success\n\tlog.Info(\"Cluster created successfully!\")\n\n\tc.azureCluster = &createdCluster.Value\n\n\t// polling cluster\n\tpollingResult, err := azureClient.PollingCluster(client, r.Name, r.ResourceGroup)\n\tif err != nil {\n\t\t// polling error\n\t\t// todo status code!??\n\t\treturn err\n\t}\n\tlog.Info(\"Cluster is ready...\")\n\tc.azureCluster = &pollingResult.Value\n\treturn nil\n}", "func New(config LoadAgentClusterConfig, ltConfig loadtest.Config, log *mlog.Logger) (*LoadAgentCluster, error) {\n\tif log == nil {\n\t\treturn nil, errors.New(\"logger should not be nil\")\n\t}\n\tif err := defaults.Validate(config); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not validate configuration: %w\", err)\n\t}\n\tagents := make([]*client.Agent, len(config.Agents))\n\terrMap := make(map[*client.Agent]*errorTrack)\n\tfor i := 0; i < len(agents); i++ {\n\t\tagent, err := client.New(config.Agents[i].Id, config.Agents[i].ApiURL, nil)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cluster: failed to create api client: %w\", err)\n\t\t}\n\t\tagents[i] = agent\n\t\terrMap[agent] = &errorTrack{}\n\n\t\t// We check if the agent has already been created.\n\t\tif _, err := agent.Status(); err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := createAgent(agent, ltConfig); err 
!= nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &LoadAgentCluster{\n\t\tagents: agents,\n\t\tconfig: config,\n\t\tltConfig: ltConfig,\n\t\terrMap: errMap,\n\t\tlog: log,\n\t}, nil\n}", "func (ac *AWSCluster) CreateAWSKubernetesCluster() {\n\tsess := session.Must(session.NewSession())\n\tiamClient := iam.New(sess)\n\teksClient := eks.New(sess)\n\tcfClient := cloudformation.New(sess)\n\tec2Client := ec2.New(sess)\n\n\t// Creating Amazon EKS Role\n\teksRoleName := awsRolePrefix + ac.serviceSuffix\n\teksRoleArn := ac.createEksRole(iamClient, &eksRoleName)\n\n\t// Creating Amazon EKS Cluster VPC\n\tclusterStackName := awsClusterStackPrefix + ac.serviceSuffix\n\tclusterStackOutputs := ac.createEksClusterVpc(cfClient, &clusterStackName)\n\n\t// Creating Amazon EKS Cluster\n\tclusterName := awsClusterPrefix + ac.serviceSuffix\n\tcluster := ac.createEksCluster(eksClient, &clusterName, eksRoleArn, clusterStackOutputs)\n\n\t// Creating kubeconfig file\n\tac.createKubeConfigFile(cluster)\n\n\t// Creating Amazon EKS Worker Nodes\n\tkeyPairName := awsKeyPairPrefix + ac.serviceSuffix\n\tac.createEksEc2KeyPair(ec2Client, &keyPairName)\n\n\tnodesStackName := awsNodesStackPrefix + ac.serviceSuffix\n\tnodeGroupName := awsNodeGroupPrefix + ac.serviceSuffix\n\tnodeSequrityGroup, nodeInstanceRole := ac.createEksWorkerNodes(cfClient, &nodesStackName, &nodeGroupName, &clusterName, &keyPairName, clusterStackOutputs)\n\n\tac.authorizeSecurityGroupIngress(ec2Client, nodeSequrityGroup)\n\n\t// Enable worker nodes to join the cluster\n\tsf, err := ioutil.ReadFile(path.Join(ac.configPath, \"aws-auth-cm-temp.yaml\"))\n\tac.checkError(err)\n\n\tf, err := ioutil.TempFile(os.TempDir(), \"aws-auth-cm-temp-*.yaml\")\n\tac.checkError(err)\n\n\ts := string(sf)\n\t_, err = f.Write([]byte(strings.Replace(s, \"<NodeInstanceRole>\", *nodeInstanceRole, -1)))\n\tac.checkError(err)\n\n\t_ = f.Close()\n\n\tac.execCommand(\"kubectl\", \"apply\", \"-f\", f.Name())\n\t_ = 
os.Remove(f.Name())\n\n\tac.createSSHConfig(ec2Client, clusterStackOutputs.VpcId, \"scp-config\"+ac.serviceSuffix)\n\n\tac.execCommand(\"kubectl\", \"apply\", \"-f\", \"aws-k8s-cni.yaml\")\n}", "func (elasticCluster *ElasticCluster) Init() error {\n\t//加载配置文件\n\tif err := elasticCluster.loadConfig(); err != nil {\n\t\tlog.Logger.Error(\"load adapter file error\")\n\t\treturn err\n\t}\n\tlog.Logger.Info(\"load adapter file success\")\n\n\t//拼接urls\n\tvar urls []string\n\tfor _, elasticNode := range elasticCluster.ElasticNodes {\n\t\turl := \"http://\" + elasticNode.IP + \":\" + elasticNode.Port\n\t\turls = append(urls, url)\n\t}\n\n\t//创建client\n\telasticClient, err := elastic.NewClient(elastic.SetURL(urls...), elastic.SetSniff(elasticCluster.Sniff),\n\t\telastic.SetHealthcheck(elasticCluster.Healthcheck), elastic.SetBasicAuth(elasticCluster.User, elasticCluster.Password))\n\tif err != nil {\n\t\tlog.Logger.Error(\"create client for ES error\")\n\t\treturn err\n\t}\n\tlog.Logger.Info(\"create client for ES success\")\n\n\t//client赋值\n\telasticCluster.Client = elasticClient\n\n\t//验证index/type是否存在\n\tif mapping, err := elasticClient.GetMapping().Index(elasticCluster.Index).\n\t\tType(elasticCluster.TypeAlias).Do(context.Background()); err != nil || len(mapping) == 0 {\n\t\tlog.Logger.Warn(\"type is not exist\")\n\t\tif err := elasticCluster.createType(); err != nil {\n\t\t\tlog.Logger.Error(\"create type error\")\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger.Info(\"create type success\")\n\t} else {\n\t\tlog.Logger.Info(\"type is already exist\")\n\t}\n\treturn nil\n}", "func newCluster() *cobra.Command {\n\tvar cluster *[]string\n\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster\",\n\t\tShort: \"display cluster nodes.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclient, err := getLeader(*cluster)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't connect to cluster leader\")\n\t\t\t}\n\t\t\tdefer 
client.Close()\n\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\t\tdefer cancel()\n\n\t\t\tvar leader *dqclient.NodeInfo\n\t\t\tvar nodes []dqclient.NodeInfo\n\t\t\tif leader, err = client.Leader(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get leader\")\n\t\t\t}\n\n\t\t\tif nodes, err = client.Cluster(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get cluster\")\n\t\t\t}\n\n\t\t\tfmt.Printf(\"ID \\tLeader \\tAddress\\n\")\n\t\t\tfor _, node := range nodes {\n\t\t\t\tfmt.Printf(\"%d \\t%v \\t%s\\n\", node.ID, node.ID == leader.ID, node.Address)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tflags := cmd.Flags()\n\tcluster = flags.StringSliceP(\"cluster\", \"c\", defaultCluster, \"addresses of existing cluster nodes\")\n\n\treturn cmd\n}", "func ExampleClustersClient_BeginCreateOrUpdate_putAClusterWithMinimumParameters() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginCreateOrUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.Cluster{\n\t\tLocation: to.Ptr(\"eastus\"),\n\t\tTags: map[string]*string{},\n\t\tProperties: &armservicefabric.ClusterProperties{\n\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t\t\t},\n\t\t\tFabricSettings: 
[]*armservicefabric.SettingsSectionDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240743\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t{\n\t// \t\t\t\tCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t}},\n\t// \t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t},\n\t// \t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t},\n\t// \t\tClusterCodeVersion: to.Ptr(\"7.0.470.9590\"),\n\t// \t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t},\n\t// \t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// 
\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t}},\n\t// \t\t}},\n\t// \t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t}},\n\t// \t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t},\n\t// \t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:45:00\"),\n\t// \t\t\tHealthCheckStableDuration: to.Ptr(\"00:05:00\"),\n\t// \t\t\tHealthCheckWaitDuration: to.Ptr(\"00:05:00\"),\n\t// \t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\tMaxPercentUnhealthyApplications: 
to.Ptr[int32](100),\n\t// \t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](100),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDomainTimeout: to.Ptr(\"02:00:00\"),\n\t// \t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"10675199.02:48:05.4775807\"),\n\t// \t\t\tUpgradeTimeout: to.Ptr(\"12:00:00\"),\n\t// \t\t},\n\t// \t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t// \t},\n\t// }\n}", "func (m *K3dClusterManager) Create(ctx context.Context, opts CreateOptions) error {\n\tk3sImage := fmt.Sprintf(\"%s:%s\", types.DefaultK3sImageRepo, k3sVersion)\n\n\thostStoragePath := filepath.Join(m.cfg.WorkDir.Path, HostStorageName)\n\tif err := os.MkdirAll(hostStoragePath, 0700); err != nil {\n\t\treturn fmt.Errorf(\"failed to make the host storage directory: %w\", err)\n\t}\n\n\tlocalStorage := fmt.Sprintf(\"%s:%s\",\n\t\thostStoragePath,\n\t\tk3sLocalStoragePath)\n\tvolumes := []string{\n\t\tlocalStorage,\n\t}\n\n\t// If /dev/mapper exists, we'll automatically map it into the cluster\n\t// controller.\n\tif _, err := os.Stat(\"/dev/mapper\"); !os.IsNotExist(err) {\n\t\tvolumes = append(volumes, \"/dev/mapper:/dev/mapper:ro\")\n\t}\n\n\texposeAPI := types.ExposeAPI{\n\t\tHost: types.DefaultAPIHost,\n\t\tHostIP: types.DefaultAPIHost,\n\t\tPort: types.DefaultAPIPort,\n\t}\n\n\tregistryPortMapping := fmt.Sprintf(\"%d:%d\", opts.ImageRegistryPort, opts.ImageRegistryPort)\n\n\tserverNode := &types.Node{\n\t\tRole: types.ServerRole,\n\t\tImage: k3sImage,\n\t\tServerOpts: types.ServerOpts{\n\t\t\tExposeAPI: exposeAPI,\n\t\t},\n\t\tVolumes: volumes,\n\t\tPorts: []string{registryPortMapping},\n\t}\n\n\tnodes := []*types.Node{\n\t\tserverNode,\n\t}\n\n\tfor i := 0; i < WorkerCount; i++ {\n\t\tnode := &types.Node{\n\t\t\tRole: types.AgentRole,\n\t\t\tImage: k3sImage,\n\t\t\tArgs: agentArgs,\n\t\t\tVolumes: volumes,\n\t\t}\n\n\t\tnodes = append(nodes, node)\n\t}\n\n\tnetwork := types.ClusterNetwork{\n\t\tName: NetworkName,\n\t}\n\n\tlbHostPort := DefaultLoadBalancerHostPort\n\tif 
opts.LoadBalancerHostPort != 0 {\n\t\tlbHostPort = opts.LoadBalancerHostPort\n\t}\n\n\tlbPortMapping := fmt.Sprintf(\"%d:%d\", lbHostPort, DefaultLoadBalancerNodePort)\n\n\tclusterConfig := &types.Cluster{\n\t\tName: ClusterName,\n\t\tServerLoadBalancer: &types.Node{\n\t\t\tRole: types.LoadBalancerRole,\n\t\t\tPorts: []string{lbPortMapping},\n\t\t},\n\t\tNodes: nodes,\n\t\tCreateClusterOpts: &types.ClusterCreateOpts{\n\t\t\tWaitForServer: true,\n\t\t},\n\t\tNetwork: network,\n\t\tExposeAPI: exposeAPI,\n\t}\n\n\tif err := k3dcluster.ClusterCreate(ctx, m.runtime, clusterConfig); err != nil {\n\t\treturn fmt.Errorf(\"failed to create cluster: %w\", err)\n\t}\n\n\treturn nil\n}", "func (flags Etcd) CreateInitialCluster(sctx *ServiceContext) string {\n\treturn flags.createEndpoints(sctx, defaultEtcdPeerPort, func(node Node) string {\n\t\treturn node.Name + \"=\"\n\t})\n}", "func (obj *ECDSCluster) Init() {\n\tif obj.Status.ActualState == \"\" {\n\t\tobj.Status.ActualState = StateUninitialized\n\t}\n\tif obj.Spec.TargetState == \"\" {\n\t\tobj.Spec.TargetState = StateUninitialized\n\t}\n\tobj.Status.Satisfied = (obj.Spec.TargetState == obj.Status.ActualState)\n}", "func InitManagementCluster(ctx context.Context, input *InitManagementClusterInput) ManagementCluster {\n\tBy(\"initializing the management cluster\")\n\tExpect(input).ToNot(BeNil())\n\n\tBy(\"initialzing the management cluster configuration defaults\")\n\tinput.Defaults(ctx)\n\n\tBy(\"validating the management cluster configuration\")\n\tExpect(input.Validate()).To(Succeed())\n\n\tBy(\"loading the kubernetes and capi core schemes\")\n\tTryAddDefaultSchemes(input.Scheme)\n\n\tBy(\"creating the management cluster\")\n\tmanagementCluster, err := input.NewManagementClusterFn()\n\tExpect(err).ToNot(HaveOccurred())\n\tExpect(managementCluster).ToNot(BeNil())\n\n\t// Load the images.\n\tif imageLoader, ok := managementCluster.(ImageLoader); ok {\n\t\tBy(\"management cluster supports loading images\")\n\t\tfor _, 
image := range input.Images {\n\t\t\tswitch image.LoadBehavior {\n\t\t\tcase MustLoadImage:\n\t\t\t\tBy(fmt.Sprintf(\"must load image %s into the management cluster\", image.Name))\n\t\t\t\tExpect(imageLoader.LoadImage(ctx, image.Name)).To(Succeed())\n\t\t\tcase TryLoadImage:\n\t\t\t\tBy(fmt.Sprintf(\"try to load image %s into the management cluster\", image.Name))\n\t\t\t\timageLoader.LoadImage(ctx, image.Name) //nolint:errcheck\n\t\t\t}\n\t\t}\n\t}\n\n\t// Install the YAML from the component generators.\n\tfor _, componentGenerator := range input.ComponentGenerators {\n\t\tInstallComponents(ctx, managementCluster, componentGenerator)\n\t}\n\n\t// Install all components.\n\tfor _, component := range input.Components {\n\t\tfor _, source := range component.Sources {\n\t\t\tname := component.Name\n\t\t\tif source.Name != \"\" {\n\t\t\t\tname = fmt.Sprintf(\"%s/%s\", component.Name, source.Name)\n\t\t\t}\n\t\t\tsource.Name = name\n\t\t\tInstallComponents(ctx, managementCluster, ComponentGeneratorForComponentSource(source))\n\t\t}\n\t\tfor _, waiter := range component.Waiters {\n\t\t\tswitch waiter.Type {\n\t\t\tcase PodsWaiter:\n\t\t\t\tWaitForPodsReadyInNamespace(ctx, managementCluster, waiter.Value)\n\t\t\tcase ServiceWaiter:\n\t\t\t\tWaitForAPIServiceAvailable(ctx, managementCluster, waiter.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn managementCluster\n}", "func InitManagementCluster(ctx context.Context, input *InitManagementClusterInput) ManagementCluster {\n\tBy(\"initializing the management cluster\")\n\tExpect(input).ToNot(BeNil())\n\n\tBy(\"initialzing the management cluster configuration defaults\")\n\tinput.Defaults(ctx)\n\n\tBy(\"validating the management cluster configuration\")\n\tExpect(input.Validate()).To(Succeed())\n\n\tBy(\"loading the kubernetes and capi core schemes\")\n\tTryAddDefaultSchemes(input.Scheme)\n\n\tBy(\"creating the management cluster\")\n\tmanagementCluster, err := 
input.NewManagementClusterFn()\n\tExpect(err).ToNot(HaveOccurred())\n\tExpect(managementCluster).ToNot(BeNil())\n\n\t// Load the images.\n\tif imageLoader, ok := managementCluster.(ImageLoader); ok {\n\t\tBy(\"management cluster supports loading images\")\n\t\tfor _, image := range input.Images {\n\t\t\tswitch image.LoadBehavior {\n\t\t\tcase MustLoadImage:\n\t\t\t\tBy(fmt.Sprintf(\"must load image %s into the management cluster\", image.Name))\n\t\t\t\tExpect(imageLoader.LoadImage(ctx, image.Name)).To(Succeed())\n\t\t\tcase TryLoadImage:\n\t\t\t\tBy(fmt.Sprintf(\"try to load image %s into the management cluster\", image.Name))\n\t\t\t\timageLoader.LoadImage(ctx, image.Name) //nolint:errcheck\n\t\t\t}\n\t\t}\n\t}\n\n\t// Install the YAML from the component generators.\n\tfor _, componentGenerator := range input.ComponentGenerators {\n\t\tInstallComponents(ctx, managementCluster, componentGenerator)\n\t}\n\n\t// Install all components.\n\tfor _, component := range input.Components {\n\t\tfor _, source := range component.Sources {\n\t\t\tname := component.Name\n\t\t\tif source.Name != \"\" {\n\t\t\t\tname = fmt.Sprintf(\"%s/%s\", component.Name, source.Name)\n\t\t\t}\n\t\t\tsource.Name = name\n\t\t\tInstallComponents(ctx, managementCluster, ComponentGeneratorForComponentSource(source))\n\t\t}\n\t\tfor _, waiter := range component.Waiters {\n\t\t\tswitch waiter.Type {\n\t\t\tcase PodsWaiter:\n\t\t\t\tWaitForPodsReadyInNamespace(ctx, managementCluster, waiter.Value)\n\t\t\tcase ServiceWaiter:\n\t\t\t\tWaitForAPIServiceAvailable(ctx, managementCluster, waiter.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn managementCluster\n}", "func (c *Controller) processClusterNew(ecs *ecsv1.KubernetesCluster) error {\n\tdeployMode := ecs.Spec.Cluster.DeployMode\n\n\tvar err error\n\tswitch deployMode {\n\tcase ecsv1.BinaryDeployMode:\n\t\terr = c.sshInstaller.ClusterNew(ecs)\n\tcase ecsv1.ContainerDeployMode:\n\t\terr = c.grpcInstaller.ClusterNew(ecs)\n\t}\n\n\tif err != nil 
{\n\t\tglog.Errorf(\"install cluster %s failed with %v\", ecs.Name, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (clus *Cluster) Add() error {\n\tlg.Infof(\"getting default host\")\n\tdhost, err := netutil.GetDefaultHost()\n\tif err != nil {\n\t\tlg.Warn(err)\n\t\tlg.Warn(\"overwriting default host with 'localhost\")\n\t\tdhost = \"localhost\"\n\t}\n\tlg.Infof(\"detected default host %q\", dhost)\n\n\tclus.opLock.Lock()\n\tdefer clus.opLock.Unlock()\n\n\tclus.mmu.Lock()\n\tdefer clus.mmu.Unlock()\n\n\tcfg := embed.NewConfig()\n\n\tcfg.ClusterState = embed.ClusterStateFlagExisting\n\n\tcfg.Name = fmt.Sprintf(\"node%d\", clus.size+1)\n\tcfg.Dir = filepath.Join(clus.rootDir, cfg.Name+\".data-dir-etcd\")\n\tcfg.WalDir = filepath.Join(clus.rootDir, cfg.Name+\".data-dir-etcd\", \"wal\")\n\n\t// this is fresh cluster, so remove any conflicting data\n\tos.RemoveAll(cfg.Dir)\n\tlg.Infof(\"removed %q\", cfg.Dir)\n\tos.RemoveAll(cfg.WalDir)\n\tlg.Infof(\"removed %q\", cfg.WalDir)\n\n\tcurl := url.URL{Scheme: clus.ccfg.ClientScheme(), Host: fmt.Sprintf(\"localhost:%d\", clus.basePort)}\n\tcfg.ACUrls = []url.URL{curl}\n\tcfg.LCUrls = []url.URL{curl}\n\tif dhost != \"localhost\" {\n\t\t// expose default host to other machines in listen address (e.g. 
Prometheus dashboard)\n\t\tcurl2 := url.URL{Scheme: clus.ccfg.ClientScheme(), Host: fmt.Sprintf(\"%s:%d\", dhost, clus.basePort)}\n\t\tcfg.LCUrls = append(cfg.LCUrls, curl2)\n\t\tlg.Infof(\"%q is set up to listen on client url %q (default host)\", cfg.Name, curl2.String())\n\t}\n\tlg.Infof(\"%q is set up to listen on client url %q\", cfg.Name, curl.String())\n\n\tpurl := url.URL{Scheme: clus.ccfg.PeerScheme(), Host: fmt.Sprintf(\"localhost:%d\", clus.basePort+1)}\n\tcfg.APUrls = []url.URL{purl}\n\tcfg.LPUrls = []url.URL{purl}\n\n\tclus.size++\n\tclus.basePort += 2\n\n\tcfg.ClientAutoTLS = clus.ccfg.ClientAutoTLS\n\tcfg.ClientTLSInfo = clus.ccfg.ClientTLSInfo\n\tcfg.PeerAutoTLS = clus.ccfg.PeerAutoTLS\n\tcfg.PeerTLSInfo = clus.ccfg.PeerTLSInfo\n\n\t// auto-compaction every hour\n\tcfg.AutoCompactionMode = embed.CompactorModePeriodic\n\tcfg.AutoCompactionRetention = \"1h\"\n\n\tclus.Members = append(clus.Members, &Member{\n\t\tclus: clus,\n\t\tcfg: cfg,\n\t\tstatus: clusterpb.MemberStatus{\n\t\t\tName: cfg.Name,\n\t\t\tEndpoint: curl.String(),\n\t\t\tIsLeader: false,\n\t\t\tState: clusterpb.StoppedMemberStatus,\n\t\t},\n\t})\n\tidx := len(clus.Members) - 1\n\tclus.clientHostToIndex[curl.Host] = idx\n\n\tfor i := 0; i < clus.size; i++ {\n\t\tclus.Members[i].cfg.InitialCluster = clus.initialCluster()\n\t}\n\n\tlg.Infof(\"adding member %q\", clus.Members[idx].cfg.Name)\n\tcli, _, err := clus.Members[0].Client(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithTimeout(clus.rootCtx, 3*time.Second)\n\t_, err = cli.MemberAdd(ctx, []string{clus.Members[idx].cfg.APUrls[0].String()})\n\tcancel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlg.Infof(\"added member %q\", clus.Members[idx].cfg.Name)\n\n\tlg.Infof(\"starting member %q\", clus.Members[idx].cfg.Name)\n\tif serr := clus.Members[idx].Start(); serr != nil {\n\t\treturn serr\n\t}\n\tlg.Infof(\"started member %q\", clus.Members[idx].cfg.Name)\n\n\treturn nil\n}", "func (gs *GKEClient) 
Setup(numNodes *int64, nodeType *string, region *string, zone *string, project *string) (ClusterOperations, error) {\n\tvar err error\n\tgc := &GKECluster{\n\t\tRequest: &GKERequest{\n\t\t\tNumNodes: DefaultGKENumNodes,\n\t\t\tNodeType: DefaultGKENodeType,\n\t\t\tRegion: DefaultGKERegion,\n\t\t\tZone: DefaultGKEZone,\n\t\t\tBackupRegions: DefaultGKEBackupRegions},\n\t}\n\n\tctx := context.Background()\n\n\tc, err := google.DefaultClient(ctx, container.CloudPlatformScope)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"failed create google client: '%v'\", err)\n\t}\n\n\tcontainerService, err := container.New(c)\n\tif nil != err {\n\t\treturn nil, fmt.Errorf(\"failed create container service: '%v'\", err)\n\t}\n\tgc.operations = &GKESDKClient{containerService}\n\n\tif nil != project { // use provided project and create cluster\n\t\tgc.Project = project\n\t\tgc.NeedCleanup = true\n\t} else if err := gc.checkEnvironment(); nil != err {\n\t\treturn nil, fmt.Errorf(\"failed checking existing cluster: '%v'\", err)\n\t} else if nil != gc.Cluster { // return if Cluster was already set by kubeconfig\n\t\treturn gc, nil\n\t}\n\tif nil == gc.Cluster {\n\t\tif common.IsProw() {\n\t\t\tproject, err := boskos.AcquireGKEProject(nil)\n\t\t\tif nil != err {\n\t\t\t\treturn nil, fmt.Errorf(\"failed acquire boskos project: '%v'\", err)\n\t\t\t}\n\t\t\tgc.Project = &project.Name\n\t\t}\n\t\tif nil != numNodes {\n\t\t\tgc.Request.NumNodes = *numNodes\n\t\t}\n\t\tif nil != nodeType {\n\t\t\tgc.Request.NodeType = *nodeType\n\t\t}\n\t\tif nil != region {\n\t\t\tgc.Request.Region = *region\n\t\t}\n\t\tif \"\" != common.GetOSEnv(regionEnv) {\n\t\t\tgc.Request.Region = common.GetOSEnv(regionEnv)\n\t\t}\n\t\tif \"\" != common.GetOSEnv(backupRegionEnv) {\n\t\t\tgc.Request.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), \" \")\n\t\t}\n\t\tif nil != zone {\n\t\t\tgc.Request.Zone = *zone\n\t\t\tgc.Request.BackupRegions = make([]string, 0)\n\t\t}\n\t}\n\tif nil == gc.Project || 
\"\" == *gc.Project {\n\t\treturn nil, fmt.Errorf(\"gcp project must be set\")\n\t}\n\tlog.Printf(\"use project '%s' for running test\", *gc.Project)\n\treturn gc, nil\n}", "func init() {\n\tklog.InitFlags(nil)\n\tlogf.SetLogger(klogr.New())\n\n\t// Register required object kinds with global scheme.\n\t_ = clusterv1.AddToScheme(scheme.Scheme)\n}", "func createClusterServer(opts *NodeOptions) *clusterServer {\n\treturn &clusterServer{workers: sync.Map{}, opts: opts}\n}", "func newClusterStorage() *ClusterStorage {\n\ts := new(ClusterStorage)\n\treturn s\n}", "func NewCluster(segConfigs []SegConfig) *Cluster {\n\tcluster := Cluster{}\n\tcluster.Segments = segConfigs\n\tcluster.ByContent = make(map[int][]*SegConfig, 0)\n\tcluster.ByHost = make(map[string][]*SegConfig, 0)\n\tcluster.Executor = &GPDBExecutor{}\n\n\tfor i := range cluster.Segments {\n\t\tsegment := &cluster.Segments[i]\n\t\tcluster.ByContent[segment.ContentID] = append(cluster.ByContent[segment.ContentID], segment)\n\t\tsegmentList := cluster.ByContent[segment.ContentID]\n\t\tif len(segmentList) == 2 && segmentList[0].Role == \"m\" {\n\t\t\t/*\n\t\t\t * GetSegmentConfiguration always returns primaries before mirrors,\n\t\t\t * but we can't guarantee the []SegConfig passed in was created by\n\t\t\t * GetSegmentConfiguration, so if the mirror is first, swap them.\n\t\t\t */\n\t\t\tsegmentList[0], segmentList[1] = segmentList[1], segmentList[0]\n\t\t}\n\t\tcluster.ByHost[segment.Hostname] = append(cluster.ByHost[segment.Hostname], segment)\n\t\tif len(cluster.ByHost[segment.Hostname]) == 1 { // Only add each hostname once\n\t\t\tcluster.Hostnames = append(cluster.Hostnames, segment.Hostname)\n\t\t}\n\t}\n\tfor content := range cluster.ByContent {\n\t\tcluster.ContentIDs = append(cluster.ContentIDs, content)\n\t}\n\tsort.Ints(cluster.ContentIDs)\n\treturn &cluster\n}", "func (c *ClustersController) Create(ctx *app.CreateClustersContext) error {\n\tclustr := repository.Cluster{\n\t\tName: 
ctx.Payload.Data.Name,\n\t\tType: ctx.Payload.Data.Type,\n\t\tURL: ctx.Payload.Data.APIURL,\n\t\tAppDNS: ctx.Payload.Data.AppDNS,\n\t\tSAToken: ctx.Payload.Data.ServiceAccountToken,\n\t\tSAUsername: ctx.Payload.Data.ServiceAccountUsername,\n\t\tAuthClientID: ctx.Payload.Data.AuthClientID,\n\t\tAuthClientSecret: ctx.Payload.Data.AuthClientSecret,\n\t\tAuthDefaultScope: ctx.Payload.Data.AuthClientDefaultScope,\n\t}\n\tif ctx.Payload.Data.ConsoleURL != nil {\n\t\tclustr.ConsoleURL = *ctx.Payload.Data.ConsoleURL\n\t}\n\tif ctx.Payload.Data.LoggingURL != nil {\n\t\tclustr.LoggingURL = *ctx.Payload.Data.LoggingURL\n\t}\n\tif ctx.Payload.Data.MetricsURL != nil {\n\t\tclustr.MetricsURL = *ctx.Payload.Data.MetricsURL\n\t}\n\tif ctx.Payload.Data.CapacityExhausted != nil {\n\t\tclustr.CapacityExhausted = *ctx.Payload.Data.CapacityExhausted\n\t}\n\tif ctx.Payload.Data.TokenProviderID != nil {\n\t\tclustr.TokenProviderID = *ctx.Payload.Data.TokenProviderID\n\t}\n\tclusterSvc := c.app.ClusterService()\n\terr := clusterSvc.CreateOrSaveCluster(ctx, &clustr)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"error\": err,\n\t\t}, \"error while creating new cluster configuration\")\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tctx.ResponseData.Header().Set(\"Location\", app.ClustersHref(clustr.ClusterID.String()))\n\treturn ctx.Created()\n}", "func NewCluster(segConfigs []SegConfig) (*Cluster, error) {\n\tcluster := Cluster{}\n\n\tcluster.Primaries = make(map[int]SegConfig)\n\tcluster.Mirrors = make(map[int]SegConfig)\n\n\tfor _, seg := range segConfigs {\n\t\tcontent := seg.ContentID\n\n\t\tswitch seg.Role {\n\t\tcase PrimaryRole:\n\t\t\t// Check for duplication.\n\t\t\tif _, ok := cluster.Primaries[content]; ok {\n\t\t\t\treturn nil, newInvalidSegmentsError(seg, \"multiple primaries with content ID %d\", content)\n\t\t\t}\n\n\t\t\tcluster.ContentIDs = append(cluster.ContentIDs, content)\n\t\t\tcluster.Primaries[content] = seg\n\n\t\tcase 
MirrorRole:\n\t\t\t// Check for duplication.\n\t\t\tif _, ok := cluster.Mirrors[content]; ok {\n\t\t\t\treturn nil, newInvalidSegmentsError(seg, \"multiple mirrors with content ID %d\", content)\n\t\t\t}\n\n\t\t\tcluster.Mirrors[content] = seg\n\n\t\tdefault:\n\t\t\treturn nil, newInvalidSegmentsError(seg, \"unknown role %q\", seg.Role)\n\t\t}\n\t}\n\n\t// Make sure each mirror has a primary.\n\tfor _, seg := range cluster.Mirrors {\n\t\tcontent := seg.ContentID\n\n\t\tif _, ok := cluster.Primaries[content]; !ok {\n\t\t\treturn nil, newInvalidSegmentsError(seg, \"mirror with content ID %d has no primary\", content)\n\t\t}\n\t}\n\n\treturn &cluster, nil\n}", "func (vp *scalewayProvider) CreateCluster(log *logging.Logger, options providers.CreateClusterOptions, dnsProvider providers.DnsProvider) error {\n\twg := sync.WaitGroup{}\n\terrors := make(chan error, options.InstanceCount)\n\tinstanceDatas := make(chan instanceData, options.InstanceCount)\n\tfor i := 1; i <= options.InstanceCount; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\ttime.Sleep(time.Duration((i - 1)) * time.Second * 10)\n\t\t\tisCore := true\n\t\t\tisLB := true\n\t\t\tinstanceOptions, err := options.NewCreateInstanceOptions(isCore, isLB, i)\n\t\t\tif err != nil {\n\t\t\t\terrors <- maskAny(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tinstance, err := vp.CreateInstance(log, instanceOptions, dnsProvider)\n\t\t\tif err != nil {\n\t\t\t\terrors <- maskAny(err)\n\t\t\t} else {\n\t\t\t\tinstanceDatas <- instanceData{\n\t\t\t\t\tCreateInstanceOptions: instanceOptions,\n\t\t\t\t\tClusterInstance: instance,\n\t\t\t\t\tFleetMetadata: instanceOptions.CreateFleetMetadata(i),\n\t\t\t\t}\n\t\t\t}\n\t\t}(i)\n\t}\n\twg.Wait()\n\tclose(errors)\n\tclose(instanceDatas)\n\terr := <-errors\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tinstances := []instanceData{}\n\tinstanceList := providers.ClusterInstanceList{}\n\tfor data := range instanceDatas {\n\t\tinstances = append(instances, 
data)\n\t\tinstanceList = append(instanceList, data.ClusterInstance)\n\t}\n\n\tclusterMembers, err := instanceList.AsClusterMemberList(log, nil)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\t// Create tinc network config\n\tif instanceList.ReconfigureTincCluster(vp.Logger); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tif err := vp.setupInstances(log, instances, clusterMembers); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\treturn nil\n}", "func (k *K8sClusterAdapter) ProvisionCluster(ctx context.Context) errors.Error {\n\tstateStoreURL, err := k.support.StateStoreURL(ctx)\n\tif err != nil {\n\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(err)\n\t}\n\n\tvars, err := k.support.EnvironmentVariables(ctx)\n\tif err != nil {\n\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(err)\n\t}\n\n\tvar res *resources\n\n\tlog.Println(\"checking if cluster exists\")\n\tres, err = k.getCluster(ctx)\n\tif err != nil {\n\t\tif !errors.IsK8sProvisionerClusterNotFound(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Println(\"creating initial cluster resources\")\n\t\tres, err = k.generateInitialClusterResourcesHack(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Println(\"setting cluster configuration\")\n\tnodeCount := int32(k.spec.NodeCount)\n\tif nodeCount == 0 {\n\t\tnodeCount = DefaultNodeCount\n\t}\n\n\tmasterCount := int32(k.spec.MasterCount)\n\tif masterCount == 0 {\n\t\tmasterCount = DefaultMasterCount\n\t}\n\n\tif err := k.writeResource(\"cluster.json\", res.Cluster); err != nil {\n\t\treturn err\n\t}\n\n\tres.MasterInstanceGroup.Spec.MinSize = &masterCount\n\tres.MasterInstanceGroup.Spec.MaxSize = &masterCount\n\tres.MasterInstanceGroup.Spec.Zones = k.spec.Zones\n\n\tif err := k.writeResource(\"master-ig.json\", res.MasterInstanceGroup); err != nil {\n\t\treturn err\n\t}\n\n\tres.NodeInstanceGroup.Spec.MinSize = &nodeCount\n\tres.NodeInstanceGroup.Spec.MaxSize = &nodeCount\n\tres.NodeInstanceGroup.Spec.Zones = 
k.spec.Zones\n\n\tif err := k.writeResource(\"node-ig.json\", res.NodeInstanceGroup); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"synchronizing cluster state with cloud platform\")\n\tkerr := kopsExec(ctx, command{\n\t\targs: []string{\n\t\t\t\"--state\", stateStoreURL.String(),\n\t\t\t\"replace\", \"--force\",\n\t\t\t\"-f\", filepath.Join(k.workdir, \"cluster.json\"),\n\t\t\t\"-f\", filepath.Join(k.workdir, \"master-ig.json\"),\n\t\t\t\"-f\", filepath.Join(k.workdir, \"node-ig.json\"),\n\t\t},\n\t\tenv: vars,\n\t\tstdout: os.Stdout,\n\t\tstderr: os.Stderr,\n\t})\n\tif kerr != nil {\n\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(kerr)\n\t}\n\n\tp, err := k.support.SSHPublicKey(ctx)\n\tif err != nil {\n\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(err)\n\t}\n\n\tif p != \"\" {\n\t\tkerr := kopsExec(ctx, command{\n\t\t\targs: []string{\n\t\t\t\t\"--state\", stateStoreURL.String(),\n\t\t\t\t\"create\", \"secret\", \"sshpublickey\", \"admin\", \"-i\", p,\n\t\t\t\t\"--name\", k.spec.ClusterName,\n\t\t\t},\n\t\t\tenv: vars,\n\t\t\tstdout: os.Stdout,\n\t\t\tstderr: os.Stderr,\n\t\t})\n\t\tif kerr != nil {\n\t\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(kerr)\n\t\t}\n\t}\n\n\tkerr = kopsExec(ctx, command{\n\t\targs: []string{\n\t\t\t\"--state\", stateStoreURL.String(),\n\t\t\t\"update\", \"cluster\", k.spec.ClusterName,\n\t\t\t\"--yes\",\n\t\t},\n\t\tenv: vars,\n\t\tstdout: os.Stdout,\n\t\tstderr: os.Stderr,\n\t})\n\tif kerr != nil {\n\t\treturn errors.NewK8sProvisionerKopsExecError().WithCause(kerr)\n\t}\n\n\treturn nil\n}", "func (m *InstallManager) provisionCluster() error {\n\n\tm.log.Info(\"running openshift-install create cluster\")\n\n\tif err := m.runOpenShiftInstallCommand(\"create\", \"cluster\"); err != nil {\n\t\tif m.isBootstrapComplete() {\n\t\t\tm.log.WithError(err).Warn(\"provisioning cluster failed after completing bootstrapping, waiting longer for install to complete\")\n\t\t\terr = 
m.runOpenShiftInstallCommand(\"wait-for\", \"install-complete\")\n\t\t}\n\t\tif err != nil {\n\t\t\tm.log.WithError(err).Error(\"error provisioning cluster\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func initConfig() {\n\tif RootConfig.clusterID == \"\" {\n\t\tlog.Fatal(\"A cluster id must be provided.\")\n\t}\n\tif RootConfig.internal {\n\t\tRootConfig.config = insideCluster()\n\t} else {\n\t\tRootConfig.config = outsideCluster()\n\t}\n}", "func (rs *RPCStatic) newClusterLink(nodename, addr string, cluster *ClusterCfg) *xhlink.HLinkUnitConfig {\n\t// 如果该link是使用了自定义link初始化函数, 就调用自定义初始化函数. 如不是, 创建默认rpc协议\n\tfor _, linker := range cluster.specialLinker {\n\t\tif strings.HasPrefix(nodename, linker.NamePrefix) {\n\t\t\treturn linker.Creator(addr)\n\t\t}\n\t}\n\treturn rs.newRPCHLink(addr)\n}" ]
[ "0.71319526", "0.6973105", "0.690308", "0.6888436", "0.68304133", "0.680293", "0.67909527", "0.6752958", "0.6737159", "0.6736451", "0.6710811", "0.6710811", "0.6698371", "0.66824687", "0.6663995", "0.6639899", "0.66329217", "0.6579526", "0.6573909", "0.65477175", "0.6539486", "0.6537924", "0.65348595", "0.6493472", "0.6470558", "0.64615494", "0.644034", "0.6407458", "0.6389952", "0.6336444", "0.6333975", "0.6328624", "0.632577", "0.6316389", "0.63125014", "0.6297826", "0.6289364", "0.626211", "0.62614936", "0.6259874", "0.62590986", "0.6242387", "0.61835825", "0.6168753", "0.61608917", "0.6157842", "0.61476266", "0.6141313", "0.6135495", "0.6129069", "0.6102437", "0.6088036", "0.6086315", "0.607159", "0.6062666", "0.6061386", "0.60502684", "0.6040419", "0.60274243", "0.60263574", "0.6017586", "0.59980184", "0.59955955", "0.59922516", "0.5987948", "0.59702045", "0.59544116", "0.59487104", "0.59485006", "0.59473866", "0.5940007", "0.59244734", "0.5921842", "0.5921331", "0.591607", "0.5915627", "0.59064263", "0.59063876", "0.59029055", "0.58938193", "0.5874245", "0.5853344", "0.5841597", "0.58376896", "0.5837589", "0.5837589", "0.58195484", "0.58163667", "0.5805816", "0.5803568", "0.5800491", "0.57916784", "0.57884836", "0.5783183", "0.5776778", "0.57505524", "0.57409954", "0.5739856", "0.57338935", "0.5731698" ]
0.71837205
0
Wait for the cluster
func (c *Cluster) Wait() { _ = <-c.readinessChannel }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func waitForClusterReachable(kubeconfig string) error {\n\tcfg, err := loadKubeconfigContents(kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcfg.Timeout = 15 * time.Second\n\tclient, err := clientset.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn wait.PollImmediate(15*time.Second, 20*time.Minute, func() (bool, error) {\n\t\t_, err := client.Core().Namespaces().Get(\"openshift-apiserver\", metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\treturn true, nil\n\t\t}\n\t\tlog.Printf(\"cluster is not yet reachable %s: %v\", cfg.Host, err)\n\t\treturn false, nil\n\t})\n}", "func waitForClusterReady(capiClient capiclientset.Interface, namespace, name string) error {\n\treturn waitForClusterStatus(\n\t\tcapiClient,\n\t\tnamespace, name,\n\t\tfunc(cluster *capiv1alpha1.Cluster) bool {\n\t\t\tstatus, err := controller.ClusterStatusFromClusterAPI(cluster)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn status.Ready\n\t\t},\n\t)\n}", "func (w *worker) waitHostInCluster(host *chop.ChiHost) error {\n\treturn w.c.pollHost(host, nil, w.schemer.IsHostInCluster)\n}", "func waitForGlusterContainer() error {\n\n\t//Check if docker gluster container is up and running\n\tfor {\n\t\tglusterServerContainerVal, err := helpers.GetSystemDockerNode(\"gluster-server\")\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Error in checking docker gluster container for status \", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif len(glusterServerContainerVal) > 0 {\n\t\t\tbreak\n\t\t} else {\n\t\t\trwolog.Debug(\"Sleeping for 10 seconds to get gluster docker container up\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}\n\treturn nil\n}", "func waitForClusterProvisioned(capiClient capiclientset.Interface, namespace, name string) error {\n\treturn waitForClusterStatus(\n\t\tcapiClient,\n\t\tnamespace, name,\n\t\tfunc(cluster *capiv1alpha1.Cluster) bool {\n\t\t\tstatus, err := controller.ClusterStatusFromClusterAPI(cluster)\n\t\t\tif err != nil 
{\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn status.Provisioned\n\t\t},\n\t)\n}", "func waitForClusterToExist(capiClient capiclientset.Interface, namespace, name string) error {\n\treturn waitForClusterStatus(\n\t\tcapiClient,\n\t\tnamespace, name,\n\t\tfunc(cluster *capiv1alpha1.Cluster) bool { return cluster != nil },\n\t)\n}", "func (c *Client) Wait(cluster, service, arn string) error {\n\tt := time.NewTicker(c.pollInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\ts, err := c.GetDeployment(cluster, service, arn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.logger.Printf(\"[info] --> desired: %d, pending: %d, running: %d\", *s.DesiredCount, *s.PendingCount, *s.RunningCount)\n\t\t\tif *s.RunningCount == *s.DesiredCount {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Server) Wait() {\n\ts.wg.Wait()\n}", "func (s *Server) Wait() {\n\ts.wg.Wait()\n}", "func (t *SyncTransport) Wait() {}", "func (n *Node) Wait() (int, error) {\n\tctx := context.TODO()\n\n\tclient, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn client.ContainerWait(ctx, n.id)\n}", "func (elm *etcdLeaseManager) Wait() {\n\telm.wg.Wait()\n}", "func Wait(options *WaitOptions, settings *env.Settings) error {\n\tkc, err := env.GetClient(settings)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//return status(kc, options, settings.Namespace)\n\treturn wait(kc, options, settings.Namespace)\n}", "func (g *Gateway) WaitLeadership() error {\n\tn := 80\n\tsleep := 250 * time.Millisecond\n\tfor i := 0; i < n; i++ {\n\t\tg.lock.RLock()\n\t\tisLeader, err := g.isLeader()\n\t\tif err != nil {\n\t\t\tg.lock.RUnlock()\n\t\t\treturn err\n\t\t}\n\n\t\tif isLeader {\n\t\t\tg.lock.RUnlock()\n\t\t\treturn nil\n\t\t}\n\n\t\tg.lock.RUnlock()\n\n\t\ttime.Sleep(sleep)\n\t}\n\n\treturn fmt.Errorf(\"RAFT node did not self-elect within %s\", time.Duration(n)*sleep)\n}", "func (c *NetClient) Wait() {\n\t<-c.haltedCh\n}", "func WaitForClusterToBecomeReady(t 
*testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster, size int) error {\n\tt.Logf(\"waiting for cluster pods to become ready: %s\", z.Name)\n\terr := wait.Poll(RetryInterval, ReadyTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetCluster(t, f, ctx, z)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tt.Logf(\"\\twaiting for pods to become ready (%d/%d), pods (%v)\", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)\n\n\t\t_, condition := cluster.Status.GetClusterCondition(api.ClusterConditionPodsReady)\n\t\tif condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.Logf(\"zookeeper cluster ready: %s\", z.Name)\n\treturn nil\n\n}", "func (c *Compute) wait(operation *compute.Operation) error {\n\tfor {\n\t\top, err := c.ZoneOperations.Get(c.Project, c.Zone, operation.Name).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get operation: %v\", operation.Name, err)\n\t\t}\n\t\tlog.Printf(\"operation %q status: %s\", operation.Name, op.Status)\n\t\tif op.Status == \"DONE\" {\n\t\t\tif op.Error != nil {\n\t\t\t\treturn fmt.Errorf(\"operation error: %v\", *op.Error.Errors[0])\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}", "func (s *sshClientExternal) Wait() error {\n\tif s.session == nil {\n\t\treturn nil\n\t}\n\treturn s.session.Wait()\n}", "func (p *Probe) wait() {\n\tp.waitGroup.Wait()\n}", "func waitForClusterToNotExist(capiClient capiclientset.Interface, namespace, name string) error {\n\treturn waitForObjectToNotExist(\n\t\tnamespace, name,\n\t\tfunc(namespace, name string) (metav1.Object, error) {\n\t\t\treturn getCluster(capiClient, namespace, name)\n\t\t},\n\t)\n}", "func (k *KubeletExecutor) Wait(containerID string) error {\n\treturn k.cli.WaitForTermination(containerID, 0)\n}", 
"func Wait() {\n\twaitGroup.Wait()\n}", "func (g *Group) Wait() error", "func (k *kubelet) waitForNodeReady() error {\n\tkc, _ := k.config.AdminConfig.ToYAMLString() //nolint:errcheck // This is checked in Validate().\n\n\tc, err := client.NewClient([]byte(kc))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating kubernetes client: %w\", err)\n\t}\n\n\treturn c.WaitForNodeReady(k.config.Name)\n}", "func (c *swimCluster) WaitForConvergence(t *testing.T, maxIterations int) {\n\twaitForConvergenceNodes(t, maxIterations, c.nodes...)\n}", "func (s *Supervisor) Wait() error {\n\ts.wg.Wait()\n\treturn nil\n}", "func Wait() {\n\tdefaultManager.Wait()\n}", "func (c *Client) WaitForLeader() {\n\tdefer func() {\n\t\tc.Lock()\n\t\tc.locked = false\n\t\tc.Unlock()\n\t}()\n\tc.RLock()\n\tvar locked = c.locked\n\tc.RUnlock()\n\tif !locked {\n\t\tfmt.Println(\"# waiting for leader node\")\n\t\tc.Lock()\n\t\tc.locked = true\n\t\tc.Unlock()\n\t\tinterval := time.NewTicker(ServiceTTL)\n\t\tdefer interval.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-interval.C:\n\t\t\t\tif c.IsLeader() {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"# scanning running nodes...\")\n\t\t\t\tif nodes := c.GetRunningNodes(); len(nodes) > 0 {\n\t\t\t\t\tc.events <- &models.Event{Type: EventElected, Group: GroupWorker}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"# no nodes are ready yet\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Wait(timeout time.Duration) error {\n\treturn server.Wait(timeout)\n}", "func (m *ConsensusNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (s *Server) Wait() {\n\t<-s.stopChan\n}", "func (s *Server) Wait() {\n\t<-s.stopChan\n}", "func (s *Serv) Wait() {\n\t<-s.done\n}", "func (clus *Cluster) WaitHealth() error {\n\tvar err error\n\t// wait 60s to check cluster health.\n\t// TODO: set it to a reasonable value. 
It is set that high because\n\t// follower may use long time to catch up the leader when reboot under\n\t// reasonable workload (https://github.com/coreos/etcd/issues/2698)\n\tfor i := 0; i < 60; i++ {\n\t\tfor _, m := range clus.Members {\n\t\t\tif err = m.WriteHealthKey(); err != nil {\n\t\t\t\tclus.lg.Warn(\n\t\t\t\t\t\"health check FAIL\",\n\t\t\t\t\tzap.Int(\"retries\", i),\n\t\t\t\t\tzap.String(\"endpoint\", m.EtcdClientEndpoint),\n\t\t\t\t\tzap.Error(err),\n\t\t\t\t)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tclus.lg.Info(\n\t\t\t\t\"health check PASS\",\n\t\t\t\tzap.Int(\"retries\", i),\n\t\t\t\tzap.String(\"endpoint\", m.EtcdClientEndpoint),\n\t\t\t)\n\t\t}\n\t\tif err == nil {\n\t\t\tclus.lg.Info(\"health check ALL PASS\")\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn err\n}", "func (k *kubectlContext) Wait(args ...string) error {\n\tout, err := k.do(append([]string{\"wait\"}, args...)...)\n\tk.t.Log(string(out))\n\treturn err\n}", "func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() }", "func Wait() {\n\tselect {}\n}", "func WaitForBookkeeperClusterToBecomeReady(t *testing.T, k8client client.Client, b *bkapi.BookkeeperCluster, size int) error {\n\tlog.Printf(\"waiting for cluster pods to become ready: %s\", b.Name)\n\n\terr := wait.Poll(RetryInterval, ReadyTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetBKCluster(t, k8client, b)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tlog.Printf(\"waiting for pods to become ready (%d/%d), pods (%v)\", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)\n\n\t\t_, condition := cluster.Status.GetClusterCondition(bkapi.ClusterConditionPodsReady)\n\t\tif condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"bookkeeper cluster ready: %s\", b.Name)\n\treturn nil\n}", "func 
Test_Cluster_Availability(t *testing.T) {\n\ttotal := 5\n\tvar server [5]cluster.Server\n\t// server[4] is not started now.. (corresponding to pid=5)\n\tfor i := 0; i < total-1; i++ {\n\t\tserver[i] = cluster.New(i+1, \"../config.json\")\n\t}\n\n\t// Random messages\n\tmessage := \"hello\"\n\n\tcount := make([]int, 5)\n\tfor i := 0; i< total; i++ {\n\t\tcount[i] = 0\n\t}\n\t\n\tfor k :=0; k < total-1; k++ {\n\t\tserver[k].Outbox() <- &cluster.Envelope{SendTo: -1, SendBy: k+1, Msg: message}\n\t}\n\n\ttime.Sleep(time.Second)\n\n\tserver[total-1] = cluster.New(total, \"../config.json\")\n\n\twg := new(sync.WaitGroup)\n\tfor i := 0; i< total; i++ {\n\t\twg.Add(1)\n\t\tgo checkInput(server[i], &count[i], wg)\n\t}\n\n\tfor i := 0; i< total; i++ {\n\t\tclose(server[i].Outbox())\n\t}\n\twg.Wait()\n\n\n\tif count[4] != 4 {\n\t\tpanic (\"All messages not recieved..\")\n\t}\n\n\tt.Log(\"test of Availability of cluster passed.\")\n}", "func (w *worker) waitHostNotInCluster(host *chop.ChiHost) error {\n\treturn w.c.pollHost(host, nil, func(host *chop.ChiHost) bool {\n\t\treturn !w.schemer.IsHostInCluster(host)\n\t})\n}", "func (node *Node) Wait() error {\n\treturn node.httpAPIServer.Wait()\n}", "func Wait() {\n\twg.Wait()\n}", "func wait() {\n\twaitImpl()\n}", "func WaitForClusterToUpgrade(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster, targetVersion string) error {\n\tt.Logf(\"waiting for cluster to upgrade: %s\", z.Name)\n\n\terr := wait.Poll(RetryInterval, UpgradeTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetCluster(t, f, ctx, z)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t_, upgradeCondition := cluster.Status.GetClusterCondition(api.ClusterConditionUpgrading)\n\t\t_, errorCondition := cluster.Status.GetClusterCondition(api.ClusterConditionError)\n\n\t\tt.Logf(\"\\twaiting for cluster to upgrade (upgrading: %s; error: %s)\", upgradeCondition.Status, errorCondition.Status)\n\n\t\tif 
errorCondition.Status == corev1.ConditionTrue {\n\t\t\treturn false, fmt.Errorf(\"failed upgrading cluster: [%s] %s\", errorCondition.Reason, errorCondition.Message)\n\t\t}\n\n\t\tif upgradeCondition.Status == corev1.ConditionFalse && cluster.Status.CurrentVersion == targetVersion {\n\t\t\t// Cluster upgraded\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.Logf(\"zookeeper cluster upgraded: %s\", z.Name)\n\treturn nil\n}", "func waitForConsistency() {\n\ttime.Sleep(500 * time.Millisecond)\n}", "func (d *InMemoryTaskDB) Wait() {\n\td.modClientsWg.Wait()\n}", "func (c *Config) Wait() {\n\tc.wg.Wait()\n}", "func WaitForZookeeperClusterToBecomeReady(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster, size int) error {\n\tlog.Printf(\"waiting for cluster pods to become ready: %s\", z.Name)\n\n\terr := wait.Poll(RetryInterval, ReadyTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetZKCluster(t, k8client, z)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tlog.Printf(\"waiting for pods to become ready (%d/%d), pods (%v)\", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)\n\n\t\t_, condition := cluster.Status.GetClusterCondition(zkapi.ClusterConditionPodsReady)\n\t\tif condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"zookeeper cluster ready: %s\", z.Name)\n\treturn nil\n}", "func WaitForPravegaClusterToBecomeReady(t *testing.T, k8client client.Client, p *api.PravegaCluster, size int) error {\n\tlog.Printf(\"waiting for cluster pods to become ready: %s\", p.Name)\n\n\terr := wait.Poll(RetryInterval, ReadyTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetPravegaCluster(t, k8client, p)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tlog.Printf(\"waiting for 
pods to become ready (%d/%d), pods (%v)\", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)\n\n\t\t_, condition := cluster.Status.GetClusterCondition(api.ClusterConditionPodsReady)\n\t\tif condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"pravega cluster ready: %s\", p.Name)\n\treturn nil\n}", "func WaitClusterToBeUpgraded(client *rancher.Client, clusterID string) (err error) {\n\tclusterStateUpgrading := \"upgrading\" // For imported RKE2 and K3s clusters\n\tclusterStateUpdating := \"updating\" // For all clusters except imported K3s and RKE2\n\n\tclusterErrorStateMessage := \"cluster is in error state\"\n\n\tvar clusterInfo string\n\topts := metav1.ListOptions{\n\t\tFieldSelector: \"metadata.name=\" + clusterID,\n\t\tTimeoutSeconds: &defaults.WatchTimeoutSeconds,\n\t}\n\n\twatchInterface, err := client.GetManagementWatchInterface(management.ClusterType, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\tcheckFuncWaitToBeInUpgrade := func(event watch.Event) (bool, error) {\n\t\tclusterUnstructured := event.Object.(*unstructured.Unstructured)\n\t\tsummerizedCluster := summary.Summarize(clusterUnstructured)\n\n\t\tclusterInfo = logClusterInfoWithChanges(clusterID, clusterInfo, summerizedCluster)\n\n\t\tif summerizedCluster.Transitioning && !summerizedCluster.Error && (summerizedCluster.State == clusterStateUpdating || summerizedCluster.State == clusterStateUpgrading) {\n\t\t\treturn true, nil\n\t\t} else if summerizedCluster.Error && isClusterInaccessible(summerizedCluster.Message) {\n\t\t\treturn false, nil\n\t\t} else if summerizedCluster.Error && !isClusterInaccessible(summerizedCluster.Message) {\n\t\t\treturn false, errors.Wrap(err, clusterErrorStateMessage)\n\t\t}\n\n\t\treturn false, nil\n\t}\n\terr = wait.WatchWait(watchInterface, checkFuncWaitToBeInUpgrade)\n\tif err != 
nil {\n\t\treturn\n\t}\n\n\twatchInterfaceWaitUpgrade, err := client.GetManagementWatchInterface(management.ClusterType, opts)\n\tcheckFuncWaitUpgrade := func(event watch.Event) (bool, error) {\n\t\tclusterUnstructured := event.Object.(*unstructured.Unstructured)\n\t\tsummerizedCluster := summary.Summarize(clusterUnstructured)\n\n\t\tclusterInfo = logClusterInfoWithChanges(clusterID, clusterInfo, summerizedCluster)\n\n\t\tif summerizedCluster.IsReady() {\n\t\t\treturn true, nil\n\t\t} else if summerizedCluster.Error && isClusterInaccessible(summerizedCluster.Message) {\n\t\t\treturn false, nil\n\t\t} else if summerizedCluster.Error && !isClusterInaccessible(summerizedCluster.Message) {\n\t\t\treturn false, errors.Wrap(err, clusterErrorStateMessage)\n\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\terr = wait.WatchWait(watchInterfaceWaitUpgrade, checkFuncWaitUpgrade)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn\n}", "func (fmd *FakeMysqlDaemon) Wait(ctx context.Context, cnf *Mycnf) error {\n\treturn nil\n}", "func (p *ParallelManager) wait() {\n\tp.wg.Wait()\n\tclose(p.stopMonitorCh)\n}", "func (s *Server) Wait(timeout time.Duration) error {\n\treturn s.listener.Wait(timeout)\n}", "func (y *Yaraus) wait(id uint) *Error {\n\tif !y.useWait {\n\t\treturn nil\n\t}\n\n\t// get slaves count\n\tsc, err := slaveCount(y.c)\n\tif err != nil {\n\t\treturn &Error{\n\t\t\tErr: err,\n\t\t\tID: id,\n\t\t\tClientID: y.clientID,\n\t\t}\n\t}\n\tif sc == 0 {\n\t\treturn nil\n\t}\n\n\t// wait for redis slaves.\n\ti, err := y.c.Wait(sc/2+1, y.Interval).Result()\n\tif err != nil {\n\t\treturn &Error{\n\t\t\tErr: err,\n\t\t\tID: id,\n\t\t\tClientID: y.clientID,\n\t\t}\n\t}\n\tif int(i) < sc/2+1 {\n\t\treturn &Error{\n\t\t\tErr: fmt.Errorf(\"failed to sync, got %d, want %d\", int(i), sc/2+1),\n\t\t\tID: id,\n\t\t\tClientID: y.clientID,\n\t\t}\n\t}\n\n\treturn nil\n}", "func waitKcpConn() *smux.Session {\n\tfor {\n\t\tif session, err := createKcpConn(); err == nil {\n\t\t\treturn 
session\n\t\t} else {\n\t\t\tlog.Println(\"re-connecting:\", err)\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n}", "func (ms *Server) Wait() {\n\tms.loops.Wait()\n}", "func (c *Client) WaitForClusterAvailable(retries int, retryInterval time.Duration) (bool, error) {\n\tfor i := 0; i < retries; i++ {\n\t\tif i > 0 {\n\t\t\tklog.V(1).Infof(\"[etcd] Waiting %v until next retry\\n\", retryInterval)\n\t\t\ttime.Sleep(retryInterval)\n\t\t}\n\t\tklog.V(2).Infof(\"[etcd] attempting to see if all cluster endpoints (%s) are available %d/%d\", c.Endpoints, i+1, retries)\n\t\t_, err := c.getClusterStatus()\n\t\tif err != nil {\n\t\t\tswitch err {\n\t\t\tcase context.DeadlineExceeded:\n\t\t\t\tklog.V(1).Infof(\"[etcd] Attempt timed out\")\n\t\t\tdefault:\n\t\t\t\tklog.V(1).Infof(\"[etcd] Attempt failed with error: %v\\n\", err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, errors.New(\"timeout waiting for etcd cluster to be available\")\n}", "func (d *InMemoryJobDB) Wait() {\n\td.modClientsWg.Wait()\n}", "func waitForClusterToBecomeAwareOfAllSubscriptions(servers []server.NATSServer, subscriptionCount int) error {\n\ttimeout := time.After(time.Second * 5)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tfor _, server := range servers {\n\t\t\t\tif int(server.NumSubscriptions()) != subscriptionCount {\n\t\t\t\t\treturn errors.New(\"Timed out : waitForClusterToBecomeAwareOfAllSubscriptions()\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Logger.Info().Msg(\"Entire cluster is aware of all subscriptions\")\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tfor _, server := range servers {\n\t\t\t\tif int(server.NumSubscriptions()) != subscriptionCount {\n\t\t\t\t\tlog.Logger.Info().Msgf(\"Subscription count = %d\", server.NumSubscriptions())\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Logger.Info().Msg(\"Entire cluster is aware of all subscriptions\")\n\t\t\treturn nil\n\t\t}\n\n\t}\n}", "func WaitForBKClusterToTerminate(t 
*testing.T, k8client client.Client, b *bkapi.BookkeeperCluster) error {\n\tlog.Printf(\"waiting for Bookkeeper cluster to terminate: %s\", b.Name)\n\n\tlistOptions := []client.ListOption{\n\t\tclient.InNamespace(b.GetNamespace()),\n\t\tclient.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{\"bookkeeper_cluster\": b.GetName()})},\n\t}\n\n\t// Wait for Pods to terminate\n\terr := wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpodList := corev1.PodList{}\n\t\terr = k8client.List(goctx.TODO(), &podList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range podList.Items {\n\t\t\tpod := &podList.Items[i]\n\t\t\tnames = append(names, pod.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pods to terminate, running pods (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for PVCs to terminate\n\terr = wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpvcList := corev1.PersistentVolumeClaimList{}\n\t\terr = k8client.List(goctx.TODO(), &pvcList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range pvcList.Items {\n\t\t\tpvc := &pvcList.Items[i]\n\t\t\tnames = append(names, pvc.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pvc to terminate (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"bookkeeper cluster terminated: %s\", b.Name)\n\treturn nil\n}", "func PollManagementClusterStatusOnANode(ctx context.Context, clusterOperWaitGroup *sync.WaitGroup, intendedCluster *operation.ConfigCluster,\n\tmctNode *operation.ClusterMemberNode, clusterConfigErrors chan OperationError) {\n\n\tdefer clusterOperWaitGroup.Done()\n\n\tlog := 
appcontext.Logger(ctx).WithFields(nlog.Fields{\n\t\t\"App\": \"dcfabric\",\n\t\t\"Fabric\": intendedCluster.FabricName,\n\t\t\"Operation\": \"Poll Management Cluster Status\",\n\t\t\"Switch\": mctNode.NodeMgmtIP,\n\t})\n\n\t/*Netconf client*/\n\tadapter := ad.GetAdapter(mctNode.NodeModel)\n\tclient := &client.NetconfClient{Host: mctNode.NodeMgmtIP, User: mctNode.NodeMgmtUserName, Password: mctNode.NodeMgmtPassword}\n\tclient.Login()\n\tdefer client.Close()\n\n\tintendedClusterMembers := intendedCluster.ClusterMemberNodes\n\n\tOperation := \"Poll for management cluster status\"\n\tif len(intendedClusterMembers) > 2 {\n\t\tclusterConfigErrors <- OperationError{Operation: Operation, Error: errors.New(\"Management cluster is supported for a maximum of 2 nodes\"), Host: mctNode.NodeMgmtIP}\n\t\treturn\n\t}\n\n\ttimeout := time.After(time.Duration(MgmtClusterStatePollingTimeOutInSec) * (time.Second))\n\ttick := time.Tick(time.Duration(MgmtClusterStatePollingIntervalInSec) * (time.Second))\n\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tclusterConfigErrors <- OperationError{Operation: Operation, Error: errors.New(\"Management Cluster is not operational. 
Polling timed out\"), Host: mctNode.NodeMgmtIP}\n\t\t\treturn\n\t\tcase <-tick:\n\t\t\tfmt.Println(\"Management cluster status polled at\", time.Now())\n\n\t\t\toutput, operationalClusterMembers, principalNode, err := adapter.GetManagementClusterStatus(client)\n\t\t\tlog.Infof(\"Principal Node IP obtained on <%s> is <%s>\", mctNode.NodeMgmtIP, principalNode)\n\t\t\toperationalClusterMemberCount, err := strconv.Atoi(operationalClusterMembers.TotalMemberNodeCount)\n\n\t\t\tif err != nil {\n\t\t\t\tclusterConfigErrors <- OperationError{Operation: Operation, Error: err, Host: mctNode.NodeMgmtIP}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif operationalClusterMemberCount == len(intendedClusterMembers) {\n\t\t\t\tvar i = 0\n\t\t\t\tvar j = 0\n\t\t\t\tvar found = false\n\t\t\t\tfor i = 0; i < operationalClusterMemberCount; i++ {\n\t\t\t\t\tfound = false\n\t\t\t\t\toperationalClusterMember := operationalClusterMembers.MemberNodes[i]\n\t\t\t\t\tfor j = 0; j < operationalClusterMemberCount; j++ {\n\t\t\t\t\t\tvar ipClusterMember = intendedClusterMembers[j]\n\t\t\t\t\t\tif operationalClusterMember.NodeMgmtIP == ipClusterMember.NodeMgmtIP {\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif found == false {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif found == true {\n\t\t\t\t\tfmt.Println(\"Management Cluster is operational, hence exiting the poll on \", mctNode.NodeMgmtIP)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlog.Info(\"Raw o/p of show-cluster-management\", output)\n\t\t\t//fmt.Println(\"operationalClusterMembers\", operationalClusterMembers)\n\t\t}\n\t}\n}", "func (b *Botanist) WaitUntilClusterAutoscalerDeleted(ctx context.Context) error {\n\treturn retry.UntilTimeout(ctx, 5*time.Second, 600*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameClusterAutoscaler), &appsv1.Deployment{}); err != nil 
{\n\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\treturn retry.Ok()\n\t\t\t}\n\t\t\treturn retry.SevereError(err)\n\t\t}\n\t\tb.Logger.Infof(\"Waiting until the %s has been deleted in the Seed cluster...\", v1beta1constants.DeploymentNameClusterAutoscaler)\n\t\treturn retry.MinorError(fmt.Errorf(\"deployment %q is still present\", v1beta1constants.DeploymentNameClusterAutoscaler))\n\t})\n}", "func (s *SeleniumServer) Wait() {\n\terr := s.cmd.Wait()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (c *C) Wait() {\n\tc.wg.Wait()\n}", "func (s *Server) Wait() {\n\tfor {\n\t\tselect {\n\t\tcase <-s.channelQuit:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (transport *Transport) Wait() {\n\ttransport.lock.Lock()\n\ttransport.lock.Unlock()\n}", "func Wait() {\n\t<-wait\n}", "func (m *HostNetworkMock) Wait(timeout time.Duration) {\n\tm.MinimockWait(timeout)\n}", "func (e *endpoint) Wait() {\n\te.completed.Wait()\n}", "func ready(c kubernetes.Interface) error {\n\tf := func() error {\n\t\tlist, err := c.CoreV1().Nodes().List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(list.Items) < 1 {\n\t\t\treturn fmt.Errorf(\"cluster is not ready, waiting for 1 or more worker nodes: %v\", len(list.Items))\n\t\t}\n\n\t\t// check for 1 or more ready nodes by ignoring nodes marked\n\t\t// unschedulable or containing taints\n\t\tvar oneReady bool\n\t\tfor _, node := range list.Items {\n\t\t\tif node.Spec.Unschedulable {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(node.Spec.Taints) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, condition := range node.Status.Conditions {\n\t\t\t\tif condition.Type == v1.NodeReady {\n\t\t\t\t\tif condition.Status == v1.ConditionTrue {\n\t\t\t\t\t\toneReady = true\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !oneReady {\n\t\t\treturn fmt.Errorf(\"waiting for one worker node to be ready\")\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif err := retry(50, 10*time.Second, f); err != nil {\n\t\treturn 
err\n\t}\n\treturn nil\n}", "func (s *progressBar) Wait() {\n\ts.container.Wait()\n}", "func WaitForPravegaClusterToUpgrade(t *testing.T, k8client client.Client, p *api.PravegaCluster, targetVersion string) error {\n\tlog.Printf(\"waiting for cluster to upgrade: %s\", p.Name)\n\n\terr := wait.Poll(RetryInterval, UpgradeTimeout, func() (done bool, err error) {\n\t\tcluster, err := GetPravegaCluster(t, k8client, p)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t_, upgradeCondition := cluster.Status.GetClusterCondition(api.ClusterConditionUpgrading)\n\t\t_, errorCondition := cluster.Status.GetClusterCondition(api.ClusterConditionError)\n\n\t\tlog.Printf(\"waiting for cluster to upgrade (upgrading: %s; error: %s)\", upgradeCondition.Status, errorCondition.Status)\n\n\t\tif errorCondition.Status == corev1.ConditionTrue {\n\t\t\treturn false, fmt.Errorf(\"failed upgrading cluster: [%s] %s\", errorCondition.Reason, errorCondition.Message)\n\t\t}\n\n\t\tif upgradeCondition.Status == corev1.ConditionFalse && cluster.Status.CurrentVersion == targetVersion {\n\t\t\t// Cluster upgraded\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"pravega cluster upgraded: %s\", p.Name)\n\treturn nil\n}", "func (c *Client) WaitUntilClusterAvailable(ctx context.Context, input *DescribeClustersInput, opts ...aws.WaiterOption) error {\n\tw := aws.Waiter{\n\t\tName: \"WaitUntilClusterAvailable\",\n\t\tMaxAttempts: 30,\n\t\tDelay: aws.ConstantWaiterDelay(60 * time.Second),\n\t\tAcceptors: []aws.WaiterAcceptor{\n\t\t\t{\n\t\t\t\tState: aws.SuccessWaiterState,\n\t\t\t\tMatcher: aws.PathAllWaiterMatch, Argument: \"Clusters[].ClusterStatus\",\n\t\t\t\tExpected: \"available\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: aws.FailureWaiterState,\n\t\t\t\tMatcher: aws.PathAnyWaiterMatch, Argument: \"Clusters[].ClusterStatus\",\n\t\t\t\tExpected: \"deleting\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tState: 
aws.RetryWaiterState,\n\t\t\t\tMatcher: aws.ErrorWaiterMatch,\n\t\t\t\tExpected: \"ClusterNotFound\",\n\t\t\t},\n\t\t},\n\t\tLogger: c.Config.Logger,\n\t\tNewRequest: func(opts []aws.Option) (*aws.Request, error) {\n\t\t\tvar inCpy *DescribeClustersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq := c.DescribeClustersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req.Request, nil\n\t\t},\n\t}\n\tw.ApplyOptions(opts...)\n\n\treturn w.Wait(ctx)\n}", "func (w *Worker) Wait() {\n\tw.ow.Do(func() {\n\t\tw.l.Info(\"astikit: worker is now waiting...\")\n\t\tw.wg.Wait()\n\t})\n}", "func (c *Crawler) WaitForCompletion() {\n\tc.wg.Wait()\n}", "func (wal *BaseWAL) Wait() {\n\twal.group.Wait()\n}", "func waitUntilRDSClusterCreated(rdsClientSess *rds.RDS, restoreParams map[string]string) error {\n\trdsClusterName := restoreParams[\"restoreRDS\"]\n\n\tmaxWaitAttempts := 120\n\n\tinput := &rds.DescribeDBClustersInput{\n\t\tDBClusterIdentifier: aws.String(rdsClusterName),\n\t}\n\n\tfmt.Printf(\"Wait until RDS cluster [%v] is fully created ...\\n\", rdsClusterName)\n\n\tstart := time.Now()\n\n\t// Check until created\n\tfor waitAttempt := 0; waitAttempt < maxWaitAttempts; waitAttempt++ {\n\t\telapsedTime := time.Since(start)\n\t\tif waitAttempt > 0 {\n\t\t\tformattedTime := strings.Split(fmt.Sprintf(\"%6v\", elapsedTime), \".\")\n\t\t\tfmt.Printf(\"Cluster creation elapsed time: %vs\\n\", formattedTime[0])\n\t\t}\n\n\t\tresp, err := rdsClientSess.DescribeDBClusters(input)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Wait RDS cluster creation err %v\", err)\n\t\t}\n\n\t\tfmt.Printf(\"Cluster status: [%s]\\n\", *resp.DBClusters[0].Status)\n\t\tif *resp.DBClusters[0].Status == \"available\" {\n\t\t\tfmt.Printf(\"RDS cluster [%v] created successfully\\n\", rdsClusterName)\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(30 * time.Second)\n\t}\n\treturn fmt.Errorf(\"Aurora Cluster [%v] is not ready, 
exceed max wait attemps\\n\", rdsClusterName)\n}", "func (l *CommandQueueStatusListener) Wait() {\n\t<-l.signal\n}", "func (c *Connection) Wait() error {\n\tc.connection.Wait()\n\treturn nil\n}", "func (wg *WaitGroup) Wait() {\n\twg.waitGroup.Wait()\n}", "func (c *Container) Wait() error {\n\tif c.id == \"\" {\n\t\treturn fmt.Errorf(\"container %s absent\", c.id)\n\t}\n\t_, err := c.cli.ContainerWait(c.ctx, c.id)\n\treturn err\n}", "func waitForApiServerToBeUp(svcMasterIp string, sshClientConfig *ssh.ClientConfig,\n\ttimeout time.Duration) error {\n\tkubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)\n\twaitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\tcmd := fmt.Sprintf(\"kubectl get ns,sc --kubeconfig %s\",\n\t\t\tkubeConfigPath)\n\t\tframework.Logf(\"Invoking command '%v' on host %v\", cmd,\n\t\t\tsvcMasterIp)\n\t\tcmdResult, err := sshExec(sshClientConfig, svcMasterIp,\n\t\t\tcmd)\n\t\tframework.Logf(\"result %v\", cmdResult)\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif err == nil {\n\t\t\tframework.Logf(\"Apiserver is fully up\")\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn waitErr\n}", "func (l *LoadBalancer) Wait() {\n\t<-l.waitCtx.Done()\n}", "func (a *serverApp) Wait() {\n\t<-a.terminated\n}", "func (s *Service) Wait() {\n\ts.waiter.Wait()\n}", "func (s *Service) Wait() {\n\ts.waiter.Wait()\n}", "func (s *Service) Wait() {\n\ts.waiter.Wait()\n}", "func (s *server) WaitForShutdown() {\n\ts.wg.Wait()\n}", "func (s *server) WaitForShutdown() {\n\ts.wg.Wait()\n}", "func WaitForZKClusterToTerminate(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) error {\n\tlog.Printf(\"waiting for zookeeper cluster to terminate: %s\", z.Name)\n\n\tlistOptions := []client.ListOption{\n\t\tclient.InNamespace(z.GetNamespace()),\n\t\tclient.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{\"app\": z.GetName()})},\n\t}\n\n\t// Wait for Pods to terminate\n\terr := 
wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpodList := corev1.PodList{}\n\t\terr = k8client.List(goctx.TODO(), &podList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range podList.Items {\n\t\t\tpod := &podList.Items[i]\n\t\t\tnames = append(names, pod.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pods to terminate, running pods (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for PVCs to terminate\n\terr = wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpvcList := corev1.PersistentVolumeClaimList{}\n\t\terr = k8client.List(goctx.TODO(), &pvcList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range pvcList.Items {\n\t\t\tpvc := &pvcList.Items[i]\n\t\t\tnames = append(names, pvc.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pvc to terminate (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"zookeeper cluster terminated: %s\", z.Name)\n\treturn nil\n}", "func wait() {\n\ttime.Sleep(3 * time.Second)\n}", "func (m *Manager) waitForFinish() {\n\tm.state.wg.Wait()\n}", "func waitForConductor(ctx context.Context, client *gophercloud.ServiceClient) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.Printf(\"[DEBUG] Waiting for conductor API to become available...\")\n\t\t\tdriverCount := 0\n\n\t\t\tdrivers.ListDrivers(client, drivers.ListDriversOpts{\n\t\t\t\tDetail: false,\n\t\t\t}).EachPage(func(page pagination.Page) (bool, error) {\n\t\t\t\tactual, err := drivers.ExtractDrivers(page)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tdriverCount += len(actual)\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\t// If we 
have any drivers, conductor is up.\n\t\t\tif driverCount > 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n}", "func (async *async) wait() {\n\t<-async.done\n}", "func waitUntilRDSClusterDeleted(rdsClientSess *rds.RDS, restoreParams map[string]string) error {\n\n\trdsClusterName := restoreParams[\"restoreRDS\"]\n\n\tinput := &rds.DescribeDBClustersInput{\n\t\tDBClusterIdentifier: aws.String(rdsClusterName),\n\t}\n\n\t// Check if Cluster exists\n\t_, err := rdsClientSess.DescribeDBClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tif aerr.Code() == rds.ErrCodeDBClusterNotFoundFault {\n\t\t\t\tfmt.Println(rds.ErrCodeDBClusterNotFoundFault, aerr.Error())\n\t\t\t\treturn fmt.Errorf(\"Wait RDS cluster deletion err %v\", err)\n\t\t\t} else {\n\t\t\t\t// Print the error, cast err to awserr.Error to get the Code and Message from an error.\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\treturn fmt.Errorf(\"Wait RDS cluster deletion err %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// TODO: DEBUG - fmt.Println(result)\n\tfmt.Printf(\"Wait until RDS cluster [%v] is fully deleted...\\n\", rdsClusterName)\n\n\tstart := time.Now()\n\n\tmaxWaitAttempts := 120\n\n\t// Check until deleted\n\tfor waitAttempt := 0; waitAttempt < maxWaitAttempts; waitAttempt++ {\n\t\telapsedTime := time.Since(start).Seconds()\n\n\t\tif waitAttempt > 0 {\n\t\t\tformattedTime := strings.Split(fmt.Sprintf(\"%6v\", elapsedTime), \".\")\n\t\t\tfmt.Printf(\"Cluster deletion elapsed time: %vs\\n\", formattedTime[0])\n\t\t}\n\n\t\tresp, err := rdsClientSess.DescribeDBClusters(input)\n\t\tif err != nil {\n\t\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\t\tif aerr.Code() == rds.ErrCodeDBClusterNotFoundFault {\n\t\t\t\t\tfmt.Println(\"RDS Cluster deleted successfully\")\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\t// Print the error, cast err to awserr.Error to get the Code and Message from an 
error.\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t\treturn fmt.Errorf(\"Wait RDS cluster deletion err %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfmt.Printf(\"Cluster status: [%s]\\n\", *resp.DBClusters[0].Status)\n\t\tif *resp.DBClusters[0].Status == \"terminated\" {\n\t\t\tfmt.Printf(\"RDS cluster [%v] deleted successfully\\n\", rdsClusterName)\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(30 * time.Second)\n\t}\n\n\t// Timeout Err\n\treturn fmt.Errorf(\"RDS Cluster [%v] could not be deleted, exceed max wait attemps\\n\", rdsClusterName)\n}", "func WaitForClusterToTerminate(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"waiting for zookeeper cluster to terminate: %s\", z.Name)\n\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\"app\": z.GetName()}).String(),\n\t}\n\n\t// Wait for Pods to terminate\n\terr := wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpodList, err := f.KubeClient.CoreV1().Pods(z.Namespace).List(goctx.TODO(), listOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range podList.Items {\n\t\t\tpod := &podList.Items[i]\n\t\t\tnames = append(names, pod.Name)\n\t\t}\n\t\tt.Logf(\"waiting for pods to terminate, running pods (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for PVCs to terminate\n\terr = wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpvcList, err := f.KubeClient.CoreV1().PersistentVolumeClaims(z.Namespace).List(goctx.TODO(), listOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range pvcList.Items {\n\t\t\tpvc := &pvcList.Items[i]\n\t\t\tnames = append(names, pvc.Name)\n\t\t}\n\t\tt.Logf(\"waiting for pvc to terminate (%v)\", names)\n\t\tif len(names) != 0 
{\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.Logf(\"zookeeper cluster terminated: %s\", z.Name)\n\treturn nil\n}", "func (n *NetImpl) Await(timeout time.Duration) error {\n\treturn nil // This node is alive.\n}", "func waitUntilServerAvailable(ctx context.Context, c driver.Client) bool {\n\tinstanceUp := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tverCtx, cancel := context.WithTimeout(ctx, time.Second*5)\n\t\t\tif _, err := c.Version(verCtx); err == nil {\n\t\t\t\tcancel()\n\t\t\t\tinstanceUp <- true\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tcancel()\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}()\n\tselect {\n\tcase up := <-instanceUp:\n\t\treturn up\n\tcase <-ctx.Done():\n\t\treturn false\n\t}\n}", "func (c *testCluster) waitForElection(i int) *EventLeaderElection {\n\tfor {\n\t\te := <-c.events[i].LeaderElection\n\t\tif e == nil {\n\t\t\tpanic(\"got nil LeaderElection event, channel likely closed\")\n\t\t}\n\t\t// Ignore events with NodeID 0; these mark elections that are in progress.\n\t\tif e.ReplicaID != 0 {\n\t\t\treturn e\n\t\t}\n\t}\n}", "func (c *apiConsumers) Wait() {\n\tc.wait()\n}" ]
[ "0.7165529", "0.7145152", "0.70604503", "0.7051447", "0.6946951", "0.6739213", "0.64901483", "0.64804006", "0.64804006", "0.6452995", "0.6435641", "0.64339143", "0.64059025", "0.6378717", "0.63488895", "0.6347486", "0.6344214", "0.632288", "0.63157916", "0.6271417", "0.62699187", "0.6259992", "0.62503135", "0.62399054", "0.6239473", "0.62282896", "0.62015224", "0.6196991", "0.6195405", "0.6194932", "0.6184387", "0.6184387", "0.6175552", "0.61485136", "0.61423314", "0.6139734", "0.6131404", "0.6128219", "0.6127098", "0.6118987", "0.61056256", "0.6088948", "0.60822225", "0.60772926", "0.6073688", "0.6073393", "0.6063541", "0.6052855", "0.6035935", "0.60283136", "0.60278744", "0.6012974", "0.60113144", "0.6010458", "0.6007328", "0.60043633", "0.6003193", "0.5987272", "0.59800255", "0.5973482", "0.5973478", "0.5971972", "0.5965473", "0.5956845", "0.59546906", "0.59395677", "0.593794", "0.5927501", "0.5927319", "0.59250915", "0.59169704", "0.59071153", "0.58999795", "0.58766747", "0.5873976", "0.5871803", "0.5869662", "0.5866144", "0.5862729", "0.5850162", "0.584908", "0.5846731", "0.58460635", "0.58420086", "0.5832712", "0.5832712", "0.5832712", "0.5832623", "0.5832623", "0.58269507", "0.58249664", "0.582191", "0.582083", "0.5812623", "0.5812443", "0.58094776", "0.5808059", "0.5806673", "0.58051604", "0.580085" ]
0.76435083
0
Aggregate all changes into a new cluster view
func (c *Cluster) aggregateClusterView() { for { select { case event := <-c.ingressEvents: if event.Created { c.currentClusterState.Ingresses = append(c.currentClusterState.Ingresses, event.Ingress) log.WithFields(log.Fields{ "cluster": c.config.Name, "ingress": event.Ingress.Name, }).Info("Detected new ingress.") } else { for i, ingress := range c.currentClusterState.Ingresses { if ingress.Name == event.Ingress.Name { c.currentClusterState.Ingresses[i] = c.currentClusterState.Ingresses[len(c.currentClusterState.Ingresses)-1] c.currentClusterState.Ingresses = c.currentClusterState.Ingresses[:len(c.currentClusterState.Ingresses)-1] log.WithFields(log.Fields{ "cluster": c.config.Name, "ingress": event.Ingress.Name, }).Info("Removed old ingress.") break } } } c.clusterStateChannel <- c.currentClusterState case event := <-c.backendEvents: if event.Created { c.currentClusterState.Backends = append(c.currentClusterState.Backends, event.Backend) log.WithFields(log.Fields{ "cluster": c.config.Name, "backend": event.Backend.Name, "ip": event.Backend.IP, }).Info("Detected new backend pod.") } else { log.WithFields(log.Fields{ "cluster": c.config.Name, "backend": event.Backend.Name, "ip": event.Backend.IP, }).Debug("Detected backend pod removal, searching...") for i, backend := range c.currentClusterState.Backends { if backend.Name == event.Backend.Name { c.currentClusterState.Backends[i] = c.currentClusterState.Backends[len(c.currentClusterState.Backends)-1] c.currentClusterState.Backends = c.currentClusterState.Backends[:len(c.currentClusterState.Backends)-1] log.WithFields(log.Fields{ "cluster": c.config.Name, "backend": event.Backend.Name, "ip": event.Backend.IP, }).Info("Removed old backend pod.") break } } } c.clusterStateChannel <- c.currentClusterState case _ = <-c.aggregatorStopChannel: return case _ = <-c.clearChannel: log.WithFields(log.Fields{ "cluster": c.config.Name, }).Debug("Clearing full cluster state...") c.currentClusterState.Backends = nil 
c.currentClusterState.Ingresses = nil c.clusterStateChannel <- c.currentClusterState } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *FederationSyncController) reconcileOnClusterChange() {\n\tif !s.isSynced() {\n\t\ts.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterAvailableDelay))\n\t}\n\tfor _, obj := range s.store.List() {\n\t\tnamespacedName := s.adapter.NamespacedName(obj.(pkgruntime.Object))\n\t\ts.deliver(namespacedName, s.smallDelay, false)\n\t}\n}", "func (s *FederationSyncController) reconcileOnClusterChange() {\n\tif !s.isSynced() {\n\t\ts.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(s.clusterAvailableDelay))\n\t}\n\tfor _, obj := range s.templateStore.List() {\n\t\tqualifiedName := util.NewQualifiedName(obj.(pkgruntime.Object))\n\t\ts.worker.EnqueueWithDelay(qualifiedName, s.smallDelay)\n\t}\n}", "func (c *Controller) OnUpdate(old, new common.Cluster) {\n\tblog.Infof(\"cluster old %+v new %+v\", old, new)\n\tif _, ok := c.reconcilerMap[new.ClusterID]; !ok {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tnewReconciler, err := reconciler.NewReconciler(new, c.storageClient, c.cmdbClient, c.ops.FullSyncInterval)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"failed, to create new reconciler, err %s\", err.Error())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tblog.Infof(\"add reconciler for cluster %+v\", new)\n\t\tc.reconcilerMap[new.ClusterID] = newReconciler\n\t\tc.cancelFuncMap[new.ClusterID] = cancel\n\t\tgo newReconciler.Run(ctx)\n\t} else {\n\t\tblog.Infof(\"delete old reconciler for %+v\", old)\n\t\t// call cancel function\n\t\tc.cancelFuncMap[old.ClusterID]()\n\t\tdelete(c.cancelFuncMap, old.ClusterID)\n\t\tdelete(c.reconcilerMap, old.ClusterID)\n\n\t\tblog.Infof(\"add new reconciler for %+v\", new)\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tnewReconciler, err := reconciler.NewReconciler(new, c.storageClient, c.cmdbClient, c.ops.FullSyncInterval)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"failed, to create new reconciler, err %s\", 
err.Error())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tc.reconcilerMap[new.ClusterID] = newReconciler\n\t\tc.cancelFuncMap[new.ClusterID] = cancel\n\t\tgo newReconciler.Run(ctx)\n\t}\n}", "func (ct *ctrlerCtx) diffCluster(apicl apiclient.Services) {\n\topts := api.ListWatchOptions{}\n\n\t// get a list of all objects from API server\n\tobjlist, err := apicl.ClusterV1().Cluster().List(context.Background(), &opts)\n\tif err != nil {\n\t\tct.logger.Errorf(\"Error getting a list of objects. Err: %v\", err)\n\t\treturn\n\t}\n\n\tct.logger.Infof(\"diffCluster(): ClusterList returned %d objects\", len(objlist))\n\n\t// build an object map\n\tobjmap := make(map[string]*cluster.Cluster)\n\tfor _, obj := range objlist {\n\t\tobjmap[obj.GetKey()] = obj\n\t}\n\n\tlist, err := ct.Cluster().List(context.Background(), &opts)\n\tif err != nil && !strings.Contains(err.Error(), \"not found in local cache\") {\n\t\tct.logger.Infof(\"Failed to get a list of objects. Err: %s\", err)\n\t\treturn\n\t}\n\n\t// if an object is in our local cache and not in API server, trigger delete for it\n\tfor _, obj := range list {\n\t\t_, ok := objmap[obj.GetKey()]\n\t\tif !ok {\n\t\t\tct.logger.Infof(\"diffCluster(): Deleting existing object %#v since its not in apiserver\", obj.GetKey())\n\t\t\tevt := kvstore.WatchEvent{\n\t\t\t\tType: kvstore.Deleted,\n\t\t\t\tKey: obj.GetKey(),\n\t\t\t\tObject: &obj.Cluster,\n\t\t\t}\n\t\t\tct.handleClusterEvent(&evt)\n\t\t}\n\t}\n\n\t// trigger create event for all others\n\tfor _, obj := range objlist {\n\t\tct.logger.Infof(\"diffCluster(): Adding object %#v\", obj.GetKey())\n\t\tevt := kvstore.WatchEvent{\n\t\t\tType: kvstore.Created,\n\t\t\tKey: obj.GetKey(),\n\t\t\tObject: obj,\n\t\t}\n\t\tct.handleClusterEvent(&evt)\n\t}\n}", "func (c *Controller) onUpdate(oldObj, newObj interface{}) {\n\toldcluster := oldObj.(*crv1.Pgcluster)\n\tnewcluster := newObj.(*crv1.Pgcluster)\n\n\tlog.Debugf(\"pgcluster onUpdate for cluster %s (namespace %s)\", 
newcluster.ObjectMeta.Namespace,\n\t\tnewcluster.ObjectMeta.Name)\n\n\t// if the status of the pgcluster shows that it has been bootstrapped, then proceed with\n\t// creating the cluster (i.e. the cluster deployment, services, etc.)\n\tif newcluster.Status.State == crv1.PgclusterStateBootstrapped {\n\t\tclusteroperator.AddClusterBase(c.Client, newcluster, newcluster.GetNamespace())\n\t\treturn\n\t}\n\n\t// if the 'shutdown' parameter in the pgcluster update shows that the cluster should be either\n\t// shutdown or started but its current status does not properly reflect that it is, then\n\t// proceed with the logic needed to either shutdown or start the cluster\n\tif newcluster.Spec.Shutdown && newcluster.Status.State != crv1.PgclusterStateShutdown {\n\t\tclusteroperator.ShutdownCluster(c.Client, *newcluster)\n\t} else if !newcluster.Spec.Shutdown &&\n\t\tnewcluster.Status.State == crv1.PgclusterStateShutdown {\n\t\tclusteroperator.StartupCluster(c.Client, *newcluster)\n\t}\n\n\t// check to see if the \"autofail\" label on the pgcluster CR has been changed from either true to false, or from\n\t// false to true. If it has been changed to false, autofail will then be disabled in the pg cluster. 
If has\n\t// been changed to true, autofail will then be enabled in the pg cluster\n\tif newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] != \"\" {\n\t\tautofailEnabledOld, err := strconv.ParseBool(oldcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tautofailEnabledNew, err := strconv.ParseBool(newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL])\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif autofailEnabledNew != autofailEnabledOld {\n\t\t\tutil.ToggleAutoFailover(c.Client, autofailEnabledNew,\n\t\t\t\tnewcluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE],\n\t\t\t\tnewcluster.ObjectMeta.Namespace)\n\t\t}\n\n\t}\n\n\t// handle standby being enabled and disabled for the cluster\n\tif oldcluster.Spec.Standby && !newcluster.Spec.Standby {\n\t\tif err := clusteroperator.DisableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t} else if !oldcluster.Spec.Standby && newcluster.Spec.Standby {\n\t\tif err := clusteroperator.EnableStandby(c.Client, *newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the resource values have changed, and if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.Resources, newcluster.Spec.Resources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.Limits, newcluster.Spec.Limits) {\n\t\tif err := clusteroperator.UpdateResources(c.Client, c.Client.Config, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of the pgBackRest repository resource values have changed, and\n\t// if so, update them\n\tif !reflect.DeepEqual(oldcluster.Spec.BackrestResources, newcluster.Spec.BackrestResources) ||\n\t\t!reflect.DeepEqual(oldcluster.Spec.BackrestLimits, newcluster.Spec.BackrestLimits) {\n\t\tif err := backrestoperator.UpdateResources(c.Client, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// see if any of 
the pgBouncer values have changed, and if so, update the\n\t// pgBouncer deployment\n\tif !reflect.DeepEqual(oldcluster.Spec.PgBouncer, newcluster.Spec.PgBouncer) {\n\t\tif err := updatePgBouncer(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// if we are not in a standby state, check to see if the tablespaces have\n\t// differed, and if so, add the additional volumes to the primary and replicas\n\tif !reflect.DeepEqual(oldcluster.Spec.TablespaceMounts, newcluster.Spec.TablespaceMounts) {\n\t\tif err := updateTablespaces(c, oldcluster, newcluster); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"aerospikecluster-controller\", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: maxConcurrentReconciles})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource AerospikeCluster\n\terr = c.Watch(\n\t\t&source.Kind{Type: &aerospikev1alpha1.AerospikeCluster{}},\n\t\t&handler.EnqueueRequestForObject{},\n\t\t// Skip where cluster object generation is not changed\n\t\tpredicate.GenerationChangedPredicate{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Do we need to monitor this? Statefulset is updated many times in reconcile and this add new entry in\n\t// update queue. 
If user will change only cr then we may not need to monitor statefulset.\n\t// Think all possible situation\n\n\t// Watch for changes to secondary resource StatefulSet and requeue the owner AerospikeCluster\n\terr = c.Watch(\n\t\t&source.Kind{Type: &appsv1.StatefulSet{}},\n\t\t&handler.EnqueueRequestForOwner{\n\t\t\tIsController: true,\n\t\t\tOwnerType: &aerospikev1alpha1.AerospikeCluster{},\n\t\t}, predicate.Funcs{\n\t\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\t\treturn false\n\t\t\t},\n\t\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\t\treturn false\n\t\t\t},\n\t\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Controller) OnAdd(add common.Cluster) {\n\tblog.Infof(\"cluster %+v add\", add)\n\t// add new reconciler there is no reconciler for the cluster\n\tif _, ok := c.reconcilerMap[add.ClusterID]; !ok {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tnewReconciler, err := reconciler.NewReconciler(add, c.storageClient, c.cmdbClient, c.ops.FullSyncInterval)\n\t\tif err != nil {\n\t\t\tblog.Errorf(\"failed, to create new reconciler, err %s\", err.Error())\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tblog.Infof(\"add reconciler for cluster %+v\", add)\n\t\tc.reconcilerMap[add.ClusterID] = newReconciler\n\t\tc.cancelFuncMap[add.ClusterID] = cancel\n\t\tgo newReconciler.Run(ctx)\n\t} else {\n\t\tblog.Warnf(\"duplicated add cluster\")\n\t}\n}", "func List(ctx context.Context, client *v1.ServiceClient, clusterID string) ([]*View, *v1.ResponseResult, error) {\n\turl := strings.Join([]string{client.Endpoint, v1.ResourceURLCluster, clusterID, v1.ResourceURLNodegroup}, \"/\")\n\tresponseResult, err := client.DoRequest(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif responseResult.Err != nil {\n\t\treturn nil, responseResult, responseResult.Err\n\t}\n\n\t// Extract nodegroups from the response body.\n\tvar result struct {\n\t\tNodegroups []*View 
`json:\"nodegroups\"`\n\t}\n\terr = responseResult.ExtractResult(&result)\n\tif err != nil {\n\t\treturn nil, responseResult, err\n\t}\n\n\treturn result.Nodegroups, responseResult, err\n}", "func (c *Controller) refresh() error {\n\terr := c.channelConfigSourcer.Update(context.Background(), c.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclusters, err := c.registry.ListClusters(registry.Filter{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.clusterList.UpdateAvailable(c.channelConfigSourcer, c.dropUnsupported(clusters))\n\treturn nil\n}", "func (e Exporter) Collect(ch chan<- prometheus.Metric) {\n\tctx := context.Background()\n\n\tcontainerService, err := container.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprojectsListResponse, err := cloudresourcemanagerService.Projects.List().Filter(\"lifecycleState:ACTIVE\").Context(ctx).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Infof(\"Found %d projects\", len(projectsListResponse.Projects))\n\n\tvar mutex = &sync.Mutex{}\n\tvar wg sync.WaitGroup\n\twg.Add(len(projectsListResponse.Projects))\n\n\tvalidMasterVersions := map[string][]string{}\n\tmasterVersionCount := map[string]float64{}\n\n\tfor _, p := range projectsListResponse.Projects {\n\t\tgo func(p *cloudresourcemanager.Project) {\n\t\t\tdefer wg.Done()\n\t\t\tresp, err := containerService.Projects.Locations.Clusters.List(\"projects/\" + p.ProjectId + \"/locations/-\").Context(ctx).Do()\n\t\t\tif err != nil {\n\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusForbidden {\n\t\t\t\t\tlog.Warnf(\"Missing roles/container.clusterViewer on %s (%s)\", p.Name, p.ProjectId)\n\t\t\t\t\treturn\n\t\t\t\t} else if ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests {\n\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\treturn\n\t\t\t\t} else 
{\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, c := range resp.Clusters {\n\t\t\t\tmutex.Lock()\n\t\t\t\tif _, ok := validMasterVersions[c.Location]; !ok {\n\t\t\t\t\tlog.Infof(\"Pulling server configs for location %s\", c.Location)\n\t\t\t\t\tserverConfig, err := containerService.Projects.Locations.GetServerConfig(\"projects/\" + p.ProjectId + \"/locations/\" + c.Location).Do()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == http.StatusTooManyRequests {\n\t\t\t\t\t\t\tlog.Warn(\"Quota exceeded\")\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tvalidMasterVersions[c.Location] = serverConfig.ValidMasterVersions\n\t\t\t\t}\n\n\t\t\t\tif _, ok := masterVersionCount[c.CurrentMasterVersion]; !ok {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion] = 1\n\t\t\t\t} else {\n\t\t\t\t\tmasterVersionCount[c.CurrentMasterVersion]++\n\t\t\t\t}\n\t\t\t\tmutex.Unlock()\n\n\t\t\t\tif !contains(c.CurrentMasterVersion, validMasterVersions[c.Location]) {\n\t\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\t\te.Metrics[\"gkeUnsupportedMasterVersion\"],\n\t\t\t\t\t\tprometheus.CounterValue,\n\t\t\t\t\t\t1,\n\t\t\t\t\t\t[]string{\n\t\t\t\t\t\t\tc.CurrentMasterVersion,\n\t\t\t\t\t\t\tp.ProjectId,\n\t\t\t\t\t\t\tp.Name,\n\t\t\t\t\t\t\tc.Name,\n\t\t\t\t\t\t\tc.Location,\n\t\t\t\t\t\t}...,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}(p)\n\t}\n\n\twg.Wait()\n\n\tfor version, cnt := range masterVersionCount {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\te.Metrics[\"gkeMasterVersion\"],\n\t\t\tprometheus.CounterValue,\n\t\t\tcnt,\n\t\t\t[]string{\n\t\t\t\tversion,\n\t\t\t}...,\n\t\t)\n\t}\n\n\tlog.Info(\"Done\")\n}", "func (m *Monitor) updateCluster(managedCluster *clusterv1.ManagedCluster) {\n\tglog.V(2).Info(\"Processing Cluster Update.\")\n\n\tclusterToUpdate := managedCluster.GetName()\n\tclusterVendor, version, clusterID := 
GetClusterClaimInfo(managedCluster)\n\tclusterIdx, found := Find(m.ManagedClusterInfo, types.ManagedClusterInfo{\n\t\tNamespace: clusterToUpdate,\n\t\tClusterID: clusterID,\n\t})\n\tif found && clusterID != m.ManagedClusterInfo[clusterIdx].ClusterID {\n\t\t// If the cluster ID has changed update it - otherwise do nothing.\n\t\tglog.Infof(\"Updating %s from Insights cluster list\", clusterToUpdate)\n\t\tm.ManagedClusterInfo[clusterIdx] = types.ManagedClusterInfo{\n\t\t\tClusterID: clusterID,\n\t\t\tNamespace: managedCluster.GetName(),\n\t\t}\n\t\treturn\n\t}\n\n\t// Case to add a ManagedCluster to cluster list after it has been upgraded to version >= 4.X\n\tif !found && clusterVendor == \"OpenShift\" && version >= 4 {\n\t\tglog.Infof(\"Adding %s to Insights cluster list - Cluster was upgraded\", managedCluster.GetName())\n\t\tm.ManagedClusterInfo = append(m.ManagedClusterInfo, types.ManagedClusterInfo{\n\t\t\tClusterID: clusterID,\n\t\t\tNamespace: managedCluster.GetName(),\n\t\t})\n\t}\n}", "func (ctrler CtrlDefReactor) OnClusterUpdate(oldObj *Cluster, newObj *cluster.Cluster) error {\n\tlog.Info(\"OnClusterUpdate is not implemented\")\n\treturn nil\n}", "func (w *worker) reconcileCluster(cluster *chop.ChiCluster) error {\n\tw.a.V(2).M(cluster).S().P()\n\tdefer w.a.V(2).M(cluster).E().P()\n\n\t// Add Cluster's Service\n\tservice := w.creator.CreateServiceCluster(cluster)\n\tif service == nil {\n\t\t// TODO\n\t\t// For somewhat reason Service is not created, this is an error, but not clear what to do about it\n\t\treturn nil\n\t}\n\treturn w.reconcileService(cluster.CHI, service)\n}", "func clusterOperations(adapter federatedtypes.FederatedTypeAdapter, selectedClusters []*federationapi.Cluster, unselectedClusters []*federationapi.Cluster, obj pkgruntime.Object, key string, schedulingInfo *federatedtypes.SchedulingInfo, accessor clusterObjectAccessorFunc) ([]util.FederatedOperation, error) {\n\toperations := make([]util.FederatedOperation, 0)\n\n\tkind := 
adapter.Kind()\n\tfor _, cluster := range selectedClusters {\n\t\t// The data should not be modified.\n\t\tdesiredObj := adapter.Copy(obj)\n\n\t\tclusterObj, found, err := accessor(cluster.Name)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", kind, key, cluster.Name, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\n\t\tshouldCreateIfNeeded := true\n\t\tif adapter.IsSchedulingAdapter() {\n\t\t\tschedulingAdapter, ok := adapter.(federatedtypes.SchedulingAdapter)\n\t\t\tif !ok {\n\t\t\t\terr = fmt.Errorf(\"adapter for kind %s does not properly implement SchedulingAdapter.\", kind)\n\t\t\t\tglog.Fatalf(\"Error: %v\", err)\n\t\t\t}\n\t\t\tvar clusterTypedObj pkgruntime.Object = nil\n\t\t\tif clusterObj != nil {\n\t\t\t\tclusterTypedObj = clusterObj.(pkgruntime.Object)\n\t\t\t}\n\t\t\tdesiredObj, shouldCreateIfNeeded, err = schedulingAdapter.ScheduleObject(cluster, clusterTypedObj, desiredObj, schedulingInfo)\n\t\t\tif err != nil {\n\t\t\t\truntime.HandleError(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tvar operationType util.FederatedOperationType = \"\"\n\t\tif found {\n\t\t\tclusterObj := clusterObj.(pkgruntime.Object)\n\t\t\tif !adapter.Equivalent(desiredObj, clusterObj) {\n\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t}\n\t\t} else if shouldCreateIfNeeded {\n\t\t\toperationType = util.OperationTypeAdd\n\t\t}\n\n\t\tif len(operationType) > 0 {\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: operationType,\n\t\t\t\tObj: desiredObj,\n\t\t\t\tClusterName: cluster.Name,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, cluster := range unselectedClusters {\n\t\tclusterObj, found, err := accessor(cluster.Name)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", kind, key, cluster.Name, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\t\tif found 
{\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: util.OperationTypeDelete,\n\t\t\t\tObj: clusterObj.(pkgruntime.Object),\n\t\t\t\tClusterName: cluster.Name,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn operations, nil\n}", "func cloneView(oldView View) View { \n\tnewView := View{}\n\tnewView.ViewNum = oldView.ViewNum\n\tnewView.TaskParams = map[TaskID]TaskParams{}\n\tfor k, v := range oldView.TaskParams { \n\t\tnewView.TaskParams[k] = v\n\t} \n\tnewView.TaskAssignments = map[TaskID][]ClientID{}\n\tfor k, v := range oldView.TaskAssignments {\n\t\tnewView.TaskAssignments[k] = v\n\t}\n\tnewView.FinishedTasks = map[TaskID][]ClientID{}\n\tfor k, v := range oldView.FinishedTasks {\n\t\tnewView.FinishedTasks[k] = v\n\t}\n\tnewView.ClientInfo = map[ClientID]string{}\n\tfor k, v := range oldView.ClientInfo {\n\t\tnewView.ClientInfo[k] = v\n\t}\n\t// Build a new task info map out of the existing info \n\tnewView.Tasks = map[TaskID]TaskInfo{}\n\tfor tid, params := range newView.TaskParams { \n\t\tfinishedClients := make([]ClientID, len(newView.FinishedTasks[tid]))\n\t\tcopy(finishedClients, newView.FinishedTasks[tid])\n\t\t// Figure out what the pending clients are \n\t\tpendingClients := make([]ClientID, len(newView.TaskAssignments[tid]) - len(finishedClients))\n\t\ti := 0\n\t\tfor _, cid := range newView.TaskAssignments[tid] {\n\t\t\t// Check to see if this cid is already finished\n\t\t\tcidFinished := false\n\t\t\tfor _, finishedCid := range finishedClients {\n\t\t\t\tif cid == finishedCid {\n\t\t\t\t\tcidFinished = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !cidFinished {\n\t\t\t\tpendingClients[i] = cid\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t\tnewView.Tasks[tid] = TaskInfo{ params, pendingClients, finishedClients }\n\t}\n\treturn newView\n}", "func (s *Server) updateCluster(report *healthReport) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.connectivity.startTime.Before(report.startTime) {\n\t\ts.connectivity = report\n\t}\n}", 
"func (r *FoundationDBClusterReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {\n\tcluster := &fdbtypes.FoundationDBCluster{}\n\n\terr := r.Get(ctx, request.NamespacedName, cluster)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn ctrl.Result{}, nil\n\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tclusterLog := log.WithValues(\"namespace\", cluster.Namespace, \"cluster\", cluster.Name)\n\n\tif cluster.Spec.Skip {\n\t\tclusterLog.Info(\"Skipping cluster with skip value true\", \"skip\", cluster.Spec.Skip)\n\t\t// Don't requeue\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\terr = internal.NormalizeClusterSpec(cluster, r.DeprecationOptions)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tadminClient, err := r.getDatabaseClientProvider().GetAdminClient(cluster, r)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tdefer adminClient.Close()\n\n\tsupportedVersion, err := adminClient.VersionSupported(cluster.Spec.Version)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tif !supportedVersion {\n\t\treturn ctrl.Result{}, fmt.Errorf(\"version %s is not supported\", cluster.Spec.Version)\n\t}\n\n\tsubReconcilers := []clusterSubReconciler{\n\t\tupdateStatus{},\n\t\tupdateLockConfiguration{},\n\t\tupdateConfigMap{},\n\t\tcheckClientCompatibility{},\n\t\treplaceMisconfiguredProcessGroups{},\n\t\treplaceFailedProcessGroups{},\n\t\tdeletePodsForBuggification{},\n\t\taddProcessGroups{},\n\t\taddServices{},\n\t\taddPVCs{},\n\t\taddPods{},\n\t\tgenerateInitialClusterFile{},\n\t\tupdateSidecarVersions{},\n\t\tupdatePodConfig{},\n\t\tupdateLabels{},\n\t\tupdateDatabaseConfiguration{},\n\t\tchooseRemovals{},\n\t\texcludeInstances{},\n\t\tchangeCoordinators{},\n\t\tbounceProcesses{},\n\t\tupdatePods{},\n\t\tremoveServices{},\n\t\tremoveProcessGroups{},\n\t\tupdateStatus{},\n\t}\n\n\toriginalGeneration := cluster.ObjectMeta.Generation\n\tnormalizedSpec := 
cluster.Spec.DeepCopy()\n\tdelayedRequeue := false\n\n\tfor _, subReconciler := range subReconcilers {\n\t\t// We have to set the normalized spec here again otherwise any call to Update() for the status of the cluster\n\t\t// will reset all normalized fields...\n\t\tcluster.Spec = *(normalizedSpec.DeepCopy())\n\t\tclusterLog.Info(\"Attempting to run sub-reconciler\", \"subReconciler\", fmt.Sprintf(\"%T\", subReconciler))\n\n\t\trequeue := subReconciler.reconcile(r, ctx, cluster)\n\t\tif requeue == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif requeue.delayedRequeue {\n\t\t\tclusterLog.Info(\"Delaying requeue for sub-reconciler\",\n\t\t\t\t\"subReconciler\", fmt.Sprintf(\"%T\", subReconciler),\n\t\t\t\t\"message\", requeue.message)\n\t\t\tdelayedRequeue = true\n\t\t\tcontinue\n\t\t}\n\n\t\treturn processRequeue(requeue, subReconciler, cluster, r.Recorder, clusterLog)\n\t}\n\n\tif cluster.Status.Generations.Reconciled < originalGeneration || delayedRequeue {\n\t\tclusterLog.Info(\"Cluster was not fully reconciled by reconciliation process\", \"status\", cluster.Status.Generations)\n\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t}\n\n\tclusterLog.Info(\"Reconciliation complete\", \"generation\", cluster.Status.Generations.Reconciled)\n\tr.Recorder.Event(cluster, corev1.EventTypeNormal, \"ReconciliationComplete\", fmt.Sprintf(\"Reconciled generation %d\", cluster.Status.Generations.Reconciled))\n\n\treturn ctrl.Result{}, nil\n}", "func (cc *ClusterReconciler) sync(c *scyllav1.ScyllaCluster) error {\n\tctx := log.WithNewTraceID(context.Background())\n\tlogger := cc.Logger.With(\"cluster\", c.Namespace+\"/\"+c.Name, \"resourceVersion\", c.ResourceVersion)\n\tlogger.Info(ctx, \"Starting reconciliation...\")\n\tlogger.Debug(ctx, \"Cluster State\", \"object\", c)\n\n\t// Before syncing, ensure that all StatefulSets are up-to-date\n\tstale, err := util.AreStatefulSetStatusesStale(ctx, c, cc.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to check sts 
staleness\")\n\t}\n\tif stale {\n\t\tlogger.Debug(ctx, \"StatefulSets are not ready!\")\n\t\treturn nil\n\t}\n\tlogger.Debug(ctx, \"All StatefulSets are up-to-date!\")\n\n\t// Cleanup Cluster resources\n\tif err := cc.cleanup(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageCleanupFailed)\n\t}\n\n\t// Sync Headless Service for Cluster\n\tif err := cc.syncClusterHeadlessService(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageHeadlessServiceSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync headless service\")\n\t}\n\n\t// Sync Cluster Pod Disruption Budget\n\tif err := cc.syncPodDisruptionBudget(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessagePodDisruptionBudgetSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync pod disruption budget\")\n\t}\n\n\t// Sync Agent auth token\n\tif err := cc.syncAgentAuthToken(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageAgentTokenSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync agent auth token\")\n\t}\n\n\t// Sync Cluster Member Services\n\tif err := cc.syncMemberServices(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, MessageMemberServicesSyncFailed)\n\t\treturn errors.Wrap(err, \"failed to sync member service\")\n\t}\n\n\t// Update Status\n\tlogger.Info(ctx, \"Calculating cluster status...\")\n\tif err := cc.updateStatus(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageUpdateStatusFailed, err))\n\t\treturn errors.Wrap(err, \"failed to update status\")\n\t}\n\n\t// Calculate and execute next action\n\tif act, err := cc.nextAction(ctx, c); err != nil {\n\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageUpdateStatusFailed, err))\n\t\treturn 
errors.Wrap(err, \"failed to determine next action\")\n\t} else if act != nil {\n\t\ts := actions.NewState(cc.Client, cc.KubeClient, cc.Recorder)\n\t\tlogger.Debug(ctx, \"New action\", \"name\", act.Name())\n\t\tif err := act.Execute(ctx, s); err != nil {\n\t\t\tcc.Recorder.Event(c, corev1.EventTypeWarning, naming.ErrSyncFailed, fmt.Sprintf(MessageClusterSyncFailed, errors.Cause(err)))\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"cluster-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to Cluster\n\terr = c.Watch(&source.Kind{Type: &clusterv1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *Cluster) ApplyTo(m *model.Cluster) {\n\tm.Name = r.Name\n\tm.Description = r.Description\n\tm.DataCenter = r.DataCenter.ID\n\tm.HaReservation = r.bool(r.HaReservation)\n\tm.KsmEnabled = r.bool(r.KSM.Enabled)\n}", "func (c *leaseController) sync(ctx context.Context, syncCtx factory.SyncContext) error {\n\tclusters, err := c.clusterLister.List(labels.Everything())\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, cluster := range clusters {\n\t\t// cluster is not accepted, skip it.\n\t\tif !meta.IsStatusConditionTrue(cluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// get the lease of a cluster, if the lease is not found, create it\n\t\tleaseName := \"managed-cluster-lease\"\n\t\tobservedLease, err := c.leaseLister.Leases(cluster.Name).Get(leaseName)\n\t\tswitch {\n\t\tcase errors.IsNotFound(err):\n\t\t\tlease := &coordv1.Lease{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: leaseName,\n\t\t\t\t\tNamespace: cluster.Name,\n\t\t\t\t\tLabels: map[string]string{\"open-cluster-management.io/cluster-name\": cluster.Name},\n\t\t\t\t},\n\t\t\t\tSpec: 
coordv1.LeaseSpec{\n\t\t\t\t\tHolderIdentity: pointer.StringPtr(leaseName),\n\t\t\t\t\tRenewTime: &metav1.MicroTime{Time: time.Now()},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif _, err := c.kubeClient.CoordinationV1().Leases(cluster.Name).Create(ctx, lease, metav1.CreateOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\n\t\tleaseDurationSeconds := cluster.Spec.LeaseDurationSeconds\n\t\t// for backward compatible, release-2.1 has mutating admission webhook to mutate this field,\n\t\t// but release-2.0 does not have the mutating admission webhook\n\t\tif leaseDurationSeconds == 0 {\n\t\t\tleaseDurationSeconds = 60\n\t\t}\n\n\t\tgracePeriod := time.Duration(leaseDurationTimes*leaseDurationSeconds) * time.Second\n\t\t// the lease is constantly updated, do nothing\n\t\tnow := time.Now()\n\t\tif now.Before(observedLease.Spec.RenewTime.Add(gracePeriod)) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// for backward compatible, before release-2.3, the format of lease name is cluster-lease-<managed-cluster-name>\n\t\t// TODO: after release-2.3, we will eliminate these\n\t\toldVersionLeaseName := fmt.Sprintf(\"cluster-lease-%s\", cluster.Name)\n\t\toldVersionLease, err := c.leaseLister.Leases(cluster.Name).Get(oldVersionLeaseName)\n\t\tswitch {\n\t\tcase err == nil:\n\t\t\tif time.Now().Before(oldVersionLease.Spec.RenewTime.Add(gracePeriod)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase errors.IsNotFound(err):\n\t\t\t// the old version does not exist, create a new one\n\t\t\toldVersionLease := &coordv1.Lease{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: oldVersionLeaseName,\n\t\t\t\t\tNamespace: cluster.Name,\n\t\t\t\t\tLabels: map[string]string{\"open-cluster-management.io/cluster-name\": cluster.Name},\n\t\t\t\t},\n\t\t\t\tSpec: coordv1.LeaseSpec{\n\t\t\t\t\tHolderIdentity: pointer.StringPtr(oldVersionLeaseName),\n\t\t\t\t\tRenewTime: &metav1.MicroTime{Time: time.Now()},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif _, err := 
c.kubeClient.CoordinationV1().Leases(cluster.Name).Create(ctx, oldVersionLease, metav1.CreateOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t}\n\n\t\t// the lease is not constantly updated, update it to unknown\n\t\tconditionUpdateFn := helpers.UpdateManagedClusterConditionFn(metav1.Condition{\n\t\t\tType: clusterv1.ManagedClusterConditionAvailable,\n\t\t\tStatus: metav1.ConditionUnknown,\n\t\t\tReason: \"ManagedClusterLeaseUpdateStopped\",\n\t\t\tMessage: fmt.Sprintf(\"Registration agent stopped updating its lease.\"),\n\t\t})\n\t\t_, updated, err := helpers.UpdateManagedClusterStatus(ctx, c.clusterClient, cluster.Name, conditionUpdateFn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif updated {\n\t\t\tsyncCtx.Recorder().Eventf(\"ManagedClusterAvailableConditionUpdated\",\n\t\t\t\t\"update managed cluster %q available condition to unknown, due to its lease is not updated constantly\",\n\t\t\t\tcluster.Name)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *StatusReconciler) calculateClusterStatus(stat *vapi.VerticaDBStatus) {\n\tstat.SubclusterCount = 0\n\tstat.InstallCount = 0\n\tstat.AddedToDBCount = 0\n\tstat.UpNodeCount = 0\n\tfor _, sc := range stat.Subclusters {\n\t\tstat.SubclusterCount++\n\t\tstat.InstallCount += sc.InstallCount\n\t\tstat.AddedToDBCount += sc.AddedToDBCount\n\t\tstat.UpNodeCount += sc.UpNodeCount\n\t}\n}", "func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error {\n\tlog := a.log.WithValues(\"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\tlog.Info(\"Reconciling Cluster\")\n\n\tscope, err := scope.NewClusterScope(scope.ClusterScopeParams{\n\t\tCluster: cluster,\n\t\tLogger: a.log,\n\t})\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to create scope: %+v\", err)\n\t}\n\n\tdefer scope.Close()\n\n\tec2svc := ec2.NewService(scope)\n\telbsvc := elb.NewService(scope)\n\tcertSvc := certificates.NewService(scope)\n\n\t// Store cert material in 
spec.\n\tif err := certSvc.ReconcileCertificates(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile certificates for cluster %q\", cluster.Name)\n\t}\n\n\tif err := ec2svc.ReconcileNetwork(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile network for cluster %q\", cluster.Name)\n\t}\n\n\tif err := ec2svc.ReconcileBastion(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile bastion host for cluster %q\", cluster.Name)\n\t}\n\n\tif err := elbsvc.ReconcileLoadbalancers(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile load balancers for cluster %q\", cluster.Name)\n\t}\n\n\tif cluster.Annotations == nil {\n\t\tcluster.Annotations = make(map[string]string)\n\t}\n\tcluster.Annotations[v1alpha2.AnnotationClusterInfrastructureReady] = v1alpha2.ValueReady\n\n\t// Store KubeConfig for Cluster API NodeRef controller to use.\n\tkubeConfigSecretName := remote.KubeConfigSecretName(cluster.Name)\n\tsecretClient := a.coreClient.Secrets(cluster.Namespace)\n\tif _, err := secretClient.Get(kubeConfigSecretName, metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) {\n\t\tkubeConfig, err := a.Deployer.GetKubeConfig(cluster, nil)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to get kubeconfig for cluster %q\", cluster.Name)\n\t\t}\n\n\t\tkubeConfigSecret := &apiv1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: kubeConfigSecretName,\n\t\t\t},\n\t\t\tStringData: map[string]string{\n\t\t\t\t\"value\": kubeConfig,\n\t\t\t},\n\t\t}\n\n\t\tif _, err := secretClient.Create(kubeConfigSecret); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create kubeconfig secret for cluster %q\", cluster.Name)\n\t\t}\n\t} else if err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get kubeconfig secret for cluster %q\", cluster.Name)\n\t}\n\n\t// If the control plane is ready, try to delete the control plane configmap lock, if it exists, and return.\n\tif 
cluster.Annotations[v1alpha2.AnnotationControlPlaneReady] == v1alpha2.ValueReady {\n\t\tconfigMapName := scope.ControlPlaneConfigMapName()\n\t\tlog.Info(\"Checking for existence of control plane configmap lock\", \"configmap-name\", configMapName)\n\n\t\t_, err := a.coreClient.ConfigMaps(cluster.Namespace).Get(configMapName, metav1.GetOptions{})\n\t\tswitch {\n\t\tcase apierrors.IsNotFound(err):\n\t\t\t// It doesn't exist - no-op\n\t\tcase err != nil:\n\t\t\treturn errors.Wrapf(err, \"Error retrieving control plane configmap lock %q\", configMapName)\n\t\tdefault:\n\t\t\tif err := a.coreClient.ConfigMaps(cluster.Namespace).Delete(configMapName, nil); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error deleting control plane configmap lock %q\", configMapName)\n\t\t\t}\n\t\t}\n\n\t\t// Nothing more to reconcile - return early.\n\t\treturn nil\n\t}\n\n\tlog.Info(\"Cluster does not have ready annotation - checking for ready control plane machines\")\n\n\tmachineList := &clusterv1.MachineList{}\n\tif err := a.List(context.Background(), machineList, scope.ListOptionsLabelSelector()); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to retrieve machines in cluster %q\", cluster.Name)\n\t}\n\n\tcontrolPlaneMachines := util.GetControlPlaneMachinesFromList(machineList)\n\n\tmachineReady := false\n\tfor _, machine := range controlPlaneMachines {\n\t\tif machine.Status.NodeRef != nil {\n\t\t\tmachineReady = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !machineReady {\n\t\tlog.Info(\"No control plane machines are ready - requeuing cluster\")\n\t\treturn &controllerError.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineDuration}\n\t}\n\n\tlog.Info(\"Setting cluster ready annotation\")\n\tcluster.Annotations[v1alpha2.AnnotationControlPlaneReady] = v1alpha2.ValueReady\n\n\treturn nil\n}", "func (cv ClusterVersion) ClusterVersionImpl() {}", "func (c *Controller) detect(eventCh chan<- *Event, errCh chan<- error) {\n\tnewClusters, err := c.findAllClusters()\n\tif err != nil 
{\n\t\terrCh <- err\n\t\treturn\n\t}\n\tfor _, newCluster := range newClusters {\n\t\toldCluster, ok := c.clusters[newCluster.Metadata.Name]\n\t\tif !ok {\n\t\t\teventCh <- &Event{Type: kwatch.Added, Object: newCluster}\n\t\t} else if !oldCluster.Get().Equals(newCluster) {\n\t\t\teventCh <- &Event{Type: kwatch.Modified, Object: newCluster}\n\t\t}\n\t}\n\tfor _, oldCluster := range c.clusters {\n\t\tif _, ok := newClusters[oldCluster.Get().Metadata.Name]; !ok {\n\t\t\teventCh <- &Event{Type: kwatch.Deleted, Object: *oldCluster.Get()}\n\t\t}\n\t}\n}", "func (s *BasePlSqlParserListener) EnterAlter_cluster(ctx *Alter_clusterContext) {}", "func ResyncAll(params ResyncAllParams) (*models.ModelVersionIndexSynchronizationResults, error) {\n\tif err := params.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := params.API.V1API.ClustersKibana.ResyncKibanaClusters(\n\t\tclusters_kibana.NewResyncKibanaClustersParams(),\n\t\tparams.API.AuthWriter,\n\t)\n\tif err != nil {\n\t\treturn nil, api.UnwrapError(err)\n\t}\n\n\treturn res.Payload, nil\n}", "func (s *FederationSyncController) clusterOperations(selectedClusters, unselectedClusters []string,\n\ttemplate, override *unstructured.Unstructured, key string) ([]util.FederatedOperation, error) {\n\n\toperations := make([]util.FederatedOperation, 0)\n\n\toverridesMap, err := util.GetOverrides(override)\n\tif err != nil {\n\t\toverrideKind := s.typeConfig.GetOverride().Kind\n\t\treturn nil, fmt.Errorf(\"Error reading cluster overrides for %s %q: %v\", overrideKind, key, err)\n\t}\n\n\tversionMap, err := s.versionManager.Get(template, override)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error retrieving version map: %v\", err)\n\t}\n\n\ttargetKind := s.typeConfig.GetTarget().Kind\n\tfor _, clusterName := range selectedClusters {\n\t\t// TODO(marun) Create the desired object only if needed\n\t\tdesiredObj, err := s.objectForCluster(template, overridesMap[clusterName])\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\n\t\t// TODO(marun) Wait until result of add operation has reached\n\t\t// the target store before attempting subsequent operations?\n\t\t// Otherwise the object won't be found but an add operation\n\t\t// will fail with AlreadyExists.\n\t\tclusterObj, found, err := s.informer.GetTargetStore().GetByKey(clusterName, key)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\n\t\tvar operationType util.FederatedOperationType = \"\"\n\n\t\tif found {\n\t\t\tclusterObj := clusterObj.(*unstructured.Unstructured)\n\n\t\t\t// This controller does not perform updates to namespaces\n\t\t\t// in the host cluster. Such operations need to be\n\t\t\t// performed via the Kube API.\n\t\t\t//\n\t\t\t// The Namespace type is a special case because it is the\n\t\t\t// only container in the Kubernetes API. This controller\n\t\t\t// presumes a separation between the template and target\n\t\t\t// resources, but a namespace in the host cluster is\n\t\t\t// necessarily both template and target.\n\t\t\tif targetKind == util.NamespaceKind && util.IsPrimaryCluster(template, clusterObj) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdesiredObj, err = s.objectForUpdateOp(desiredObj, clusterObj)\n\t\t\tif err != nil {\n\t\t\t\twrappedErr := fmt.Errorf(\"Failed to determine desired object %s %q for cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\t\truntime.HandleError(wrappedErr)\n\t\t\t\treturn nil, wrappedErr\n\t\t\t}\n\n\t\t\tversion, ok := versionMap[clusterName]\n\t\t\tif !ok {\n\t\t\t\t// No target version recorded for template+override version\n\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t} else {\n\t\t\t\ttargetVersion := s.comparisonHelper.GetVersion(clusterObj)\n\n\t\t\t\t// Check if versions don't match. 
If they match then check its\n\t\t\t\t// ObjectMeta which only applies to resources where Generation\n\t\t\t\t// is used to track versions because Generation is only updated\n\t\t\t\t// when Spec changes.\n\t\t\t\tif version != targetVersion {\n\t\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t\t} else if !s.comparisonHelper.Equivalent(desiredObj, clusterObj) {\n\t\t\t\t\t// TODO(marun) Since only the metadata is compared\n\t\t\t\t\t// in the call to Equivalent(), use the template\n\t\t\t\t\t// to avoid having to worry about overrides.\n\t\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// A namespace in the host cluster will never need to be\n\t\t\t// added since by definition it must already exist.\n\n\t\t\toperationType = util.OperationTypeAdd\n\t\t}\n\n\t\tif len(operationType) > 0 {\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: operationType,\n\t\t\t\tObj: desiredObj,\n\t\t\t\tClusterName: clusterName,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, clusterName := range unselectedClusters {\n\t\trawClusterObj, found, err := s.informer.GetTargetStore().GetByKey(clusterName, key)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\t\tif found {\n\t\t\tclusterObj := rawClusterObj.(pkgruntime.Object)\n\t\t\t// This controller does not initiate deletion of namespaces in the host cluster.\n\t\t\tif targetKind == util.NamespaceKind && util.IsPrimaryCluster(template, clusterObj) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: util.OperationTypeDelete,\n\t\t\t\tObj: clusterObj,\n\t\t\t\tClusterName: clusterName,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn operations, nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new 
controller\n\tc, err := controller.New(\"rethinkdbcluster-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &rethinkdbv1alpha1.RethinkDBCluster{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource ConfigMap and requeue the owner RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &rethinkdbv1alpha1.RethinkDBCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource PersistentVolumeClaims and requeue the owner RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &rethinkdbv1alpha1.RethinkDBCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource Pods and requeue the owner RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &rethinkdbv1alpha1.RethinkDBCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource Secret and requeue the owner RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &rethinkdbv1alpha1.RethinkDBCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource Service and requeue the owner RethinkDBCluster\n\terr = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &rethinkdbv1alpha1.RethinkDBCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func 
newClusterVulnerabilityReports(c *AquasecurityV1alpha1Client) *clusterVulnerabilityReports {\n\treturn &clusterVulnerabilityReports{\n\t\tclient: c.RESTClient(),\n\t}\n}", "func (s *IngestStep) Cluster(schemaFile string, dataset string,\n\trootDataPath string, outputFolder string, hasHeader bool) error {\n\toutputSchemaPath := path.Join(outputFolder, D3MSchemaPathRelative)\n\toutputDataPath := path.Join(outputFolder, D3MDataPathRelative)\n\tsourceFolder := path.Dir(dataset)\n\n\t// copy the source folder to have all the linked files for merging\n\tos.MkdirAll(outputFolder, os.ModePerm)\n\terr := copy.Copy(sourceFolder, outputFolder)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to copy source data\")\n\t}\n\n\t// delete the existing files that will be overwritten\n\tos.Remove(outputSchemaPath)\n\tos.Remove(outputDataPath)\n\n\t// load metadata from original schema\n\tmeta, err := metadata.LoadMetadataFromOriginalSchema(schemaFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to load original schema file\")\n\t}\n\tmainDR := meta.GetMainDataResource()\n\n\t// add feature variables\n\tfeatures, err := getClusterVariables(meta, \"_cluster_\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get cluster variables\")\n\t}\n\n\td3mIndexField := getD3MIndexField(mainDR)\n\n\t// open the input file\n\tdataPath := path.Join(rootDataPath, mainDR.ResPath)\n\tlines, err := s.readCSVFile(dataPath, hasHeader)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error reading raw data\")\n\t}\n\n\t// add the cluster data to the raw data\n\tfor _, f := range features {\n\t\tmainDR.Variables = append(mainDR.Variables, f.Variable)\n\n\t\tlines, err = s.appendFeature(sourceFolder, d3mIndexField, false, f, lines)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error appending clustered data\")\n\t\t}\n\t}\n\n\t// initialize csv writer\n\toutput := &bytes.Buffer{}\n\twriter := csv.NewWriter(output)\n\n\t// output the header\n\theader := 
make([]string, len(mainDR.Variables))\n\tfor _, v := range mainDR.Variables {\n\t\theader[v.Index] = v.Name\n\t}\n\terr = writer.Write(header)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error storing clustered header\")\n\t}\n\n\tfor _, line := range lines {\n\t\terr = writer.Write(line)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error storing clustered output\")\n\t\t}\n\t}\n\n\t// output the data with the new feature\n\twriter.Flush()\n\n\terr = util.WriteFileWithDirs(outputDataPath, output.Bytes(), os.ModePerm)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error writing clustered output\")\n\t}\n\n\trelativePath := getRelativePath(path.Dir(outputSchemaPath), outputDataPath)\n\tmainDR.ResPath = relativePath\n\n\t// write the new schema to file\n\terr = metadata.WriteSchema(meta, outputSchemaPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to store cluster schema\")\n\t}\n\n\treturn nil\n}", "func (r *ReconcileVirtualcluster) Reconcile(request reconcile.Request) (rncilRslt reconcile.Result, err error) {\n\tlog.Info(\"reconciling Virtualcluster...\")\n\tvc := &tenancyv1alpha1.Virtualcluster{}\n\terr = r.Get(context.TODO(), request.NamespacedName, vc)\n\tif err != nil {\n\t\t// set NotFound error as nil\n\t\tif apierrors.IsNotFound(err) {\n\t\t\terr = nil\n\t\t}\n\t\treturn\n\t}\n\n\t// TODO implement the delete logic (finalizer)\n\n\t// reconcile Virtualcluster (vc) based on vc status\n\t// NOTE: vc status is required by other components (e.g. 
syncer need to\n\t// know the vc status in order to setup connection to tenant master)\n\tswitch vc.Status.Phase {\n\tcase \"\":\n\t\t// set vc status as ClusterPending if no status is set\n\t\tlog.Info(\"will create a Virtualcluster\", \"vc\", vc.Name)\n\t\terr = retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tvc.Status.Phase = tenancyv1alpha1.ClusterPending\n\t\t\tvc.Status.Message = \"creating virtual cluster...\"\n\t\t\tvc.Status.Reason = \"ClusterCreating\"\n\t\t\tupdateErr := r.Update(context.TODO(), vc)\n\t\t\tif err = r.Get(context.TODO(), request.NamespacedName, vc); err != nil {\n\t\t\t\tlog.Info(\"fail to get vc on update failure\", \"error\", err.Error())\n\t\t\t}\n\t\t\treturn updateErr\n\t\t})\n\t\treturn\n\tcase tenancyv1alpha1.ClusterPending:\n\t\t// create new virtualcluster when vc is pending\n\t\tlog.Info(\"Virtualcluster is pending\", \"vc\", vc.Name)\n\t\tcvs := &tenancyv1alpha1.ClusterVersionList{}\n\t\terr = r.List(context.TODO(), cvs, client.InNamespace(\"\"))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tcv := getClusterVersion(cvs, vc.Spec.ClusterVersionName)\n\t\tif cv == nil {\n\t\t\terr = fmt.Errorf(\"desired ClusterVersion %s not found\",\n\t\t\t\tvc.Spec.ClusterVersionName)\n\t\t\treturn\n\t\t}\n\t\terr = r.createVirtualcluster(vc, cv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t// all components are ready, update vc status\n\t\terr = retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tvc.Status.Phase = \"Running\"\n\t\t\tvc.Status.Message = \"tenant master is running\"\n\t\t\tvc.Status.Reason = \"TenantMasterRunning\"\n\t\t\tupdateErr := r.Update(context.TODO(), vc)\n\t\t\tif err = r.Get(context.TODO(), request.NamespacedName, vc); err != nil {\n\t\t\t\tlog.Info(\"fail to get vc on update failure\", \"error\", err.Error())\n\t\t\t}\n\t\t\treturn updateErr\n\t\t})\n\t\treturn\n\tcase tenancyv1alpha1.ClusterRunning:\n\t\tlog.Info(\"Virtualcluster is running\", \"vc\", 
vc.Name)\n\t\treturn\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown vc phase: %s\", vc.Status.Phase)\n\t\treturn\n\t}\n}", "func createCluster(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterCreate)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\n\terr = deprecateFormContentType(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar provCluster provTypes.Cluster\n\terr = ParseJSON(r, &provCluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tevt, err := event.New(&event.Opts{\n\t\tTarget: event.Target{Type: event.TargetTypeCluster, Value: provCluster.Name},\n\t\tKind: permission.PermClusterCreate,\n\t\tOwner: t,\n\t\tRemoteAddr: r.RemoteAddr,\n\t\tCustomData: event.FormToCustomData(InputFields(r)),\n\t\tAllowed: event.Allowed(permission.PermClusterReadEvents),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { evt.Done(err) }()\n\t_, err = servicemanager.Cluster.FindByName(ctx, provCluster.Name)\n\tif err == nil {\n\t\treturn &tsuruErrors.HTTP{\n\t\t\tCode: http.StatusConflict,\n\t\t\tMessage: \"cluster already exists\",\n\t\t}\n\t}\n\tfor _, poolName := range provCluster.Pools {\n\t\t_, err = pool.GetPoolByName(ctx, poolName)\n\t\tif err != nil {\n\t\t\tif err == pool.ErrPoolNotFound {\n\t\t\t\treturn &tsuruErrors.HTTP{\n\t\t\t\t\tCode: http.StatusNotFound,\n\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tstreamResponse := strings.HasPrefix(r.Header.Get(\"Accept\"), \"application/x-json-stream\")\n\tif streamResponse {\n\t\tw.Header().Set(\"Content-Type\", \"application/x-json-stream\")\n\t\tkeepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 30*time.Second, \"\")\n\t\tdefer keepAliveWriter.Stop()\n\t\twriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}\n\t\tevt.SetLogWriter(writer)\n\t}\n\tprovCluster.Writer = evt\n\terr = servicemanager.Cluster.Create(ctx, 
provCluster)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}", "func afficheCluster(cls Cluster) {\n\tfmt.Print(\" | \")\n\n\tfor i := range cls[0].valeurs {\n\t\tfmt.Printf(\"%d | \", i+1)\n\t}\n\n\tfmt.Println()\n\n\tfor _, exemple := range cls {\n\t\tfmt.Printf(\"%d |\", exemple.id+1)\n\n\t\tfor _, valeur := range exemple.valeurs {\n\t\t\ttext := \"F\"\n\t\t\tif valeur {\n\t\t\t\ttext = \"T\"\n\t\t\t}\n\n\t\t\tfmt.Printf(\" %s |\", text)\n\t\t}\n\n\t\tfmt.Println()\n\t}\n\n\tfmt.Println()\n\tfmt.Println()\n}", "func (this *Graph) Cluster() []*Graph {\n /*\n\n Algorithm synopsis:\n\n Loop over the Starters, for each unvisited Starter,\n define an empty sub-graph and, put it into the toVisit set\n\n Loop over the toVisit node set, for each node in it, \n skip if already visited\n add the node to the sub-graph\n remove the nodes into the hasVisited node set\n put all its incoming and outgoing edge into the the toWalk set while\n stop at the hub nodes (edges from the hub nodes are not put in the toWalk set)\n then iterate through the toWalk edge set \n skip if already walked\n add the edge to the sub-graph\n put its connected nodes into the toVisit node set\n remove the edge from the toWalk edge set into the hasWalked edge set\n\n */\n \n // sub-graph index\n sgNdx := -1\n sgRet := make([]*Graph,0)\n\n toVisit := make(nodeSet); hasVisited := make(nodeSet)\n toWalk := make(edgeSet); hasWalked := make(edgeSet)\n\n for starter := range *this.Starters() {\n // define an empty sub-graph and, put it into the toVisit set\n sgRet = append(sgRet, NewGraph(gographviz.NewGraph())); sgNdx++; \n sgRet[sgNdx].Attrs = this.Attrs\n sgRet[sgNdx].SetDir(this.Directed)\n graphName := fmt.Sprintf(\"%s_%03d\\n\", this.Name, sgNdx);\n sgRet[sgNdx].SetName(graphName)\n toVisit.Add(starter)\n hubVisited := make(nodeSet)\n for len(toVisit) > 0 { for nodep := range toVisit {\n toVisit.Del(nodep); //print(\"O \")\n if this.IsHub(nodep) && hasVisited.Has(nodep) && 
!hubVisited.Has(nodep) { \n // add the already-visited but not-in-this-graph hub node to the sub-graph\n sgRet[sgNdx].AddNode(nodep)\n hubVisited.Add(nodep)\n continue \n }\n if hasVisited.Has(nodep) { continue }\n //spew.Dump(\"toVisit\", nodep)\n // add the node to the sub-graph\n sgRet[sgNdx].AddNode(nodep)\n // remove the nodes into the hasVisited node set\n hasVisited.Add(nodep)\n // stop at the hub nodes\n if this.IsHub(nodep) { continue }\n // put all its incoming and outgoing edge into the the toWalk set\n noden := nodep.Name\n for _, ep := range this.EdgesToParents(noden) {\n toWalk.Add(ep)\n }\n for _, ep := range this.EdgesToChildren(noden) {\n toWalk.Add(ep)\n }\n for edgep := range toWalk {\n toWalk.Del(edgep); //print(\"- \")\n if hasWalked.Has(edgep) { continue }\n //spew.Dump(\"toWalk\", edgep)\n sgRet[sgNdx].Edges.Add(edgep)\n // put its connected nodes into the toVisit node set\n toVisit.Add(this.Lookup(edgep.Src))\n toVisit.Add(this.Lookup(edgep.Dst))\n // remove the edge into the hasWalked edge set\n hasWalked.Add(edgep)\n }\n }}\n //spew.Dump(sgNdx)\n }\n return sgRet\n}", "func (th *transitionHandler) PostRefreshCluster(reason string) stateswitch.PostTransition {\n\tret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error {\n\t\tsCluster, ok := sw.(*stateCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster incompatible type of StateSwitch\")\n\t\t}\n\t\tparams, ok := args.(*TransitionArgsRefreshCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster invalid argument\")\n\t\t}\n\t\tvar (\n\t\t\tb []byte\n\t\t\terr error\n\t\t\tupdatedCluster *common.Cluster\n\t\t)\n\t\tb, err = json.Marshal(&params.validationResults)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdatedCluster, err = updateClusterStatus(logutil.FromContext(params.ctx, th.log), params.db, *sCluster.cluster.ID, sCluster.srcState, *sCluster.cluster.Status,\n\t\t\treason, \"validations_info\", string(b))\n\t\t//update hosts 
status to models.HostStatusResettingPendingUserAction if needed\n\t\tcluster := sCluster.cluster\n\t\tif updatedCluster != nil {\n\t\t\tcluster = updatedCluster\n\t\t}\n\t\tsetPendingUserResetIfNeeded(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, params.hostApi, cluster)\n\t\t//if status was changed - we need to send event and metrics\n\t\tif err == nil && updatedCluster != nil && sCluster.srcState != swag.StringValue(updatedCluster.Status) {\n\t\t\tmsg := fmt.Sprintf(\"Updated status of cluster %s to %s\", updatedCluster.Name, *updatedCluster.Status)\n\t\t\tparams.eventHandler.AddEvent(params.ctx, *updatedCluster.ID, nil, models.EventSeverityInfo, msg, time.Now())\n\t\t\t//report installation finished metric if needed\n\t\t\treportInstallationCompleteStatuses := []string{models.ClusterStatusInstalled, models.ClusterStatusError}\n\t\t\tif sCluster.srcState == models.ClusterStatusInstalling &&\n\t\t\t\tfunk.ContainsString(reportInstallationCompleteStatuses, swag.StringValue(updatedCluster.Status)) {\n\t\t\t\tparams.metricApi.ClusterInstallationFinished(logutil.FromContext(params.ctx, th.log), swag.StringValue(updatedCluster.Status),\n\t\t\t\t\tupdatedCluster.OpenshiftVersion, *updatedCluster.ID, updatedCluster.InstallStartedAt)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\treturn ret\n}", "func ListAllClusters(response *JsonListClustersMap) *JsonListClustersMap {\n\tvar SIDCluster int\n\tvar SName string\n\tvar SAWSAccount int64\n\tvar SAWSRegion string\n\tvar SAWSEnvironment string\n\tvar SK8sVersion string\n\n\tvar SNodeType string\n\tvar SNodeInstance string\n\tvar STotalInstances int\n\n\tvar totalInstances int\n\n\tdescription := make(DescriptionMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY 
nome\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)\n\t\tcheckErr(err)\n\n\t\tdescription = DescriptionMap{}\n\t\ttotalInstances = 0\n\n\t\trows1, err := db.Query(\"SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?\", SIDCluster)\n\t\tcheckErr(err)\n\n\t\tfor rows1.Next() {\n\t\t\terr = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)\n\t\t\tcheckErr(err)\n\n\t\t\tdescription[SNodeType] = append(\n\t\t\t\tdescription[SNodeType],\n\t\t\t\tDescriptionStruct{\n\t\t\t\t\tDescription{\n\t\t\t\t\t\tType: SNodeInstance,\n\t\t\t\t\t\tTotalTypeInstances: STotalInstances,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\n\t\t\ttotalInstances = totalInstances + STotalInstances\n\t\t}\n\n\t\t*response = append(\n\t\t\t*response,\n\t\t\tjsonListClusters{\n\t\t\t\tClusterName: SName,\n\t\t\t\tAws: AWS{\n\t\t\t\t\tAccount: SAWSAccount,\n\t\t\t\t\tRegion: SAWSRegion,\n\t\t\t\t\tEnvironment: SAWSEnvironment,\n\t\t\t\t},\n\t\t\t\tK8SVersion: SK8sVersion,\n\t\t\t\tInstances: Instances{\n\t\t\t\t\tTotalInstances: totalInstances,\n\t\t\t\t\tDescription: description,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func (g *Generator) fill(op *updateOp) (*cke.Cluster, error) {\n\tfor i := len(g.nextControlPlanes); i < g.constraints.ControlPlaneCount; i++ {\n\t\tm := g.selectControlPlane(g.nextUnused)\n\t\tif m != nil {\n\t\t\top.addControlPlane(m)\n\t\t\tg.nextControlPlanes = append(g.nextControlPlanes, m)\n\t\t\tg.nextUnused = removeMachine(g.nextUnused, m)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If no unused machines available, steal a redundant worker and promote it as a control plane.\n\t\tif len(g.nextWorkers) > g.constraints.MinimumWorkers {\n\t\t\tpromote := g.selectControlPlane(g.nextWorkers)\n\t\t\tif promote != nil {\n\t\t\t\top.promoteWorker(promote)\n\t\t\t\tg.nextControlPlanes = append(g.nextControlPlanes, 
promote)\n\t\t\t\tg.removeNextWorker(promote)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn nil, errNotAvailable\n\t}\n\n\tfor i := len(g.nextWorkers); i < g.constraints.MinimumWorkers; i++ {\n\t\tm := g.selectWorker(g.nextUnused)\n\t\tif m == nil {\n\t\t\treturn nil, errNotAvailable\n\t\t}\n\t\top.addWorker(m)\n\t\tg.appendNextWorker(m)\n\t\tg.nextUnused = removeMachine(g.nextUnused, m)\n\t}\n\n\terr := log.Info(\"sabakan: generated cluster\", map[string]interface{}{\n\t\t\"op\": op.name,\n\t\t\"changes\": op.changes,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnodes := make([]*cke.Node, 0, len(g.nextControlPlanes)+len(g.nextWorkers))\n\tfor _, m := range g.nextControlPlanes {\n\t\tnodes = append(nodes, MachineToNode(m, g.cpTmpl.Node))\n\t}\n\tfor _, m := range g.nextWorkers {\n\t\tnodes = append(nodes, MachineToNode(m, g.getWorkerTmpl(m.Spec.Role).Node))\n\t}\n\n\tc := *g.template\n\tc.Nodes = nodes\n\tif err := c.Validate(false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}", "func (c *MSCluster_ClusterCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {\n\tvar dst []MSCluster_Cluster\n\tq := queryAll(&dst, c.logger)\n\tif err := wmi.QueryNamespace(q, &dst, \"root/MSCluster\"); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range dst {\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AddEvictDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AddEvictDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AdminAccessPoint,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AdminAccessPoint),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoAssignNodeSite,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoAssignNodeSite),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.AutoBalancerMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.AutoBalancerMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BackupInProgress,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BackupInProgress),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.BlockCacheSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.BlockCacheSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcHangTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcHangTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupOpeningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupOpeningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupPruningTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupPruningTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupStageTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupStageTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusSvcRegroupTickInMilliseconds,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusSvcRegroupTickInMilliseconds),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterEnforcedAntiAffinity,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterEnforcedAntiAffinity),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterFunctionalLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterFunctionalLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterGroupWaitDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterGroupWaitDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterLogSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterLogSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ClusterUpgradeVersion,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ClusterUpgradeVersion),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSiteThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSiteThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CrossSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CrossSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.CsvBalancer,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.CsvBalancer),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DatabaseReadWriteMode,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DatabaseReadWriteMode),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DefaultNetworkRole,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DefaultNetworkRole),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectedCloudPlatform,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectedCloudPlatform),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEvents,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEvents),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.DetectManagedEventsThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DetectManagedEventsThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DisableGroupPreferredOwnerRandomization,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DisableGroupPreferredOwnerRandomization),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DrainOnShutdown,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DrainOnShutdown),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.DynamicQuorumEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.DynamicQuorumEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.EnableSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.EnableSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.FixQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.FixQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GracePeriodTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GracePeriodTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.GroupDependencyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.GroupDependencyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.HangRecoveryAction,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.HangRecoveryAction),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.IgnorePersistentStateOnStartup,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.IgnorePersistentStateOnStartup),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.LogResourceControls,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LogResourceControls),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.LowerQuorumPriorityNodeId,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.LowerQuorumPriorityNodeId),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MaxNumberOfNodes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MaxNumberOfNodes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MessageBufferLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MessageBufferLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumNeverPreemptPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumNeverPreemptPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.MinimumPreemptorPriority,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.MinimumPreemptorPriority),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NetftIPSecEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.NetftIPSecEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlacementOptions,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlacementOptions),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PlumbAllCrossSubnetRoutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PlumbAllCrossSubnetRoutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.PreventQuorum,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.PreventQuorum),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineDuration,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineDuration),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuarantineThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuarantineThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMax,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMax),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumArbitrationTimeMin,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumArbitrationTimeMin),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumLogFileSize,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumLogFileSize),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.QuorumTypeValue,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.QuorumTypeValue),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RequestReplyTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RequestReplyTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyDefaultPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyDefaultPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResiliencyLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResiliencyLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ResourceDllDeadlockPeriod,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ResourceDllDeadlockPeriod),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RootMemoryReserved,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RootMemoryReserved),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RouteHistoryLength,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.RouteHistoryLength),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DBusTypes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DBusTypes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheDesiredState,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheDesiredState),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.S2DCacheFlashReservePercent,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCacheFlashReservePercent),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DCachePageSizeKBytes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DCachePageSizeKBytes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DEnabled,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DEnabled),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DIOLatencyThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DIOLatencyThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.S2DOptimizations,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.S2DOptimizations),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetDelay,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetDelay),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SameSubnetThreshold,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SameSubnetThreshold),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevel,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevel),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SecurityLevelForStorage,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SecurityLevelForStorage),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.SharedVolumeVssWriterOperationTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.SharedVolumeVssWriterOperationTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.ShutdownTimeoutInMinutes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.ShutdownTimeoutInMinutes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\tc.UseClientAccessNetworksForSharedVolumes,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.UseClientAccessNetworksForSharedVolumes),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDatabaseWriteTimeout,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDatabaseWriteTimeout),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessDynamicWeight,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessDynamicWeight),\n\t\t\tv.Name,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.WitnessRestartInterval,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(v.WitnessRestartInterval),\n\t\t\tv.Name,\n\t\t)\n\n\t}\n\n\treturn nil\n}", "func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {\n\t// Get the VSphereCluster resource for this request.\n\tvsphereCluster := &infrav1.VSphereCluster{}\n\tif err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tr.Logger.V(4).Info(\"VSphereCluster not found, won't reconcile\", \"key\", req.NamespacedName)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Fetch the CAPI Cluster.\n\tcluster, err := clusterutilv1.GetOwnerCluster(r, r.Client, vsphereCluster.ObjectMeta)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tif cluster == nil {\n\t\tr.Logger.Info(\"Waiting for Cluster Controller to set OwnerRef on VSphereCluster\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\tif annotations.IsPaused(cluster, vsphereCluster) {\n\t\tr.Logger.V(4).Info(\"VSphereCluster %s/%s linked to a cluster that is paused\",\n\t\t\tvsphereCluster.Namespace, vsphereCluster.Name)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// Create the patch helper.\n\tpatchHelper, err := patch.NewHelper(vsphereCluster, r.Client)\n\tif err != nil {\n\t\treturn reconcile.Result{}, 
errors.Wrapf(\n\t\t\terr,\n\t\t\t\"failed to init patch helper for %s %s/%s\",\n\t\t\tvsphereCluster.GroupVersionKind(),\n\t\t\tvsphereCluster.Namespace,\n\t\t\tvsphereCluster.Name)\n\t}\n\n\t// Create the cluster context for this request.\n\tclusterContext := &capvcontext.ClusterContext{\n\t\tControllerContext: r.ControllerContext,\n\t\tCluster: cluster,\n\t\tVSphereCluster: vsphereCluster,\n\t\tLogger: r.Logger.WithName(req.Namespace).WithName(req.Name),\n\t\tPatchHelper: patchHelper,\n\t}\n\n\t// Always issue a patch when exiting this function so changes to the\n\t// resource are patched back to the API server.\n\tdefer func() {\n\t\tif err := clusterContext.Patch(); err != nil {\n\t\t\tif reterr == nil {\n\t\t\t\treterr = err\n\t\t\t}\n\t\t\tclusterContext.Logger.Error(err, \"patch failed\", \"cluster\", clusterContext.String())\n\t\t}\n\t}()\n\n\tif err := setOwnerRefsOnVsphereMachines(clusterContext); err != nil {\n\t\treturn reconcile.Result{}, errors.Wrapf(err, \"failed to set owner refs on VSphereMachine objects\")\n\t}\n\n\t// Handle deleted clusters\n\tif !vsphereCluster.DeletionTimestamp.IsZero() {\n\t\treturn r.reconcileDelete(clusterContext)\n\t}\n\n\t// If the VSphereCluster doesn't have our finalizer, add it.\n\t// Requeue immediately after adding finalizer to avoid the race condition between init and delete\n\tif !ctrlutil.ContainsFinalizer(vsphereCluster, infrav1.ClusterFinalizer) {\n\t\tctrlutil.AddFinalizer(vsphereCluster, infrav1.ClusterFinalizer)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// Handle non-deleted clusters\n\treturn r.reconcileNormal(clusterContext)\n}", "func (d *DBGenerator) setSubclusterDetail(ctx context.Context) error {\n\tq := Queries[SubclusterQueryKey]\n\trows, err := d.Conn.QueryContext(ctx, q)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\tdefer rows.Close()\n\n\t// Map to have fast lookup of subcluster name to index in the\n\t// d.Objs.Vdb.Spec.Subclusters 
array\n\tsubclusterInxMap := map[string]int{}\n\n\tfor rows.Next() {\n\t\tif rows.Err() != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, rows.Err())\n\t\t}\n\t\tvar name string\n\t\tvar isPrimary bool\n\t\tif err := rows.Scan(&name, &isPrimary); err != nil {\n\t\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t\t}\n\n\t\tif !vapi.IsValidSubclusterName(name) {\n\t\t\treturn fmt.Errorf(\"subcluster names are included in the name of statefulsets, but the name \"+\n\t\t\t\t\"'%s' cannot be used as it will violate Kubernetes naming. Please rename the subcluster and \"+\n\t\t\t\t\"retry this command again\", name)\n\t\t}\n\n\t\tinx, ok := subclusterInxMap[name]\n\t\tif !ok {\n\t\t\tinx = len(d.Objs.Vdb.Spec.Subclusters)\n\t\t\t// Add an empty subcluster. We increment the count a few lines down.\n\t\t\td.Objs.Vdb.Spec.Subclusters = append(d.Objs.Vdb.Spec.Subclusters,\n\t\t\t\tvapi.Subcluster{Name: name, Size: 0, IsPrimary: isPrimary})\n\t\t\tsubclusterInxMap[name] = inx\n\t\t}\n\t\td.Objs.Vdb.Spec.Subclusters[inx].Size++\n\n\t\t// Maintain the ReviveOrder. 
Update the count of the prior unless the\n\t\t// previous node was for a different subcluster.\n\t\trevSz := len(d.Objs.Vdb.Spec.ReviveOrder)\n\t\tif revSz == 0 || d.Objs.Vdb.Spec.ReviveOrder[revSz-1].SubclusterIndex != inx {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder = append(d.Objs.Vdb.Spec.ReviveOrder, vapi.SubclusterPodCount{SubclusterIndex: inx, PodCount: 1})\n\t\t} else {\n\t\t\td.Objs.Vdb.Spec.ReviveOrder[revSz-1].PodCount++\n\t\t}\n\t}\n\n\tif len(subclusterInxMap) == 0 {\n\t\treturn errors.New(\"not subclusters found\")\n\t}\n\treturn nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"toolchaincluster-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource ToolchainCluster\n\treturn c.Watch(&source.Kind{Type: &toolchainv1alpha1.ToolchainCluster{}}, &handler.EnqueueRequestForObject{})\n}", "func (r *KindClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := log.FromContext(ctx).WithValues(\"kindcluster\", req.NamespacedName)\n\n\t// Fetch the KindCluster instance\n\tkindCluster := &infrastructurev1alpha4.KindCluster{}\n\tif err := r.Get(ctx, req.NamespacedName, kindCluster); err != nil {\n\t\tif client.IgnoreNotFound(err) != nil {\n\t\t\tlog.Error(err, \"unable to fetch KindCluster\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\t// Cluster no longer exists so lets stop now\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Fetch the owner Cluster\n\tcluster, err := util.GetOwnerCluster(ctx, r.Client, kindCluster.ObjectMeta)\n\tif err != nil {\n\t\tlog.Error(err, \"failed to get owner cluster\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif cluster == nil {\n\t\tlog.Info(\"Cluster Controller has not yet set OwnerRef\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif annotations.IsPaused(cluster, kindCluster) {\n\t\tlog.Info(\"KindCluster or linked Cluster is marked as 
paused. Won't reconcile\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tlog = log.WithValues(\"cluster\", kindCluster.Name)\n\thelper, err := patch.NewHelper(kindCluster, r.Client)\n\tif err != nil {\n\t\treturn reconcile.Result{}, errors.Wrap(err, \"failed to init patch helper\")\n\t}\n\n\t// Ensure we always patch the resource with the latest changes when exiting function\n\tdefer func() {\n\t\thelper.Patch(\n\t\t\tcontext.TODO(),\n\t\t\tkindCluster,\n\t\t\tpatch.WithOwnedConditions{\n\t\t\t\tConditions: []clusterv1.ConditionType{\n\t\t\t\t\tclusterv1.ReadyCondition,\n\t\t\t\t}},\n\t\t)\n\t}()\n\n\tif !kindCluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The KindCluster is being deleted\n\t\tif controllerutil.ContainsFinalizer(kindCluster, finalizerName) {\n\t\t\tlog.Info(\"Deleting cluster\")\n\n\t\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseDeleting\n\t\t\tkindCluster.Status.Ready = false\n\t\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tif err := kindClient.DeleteCluster(kindCluster.NamespacedName()); err != nil {\n\t\t\t\tlog.Error(err, \"failed to delete cluster\")\n\t\t\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonDeleteFailed\n\t\t\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tcontrollerutil.RemoveFinalizer(kindCluster, finalizerName)\n\t\t\tlog.Info(\"Removed finalizer\")\n\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Ensure our finalizer is present\n\tcontrollerutil.AddFinalizer(kindCluster, finalizerName)\n\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif kindCluster.Status.Phase == nil || *kindCluster.Status.Phase == infrastructurev1alpha4.KindClusterPhasePending {\n\t\tlog.Info(\"Creating new cluster in 
Kind\")\n\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseCreating\n\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tif err := kindClient.CreateCluster(kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to create cluster in kind\")\n\t\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonCreateFailed\n\t\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tkindCluster.Status.Ready = true\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseReady\n\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tlog.Info(\"Cluster created\")\n\t}\n\n\t// Ensure ready status is up-to-date\n\tisReady, err := kindClient.IsReady(kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to check status of cluster\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonClusterNotFound\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Status.Ready = isReady\n\tif isReady {\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseReady\n\t} else {\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseCreating\n\t}\n\n\t// Ensure kubeconfig is up-to-date\n\tkc, err := kindClient.GetKubeConfig(kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to check status of cluster\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonKubeConfig\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Status.KubeConfig = &kc\n\n\t// Populate the server endpoint details\n\tendpoint, err := kubeconfig.ExtractEndpoint(kc, 
kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to get control plane endpoint\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonEndpoint\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{\n\t\tHost: endpoint.Host,\n\t\tPort: endpoint.Port,\n\t}\n\n\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"clustersync-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource ClusterSync\n\terr = c.Watch(&source.Kind{Type: &hiveinternal.ClusterSync{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"virtualcluster-controller\",\n\t\tmgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to Virtualcluster\n\terr = c.Watch(&source.Kind{\n\t\tType: &tenancyv1alpha1.Virtualcluster{}},\n\t\t&handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (d *Dao) OverlordClusters(c context.Context, zone, appid string) (ocs []*model.OverlordCluster, err error) {\n\tvar res struct {\n\t\tData []*model.OverlordApiserver `json:\"grouped_clusters\"`\n\t}\n\tif err = d.client.RESTfulGet(c, apiserverURI, \"\", nil, &res, appid); err != nil {\n\t\tlog.Error(\"overlord cluster url(%s) appid(%s) error(%v)\", apiserverURI, appid, err)\n\t\treturn\n\t}\nGETALL:\n\tfor _, oa := range res.Data {\n\t\tif zone == \"\" || oa.Group == zone 
{\n\t\t\tfor _, oc := range oa.Clusters {\n\t\t\t\tcluster := &model.OverlordCluster{\n\t\t\t\t\tName: oc.Name,\n\t\t\t\t\tType: oc.Type,\n\t\t\t\t\tZone: zone,\n\t\t\t\t\tHashMethod: \"fnv1a_64\",\n\t\t\t\t\tHashDistribution: \"ketama\",\n\t\t\t\t\tHashTag: \"{}\",\n\t\t\t\t\tListenProto: \"tcp\",\n\t\t\t\t\tListenAddr: net.JoinHostPort(\"0.0.0.0\", strconv.Itoa(oc.FrontEndPort)),\n\t\t\t\t\tDailTimeout: 1000,\n\t\t\t\t\tReadTimeout: 1000,\n\t\t\t\t\tWriteTimeout: 1000,\n\t\t\t\t\tNodeConn: 2,\n\t\t\t\t\tPingFailLimit: 3,\n\t\t\t\t\tPingAutoEject: true,\n\t\t\t\t}\n\t\t\t\tfor _, oci := range oc.Instances {\n\t\t\t\t\tif oc.Type == \"redis_cluster\" && oci.Role != \"master\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ton := &model.OverlordNode{\n\t\t\t\t\t\tAlias: oci.Alias,\n\t\t\t\t\t\tAddr: net.JoinHostPort(oci.IP, strconv.Itoa(oci.Port)),\n\t\t\t\t\t\tWeight: oci.Weight,\n\t\t\t\t\t}\n\t\t\t\t\tcluster.Nodes = append(cluster.Nodes, on)\n\t\t\t\t}\n\t\t\t\tocs = append(ocs, cluster)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ocs) == 0 && zone != \"\" {\n\t\tzone = \"\"\n\t\tgoto GETALL\n\t}\n\treturn\n}", "func (c spokeClusterController) sync(ctx context.Context, syncCtx factory.SyncContext) error {\n\tspokeCluster, err := c.hubClusterLister.Get(c.clusterName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get spoke cluster with name %q from hub: %w\", c.clusterName, err)\n\t}\n\n\t// current spoke cluster is not accepted, do nothing.\n\tacceptedCondition := helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionHubAccepted)\n\tif !helpers.IsConditionTrue(acceptedCondition) {\n\t\tsyncCtx.Recorder().Eventf(\"SpokeClusterIsNotAccepted\", \"Spoke cluster %q is not accepted by hub yet\", c.clusterName)\n\t\treturn nil\n\t}\n\n\t// current spoke cluster is accepted, update its status if necessary.\n\tcapacity, allocatable, err := c.getClusterResources()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get capacity and 
allocatable of spoke cluster %q: %w\", c.clusterName, err)\n\t}\n\n\tspokeVersion, err := c.getSpokeVersion()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get server version of spoke cluster %q: %w\", c.clusterName, err)\n\t}\n\n\tupdateStatusFuncs := []helpers.UpdateSpokeClusterStatusFunc{}\n\tjoinedCondition := helpers.FindSpokeClusterCondition(spokeCluster.Status.Conditions, clusterv1.SpokeClusterConditionJoined)\n\tjoined := helpers.IsConditionTrue(joinedCondition)\n\t// current spoke cluster did not join the hub cluster, join it.\n\tif !joined {\n\t\tupdateStatusFuncs = append(updateStatusFuncs, helpers.UpdateSpokeClusterConditionFn(clusterv1.StatusCondition{\n\t\t\tType: clusterv1.SpokeClusterConditionJoined,\n\t\t\tStatus: metav1.ConditionTrue,\n\t\t\tReason: \"SpokeClusterJoined\",\n\t\t\tMessage: \"Spoke cluster joined\",\n\t\t}))\n\t}\n\n\tupdateStatusFuncs = append(updateStatusFuncs, updateClusterResourcesFn(clusterv1.SpokeClusterStatus{\n\t\tCapacity: capacity,\n\t\tAllocatable: allocatable,\n\t\tVersion: *spokeVersion,\n\t}))\n\n\t_, updated, err := helpers.UpdateSpokeClusterStatus(ctx, c.hubClusterClient, c.clusterName, updateStatusFuncs...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to update status of spoke cluster %q: %w\", c.clusterName, err)\n\t}\n\tif updated {\n\t\tif !joined {\n\t\t\tsyncCtx.Recorder().Eventf(\"SpokeClusterJoined\", \"Spoke cluster %q joined hub\", c.clusterName)\n\t\t}\n\t\tklog.V(4).Infof(\"The status of spoke cluster %q has been updated\", c.clusterName)\n\t}\n\treturn nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileAerospikeCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func (r *RqliteClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\t_ = context.Background()\n\tlog := r.Log.WithValues(\"Reconcile RqliteCluster \", req.Name, \" in namespace \", req.NamespacedName)\n\n\tlog.V(1).Info(\"Get Object Info\")\n\t//objectInfo := 
new(rqlitev1.RqliteCluster{})\n\tobjectInfo := &rqlitev1.RqliteCluster{}\n\terr := r.Get(context.TODO(), req.NamespacedName, objectInfo)\n\n\tif err != nil {\n\t\tlog.Error(err, \"Error during r.Get\")\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\tlog.Info(\"Dump Object Info\", \"ClusterName\", objectInfo.Spec.Name, \"ClusterSize\", objectInfo.Spec.ClusterSize)\n\n\tlog.V(1).Info(\"Update Object Status\")\n\tlog.V(1).Info(\"Get Object Current Status\", \"NAme\", objectInfo.Spec.Name, \"Status\", objectInfo.Status.CurrentStatus)\n\tif objectInfo.Status.CurrentStatus == \"\" {\n\t\tlog.V(1).Info(\"Creating new RqliteCluster)\n\t\tpod := newRqliteCluster(objectInfo)\n\t\tobjectInfo.Status.CurrentStatus = \"OK\"\n\t}\n\n\tlog.V(1).Info(\"Set Object Target Status : \", \"Name\", objectInfo.Spec.Name, \"Status \", objectInfo.Status.CurrentStatus)\n\n\terr = r.Status().Update(context.TODO(), objectInfo)\n\tif err != nil {\n\t\tlog.Error(err, \"Error during r.Status\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t//if anything else happens\n\treturn ctrl.Result{}, nil\n}", "func newLayerView(gui *gocui.Gui, layers []*image.Layer) (controller *Layer, err error) {\n\tcontroller = new(Layer)\n\n\tcontroller.listeners = make([]LayerChangeListener, 0)\n\n\t// populate main fields\n\tcontroller.name = \"layer\"\n\tcontroller.gui = gui\n\tcontroller.Layers = layers\n\n\tswitch mode := viper.GetBool(\"layer.show-aggregated-changes\"); mode {\n\tcase true:\n\t\tcontroller.CompareMode = CompareAll\n\tcase false:\n\t\tcontroller.CompareMode = CompareLayer\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown layer.show-aggregated-changes value: %v\", mode)\n\t}\n\n\treturn controller, err\n}", "func newCluster() *cobra.Command {\n\tvar cluster *[]string\n\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster\",\n\t\tShort: \"display cluster nodes.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args 
[]string) error {\n\t\t\tclient, err := getLeader(*cluster)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't connect to cluster leader\")\n\t\t\t}\n\t\t\tdefer client.Close()\n\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\t\tdefer cancel()\n\n\t\t\tvar leader *dqclient.NodeInfo\n\t\t\tvar nodes []dqclient.NodeInfo\n\t\t\tif leader, err = client.Leader(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get leader\")\n\t\t\t}\n\n\t\t\tif nodes, err = client.Cluster(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get cluster\")\n\t\t\t}\n\n\t\t\tfmt.Printf(\"ID \\tLeader \\tAddress\\n\")\n\t\t\tfor _, node := range nodes {\n\t\t\t\tfmt.Printf(\"%d \\t%v \\t%s\\n\", node.ID, node.ID == leader.ID, node.Address)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tflags := cmd.Flags()\n\tcluster = flags.StringSliceP(\"cluster\", \"c\", defaultCluster, \"addresses of existing cluster nodes\")\n\n\treturn cmd\n}", "func (r *ReconcileClusterVersion) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\t// Fetch the ClusterVersion instance\n\tr.Log.Info(\"reconciling ClusterVersion...\")\n\tcv := &tenancyv1alpha1.ClusterVersion{}\n\terr := r.Get(context.TODO(), request.NamespacedName, cv)\n\tif err != nil {\n\t\t// Error reading the object - requeue the request.\n\t\tif apierrors.IsNotFound(err) {\n\t\t\terr = nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\tr.Log.Info(\"new ClusterVersion event\", \"ClusterVersionName\", cv.Name)\n\n\t// Register finalizers\n\tcvf := \"clusterVersion.finalizers\"\n\n\tif cv.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// the object has not been deleted yet, registers the finalizers\n\t\tif strutil.ContainString(cv.ObjectMeta.Finalizers, cvf) == false {\n\t\t\tcv.ObjectMeta.Finalizers = append(cv.ObjectMeta.Finalizers, cvf)\n\t\t\tr.Log.Info(\"register finalizer for ClusterVersion\", \"finalizer\", cvf)\n\t\t\tif err := 
r.Update(context.Background(), cv); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// the object is being deleted, star the finalizer\n\t\tif strutil.ContainString(cv.ObjectMeta.Finalizers, cvf) == true {\n\t\t\t// the finalizer logic\n\t\t\tr.Log.Info(\"a ClusterVersion object is deleted\", \"ClusterVersion\", cv.Name)\n\n\t\t\t// remove the finalizer after done\n\t\t\tcv.ObjectMeta.Finalizers = strutil.RemoveString(cv.ObjectMeta.Finalizers, cvf)\n\t\t\tif err := r.Update(context.Background(), cv); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (reconciler *ClusterReconciler) reconcile() (ctrl.Result, error) {\n\tvar err error\n\n\t// Child resources of the cluster CR will be automatically reclaimed by K8S.\n\tif reconciler.observed.cluster == nil {\n\t\treconciler.log.Info(\"The cluster has been deleted, no action to take\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\terr = reconciler.reconcileConfigMap()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerDeployment()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerService()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerIngress()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileTaskManagerDeployment()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tresult, err := reconciler.reconcileJob()\n\n\treturn result, nil\n}", "func (c *Controller) processCluster(updateCtx context.Context, workerNum uint, clusterInfo *ClusterInfo) {\n\tdefer c.clusterList.ClusterProcessed(clusterInfo)\n\n\tcluster := clusterInfo.Cluster\n\tclusterLog := c.logger.WithField(\"cluster\", cluster.Alias).WithField(\"worker\", workerNum)\n\n\tclusterLog.Infof(\"Processing cluster (%s)\", cluster.LifecycleStatus)\n\n\terr := 
c.doProcessCluster(updateCtx, clusterLog, clusterInfo)\n\n\t// log the error and resolve the special error cases\n\tif err != nil {\n\t\tclusterLog.Errorf(\"Failed to process cluster: %s\", err)\n\n\t\t// treat \"provider not supported\" as no error\n\t\tif err == provisioner.ErrProviderNotSupported {\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\tclusterLog.Infof(\"Finished processing cluster\")\n\t}\n\n\t// update the cluster state in the registry\n\tif !c.dryRun {\n\t\tif err != nil {\n\t\t\tif cluster.Status.Problems == nil {\n\t\t\t\tcluster.Status.Problems = make([]*api.Problem, 0, 1)\n\t\t\t}\n\t\t\tcluster.Status.Problems = append(cluster.Status.Problems, &api.Problem{\n\t\t\t\tTitle: err.Error(),\n\t\t\t\tType: errTypeGeneral,\n\t\t\t})\n\n\t\t\tif len(cluster.Status.Problems) > errorLimit {\n\t\t\t\tcluster.Status.Problems = cluster.Status.Problems[len(cluster.Status.Problems)-errorLimit:]\n\t\t\t\tcluster.Status.Problems[0] = &api.Problem{\n\t\t\t\t\tType: errTypeCoalescedProblems,\n\t\t\t\t\tTitle: \"<multiple problems>\",\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcluster.Status.Problems = []*api.Problem{}\n\t\t}\n\t\terr = c.registry.UpdateCluster(cluster)\n\t\tif err != nil {\n\t\t\tclusterLog.Errorf(\"Unable to update cluster state: %s\", err)\n\t\t}\n\t}\n}", "func updateViews(db *sql.DB) {\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: failed to start pq txn for materialized view updates => %s\", err.Error())\n\t\treturn\n\t}\n\n\tfor _, view := range materializedViews {\n\t\tif _, err = txn.Exec(fmt.Sprintf(\"REFRESH MATERIALIZED VIEW %s\", view)); err != nil {\n\t\t\tlog.Printf(\"ERROR: Failed to update materialized view, %s => %s\", view, err.Error())\n\t\t}\n\t}\n\n\terr = txn.Commit()\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: Failed to commit transaction => {%s}\", err)\n\t}\n}", "func (m *ClusterService) getTopology(ctx context.Context, args struct{}) (*proto.GeneralResp, error) {\n\tif _, _, err := permissions(ctx, ADMIN); 
err != nil {\n\t\treturn nil, err\n\t}\n\ttv := &TopologyView{\n\t\tZones: make([]*ZoneView, 0),\n\t}\n\tzones := m.cluster.t.getAllZones()\n\tfor _, zone := range zones {\n\t\tcv := newZoneView(zone.name)\n\t\tcv.Status = zone.getStatusToString()\n\t\ttv.Zones = append(tv.Zones, cv)\n\t\tnsc := zone.getAllNodeSet()\n\t\tfor _, ns := range nsc {\n\t\t\tnsView := newNodeSetView(ns.dataNodeLen(), ns.metaNodeLen())\n\t\t\tcv.NodeSet[ns.ID] = nsView\n\t\t\tns.dataNodes.Range(func(key, value interface{}) bool {\n\t\t\t\tdataNode := value.(*DataNode)\n\t\t\t\tnsView.DataNodes = append(nsView.DataNodes, proto.NodeView{ID: dataNode.ID, Addr: dataNode.Addr, Status: dataNode.isActive, IsWritable: dataNode.isWriteAble()})\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tns.metaNodes.Range(func(key, value interface{}) bool {\n\t\t\t\tmetaNode := value.(*MetaNode)\n\t\t\t\tnsView.MetaNodes = append(nsView.MetaNodes, proto.NodeView{ID: metaNode.ID, Addr: metaNode.Addr, Status: metaNode.IsActive, IsWritable: metaNode.isWritable()})\n\t\t\t\treturn true\n\t\t\t})\n\t\t}\n\t}\n\n\tbs, e := json.Marshal(tv)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn proto.Success(string(bs)), e\n}", "func SnapShotCluster() int32 {\n\n\tfor _, v := range VolMgrHosts {\n\n\t\tconn, err := utils.Dial(v)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"SnapShotVol failed,Dial to MetaNodeHosts %v fail :%v\", v, err)\n\t\t\treturn -1\n\t\t}\n\n\t\tdefer conn.Close()\n\n\t\tvc := vp.NewVolMgrClient(conn)\n\t\tpSnapShotClusterReq := &vp.SnapShotClusterReq{}\n\t\tctx, _ := context.WithTimeout(context.Background(), SNAPSHOT_TIMEOUT_SECONDS*time.Second)\n\t\tpSnapShotClusterAck, err := vc.SnapShotCluster(ctx, pSnapShotClusterReq)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"SnapShotVol failed,grpc func err :%v\", err)\n\t\t\treturn -1\n\t\t}\n\n\t\tif pSnapShotClusterAck.Ret != 0 {\n\t\t\tlogger.Error(\"SnapShotCluster failed,rpc func ret:%v\", pSnapShotClusterAck.Ret)\n\t\t\treturn -1\n\t\t}\n\t}\n\n\treturn 0\n}", 
"func newAzureClusterReconciler(scope *scope.ClusterScope) *azureClusterReconciler {\n\treturn &azureClusterReconciler{\n\t\tscope: scope,\n\t\tgroupsSvc: groups.NewService(scope),\n\t\tvnetSvc: virtualnetworks.NewService(scope),\n\t\tsecurityGroupSvc: securitygroups.NewService(scope),\n\t\trouteTableSvc: routetables.NewService(scope),\n\t\tsubnetsSvc: subnets.NewService(scope),\n\t\tpublicIPSvc: publicips.NewService(scope),\n\t\tloadBalancerSvc: loadbalancers.NewService(scope),\n\t\tskuCache: resourceskus.NewCache(scope, scope.Location()),\n\t}\n}", "func bindClusters(clusterTag string, state ecsState, tmpls []containerBindTemplate) []api.Cluster {\n\t// cluster (service) name as specified by clusterTag mapped to the API cluster\n\t// that is collecting instances\n\tclusterMap := map[string]*api.Cluster{}\n\tgetCluster := func(name string) *api.Cluster {\n\t\tif _, ok := clusterMap[name]; !ok {\n\t\t\tclusterMap[name] = &api.Cluster{}\n\t\t\tclusterMap[name].Name = name\n\t\t}\n\t\treturn clusterMap[name]\n\t}\n\n\t// containerinstance ARN -> ports that have been bound to a Cluster instance\n\tusedPorts := map[arn]map[int]bool{}\n\tusePort := func(carn arn, port int) {\n\t\tif _, ok := usedPorts[carn]; !ok {\n\t\t\tusedPorts[carn] = map[int]bool{}\n\t\t}\n\t\tusedPorts[carn][port] = true\n\t}\n\tisUsed := func(carn arn, port int) bool {\n\t\tif _, ok := usedPorts[carn]; !ok {\n\t\t\treturn false\n\t\t}\n\t\treturn usedPorts[carn][port]\n\t}\n\n\tfindPort := func(c *ecs.Container, destPort int) (int, error) {\n\t\treturn findHostPort(c, destPort, usePort, isUsed)\n\t}\n\n\tmissing := func(cluster string, tarn arn, desc string) {\n\t\tconsole.Error().Printf(\n\t\t\t\"%s / %s disconnect between layout and live: %s not found\", cluster, tarn, desc)\n\t}\n\n\tfor _, tmpl := range tmpls {\n\t\ttaskARNs := state.live.getTaskARNs(tmpl.cluster, tmpl.service)\n\n\t\tfor i, tmplSvc := range tmpl.svcs {\n\t\t\t// the associated service port\n\t\t\tcSvcPort := 
tmpl.ports[i]\n\n\t\t\t// find the API Cluster we're adding instances to\n\t\t\tc := getCluster(tmplSvc)\n\n\t\t\t// look at each running task as it will create one or more instances\n\t\t\tfor _, tarn := range taskARNs {\n\t\t\t\t// the running task\n\t\t\t\ttinst, ok := state.live.taskInstances[tarn]\n\t\t\t\tif !ok {\n\t\t\t\t\tmissing(tmpl.cluster, tmpl.service, string(tarn))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Find the container host's metadata\n\t\t\t\tciarn := arnValue(tinst.ContainerInstanceArn)\n\t\t\t\tcinst, ok := state.live.containerInstances[ciarn]\n\t\t\t\tif !ok {\n\t\t\t\t\tmissing(tmpl.cluster, tmpl.service, string(ciarn))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// a container instance is bound to a particular EC2 host\n\t\t\t\tec2id := ptr.StringValue(cinst.Ec2InstanceId)\n\t\t\t\tec2inst, ok := state.live.ec2Hosts[ec2id]\n\t\t\t\tif !ok {\n\t\t\t\t\tmissing(tmpl.cluster, tmpl.service, fmt.Sprintf(\"EC2 host %s\", ec2id))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// grab the container our template is for out of the task instance\n\t\t\t\tcontainer := tinst.getContainer(tmpl.container)\n\t\t\t\t// gets the ip address to be used for communicating with enovy container\n\t\t\t\tcontainerIP := getIP(ec2inst,container)\n\t\t\t\tif container == nil {\n\t\t\t\t\tmissing(\n\t\t\t\t\t\ttmpl.cluster,\n\t\t\t\t\t\ttmpl.service,\n\t\t\t\t\t\tfmt.Sprintf(\"container %s in task %s\", tmpl.container, tarn))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// find a host port bound to the container service port\n\t\t\t\thostPort, err := findPort(container, cSvcPort)\n\t\t\t\tif err != nil {\n\t\t\t\t\tmissing(\n\t\t\t\t\t\ttmpl.cluster,\n\t\t\t\t\t\ttmpl.service,\n\t\t\t\t\t\tfmt.Sprintf(\"container port %d not exposed on host %s\", cSvcPort, container))\n\t\t\t\t\tconsole.Error().Printf(err.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tc.Instances = append(\n\t\t\t\t\tc.Instances,\n\t\t\t\t\tmkInstance(clusterTag, ec2id, containerIP, hostPort, tmpl, 
ciarn, tarn))\n\t\t\t}\n\t\t}\n\t}\n\n\tresults := []api.Cluster{}\n\tfor _, v := range clusterMap {\n\t\tresults = append(results, *v)\n\t}\n\n\treturn results\n}", "func (s *FederationSyncController) syncToClusters(selectedClusters, unselectedClusters []string,\n\ttemplate, override *unstructured.Unstructured) util.ReconciliationStatus {\n\n\ttemplateKind := s.typeConfig.GetTemplate().Kind\n\tkey := util.NewQualifiedName(template).String()\n\n\tglog.V(3).Infof(\"Syncing %s %q in underlying clusters, selected clusters are: %s, unselected clusters are: %s\",\n\t\ttemplateKind, key, selectedClusters, unselectedClusters)\n\n\toperations, err := s.clusterOperations(selectedClusters, unselectedClusters, template, override, key)\n\tif err != nil {\n\t\ts.eventRecorder.Eventf(template, corev1.EventTypeWarning, \"FedClusterOperationsError\",\n\t\t\t\"Error obtaining sync operations for %s: %s error: %s\", templateKind, key, err.Error())\n\t\treturn util.StatusError\n\t}\n\n\tif len(operations) == 0 {\n\t\treturn util.StatusAllOK\n\t}\n\n\t// TODO(marun) raise the visibility of operationErrors to aid in debugging\n\tversionMap, operationErrors := s.updater.Update(operations)\n\n\terr = s.versionManager.Update(template, override, selectedClusters, versionMap)\n\tif err != nil {\n\t\truntime.HandleError(fmt.Errorf(\"Failed to update version status for %s %q: %v\", templateKind, key, err))\n\t\treturn util.StatusError\n\t}\n\n\tif len(operationErrors) > 0 {\n\t\truntime.HandleError(fmt.Errorf(\"Failed to execute updates for %s %q: %v\", templateKind,\n\t\t\tkey, operationErrors))\n\t\treturn util.StatusError\n\t}\n\n\treturn util.StatusAllOK\n}", "func (th *transitionHandler) PostRefreshCluster(reason string) stateswitch.PostTransition {\n\tret := func(sw stateswitch.StateSwitch, args stateswitch.TransitionArgs) error {\n\t\tsCluster, ok := sw.(*stateCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster incompatible type of StateSwitch\")\n\t\t}\n\t\tparams, ok 
:= args.(*TransitionArgsRefreshCluster)\n\t\tif !ok {\n\t\t\treturn errors.New(\"PostRefreshCluster invalid argument\")\n\t\t}\n\n\t\tvar (\n\t\t\terr error\n\t\t\tupdatedCluster *common.Cluster\n\t\t)\n\t\t//update cluster record if the state or the reason has changed\n\t\tif sCluster.srcState != swag.StringValue(sCluster.cluster.Status) || reason != swag.StringValue(sCluster.cluster.StatusInfo) {\n\t\t\tvar extra []interface{}\n\t\t\tvar log = logutil.FromContext(params.ctx, th.log)\n\t\t\textra, err = addExtraParams(log, sCluster.cluster, sCluster.srcState)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tupdatedCluster, err = updateClusterStatus(params.ctx, log, params.db, th.stream, *sCluster.cluster.ID, sCluster.srcState, *sCluster.cluster.Status,\n\t\t\t\treason, params.eventHandler, extra...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t//update hosts status to models.HostStatusResettingPendingUserAction if needed\n\t\tcluster := sCluster.cluster\n\t\tif updatedCluster != nil {\n\t\t\tcluster = updatedCluster\n\t\t\tparams.updatedCluster = updatedCluster\n\t\t}\n\t\tsetPendingUserResetIfNeeded(params.ctx, logutil.FromContext(params.ctx, th.log), params.db, params.hostApi, cluster)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//report cluster install duration metrics in case of an installation halt. 
Cancel and Installed cases are\n\t\t//treated separately in CancelInstallation and CompleteInstallation respectively\n\t\tif sCluster.srcState != swag.StringValue(sCluster.cluster.Status) &&\n\t\t\tsCluster.srcState != models.ClusterStatusInstallingPendingUserAction &&\n\t\t\tfunk.ContainsString([]string{models.ClusterStatusError, models.ClusterStatusInstallingPendingUserAction}, swag.StringValue(sCluster.cluster.Status)) {\n\n\t\t\tparams.metricApi.ClusterInstallationFinished(params.ctx, *sCluster.cluster.Status, sCluster.srcState,\n\t\t\t\tsCluster.cluster.OpenshiftVersion, *sCluster.cluster.ID, sCluster.cluster.EmailDomain,\n\t\t\t\tsCluster.cluster.InstallStartedAt)\n\t\t}\n\t\treturn nil\n\t}\n\treturn ret\n}", "func (adm Admin) AddCluster(cluster string, recreateIfExists bool) bool {\n\tkb := &KeyBuilder{cluster}\n\t// c = \"/<cluster>\"\n\tc := kb.cluster()\n\n\t// check if cluster already exists\n\texists, _, err := adm.zkClient.Exists(c)\n\tif err != nil || (exists && !recreateIfExists) {\n\t\treturn false\n\t}\n\n\tif recreateIfExists {\n\t\tif err := adm.zkClient.DeleteTree(c); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tadm.zkClient.CreateEmptyNode(c)\n\n\t// PROPERTYSTORE is an empty node\n\tpropertyStore := fmt.Sprintf(\"/%s/PROPERTYSTORE\", cluster)\n\tadm.zkClient.CreateEmptyNode(propertyStore)\n\n\t// STATEMODELDEFS has 6 children\n\tstateModelDefs := fmt.Sprintf(\"/%s/STATEMODELDEFS\", cluster)\n\tadm.zkClient.CreateEmptyNode(stateModelDefs)\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/LeaderStandby\", []byte(_helixDefaultNodes[\"LeaderStandby\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/MasterSlave\", []byte(_helixDefaultNodes[\"MasterSlave\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/OnlineOffline\", 
[]byte(_helixDefaultNodes[StateModelNameOnlineOffline]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/STORAGE_DEFAULT_SM_SCHEMATA\",\n\t\t[]byte(_helixDefaultNodes[\"STORAGE_DEFAULT_SM_SCHEMATA\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/SchedulerTaskQueue\", []byte(_helixDefaultNodes[\"SchedulerTaskQueue\"]))\n\tadm.zkClient.CreateDataWithPath(\n\t\tstateModelDefs+\"/Task\", []byte(_helixDefaultNodes[\"Task\"]))\n\n\t// INSTANCES is initailly an empty node\n\tinstances := fmt.Sprintf(\"/%s/INSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(instances)\n\n\t// CONFIGS has 3 children: CLUSTER, RESOURCE, PARTICIPANT\n\tconfigs := fmt.Sprintf(\"/%s/CONFIGS\", cluster)\n\tadm.zkClient.CreateEmptyNode(configs)\n\tadm.zkClient.CreateEmptyNode(configs + \"/PARTICIPANT\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/RESOURCE\")\n\tadm.zkClient.CreateEmptyNode(configs + \"/CLUSTER\")\n\n\tclusterNode := model.NewMsg(cluster)\n\taccessor := newDataAccessor(adm.zkClient, kb)\n\taccessor.createMsg(configs+\"/CLUSTER/\"+cluster, clusterNode)\n\n\t// empty ideal states\n\tidealStates := fmt.Sprintf(\"/%s/IDEALSTATES\", cluster)\n\tadm.zkClient.CreateEmptyNode(idealStates)\n\n\t// empty external view\n\texternalView := fmt.Sprintf(\"/%s/EXTERNALVIEW\", cluster)\n\tadm.zkClient.CreateEmptyNode(externalView)\n\n\t// empty live instances\n\tliveInstances := fmt.Sprintf(\"/%s/LIVEINSTANCES\", cluster)\n\tadm.zkClient.CreateEmptyNode(liveInstances)\n\n\t// CONTROLLER has four childrens: [ERRORS, HISTORY, MESSAGES, STATUSUPDATES]\n\tcontroller := fmt.Sprintf(\"/%s/CONTROLLER\", cluster)\n\tadm.zkClient.CreateEmptyNode(controller)\n\tadm.zkClient.CreateEmptyNode(controller + \"/ERRORS\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/HISTORY\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/MESSAGES\")\n\tadm.zkClient.CreateEmptyNode(controller + \"/STATUSUPDATES\")\n\n\treturn true\n}", "func (c *Controller) onAdd(obj interface{}) {\n\tcluster := 
obj.(*crv1.Pgcluster)\n\tlog.Debugf(\"[pgcluster Controller] ns %s onAdd %s\", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.SelfLink)\n\n\t//handle the case when the operator restarts and don't\n\t//process already processed pgclusters\n\tif cluster.Status.State == crv1.PgclusterStateProcessed {\n\t\tlog.Debug(\"pgcluster \" + cluster.ObjectMeta.Name + \" already processed\")\n\t\treturn\n\t}\n\n\tkey, err := cache.MetaNamespaceKeyFunc(obj)\n\tif err == nil {\n\t\tlog.Debugf(\"cluster putting key in queue %s\", key)\n\t\tc.Queue.Add(key)\n\t}\n\n}", "func (cc *ClusterController) CollectAndSetUserClusterIndex(sessions *userSessionsStruct, clientAddress uint64, protocol *common.Protocol) {\n\tif !cc.useClusters && !cc.collectClusterInfo {\n\t\tsessions.userClusterIndex = DefaultClusterIndex\n\t\treturn\n\t}\n\tprotocolKey := protocol.ProtocolKey\n\tuserInfos := cc.getUserInfo(sessions, clientAddress)\n\n\t// Predict\n\tif !cc.useClusters {\n\t\tsessions.userClusterIndex = DefaultClusterIndex\n\t} else {\n\t\tprotocolKey := protocol.ProtocolKey\n\t\tif _, ok := cc.userModel[protocolKey]; !ok {\n\t\t\tlog.Fatalln(\"No user Model for prediction\")\n\t\t}\n\t\tuserData := clustering.GetDataOfUser(userInfos)\n\t\tclustering.ScaleData(userData, cc.userModel[protocolKey].options)\n\t\tclusterIdx, err := cc.userModel[protocolKey].model.Predict(userData)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error while predicting values\", err)\n\t\t}\n\t\t// Predict always returns a vector. 
First element is the cluster index\n\t\tsessions.userClusterIndex = int(clusterIdx[0])\n\t}\n\n\tif !cc.collectClusterInfo {\n\t\treturn\n\t}\n\n\tcc.usersInfoMutex.Lock()\n\tif _, ok := cc.usersInfo[protocolKey]; !ok {\n\t\tcc.usersInfo[protocolKey] = &UsersInfos{\n\t\t\tProtocol: *protocol,\n\t\t\tUsers: &dataformat.Users{Users: make([]*dataformat.User, 0)},\n\t\t}\n\t}\n\tcc.usersInfo[protocolKey].Users.Users = append(cc.usersInfo[protocolKey].Users.Users, userInfos)\n\tcc.usersInfoMutex.Unlock()\n}", "func (c *Controller) clusterAction(admin submarine.AdminInterface, cluster *rapi.SubmarineCluster, infos *submarine.ClusterInfos) (bool, error) {\n\tglog.Info(\"clusterAction()\")\n\tvar err error\n\t/* run sanity check if needed\n\tneedSanity, err := sanitycheck.RunSanityChecks(admin, &c.config.submarine, c.podControl, cluster, infos, true)\n\tif err != nil {\n\t\tglog.Errorf(\"[clusterAction] cluster %s/%s, an error occurs during sanitycheck: %v \", cluster.Namespace, cluster.Name, err)\n\t\treturn false, err\n\t}\n\tif needSanity {\n\t\tglog.V(3).Infof(\"[clusterAction] run sanitycheck cluster: %s/%s\", cluster.Namespace, cluster.Name)\n\t\treturn sanitycheck.RunSanityChecks(admin, &c.config.submarine, c.podControl, cluster, infos, false)\n\t}*/\n\n\t// Start more pods in needed\n\tif needMorePods(cluster) {\n\t\tif setScalingCondition(&cluster.Status, true) {\n\t\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\tpod, err2 := c.podControl.CreatePod(cluster)\n\t\tif err2 != nil {\n\t\t\tglog.Errorf(\"[clusterAction] unable to create a pod associated to the SubmarineCluster: %s/%s, err: %v\", cluster.Namespace, cluster.Name, err2)\n\t\t\treturn false, err2\n\t\t}\n\n\t\tglog.V(3).Infof(\"[clusterAction]create a Pod %s/%s\", pod.Namespace, pod.Name)\n\t\treturn true, nil\n\t}\n\tif setScalingCondition(&cluster.Status, false) {\n\t\tif cluster, err = c.updateHandler(cluster); err != nil {\n\t\t\treturn 
false, err\n\t\t}\n\t}\n\n\t// Reconfigure the Cluster if needed\n\thasChanged, err := c.applyConfiguration(admin, cluster)\n\tif err != nil {\n\t\tglog.Errorf(\"[clusterAction] cluster %s/%s, an error occurs: %v \", cluster.Namespace, cluster.Name, err)\n\t\treturn false, err\n\t}\n\n\tif hasChanged {\n\t\tglog.V(6).Infof(\"[clusterAction] cluster has changed cluster: %s/%s\", cluster.Namespace, cluster.Name)\n\t\treturn true, nil\n\t}\n\n\tglog.Infof(\"[clusterAction] cluster hasn't changed cluster: %s/%s\", cluster.Namespace, cluster.Name)\n\treturn false, nil\n}", "func (c *KubernetesCollector) Collect(ch chan<- prometheus.Metric) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.timeout)\n\tdefer cancel()\n\tclusters, _, err := c.client.Kubernetes.List(ctx, nil)\n\tif err != nil {\n\t\tc.errors.WithLabelValues(\"kubernetes\").Add(1)\n\t\tlevel.Warn(c.logger).Log(\n\t\t\t\"msg\", \"can't list clusters\",\n\t\t\t\"err\", err,\n\t\t)\n\t}\n\n\tfor _, cluster := range clusters {\n\t\tlabels := []string{\n\t\t\tcluster.ID,\n\t\t\tcluster.Name,\n\t\t\tcluster.RegionSlug,\n\t\t\tcluster.VersionSlug,\n\t\t}\n\n\t\tvar active float64\n\t\t//TODO(dazwilkin) better reflect richer Kubernetes cluster states\n\t\tif cluster.Status.State == godo.KubernetesClusterStatusRunning {\n\t\t\tactive = 1.0\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.Up,\n\t\t\tprometheus.GaugeValue,\n\t\t\tactive,\n\t\t\tlabels...,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.NodePools,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(len(cluster.NodePools)),\n\t\t\tlabels...,\n\t\t)\n\n\t\tfor _, nodepool := range cluster.NodePools {\n\t\t\t// Assume NodePools are constrained to the cluster's Region\n\t\t\t// If so, we can labels a cluster's NodePools by the cluster's region\n\t\t\tlabels := []string{\n\t\t\t\tnodepool.ID,\n\t\t\t\tnodepool.Name,\n\t\t\t\tcluster.RegionSlug,\n\t\t\t}\n\t\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\t\tc.Nodes,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tfloat64(nodepool.Count),\n\t\t\t\tlabels...,\n\t\t\t)\n\t\t}\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileRethinkDBCluster{client: mgr.GetClient(), config: mgr.GetConfig(), scheme: mgr.GetScheme()}\n}", "func (cc *ClusterController) CollectAndSetSessionClusterIndex(session *session, clientAddress uint64, protocol *common.Protocol) {\n\tif !cc.useClusters && !cc.collectClusterInfo {\n\t\tsession.sessionClusterIndex = DefaultClusterIndex\n\t\treturn\n\t}\n\tprotocolKey := protocol.ProtocolKey\n\tsessionInfos := cc.getSessionInfo(session, clientAddress)\n\n\t// Predict\n\tif !cc.useClusters {\n\t\tsession.sessionClusterIndex = DefaultClusterIndex\n\t} else {\n\t\tif _, ok := cc.sessionModel[protocolKey]; !ok {\n\t\t\tlog.Fatalln(\"No Session Model for prediction\")\n\t\t}\n\t\tsessionData := clustering.GetDataOfSession(sessionInfos)\n\t\tclustering.ScaleData(sessionData, cc.sessionModel[protocolKey].options)\n\t\tclusterIdx, err := cc.sessionModel[protocolKey].model.Predict(sessionData)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error while predicting values\", err)\n\t\t}\n\t\t// Predict always returns a vector. 
First element is the cluster index\n\t\tsession.sessionClusterIndex = int(clusterIdx[0])\n\t}\n\n\tif !cc.collectClusterInfo {\n\t\treturn\n\t}\n\t// Collect Session Info\n\tcc.sessionsInfoMutex.Lock()\n\tif _, ok := cc.sessionsInfo[protocolKey]; !ok {\n\t\tcc.sessionsInfo[protocolKey] = &SessionsInfos{\n\t\t\tProtocol: *protocol,\n\t\t\tSessions: &dataformat.Sessions{Sessions: make([]*dataformat.Session, 0)},\n\t\t}\n\t}\n\tcc.sessionsInfo[protocolKey].Sessions.Sessions = append(cc.sessionsInfo[protocolKey].Sessions.Sessions, sessionInfos)\n\tcc.sessionsInfoMutex.Unlock()\n}", "func (a ClustersAPI) Edit(editReq httpmodels.EditReq) error {\n\t_, err := a.Client.performQuery(http.MethodPost, \"/clusters/edit\", editReq, nil)\n\treturn err\n}", "func newCluster(ids []int64) []*TRaft {\n\n\tcluster := make(map[int64]string)\n\n\ttrafts := make([]*TRaft, 0)\n\n\tfor _, id := range ids {\n\t\taddr := fmt.Sprintf(\":%d\", basePort+int64(id))\n\t\tcluster[id] = addr\n\t}\n\n\tfor _, id := range ids {\n\t\tsrv := NewTRaft(id, cluster)\n\t\ttrafts = append(trafts, srv)\n\t}\n\n\treturn trafts\n}", "func updateTablespaces(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1.Pgcluster) error {\n\t// to help the Operator function do less work, we will get a list of new\n\t// tablespaces. 
Though these are already present in the CRD, this will isolate\n\t// exactly which PVCs need to be created\n\t//\n\t// To do this, iterate through the the tablespace mount map that is present in\n\t// the new cluster.\n\tnewTablespaces := map[string]crv1.PgStorageSpec{}\n\n\tfor tablespaceName, storageSpec := range newCluster.Spec.TablespaceMounts {\n\t\t// if the tablespace does not exist in the old version of the cluster,\n\t\t// then add it in!\n\t\tif _, ok := oldCluster.Spec.TablespaceMounts[tablespaceName]; !ok {\n\t\t\tlog.Debugf(\"new tablespace found: [%s]\", tablespaceName)\n\n\t\t\tnewTablespaces[tablespaceName] = storageSpec\n\t\t}\n\t}\n\n\t// alright, update the tablespace entries for this cluster!\n\t// if it returns an error, pass the error back up to the caller\n\tif err := clusteroperator.UpdateTablespaces(c.Client, c.Client.Config, newCluster, newTablespaces); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *taskContext) recluster(ctx context.Context) (done bool, err error) {\n\tctx, s := trace.StartSpan(ctx, \"go.chromium.org/luci/analysis/internal/clustering/reclustering.recluster\")\n\ts.Attribute(\"project\", t.task.Project)\n\ts.Attribute(\"currentChunkID\", t.currentChunkID)\n\tdefer func() { s.End(err) }()\n\n\treadOpts := state.ReadNextOptions{\n\t\tStartChunkID: t.currentChunkID,\n\t\tEndChunkID: t.task.EndChunkId,\n\t\tAlgorithmsVersion: t.task.AlgorithmsVersion,\n\t\tConfigVersion: t.task.ConfigVersion.AsTime(),\n\t\tRulesVersion: t.task.RulesVersion.AsTime(),\n\t}\n\tentries, err := state.ReadNextN(span.Single(ctx), t.task.Project, readOpts, batchSize)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"read next chunk state\").Err()\n\t}\n\tif len(entries) == 0 {\n\t\t// We have finished re-clustering.\n\t\terr = t.updateProgress(ctx, shards.MaxProgress)\n\t\tif err != nil {\n\t\t\treturn true, err\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tpendingUpdates := NewPendingUpdates(ctx)\n\n\tfor i, entry := range 
entries {\n\t\t// Read the test results from GCS.\n\t\tchunk, err := t.worker.chunkStore.Get(ctx, t.task.Project, entry.ObjectID)\n\t\tif err != nil {\n\t\t\treturn false, errors.Annotate(err, \"read chunk\").Err()\n\t\t}\n\n\t\t// Obtain a recent ruleset of at least RulesVersion.\n\t\truleset, err := Ruleset(ctx, t.task.Project, t.task.RulesVersion.AsTime())\n\t\tif err != nil {\n\t\t\treturn false, errors.Annotate(err, \"obtain ruleset\").Err()\n\t\t}\n\n\t\t// Obtain a recent configuration of at least ConfigVersion.\n\t\tcfg, err := compiledcfg.Project(ctx, t.task.Project, t.task.ConfigVersion.AsTime())\n\t\tif err != nil {\n\t\t\treturn false, errors.Annotate(err, \"obtain config\").Err()\n\t\t}\n\n\t\t// Re-cluster the test results in spanner, then export\n\t\t// the re-clustering to BigQuery for analysis.\n\t\tupdate, err := PrepareUpdate(ctx, ruleset, cfg, chunk, entry)\n\t\tif err != nil {\n\t\t\treturn false, errors.Annotate(err, \"re-cluster chunk\").Err()\n\t\t}\n\n\t\tpendingUpdates.Add(update)\n\n\t\tif pendingUpdates.ShouldApply(ctx) || (i == len(entries)-1) {\n\t\t\tif err := pendingUpdates.Apply(ctx, t.worker.analysis); err != nil {\n\t\t\t\tif err == UpdateRaceErr {\n\t\t\t\t\t// Our update raced with another update.\n\t\t\t\t\t// This is retriable if we re-read the chunk again.\n\t\t\t\t\terr = transient.Tag.Apply(err)\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tpendingUpdates = NewPendingUpdates(ctx)\n\n\t\t\t// Advance our position only on successful commit.\n\t\t\tt.currentChunkID = entry.ChunkID\n\n\t\t\tif err := t.calculateAndReportProgress(ctx); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// More to do.\n\treturn false, nil\n}", "func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {\n\t_ = r.Log.WithValues(\"cluster\", req.NamespacedName)\n\t// Fetch the Cluster instance.\n\tcluster, err := r.resourceFetcher.FetchCluster(ctx, req.NamespacedName)\n\tif err 
!= nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\t// Initialize the patch helper\n\tpatchHelper, err := patch.NewHelper(cluster, r.Client)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tdefer func() {\n\t\t// Always attempt to patch the object and status after each reconciliation.\n\t\tif err := patchHelper.Patch(ctx, cluster); err != nil {\n\t\t\treterr = kerrors.NewAggregate([]error{reterr, err})\n\t\t}\n\t}()\n\n\t// Ignore deleted Clusters, this can happen when foregroundDeletion\n\t// is enabled\n\tif !cluster.DeletionTimestamp.IsZero() {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// If the external object is paused, return without any further processing.\n\tif cluster.IsReconcilePaused() {\n\t\tr.Log.Info(\"eksa reconciliation is paused\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// dry run\n\tresult, err := r.reconcile(ctx, req.NamespacedName, true)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Dry run failed to reconcile Cluster\")\n\t\treturn result, err\n\t}\n\t// non dry run\n\tresult, err = r.reconcile(ctx, req.NamespacedName, false)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Failed to reconcile Cluster\")\n\t}\n\treturn result, err\n}", "func RenderCluster(out io.Writer, cluster sind.ClusterStatus) {\n\twr := tabwriter.NewWriter(out, 4, 8, 2, '\\t', 0)\n\tdefer wr.Flush()\n\n\tfmt.Fprintf(\n\t\twr,\n\t\t\"Name: %s\\tStatus: %s\\tManagers: %s\\t Workers: %s\\t\\n\",\n\t\tstyle.Important(cluster.Name),\n\t\tstyle.Important(status(cluster)),\n\t\tstyle.Important(fmt.Sprintf(\"%d/%d\", cluster.ManagersRunning, cluster.Managers)),\n\t\tstyle.Important(fmt.Sprintf(\"%d/%d\", cluster.WorkersRunning, cluster.Workers)),\n\t)\n\n\tfmt.Fprintf(wr, \"ID\\tImage\\tRole\\tStatus\\tIPs\\t\\n\")\n\tfmt.Fprintf(wr, \"--\\t-----\\t----\\t------\\t---\\t\\n\")\n\n\tfor _, node := range cluster.Nodes 
{\n\t\tfmt.Fprintf(\n\t\t\twr,\n\t\t\t\"%s\\t%s\\t%s\\t%s\\t%s\\t\\n\",\n\t\t\tnode.ID[0:11],\n\t\t\tnode.Image,\n\t\t\tclusterRole(node),\n\t\t\tnode.Status,\n\t\t\tnodeIP(node),\n\t\t)\n\t}\n}", "func ExampleClustersClient_BeginUpdate() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpoller, err := clientFactory.NewClustersClient().BeginUpdate(ctx, \"resRg\", \"myCluster\", armservicefabric.ClusterUpdateParameters{\n\t\tProperties: &armservicefabric.ClusterPropertiesUpdateParameters{\n\t\t\tEventStoreServiceEnabled: to.Ptr(true),\n\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: to.Ptr(\"testnt1\"),\n\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](1000),\n\t\t\t\t\t},\n\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t\t\t\t\tEphemeralPorts: 
&armservicefabric.EndpointRangeDescription{\n\t\t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t\t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t\t\t\t\tIsPrimary: to.Ptr(false),\n\t\t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t\t\t\t}},\n\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t }()),\n\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t }()),\n\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadence(\"Wave\")),\n\t\t},\n\t\tTags: map[string]*string{\n\t\t\t\"a\": to.Ptr(\"b\"),\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\tres, err := poller.PollUntilDone(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to pull the result: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240744\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t\t\"a\": to.Ptr(\"b\"),\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t{\n\t// \t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t}},\n\t// \t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t}},\n\t// \t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t},\n\t// \t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t},\n\t// \t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t},\n\t// \t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\tBlobEndpoint: 
to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t},\n\t// \t\tEventStoreServiceEnabled: to.Ptr(true),\n\t// \t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t}},\n\t// \t\t}},\n\t// \t\tManagementEndpoint: to.Ptr(\"http://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"testnt1\"),\n\t// \t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](2000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](1000),\n\t// \t\t\t\t},\n\t// \t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tDurabilityLevel: 
to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\tEndPort: to.Ptr[int32](4000),\n\t// \t\t\t\t\tStartPort: to.Ptr[int32](3000),\n\t// \t\t\t\t},\n\t// \t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](0),\n\t// \t\t\t\tIsPrimary: to.Ptr(false),\n\t// \t\t\t\tVMInstanceCount: to.Ptr[int32](3),\n\t// \t\t}},\n\t// \t\tNotifications: []*armservicefabric.Notification{\n\t// \t\t\t{\n\t// \t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelCritical),\n\t// \t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tIsEnabled: to.Ptr(true),\n\t// \t\t\t\t\t\tNotificationCategory: to.Ptr(armservicefabric.NotificationCategoryWaveProgress),\n\t// \t\t\t\t\t\tNotificationLevel: to.Ptr(armservicefabric.NotificationLevelAll),\n\t// \t\t\t\t\t\tNotificationTargets: []*armservicefabric.NotificationTarget{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailUser),\n\t// \t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\"),\n\t// \t\t\t\t\t\t\t\t\tto.Ptr(\"****@microsoft.com\")},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t{\n\t// 
\t\t\t\t\t\t\t\t\tNotificationChannel: to.Ptr(armservicefabric.NotificationChannelEmailSubscription),\n\t// \t\t\t\t\t\t\t\t\tReceivers: []*string{\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"Owner\"),\n\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(\"AccountAdmin\")},\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelBronze),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeAutomatic),\n\t// \t\t\t\t\t\tUpgradePauseEndTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-25T22:00:00Z\"); return t}()),\n\t// \t\t\t\t\t\tUpgradePauseStartTimestampUTC: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2021-06-21T22:00:00Z\"); return t}()),\n\t// 
\t\t\t\t\t\tUpgradeWave: to.Ptr(armservicefabric.ClusterUpgradeCadenceWave2),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t}\n}", "func RotateCluster(cluster *model.Cluster, logger *logrus.Entry, rotatorMetadata *RotatorMetadata) (*RotatorMetadata, error) {\n\tclientset, err := getk8sClientset(cluster)\n\tif err != nil {\n\t\treturn rotatorMetadata, err\n\t}\n\n\tif rotatorMetadata.MasterGroups == nil && rotatorMetadata.WorkerGroups == nil {\n\t\terr = rotatorMetadata.GetSetAutoscalingGroups(cluster)\n\t\tif err != nil {\n\t\t\treturn rotatorMetadata, err\n\t\t}\n\t}\n\n\tfor index, masterASG := range rotatorMetadata.MasterGroups {\n\t\tlogger.Infof(\"The autoscaling group %s has %d instance(s)\", masterASG.Name, masterASG.DesiredCapacity)\n\n\t\terr = MasterNodeRotation(cluster, &masterASG, clientset, logger)\n\t\tif err != nil {\n\t\t\trotatorMetadata.MasterGroups[index] = masterASG\n\t\t\treturn rotatorMetadata, err\n\t\t}\n\n\t\trotatorMetadata.MasterGroups[index] = masterASG\n\n\t\tlogger.Infof(\"Checking that all %d nodes are running...\", masterASG.DesiredCapacity)\n\t\terr = FinalCheck(&masterASG, clientset, logger)\n\t\tif err != nil {\n\t\t\treturn rotatorMetadata, err\n\t\t}\n\n\t\tlogger.Infof(\"ASG %s rotated successfully.\", masterASG.Name)\n\t}\n\n\tfor index, workerASG := range rotatorMetadata.WorkerGroups {\n\t\tlogger.Infof(\"The autoscaling group %s has %d instance(s)\", workerASG.Name, workerASG.DesiredCapacity)\n\n\t\terr = WorkerNodeRotation(cluster, &workerASG, clientset, logger)\n\t\tif err != nil {\n\t\t\trotatorMetadata.WorkerGroups[index] = workerASG\n\t\t\treturn rotatorMetadata, err\n\t\t}\n\n\t\trotatorMetadata.WorkerGroups[index] = workerASG\n\n\t\tlogger.Infof(\"Checking that all %d nodes are running...\", workerASG.DesiredCapacity)\n\t\terr = FinalCheck(&workerASG, clientset, logger)\n\t\tif err != nil {\n\t\t\treturn rotatorMetadata, err\n\t\t}\n\n\t\tlogger.Infof(\"ASG %s rotated successfully.\", workerASG.Name)\n\t}\n\n\tlogger.Info(\"All 
ASGs rotated successfully\")\n\treturn rotatorMetadata, nil\n}", "func (s *StatusReconciler) Reconcile(ctx context.Context, req *ctrl.Request) (ctrl.Result, error) {\n\t// We base our status on the pod facts, so ensure our facts are up to date.\n\tif err := s.PFacts.Collect(ctx, s.Vdb); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Use all subclusters, even ones that are scheduled for removal. We keep\n\t// reporting status on the deleted ones until the statefulsets are gone.\n\tfinder := MakeSubclusterFinder(s.Client, s.Vdb)\n\tsubclusters, err := finder.FindSubclusters(ctx, FindAll)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trefreshStatus := func(vdbChg *vapi.VerticaDB) error {\n\t\tvdbChg.Status.Subclusters = []vapi.SubclusterStatus{}\n\t\tfor i := range subclusters {\n\t\t\tif i == len(vdbChg.Status.Subclusters) {\n\t\t\t\tvdbChg.Status.Subclusters = append(vdbChg.Status.Subclusters, vapi.SubclusterStatus{})\n\t\t\t}\n\t\t\tif err := s.calculateSubclusterStatus(ctx, subclusters[i], &vdbChg.Status.Subclusters[i]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to calculate subcluster status %s %w\", subclusters[i].Name, err)\n\t\t\t}\n\t\t}\n\t\ts.calculateClusterStatus(&vdbChg.Status)\n\t\treturn nil\n\t}\n\n\tif err := status.Update(ctx, s.Client, s.Vdb, refreshStatus); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\treturn ctrl.Result{}, nil\n}", "func MutateCluster(cluster *clusterapis.Cluster) {\n\tmutateClusterTaints(cluster.Spec.Taints)\n\tmigrateZoneToZones(cluster)\n}", "func (c *HandlerComp) intClusterUpdate(params ops.ClusterUpdateParams, ai *auth.Info, oObj *models.Cluster) (*models.Cluster, error) {\n\tctx := params.HTTPRequest.Context()\n\tvar err error\n\tif ai == nil {\n\t\tai, err = c.GetAuthInfo(params.HTTPRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar uP = [centrald.NumActionTypes][]string{\n\t\tcentrald.UpdateRemove: params.Remove,\n\t\tcentrald.UpdateAppend: 
params.Append,\n\t\tcentrald.UpdateSet: params.Set,\n\t}\n\tua, err := c.MakeStdUpdateArgs(emptyCluster, params.ID, params.Version, uP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif params.Payload == nil {\n\t\terr = c.eUpdateInvalidMsg(\"missing payload\")\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"Name\") && params.Payload.Name == \"\" {\n\t\terr := c.eUpdateInvalidMsg(\"non-empty name is required\")\n\t\treturn nil, err\n\t}\n\tif oObj == nil {\n\t\tc.RLock()\n\t\tdefer c.RUnlock()\n\t\tc.ClusterLock()\n\t\tdefer c.ClusterUnlock()\n\t\toObj, err = c.DS.OpsCluster().Fetch(ctx, params.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterUsagePolicy\") {\n\t\tif oObj.State != common.ClusterStateDeployable {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid state\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := c.validateClusterUsagePolicy(params.Payload.ClusterUsagePolicy, common.AccountSecretScopeCluster); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.Version == 0 {\n\t\tua.Version = int32(oObj.Meta.Version)\n\t} else if int32(oObj.Meta.Version) != ua.Version {\n\t\terr = centrald.ErrorIDVerNotFound\n\t\treturn nil, err\n\t}\n\tif err = c.app.AuditLog.Ready(); err != nil {\n\t\treturn nil, err\n\t}\n\tif ua.IsModified(\"ClusterVersion\") || ua.IsModified(\"Service\") || ua.IsModified(\"ClusterAttributes\") || ua.IsModified(\"ClusterIdentifier\") || ua.IsModified(\"State\") || ua.IsModified(\"Messages\") {\n\t\tif err = ai.InternalOK(); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err = ai.CapOK(centrald.CSPDomainManagementCap, models.ObjIDMutable(oObj.AccountID)); err != nil {\n\t\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", true, \"Update unauthorized\")\n\t\t\treturn 
nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"ClusterIdentifier\") {\n\t\tif !ua.IsModified(\"State\") {\n\t\t\terr := c.eMissingMsg(\"state must be set with clusterIdentifier\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning to DEPLOYABLE state ClusterIdentifier must be reset, e.g. set to empty string\n\t\tif params.Payload.State == common.ClusterStateDeployable && params.Payload.ClusterIdentifier != \"\" {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be cleared when transitioning to %s\", common.ClusterStateDeployable)\n\t\t\treturn nil, err\n\t\t}\n\t\t// ClusterIdentifier may be modified (set to non-empty value) only when changing state from DEPLOYABLE to MANAGED\n\t\tif !(oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged) {\n\t\t\terr := c.eInvalidState(\"invalid state transition (%s ⇒ %s)\", oObj.State, params.Payload.State)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ua.IsModified(\"State\") {\n\t\tif !c.validateClusterState(params.Payload.State) {\n\t\t\terr := c.eUpdateInvalidMsg(\"invalid cluster state\")\n\t\t\treturn nil, err\n\t\t}\n\t\t// when transitioning from DEPLOYABLE state to MANAGED ClusterIdentifier is required\n\t\tif oObj.State == common.ClusterStateDeployable && params.Payload.State == common.ClusterStateManaged && (!ua.IsModified(\"ClusterIdentifier\") || params.Payload.ClusterIdentifier == \"\") {\n\t\t\terr := c.eMissingMsg(\"clusterIdentifier must be set when transitioning to %s\", common.ClusterStateManaged)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdom, err := c.ops.intCspDomainFetch(ctx, ai, string(oObj.CspDomainID))\n\tif err != nil {\n\t\tc.Log.Errorf(\"Cluster[%s]: error looking up CSPDomain[%s]: %s\", oObj.Meta.ID, oObj.CspDomainID, err.Error())\n\t\treturn nil, err\n\t}\n\tdetail := \"\"\n\tif a := ua.FindUpdateAttr(\"AuthorizedAccounts\"); a != nil && a.IsModified() {\n\t\tdetail, err = c.authAccountValidator.validateAuthorizedAccountsUpdate(ctx, ai, 
centrald.ClusterUpdateAction, params.ID, models.ObjName(oObj.Name), a, oObj.AuthorizedAccounts, params.Payload.AuthorizedAccounts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// TBD: validate clusterAttributes by clusterType\n\tobj, err := c.DS.OpsCluster().Update(ctx, ua, params.Payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.clusterApplyInheritedProperties(ctx, ai, obj, dom) // no error possible\n\tif len(detail) > 0 {\n\t\tc.app.AuditLog.Post(ctx, ai, centrald.ClusterUpdateAction, models.ObjID(params.ID), models.ObjName(oObj.Name), \"\", false, fmt.Sprintf(\"Updated authorizedAccounts %s\", detail))\n\t}\n\tc.setDefaultObjectScope(params.HTTPRequest, obj)\n\treturn obj, nil\n}", "func (f *Frontend) clusterStartHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\treq := regression.NewRegressionDetectionRequest()\n\tif err := json.NewDecoder(r.Body).Decode(req); err != nil {\n\t\thttputils.ReportError(w, err, \"Could not decode POST body.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tauditlog.LogWithUser(r, f.loginProvider.LoggedInAs(r).String(), \"cluster\", req)\n\n\tcb := func(_ *regression.RegressionDetectionRequest, clusterResponse []*regression.RegressionDetectionResponse, _ string) {\n\t\t// We don't do GroupBy clustering, so there will only be one clusterResponse.\n\t\treq.Progress.Results(clusterResponse[0])\n\t}\n\tf.progressTracker.Add(req.Progress)\n\n\tgo func() {\n\t\t// This intentionally does not use r.Context() because we want it to outlive this request.\n\t\terr := regression.ProcessRegressions(context.Background(), req, cb, f.perfGit, f.shortcutStore, f.dfBuilder, f.paramsetRefresher.Get(), regression.ExpandBaseAlertByGroupBy, regression.ReturnOnError, config.Config.AnomalyConfig)\n\t\tif err != nil {\n\t\t\tsklog.Errorf(\"ProcessRegressions returned: %s\", err)\n\t\t\treq.Progress.Error(\"Failed to load data.\")\n\t\t} else 
{\n\t\t\treq.Progress.Finished()\n\t\t}\n\t}()\n\n\tif err := req.Progress.JSON(w); err != nil {\n\t\tsklog.Errorf(\"Failed to encode paramset: %s\", err)\n\t}\n}", "func (r *Registry) NodesView() []*View {\n\tr.mu.RLock()\n\tdefer r.mu.RUnlock()\n\n\tviews := make([]*View, 0, len(r.nodesView))\n\n\tfor _, v := range r.nodesView {\n\t\tviews = append(views, v)\n\t}\n\n\treturn views\n}", "func (e *StatsExporter) ExportView(vd *view.Data) {\n\tfor _, row := range vd.Rows {\n\t\trd := &RowData{\n\t\t\tView: vd.View,\n\t\t\tStart: vd.Start,\n\t\t\tEnd: vd.End,\n\t\t\tRow: row,\n\t\t}\n\t\te.exportRowData(rd)\n\t}\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileVirtualcluster{\n\t\tClient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t}\n}", "func (c *controller) Reconcile(request reconciler.Request) (reconciler.Result, error) {\n\tklog.V(4).Infof(\"reconcile node %s for cluster %s\", request.Name, request.ClusterName)\n\tvExists := true\n\tvNodeObj, err := c.MultiClusterController.Get(request.ClusterName, request.Namespace, request.Name)\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn reconciler.Result{Requeue: true}, err\n\t\t}\n\t\tvExists = false\n\t}\n\n\tif vExists {\n\t\tvNode := vNodeObj.(*v1.Node)\n\t\tif vNode.Labels[constants.LabelVirtualNode] != \"true\" {\n\t\t\t// We only handle virtual nodes created by syncer\n\t\t\treturn reconciler.Result{}, nil\n\t\t}\n\t\tc.Lock()\n\t\tif _, exist := c.nodeNameToCluster[request.Name]; !exist {\n\t\t\tc.nodeNameToCluster[request.Name] = make(map[string]struct{})\n\t\t}\n\t\tc.nodeNameToCluster[request.Name][request.ClusterName] = struct{}{}\n\t\tc.Unlock()\n\t} else {\n\t\tc.Lock()\n\t\tif _, exists := c.nodeNameToCluster[request.Name]; exists {\n\t\t\tdelete(c.nodeNameToCluster[request.Name], request.ClusterName)\n\t\t}\n\t\tc.Unlock()\n\n\t}\n\treturn reconciler.Result{}, nil\n}", "func (d *HetznerCloudProvider) Refresh() error {\n\tfor _, group := 
range d.manager.nodeGroups {\n\t\tgroup.resetTargetSize(0)\n\t}\n\treturn nil\n}", "func (ck *clusterKinds) update() {\n\toutput, err := exec.Command(\"kubectl\", \"api-resources\").Output()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tlines := strings.Split(string(output), \"\\n\")\n\tfor _, line := range lines[1:] {\n\t\tif len(line) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\ts := strings.Fields(line)\n\t\tkind := s[len(s)-1]\n\t\tnamespaced := s[len(s)-2] == \"true\"\n\n\t\tck.isNamespaced[kind] = namespaced\n\t}\n}", "func (g *Generator) Generate() (*cke.Cluster, error) {\n\tg.clearIntermediateData()\n\n\top := &updateOp{\n\t\tname: \"new\",\n\t}\n\top.record(\"generate new cluster\")\n\n\tg.nextUnused = g.getUnusedMachines(nil)\n\n\treturn g.fill(op)\n}", "func (_m *StateOps) RefreshClusterState() {\n\t_m.Called()\n}", "func (db *merkleDB) rebuild(ctx context.Context) error {\n\tdb.root = newNode(nil, RootPath)\n\tif err := db.nodeDB.Delete(rootKey); err != nil {\n\t\treturn err\n\t}\n\tit := db.nodeDB.NewIterator()\n\tdefer it.Release()\n\n\tviewSizeLimit := math.Max(\n\t\tdb.nodeCache.maxSize/rebuildViewSizeFractionOfCacheSize,\n\t\tminRebuildViewSizePerCommit,\n\t)\n\tcurrentOps := make([]database.BatchOp, 0, viewSizeLimit)\n\n\tfor it.Next() {\n\t\tif len(currentOps) >= viewSizeLimit {\n\t\t\tview, err := db.newUntrackedView(currentOps)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := view.commitToDB(ctx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurrentOps = make([]database.BatchOp, 0, viewSizeLimit)\n\t\t}\n\n\t\tkey := it.Key()\n\t\tpath := path(key)\n\t\tvalue := it.Value()\n\t\tn, err := parseNode(path, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcurrentOps = append(currentOps, database.BatchOp{\n\t\t\tKey: path.Serialize().Value,\n\t\t\tValue: n.value.Value(),\n\t\t\tDelete: !n.hasValue(),\n\t\t})\n\t}\n\tif err := it.Error(); err != nil {\n\t\treturn err\n\t}\n\tview, err := 
db.newUntrackedView(currentOps)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := view.commitToDB(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn db.nodeDB.Compact(nil, nil)\n}", "func (coc *CoClustering) clusterMean(dst []float64, clusters []int, ratings []*base.SparseVector) {\n\tbase.FillZeroVector(dst)\n\tcount := make([]float64, len(dst))\n\tfor id, cluster := range clusters {\n\t\tratings[id].ForEach(func(_, index int, value float64) {\n\t\t\tdst[cluster] += value\n\t\t\tcount[cluster]++\n\t\t})\n\t}\n\tfor i := range dst {\n\t\tif count[i] > 0 {\n\t\t\tdst[i] /= count[i]\n\t\t} else {\n\t\t\tdst[i] = coc.GlobalMean\n\t\t}\n\t}\n}", "func (mcr *MiddlewareClusterRepo) GetAll() ([]metadata.MiddlewareCluster, error) {\n\tsql := `\n\t\tselect id, cluster_name, owner_id, env_id, del_flag, create_time, last_update_time\n\t\tfrom t_meta_middleware_cluster_info\n\t\twhere del_flag = 0\n\t\torder by id;\n\t`\n\tlog.Debugf(\"metadata MiddlewareClusterRepo.GetAll() sql: \\n%s\", sql)\n\n\tresult, err := mcr.Execute(sql)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []*MiddlewareClusterInfo\n\tmiddlewareClusterInfoList := make([]*MiddlewareClusterInfo, result.RowNumber())\n\tfor i := range middlewareClusterInfoList {\n\t\tmiddlewareClusterInfoList[i] = NewEmptyMiddlewareClusterInfoWithGlobal()\n\t}\n\t// map to struct\n\terr = result.MapToStructSlice(middlewareClusterInfoList, constant.DefaultMiddlewareTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init []dependency.Entity\n\tentityList := make([]metadata.MiddlewareCluster, result.RowNumber())\n\tfor i := range entityList {\n\t\tentityList[i] = middlewareClusterInfoList[i]\n\t}\n\n\treturn entityList, nil\n}", "func (coc *CoClustering) clusterMean(dst []float64, clusters []int, ratings []*base.MarginalSubSet) {\n\tbase.FillZeroVector(dst)\n\tcount := make([]float64, len(dst))\n\tfor index, cluster := range clusters {\n\t\tratings[index].ForEachIndex(func(_, index int, value float64) 
{\n\t\t\tdst[cluster] += value\n\t\t\tcount[cluster]++\n\t\t})\n\t}\n\tfor i := range dst {\n\t\tif count[i] > 0 {\n\t\t\tdst[i] /= count[i]\n\t\t} else {\n\t\t\tdst[i] = coc.GlobalMean\n\t\t}\n\t}\n}", "func (c *AKSCluster) UpdateCluster(request *bTypes.UpdateClusterRequest) error {\n\tlog := logger.WithFields(logrus.Fields{\"action\": constants.TagUpdateCluster})\n\tclient, err := c.GetAKSClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient.With(log.Logger)\n\n\t// send separate requests because Azure not supports multiple nodepool modification\n\t// Azure not supports adding and deleting nodepools\n\tvar nodePoolAfterUpdate []*model.AzureNodePoolModel\n\tvar updatedCluster *banzaiAzureTypes.ResponseWithValue\n\tif requestNodes := request.Azure.NodePools; requestNodes != nil {\n\t\tfor name, np := range requestNodes {\n\t\t\tif existNodePool := c.getExistingNodePoolByName(name); np != nil && existNodePool != nil {\n\t\t\t\tlog.Infof(\"NodePool is exists[%s], update...\", name)\n\n\t\t\t\tcount := int32(np.Count)\n\n\t\t\t\t// create request model for aks-client\n\t\t\t\tccr := azureCluster.CreateClusterRequest{\n\t\t\t\t\tName: c.modelCluster.Name,\n\t\t\t\t\tLocation: c.modelCluster.Location,\n\t\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\t\tProfiles: []containerservice.AgentPoolProfile{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: &name,\n\t\t\t\t\t\t\tCount: &count,\n\t\t\t\t\t\t\tVMSize: containerservice.VMSizeTypes(existNodePool.NodeInstanceType),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tnodePoolAfterUpdate = append(nodePoolAfterUpdate, &model.AzureNodePoolModel{\n\t\t\t\t\tID: existNodePool.ID,\n\t\t\t\t\tClusterModelId: existNodePool.ClusterModelId,\n\t\t\t\t\tName: name,\n\t\t\t\t\tCount: np.Count,\n\t\t\t\t\tNodeInstanceType: existNodePool.NodeInstanceType,\n\t\t\t\t})\n\n\t\t\t\tupdatedCluster, err = c.updateWithPolling(client, &ccr)\n\t\t\t\tif err != 
nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"There's no nodepool with this name[%s]\", name)\n\t\t\t}\n\t\t}\n\t}\n\n\tif updatedCluster != nil {\n\t\tupdateCluster := &model.ClusterModel{\n\t\t\tModel: c.modelCluster.Model,\n\t\t\tName: c.modelCluster.Name,\n\t\t\tLocation: c.modelCluster.Location,\n\t\t\tNodeInstanceType: c.modelCluster.NodeInstanceType,\n\t\t\tCloud: c.modelCluster.Cloud,\n\t\t\tOrganizationId: c.modelCluster.OrganizationId,\n\t\t\tSecretId: c.modelCluster.SecretId,\n\t\t\tStatus: c.modelCluster.Status,\n\t\t\tAzure: model.AzureClusterModel{\n\t\t\t\tResourceGroup: c.modelCluster.Azure.ResourceGroup,\n\t\t\t\tKubernetesVersion: c.modelCluster.Azure.KubernetesVersion,\n\t\t\t\tNodePools: nodePoolAfterUpdate,\n\t\t\t},\n\t\t}\n\t\tc.modelCluster = updateCluster\n\t\tc.azureCluster = &updatedCluster.Value\n\t}\n\n\treturn nil\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\treturn &ReconcileClusterSync{client: mgr.GetClient(), scheme: mgr.GetScheme()}\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\tisOpenShift, _, err := util.DetectOpenShift()\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"An error occurred when detecting current infra: %s\", err)\n\t}\n\t// Create a new controller\n\tc, err := controller.New(\"che-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\t// register OpenShift routes in the scheme\n\tif isOpenShift {\n\t\tif err := routev1.AddToScheme(mgr.GetScheme()); err != nil {\n\t\t\tlogrus.Errorf(\"Failed to add OpenShift route to scheme: %s\", err)\n\t\t}\n\t\tif err := oauth.AddToScheme(mgr.GetScheme()); err != nil {\n\t\t\tlogrus.Errorf(\"Failed to add oAuth to scheme: %s\", err)\n\t\t}\n\t}\n\n\t// register RBAC in the scheme\n\tif err := rbac.AddToScheme(mgr.GetScheme()); err != nil {\n\t\tlogrus.Errorf(\"Failed to add RBAC to scheme: %s\", err)\n\t}\n\n\t// Watch for changes to primary resource 
CheCluster\n\terr = c.Watch(&source.Kind{Type: &orgv1.CheCluster{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resources and requeue the owner CheCluster\n\n\tif err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &rbac.Role{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &rbac.RoleBinding{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &corev1.ServiceAccount{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif isOpenShift {\n\t\terr = c.Watch(&source.Kind{Type: &routev1.Route{}}, &handler.EnqueueRequestForOwner{\n\t\t\tIsController: true,\n\t\t\tOwnerType: &orgv1.CheCluster{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = c.Watch(&source.Kind{Type: &v1beta1.Ingress{}}, &handler.EnqueueRequestForOwner{\n\t\t\tIsController: true,\n\t\t\tOwnerType: &orgv1.CheCluster{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: 
true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &orgv1.CheCluster{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (api *clusterAPI) Update(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Updated})\n\treturn nil\n}", "func (c *managedClusterLeaseController) sync(ctx context.Context, syncCtx factory.SyncContext) error {\n\tcluster, err := c.hubClusterLister.Get(c.clusterName)\n\t// unable to get managed cluster, make sure there is no lease update routine.\n\tif err != nil {\n\t\tc.leaseUpdater.stop()\n\t\treturn fmt.Errorf(\"unable to get managed cluster %q from hub: %w\", c.clusterName, err)\n\t}\n\n\t// the managed cluster is not accepted, make sure there is no lease update routine.\n\tif !meta.IsStatusConditionTrue(cluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) {\n\t\tc.leaseUpdater.stop()\n\t\treturn nil\n\t}\n\n\tobservedLeaseDurationSeconds := cluster.Spec.LeaseDurationSeconds\n\t// for backward compatible, release-2.1 has mutating admission webhook to mutate this field,\n\t// but release-2.0 does not have the mutating admission webhook\n\tif observedLeaseDurationSeconds == 0 {\n\t\tobservedLeaseDurationSeconds = 60\n\t}\n\n\t// if lease duration is changed, start a new lease update routine.\n\tif c.lastLeaseDurationSeconds != observedLeaseDurationSeconds {\n\t\tc.lastLeaseDurationSeconds = observedLeaseDurationSeconds\n\t\tc.leaseUpdater.stop()\n\t\tc.leaseUpdater.start(ctx, 
time.Duration(c.lastLeaseDurationSeconds)*time.Second)\n\t}\n\n\treturn nil\n}", "func syncToClusters(clustersAccessor clustersAccessorFunc, operationsAccessor operationsFunc, selector clusterSelectorFunc, execute executionFunc, adapter federatedtypes.FederatedTypeAdapter, informer util.FederatedInformer, obj pkgruntime.Object) reconciliationStatus {\n\tkind := adapter.Kind()\n\tkey := federatedtypes.ObjectKey(adapter, obj)\n\n\tglog.V(3).Infof(\"Syncing %s %q in underlying clusters\", kind, key)\n\n\tclusters, err := clustersAccessor()\n\tif err != nil {\n\t\truntime.HandleError(fmt.Errorf(\"Failed to get cluster list: %v\", err))\n\t\treturn statusNotSynced\n\t}\n\n\tselectedClusters, unselectedClusters, err := selector(adapter.ObjectMeta(obj), clusterselector.SendToCluster, clusters)\n\tif err != nil {\n\t\treturn statusError\n\t}\n\n\tvar schedulingInfo *federatedtypes.SchedulingInfo\n\tif adapter.IsSchedulingAdapter() {\n\t\tschedulingAdapter, ok := adapter.(federatedtypes.SchedulingAdapter)\n\t\tif !ok {\n\t\t\tglog.Fatalf(\"Adapter for kind %q does not properly implement SchedulingAdapter.\", kind)\n\t\t}\n\t\tschedulingInfo, err = schedulingAdapter.GetSchedule(obj, key, selectedClusters, informer)\n\t\tif err != nil {\n\t\t\truntime.HandleError(fmt.Errorf(\"adapter.GetSchedule() failed on adapter for %s %q: %v\", kind, key, err))\n\t\t\treturn statusError\n\t\t}\n\t}\n\n\toperations, err := operationsAccessor(adapter, selectedClusters, unselectedClusters, obj, schedulingInfo)\n\tif err != nil {\n\t\treturn statusError\n\t}\n\n\tif adapter.IsSchedulingAdapter() {\n\t\tschedulingAdapter, ok := adapter.(federatedtypes.SchedulingAdapter)\n\t\tif !ok {\n\t\t\tglog.Fatalf(\"Adapter for kind %q does not properly implement SchedulingAdapter.\", kind)\n\t\t}\n\t\terr = schedulingAdapter.UpdateFederatedStatus(obj, schedulingInfo.Status)\n\t\tif err != nil {\n\t\t\truntime.HandleError(fmt.Errorf(\"adapter.UpdateFinished() failed on adapter for %s %q: %v\", kind, key, 
err))\n\t\t\treturn statusError\n\t\t}\n\t}\n\n\tif len(operations) == 0 {\n\t\treturn statusAllOK\n\t}\n\n\terr = execute(operations)\n\tif err != nil {\n\t\truntime.HandleError(fmt.Errorf(\"Failed to execute updates for %s %q: %v\", kind, key, err))\n\t\treturn statusError\n\t}\n\n\t// Everything is in order but let's be double sure\n\treturn statusNeedsRecheck\n}", "func (gm *gmap) applyEntries(gmp *gmapProgress, apply *apply) {\n\t// Has entry?\n\tif len(apply.entries) == 0 {\n\t\treturn\n\t}\n\t// Is the node leave the cluster tool long, the latest snapshot is better than the entry.\n\tfirsti := apply.entries[0].Index\n\tif firsti > gmp.appliedi+1 {\n\t\tlogger.Panicf(\"first index of committed entry[%d] should <= appliedi[%d] + 1\", firsti, gmp.appliedi)\n\t}\n\t// Extract useful entries.\n\tvar ents []raftpb.Entry\n\tif gmp.appliedi+1-firsti < uint64(len(apply.entries)) {\n\t\tents = apply.entries[gmp.appliedi+1-firsti:]\n\t}\n\t// Iterate all entries\n\tfor _, e := range ents {\n\t\tswitch e.Type {\n\t\t// Normal entry.\n\t\tcase raftpb.EntryNormal:\n\t\t\tif len(e.Data) != 0 {\n\t\t\t\t// Unmarshal request.\n\t\t\t\tvar req InternalRaftRequest\n\t\t\t\tpbutil.MustUnmarshal(&req, e.Data)\n\n\t\t\t\tvar ar applyResult\n\t\t\t\t// Put new value\n\t\t\t\tif put := req.Put; put != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[put.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", put.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get key, value and revision.\n\t\t\t\t\tkey, value, revision := put.Key, set.vtype.unwrap(put.Value), e.Index\n\t\t\t\t\t// Get map and put value into map.\n\t\t\t\t\tm := set.get(put.Map)\n\t\t\t\t\tm.put(key, value, revision)\n\t\t\t\t\t// Send put event to watcher\n\t\t\t\t\tevent := MapEvent{Type: PUT, KV: &KeyValue{Key: key, Value: value}}\n\t\t\t\t\tm.watchers.Range(func(key, value interface{}) bool {\n\t\t\t\t\t\tkey.(*watcher).eventc <- event\n\t\t\t\t\t\treturn true\n\t\t\t\t\t})\n\t\t\t\t\t// 
Set apply result.\n\t\t\t\t\tar.rev = revision\n\t\t\t\t}\n\t\t\t\t// Delete value\n\t\t\t\tif del := req.Delete; del != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[del.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", del.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get map and delete value from map.\n\t\t\t\t\tm := set.get(del.Map)\n\t\t\t\t\tif pre := m.delete(del.Key); nil != pre {\n\t\t\t\t\t\t// Send put event to watcher\n\t\t\t\t\t\tar.pre = *pre\n\t\t\t\t\t\tevent := MapEvent{Type: DELETE, PrevKV: &KeyValue{Key: del.Key, Value: ar.pre.Value}}\n\t\t\t\t\t\tm.watchers.Range(func(key, value interface{}) bool {\n\t\t\t\t\t\t\tkey.(*watcher).eventc <- event\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Update value\n\t\t\t\tif update := req.Update; update != nil {\n\t\t\t\t\t// Get set.\n\t\t\t\t\tset, exist := gm.sets[update.Set]\n\t\t\t\t\tif !exist {\n\t\t\t\t\t\tlogger.Panicf(\"set(%s) is not exist\", update.Set)\n\t\t\t\t\t}\n\t\t\t\t\t// Get map.\n\t\t\t\t\tm := set.get(update.Map)\n\t\t\t\t\t// Update value.\n\t\t\t\t\tpre, ok := m.update(update.Key, update.Value, update.Revision, e.Index)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\t// The revision will be set only if update succeed\n\t\t\t\t\t\tar.rev = e.Index\n\t\t\t\t\t}\n\t\t\t\t\tif nil != pre {\n\t\t\t\t\t\tar.pre = *pre\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Trigger proposal waiter.\n\t\t\t\tgm.wait.Trigger(req.ID, &ar)\n\t\t\t}\n\t\t// The configuration of gmap is fixed and wil not be synchronized through raft.\n\t\tcase raftpb.EntryConfChange:\n\t\tdefault:\n\t\t\tlogger.Panicf(\"entry type should be either EntryNormal or EntryConfChange\")\n\t\t}\n\n\t\tgmp.appliedi, gmp.appliedt = e.Index, e.Term\n\t}\n}", "func (clusterRequest *ClusterLoggingRequest) CreateOrUpdateVisualization() (err error) {\n\tif clusterRequest.Cluster.Spec.Visualization == nil || clusterRequest.Cluster.Spec.Visualization.Type == \"\" {\n\t\treturn 
clusterRequest.removeKibana()\n\t}\n\n\tif err = clusterRequest.createOrUpdateKibanaCR(); err != nil {\n\t\treturn\n\t}\n\n\tif err = clusterRequest.createOrUpdateKibanaSecret(); err != nil {\n\t\treturn\n\t}\n\n\tif err = clusterRequest.UpdateKibanaStatus(); err != nil {\n\t\treturn\n\t}\n\n\treturn nil\n}" ]
[ "0.5783179", "0.56847245", "0.56013393", "0.5461531", "0.5358256", "0.5353074", "0.5215219", "0.52070075", "0.5188348", "0.5138726", "0.50809467", "0.5080141", "0.50436705", "0.5042867", "0.5036154", "0.5024674", "0.50158036", "0.50104976", "0.4998135", "0.49915656", "0.49689186", "0.49588066", "0.4918065", "0.48817286", "0.48656374", "0.48612884", "0.4850814", "0.484378", "0.48299733", "0.48249498", "0.48073533", "0.47935814", "0.476968", "0.4762834", "0.47623077", "0.47605476", "0.47523835", "0.47510952", "0.47289833", "0.472718", "0.4726592", "0.4718621", "0.47130477", "0.46926203", "0.46883798", "0.46876884", "0.4676445", "0.46728674", "0.4658713", "0.4645638", "0.46432334", "0.46387383", "0.46340773", "0.46310776", "0.4616886", "0.461246", "0.4611904", "0.46034813", "0.46002066", "0.46000525", "0.45982635", "0.45945504", "0.4592812", "0.45865938", "0.45671746", "0.45649", "0.45446184", "0.45417", "0.45281202", "0.45272103", "0.45270693", "0.45260254", "0.45230478", "0.45140725", "0.4507071", "0.45034483", "0.44978574", "0.4490978", "0.44901136", "0.44883743", "0.44881216", "0.44872832", "0.44861066", "0.4482821", "0.44784984", "0.4477956", "0.4477726", "0.44775218", "0.44754833", "0.44719726", "0.44657677", "0.4463401", "0.44597787", "0.4458245", "0.44501206", "0.44481173", "0.44475746", "0.44410464", "0.44392264", "0.44388" ]
0.76279587
0
Setup watchers and coordinate their goroutines
func (c *Cluster) watch() error { log.WithField("cluster", c.config.Name).Debug("Adding watches") factory := informers.NewSharedInformerFactory(c.client, 0) stopper := make(chan struct{}) defer close(stopper) podInformer := factory.Core().V1().Pods().Informer() podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Added) }, DeleteFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Deleted) }, UpdateFunc: func(old interface{}, new interface{}) { c.handlePodEvents(new, watch.Modified) }, }) go podInformer.Run(stopper) ingressInformer := factory.Extensions().V1beta1().Ingresses().Informer() ingressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Added) }, DeleteFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Deleted) }, UpdateFunc: func(old interface{}, new interface{}) { c.handleIngressEvent(new, watch.Modified) }, }) go ingressInformer.Run(stopper) LoadBalancerInformer := factory.Core().V1().Services().Informer() LoadBalancerInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { c.handleLoadBalancerEvent(obj, watch.Added) }, DeleteFunc: func(obj interface{}) { c.handleLoadBalancerEvent(obj, watch.Deleted) }, UpdateFunc: func(old interface{}, new interface{}) { c.handleLoadBalancerEvent(new, watch.Modified) }, }) go LoadBalancerInformer.Run(stopper) if c.isFirstConnectionAttempt { c.readinessChannel <- true c.isFirstConnectionAttempt = false } <-c.aggregatorStopChannel log.WithField("cluster", c.config.Name).Debug("Waiting for watches to exit...") log.WithFields(log.Fields{ "cluster": c.config.Name, }).Debug("Stopping event handlers") log.WithField("cluster", c.config.Name).Debug("Event handlers stopped") return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *MemKv) setupWatchers(key string, v *memKvRec) {\n\tfor watchKey, wl := range f.cluster.watchers {\n\t\tfor _, w := range wl {\n\t\t\tif w.recursive {\n\t\t\t\tif strings.HasPrefix(key, watchKey) {\n\t\t\t\t\tv.watchers = append(v.watchers, w)\n\t\t\t\t\tsendEvent(w, key, v, false)\n\t\t\t\t}\n\t\t\t} else if watchKey == key {\n\t\t\t\tv.watchers = append(v.watchers, w)\n\t\t\t\tsendEvent(w, key, v, false)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *AnalyticsController) runWatches() {\n\tlastResourceVersion := big.NewInt(0)\n\tcurrentResourceVersion := big.NewInt(0)\n\twatchListItems := WatchFuncList(c.kclient, c.client)\n\tfor name := range watchListItems {\n\n\t\t// assign local variable (not in range operator above) so that each\n\t\t// goroutine gets the correct watch function required\n\t\twfnc := watchListItems[name]\n\t\tn := name\n\t\tbackoff := 1 * time.Second\n\n\t\tgo wait.Until(func() {\n\t\t\t// any return from this func only exits that invocation of the func.\n\t\t\t// wait.Until will call it again after its sync period.\n\t\t\twatchLog := log.WithFields(log.Fields{\n\t\t\t\t\"watch\": n,\n\t\t\t})\n\t\t\twatchLog.Infof(\"starting watch\")\n\t\t\tw, err := wfnc.watchFunc(metav1.ListOptions{})\n\t\t\tif err != nil {\n\t\t\t\twatchLog.Errorf(\"error creating watch: %v\", err)\n\t\t\t}\n\n\t\t\twatchLog.Debugf(\"backing off watch for %v seconds\", backoff)\n\t\t\ttime.Sleep(backoff)\n\t\t\tbackoff = backoff * 2\n\t\t\tif backoff > 60*time.Second {\n\t\t\t\tbackoff = 60 * time.Second\n\t\t\t}\n\n\t\t\tif w == nil {\n\t\t\t\twatchLog.Errorln(\"watch function nil, watch not created, returning\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase event, ok := <-w.ResultChan():\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\twatchLog.Warnln(\"watch channel closed unexpectedly, attempting to re-establish\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tif event.Type == watch.Error {\n\t\t\t\t\t\twatchLog.Errorf(\"watch channel returned error: %s\", 
spew.Sdump(event))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// success means the watch is working.\n\t\t\t\t\t// reset the backoff back to 1s for this watch\n\t\t\t\t\tbackoff = 1 * time.Second\n\n\t\t\t\t\tif event.Type == watch.Added || event.Type == watch.Deleted {\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"Unable to create object meta for %v: %v\", event.Object, err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tm, err := meta.Accessor(event.Object)\n\t\t\t\t\t\t// if both resource versions can be converted to numbers\n\t\t\t\t\t\t// and if the current resource version is lower than the\n\t\t\t\t\t\t// last recorded resource version for this resource type\n\t\t\t\t\t\t// then skip the event\n\t\t\t\t\t\tc.mutex.RLock()\n\t\t\t\t\t\tif _, ok := lastResourceVersion.SetString(c.watchResourceVersions[n], 10); ok {\n\t\t\t\t\t\t\tif _, ok = currentResourceVersion.SetString(m.GetResourceVersion(), 10); ok {\n\t\t\t\t\t\t\t\tif lastResourceVersion.Cmp(currentResourceVersion) == 1 {\n\t\t\t\t\t\t\t\t\twatchLog.Debugf(\"ResourceVersion %v is to old (%v)\",\n\t\t\t\t\t\t\t\t\t\tcurrentResourceVersion, c.watchResourceVersions[n])\n\t\t\t\t\t\t\t\t\tc.mutex.RUnlock()\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tc.mutex.RUnlock()\n\n\t\t\t\t\t\t// each watch is a separate go routine\n\t\t\t\t\t\tc.mutex.Lock()\n\t\t\t\t\t\tc.watchResourceVersions[n] = m.GetResourceVersion()\n\t\t\t\t\t\tc.mutex.Unlock()\n\n\t\t\t\t\t\tanalytic, err := newEvent(c.typer, event.Object, event.Type)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\twatchLog.Errorf(\"unexpected error creating analytic from watch event %#v\", event.Object)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// additional info will be set to the analytic and\n\t\t\t\t\t\t\t// an instance queued for all destinations\n\t\t\t\t\t\t\terr := c.AddEvent(analytic)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\twatchLog.Errorf(\"error adding event: %v - %v\", err, 
analytic)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}, 1*time.Millisecond, c.stopChannel)\n\t}\n}", "func (a *Async) watcher() {\n\tdefer func() {\n\t\tclose(a.forceQuit)\n\t}()\n\n\tvar buf buffer\n\tfor {\n\t\ttimeout := time.After(time.Second / 2)\n\t\tfor i := 0; i < a.bufSize; i++ {\n\t\t\tselect {\n\t\t\tcase req := <-a.taskChan:\n\t\t\t\ta.flushReq(&buf, req)\n\t\t\tcase <-timeout:\n\t\t\t\tgoto ForEnd\n\t\t\tcase <-a.quit:\n\t\t\t\tgoto ForEnd\n\n\t\t\t}\n\t\t}\n\tForEnd:\n\t\tif len(buf.Tasks()) == 0 {\n\t\t\tbreak\n\t\t}\n\t\ta.flushBuf(&buf)\n\t}\n}", "func main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo watch(ctx, \"[first]\")\n\tgo watch(ctx, \"[second]\")\n\tgo watch(ctx, \"[third]\")\n\n\ttime.Sleep(10 * time.Second)\n\tfmt.Println(\"watching stopped\")\n\tcancel()\n\n\ttime.Sleep(5 * time.Second)\n}", "func (w *processWatcher) watch() {\n\tgo w.watchErrors()\n\tgo w.watchSignal()\n\tgo w.watchHaltChan()\n\tgo w.watchShutdownTimeout()\n}", "func (c *client) setupWorkers() {\n\t//go c.timeoutWorker()\n\t//go c.pingWorker()\n\tgo c.receiveWorker()\n\tgo c.sendWorker()\n}", "func (m *Manager) Watch() error {\n\tfor req, channel := range m.changesChannels {\n\t\tif err := m.startWatchingFlow(req, channel); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (e *etcdCacheEntry) startWatching(c *EtcdConfig) {\n // no locking; this must only be called by another method that handles synchronization\n if !e.watching {\n if e.finalize == nil {\n e.finalize = make(chan struct{})\n }\n e.watching = true\n go e.watch(c)\n }\n}", "func Run(done chan error, mgr manager.Manager, watchesPath string) {\n\twatches, err := runner.NewFromWatches(watchesPath)\n\tif err != nil {\n\t\tlogrus.Error(\"Failed to get watches\")\n\t\tdone <- err\n\t\treturn\n\t}\n\trand.Seed(time.Now().Unix())\n\tc := signals.SetupSignalHandler()\n\n\tfor gvk, runner := range watches {\n\t\tcontroller.Add(mgr, 
controller.Options{\n\t\t\tGVK: gvk,\n\t\t\tRunner: runner,\n\t\t})\n\t}\n\tdone <- mgr.Start(c)\n}", "func (w *Watcher) Start() {\n\tfor _, watch := range w.watches {\n\t\tgo watch.start()\n\t\tw.wg.Add(1)\n\t}\n}", "func (mgr *WatchController) Start(workerCount int) {\n\t// init the channels with empty structs\n\tmgr.stopCh = make(chan struct{})\n\tmgr.doneCh = make(chan struct{})\n\n\t// set event handlers. GenericControllers can be created at any time,\n\t// so we have to assume the shared informers are already running. We can't\n\t// add event handlers in NewWatchController() since c might be incomplete.\n\twatchHandlers := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: mgr.enqueueWatch,\n\t\tUpdateFunc: mgr.updateWatch,\n\t\tDeleteFunc: mgr.enqueueWatch,\n\t}\n\tvar resyncPeriod time.Duration\n\tif mgr.GCtlConfig.Spec.ResyncPeriodSeconds != nil {\n\t\t// Use a custom resync period if requested\n\t\t// NOTE: This only applies to the parent\n\t\tresyncPeriod =\n\t\t\ttime.Duration(*mgr.GCtlConfig.Spec.ResyncPeriodSeconds) * time.Second\n\t\t// Put a reasonable limit on it.\n\t\tif resyncPeriod < time.Second {\n\t\t\tresyncPeriod = time.Second\n\t\t}\n\t}\n\tfor _, informer := range mgr.watchInformers {\n\t\tif resyncPeriod != 0 {\n\t\t\tinformer.Informer().AddEventHandlerWithResyncPeriod(\n\t\t\t\twatchHandlers,\n\t\t\t\tresyncPeriod,\n\t\t\t)\n\t\t} else {\n\t\t\tinformer.Informer().AddEventHandler(watchHandlers)\n\t\t}\n\t}\n\tif workerCount <= 0 {\n\t\t// set a reasonable worker count value\n\t\tworkerCount = 5\n\t}\n\tgo func() {\n\t\t// close done channel i.e. 
mark closure of this start invocation\n\t\tdefer close(mgr.doneCh)\n\t\t// provide the ability to run operations after panics\n\t\tdefer utilruntime.HandleCrash()\n\n\t\tglog.Infof(\"Starting %s\", mgr)\n\t\tdefer glog.Infof(\"Shutting down %s\", mgr)\n\n\t\t// Wait for dynamic client and all informers.\n\t\tglog.V(7).Infof(\"Waiting for caches to sync: %s\", mgr)\n\t\tsyncFuncs := make(\n\t\t\t[]cache.InformerSynced,\n\t\t\t0,\n\t\t\t1+1+len(mgr.GCtlConfig.Spec.Attachments),\n\t\t)\n\t\tfor _, informer := range mgr.watchInformers {\n\t\t\tsyncFuncs = append(syncFuncs, informer.Informer().HasSynced)\n\t\t}\n\t\tfor _, informer := range mgr.attachmentInformers {\n\t\t\tsyncFuncs = append(syncFuncs, informer.Informer().HasSynced)\n\t\t}\n\t\tif !k8s.WaitForCacheSync(\n\t\t\tmgr.GCtlConfig.AsNamespaceNameKey(),\n\t\t\tmgr.stopCh,\n\t\t\tsyncFuncs...,\n\t\t) {\n\t\t\t// We wait forever unless Stop() is called, so this isn't an error.\n\t\t\tglog.Warningf(\"Cache sync never finished: %s\", mgr)\n\t\t\treturn\n\t\t}\n\t\tglog.V(5).Infof(\"Starting %d workers: %s\", workerCount, mgr)\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < workerCount; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\twait.Until(mgr.worker, time.Second, mgr.stopCh)\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t}()\n}", "func (mw *MultiWatcher) Watch(ctx context.Context) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(mw.watchers))\n\tfor _, w := range mw.watchers {\n\t\tgo func(w *Watcher) {\n\t\t\tdefer wg.Done()\n\t\t\tw.Watch(ctx)\n\t\t}(w)\n\t}\n\twg.Wait()\n}", "func (c *Client) watcherLoop() error {\n\tvar reqs []*Request\n\t// Events (File/Directory creation/modification/removal) are buffered\n\t// instead of being directly. This allows us to use the same TLS/TCP\n\t// connection for all the sent requests, instead of opening/closing a\n\t// new one each time. This would also allow for some possible\n\t// optimizations (not done currently) eg. 
if a file is modified\n\t// multiple times, only send it once. The buffering is done up to\n\t// requestsWaitTime time of no-activity.\n\t// The requestsBufferSize cap is added in order to prevent constant\n\t// events (eg. a file modified every 1 second) from being held forever.\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-c.watcher.Events:\n\t\t\tif !ok {\n\t\t\t\t// Exit on watcher close.\n\t\t\t\tlog.Println(\"Done monitoring\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treq, err := c.handleEvent(event)\n\t\t\tif err != nil {\n\t\t\t\t// Stop monitoring on first error.\n\t\t\t\treturn errors.Wrap(err, \"Handling file event failed\")\n\t\t\t}\n\t\t\tif req != nil {\n\t\t\t\treqs = append(reqs, req)\n\t\t\t}\n\t\t\t// Don't keep buffering requests forever, in case of\n\t\t\t// constant filesystem activity in the watched directories.\n\t\t\tif len(reqs) == requestsBufferSize {\n\t\t\t\tif err := c.sendRequests(reqs); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqs = nil\n\t\t\t}\n\t\tcase err, ok := <-c.watcher.Errors:\n\t\t\tif !ok {\n\t\t\t\tlog.Println(\"Done monitoring\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\tcase <-time.After(requestsWaitTime):\n\t\t\tif len(reqs) > 0 {\n\t\t\t\tif err := c.sendRequests(reqs); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqs = nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (this *CertWatcher) watch() {\n\tthis.logger.Info(\"Starting certificate watcher\")\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-this.watcher.Events:\n\t\t\t// Channel is closed.\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tthis.handleEvent(event)\n\n\t\tcase err, ok := <-this.watcher.Errors:\n\t\t\t// Channel is closed.\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tthis.logger.Error(err, \"certificate watch error\")\n\t\t}\n\t}\n}", "func setupWatcher(ctx context.Context, cancel context.CancelFunc, agentState dockerstate.TaskEngineState,\n\teniChangeEvent chan<- statechange.Event, primaryMAC string) *ENIWatcher 
{\n\n\treturn &ENIWatcher{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tagentState: agentState,\n\t\teniChangeEvent: eniChangeEvent,\n\t\tprimaryMAC: primaryMAC,\n\t}\n}", "func (mgr *WatcherManager) Run(stopCh <-chan struct{}) {\n\t// run normal resource watchers.\n\tfor resourceName, watcher := range mgr.watchers {\n\t\tglog.Infof(\"watcher manager, start list-watcher[%+v]\", resourceName)\n\t\tgo watcher.Run(stopCh)\n\t}\n\n\tif !mgr.watchResource.DisableNetservice {\n\t\t// run netservice watcher.\n\t\tgo mgr.netserviceWatcher.Run(stopCh)\n\t}\n\n\t// synchronizer run once\n\tvar count = 0\n\tfor {\n\t\tif count >= 5 {\n\t\t\tpanic(\"synchronizer run failed\")\n\t\t}\n\t\tif err := mgr.synchronizer.RunOnce(); err != nil {\n\t\t\tglog.Errorf(\"synchronizer sync failed: %v\", err)\n\t\t\ttime.Sleep(5 * time.Minute)\n\t\t} else {\n\t\t\tglog.Infof(\"synchronizer sync done.\")\n\t\t\tbreak\n\t\t}\n\t\tcount++\n\t}\n}", "func TestWatcher(t *testing.T) {\n\twatcher := ImokWatch(500*time.Millisecond, 5)\n\twatcher.Watch(make(chan bool))\n}", "func watch(dispatchChan chan string) {\n\n\t// Start a file system watcher\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(\"Error creating watcher\", err)\n\t}\n\n\tsignalSourceChanged := debounce(800*time.Millisecond, func() {\n\t\tdispatchChan <- SOURCECHANGED\n\t})\n\n\t// Start waiting for file system messages to receive...\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\t// On any event\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\tif isGoFile(ev.Name) && (ev.IsCreate() || ev.IsDelete()) {\n\t\t\t\t\tsignalSourceChanged()\n\t\t\t\t}\n\n\t\t\t// Stop when encountering errors...\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Fatal(\"Error start watching\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Create a list of all directories to watch...\n\tdirectories := append(findDirectoriesIn(DIR_TO_WATCH), DIR_TO_WATCH)\n\tfor _, directory := range directories {\n\t\t// Configure to watcher to watch the files we want 
to...\n\t\tif err := watcher.Watch(directory); err != nil {\n\t\t\tlog.Fatal(\"Error start watching\", err)\n\t\t}\n\t}\n}", "func run() {\n\tlogs.Start()\n\n\t// Send all data for the centralized database\n\tgo store.push()\n\tstore.Lock()\n\tdefer store.Unlock()\n\n\t// Creating the listener\n\tconfigData := config.GetConfig()\n\twatcher(configData)\n}", "func InitWorkers(workerNum int64, queueNum int64) {\n\tlog.Printf(\"worker number is %v , queue number is %v\", workerNum, queueNum)\n\tQueueNotification = make(chan model.PushNotification, queueNum)\n\tfor i := int64(0); i < workerNum; i++ {\n\t\tgo startWorker()\n\t}\n}", "func Init(repo *config.RepoConfig, opr *operator.Operator) *Watcher {\n\twatcher := &Watcher{\n\t\trepo: repo,\n\t\topr: opr,\n\t\tcheckPoints: make(map[string]map[string]checkPoint),\n\t}\n\tif repo.WatchFiles == nil {\n\t\treturn watcher\n\t}\n\n\tgo func() {\n\t\tutil.Error(watcher.composeJobs())\n\t\tutil.Error(watcher.initCheckPoints())\n\t\tutil.Println(\"init complete\", watcher.checkPoints)\n\t\tgo watcher.polling()\n\t}()\n\n\treturn watcher\n}", "func initUpdaters(ctx context.Context, opts *Opts, db *sqlx.DB, store vulnstore.Updater, dC chan context.CancelFunc, eC chan error) {\n\tcontrollers := map[string]*updater.Controller{}\n\n\tfor _, u := range opts.Updaters {\n\t\tif _, ok := controllers[u.Name()]; ok {\n\t\t\teC <- fmt.Errorf(\"duplicate updater found in UpdaterFactory. 
all names must be unique: %s\", u.Name())\n\t\t\treturn\n\t\t}\n\t\tcontrollers[u.Name()] = updater.NewController(&updater.Opts{\n\t\t\tUpdater: u,\n\t\t\tStore: store,\n\t\t\tName: u.Name(),\n\t\t\tInterval: opts.UpdateInterval,\n\t\t\tLock: pglock.NewLock(db, time.Duration(0)),\n\t\t\tUpdateOnStart: false,\n\t\t})\n\t}\n\n\t// limit initial concurrent updates\n\tcc := make(chan struct{}, DefaultUpdaterInitConcurrency)\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(controllers))\n\tfor _, v := range controllers {\n\t\tcc <- struct{}{}\n\t\tvv := v\n\t\tgo func() {\n\t\t\tupdateTO, cancel := context.WithTimeout(ctx, 10*time.Minute)\n\t\t\terr := vv.Update(updateTO)\n\t\t\tif err != nil {\n\t\t\t\teC <- fmt.Errorf(\"updater %s failed to update: %v\", vv.Name, err)\n\t\t\t}\n\t\t\twg.Done()\n\t\t\tcancel()\n\t\t\t<-cc\n\t\t}()\n\t}\n\twg.Wait()\n\tclose(eC)\n\n\t// start all updaters and return context\n\tctx, cancel := context.WithCancel(ctx)\n\tfor _, v := range controllers {\n\t\tv.Start(ctx)\n\t}\n\tdC <- cancel\n}", "func (b *bgpserver) watches() {\n\tb.logger.Debugf(\"Enter func (b *bgpserver) watches()\\n\")\n\tdefer b.logger.Debugf(\"Exit func (b *bgpserver) watches()\\n\")\n\n\tfor {\n\t\tselect {\n\n\t\tcase nodes := <-b.nodeChan:\n\t\t\tb.logger.Debug(\"recv nodeChan\")\n\t\t\tif types.NodesEqual(b.nodes, nodes, b.logger) {\n\t\t\t\tb.logger.Debug(\"NODES ARE EQUAL\")\n\t\t\t\tb.metrics.NodeUpdate(\"noop\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tb.metrics.NodeUpdate(\"updated\")\n\t\t\tb.logger.Debug(\"NODES ARE NOT EQUAL\")\n\t\t\tb.Lock()\n\t\t\tb.nodes = nodes\n\n\t\t\tb.lastInboundUpdate = time.Now()\n\t\t\tb.Unlock()\n\n\t\tcase configs := <-b.configChan:\n\t\t\tb.logger.Debug(\"recv configChan\")\n\t\t\tb.Lock()\n\t\t\tb.config = configs\n\t\t\tb.newConfig = true\n\t\t\tb.lastInboundUpdate = time.Now()\n\t\t\tb.Unlock()\n\t\t\tb.metrics.ConfigUpdate()\n\n\t\t// Administrative\n\t\tcase <-b.ctx.Done():\n\t\t\tb.logger.Debugf(\"parent context closed. 
exiting run loop\")\n\t\t\treturn\n\t\tcase <-b.ctxWatch.Done():\n\t\t\tb.logger.Debugf(\"watch context closed. exiting run loop\")\n\t\t\treturn\n\t\t}\n\n\t}\n}", "func setWatchIntervals(listRetryInterval, watchPollInterval time.Duration) {\n\twatchersyncer.ListRetryInterval = listRetryInterval\n\twatchersyncer.WatchPollInterval = watchPollInterval\n}", "func (w *Watcher) run() {\n watcher, err := w.dial()\n\n if err != nil {\n w.log <- err\n return\n }\n defer watcher.Close()\n\n go func() {\n for event := range watcher.Event {\n w.handleEvent(event)\n }\n }()\n\n go func() {\n for err := range watcher.Error {\n w.handleError(err)\n }\n }()\n\n <-w.done\n}", "func (s *Streamer) init() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.fsNotify = watcher // we closed it during Stop() process\n\n\ts.changedFileNames = make(chan string, 1000) // we closed it during Stop() process\n\n\treturn nil\n}", "func (c *Client) Watch(ctx context.Context) {\n\tt := time.NewTicker(1 * time.Minute)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\turls, err := clusterNodes(c.Endpoint)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.ServerList.SetServers(urls...)\n\t\tcase <-ctx.Done():\n\t\t\tt.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (w *watcher) startWatching() {\n\tf := w.f\n\n\t// insert watcher in memKv's global list of watchers\n\t// insert watcher into key's watcher list\n\t// insert memKvRec into watcher's list (to handle Stop)\n\tf.cluster.Lock()\n\tdefer f.cluster.Unlock()\n\n\twl, ok := f.cluster.watchers[w.keyOrPrefix]\n\tif !ok {\n\t\twl = []*watcher{}\n\t}\n\twl = append(wl, w)\n\tf.cluster.watchers[w.keyOrPrefix] = wl\n\n\tif w.recursive {\n\t\tfor key, v := range f.cluster.kvs {\n\t\t\tif strings.HasPrefix(key, w.keyOrPrefix) {\n\t\t\t\tw.keys = append(w.keys, key)\n\t\t\t\tv.watchers = append(v.watchers, w)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif v, ok := f.cluster.kvs[w.keyOrPrefix]; ok {\n\t\t\tw.keys = 
[]string{w.keyOrPrefix}\n\t\t\tv.watchers = append(v.watchers, w)\n\t\t}\n\t}\n\n\t// If starting from a lower version that current object's version\n\t// send current object(s) on the channel\n\tfor _, key := range w.keys {\n\t\tv := f.cluster.kvs[key]\n\t\tif v.revision >= w.fromVersion {\n\t\t\tsendEvent(w, key, v, false)\n\t\t}\n\t}\n}", "func (s *stateManager) Watch(watcher *AllocationWatcher) func() {\n\tstopChan := make(chan interface{})\n\ts.stopChan = append(s.stopChan, stopChan)\n\tctx := context.Background()\n\n\tkey := fmt.Sprintf(\"%s/allocations\", etcdPrefix)\n\twatchChan := s.cli.Watch(ctx, key, clientv3.WithPrefix(), clientv3.WithPrevKV())\n\n\tstopFunc := func() {\n\t\tstopChan <- true\n\t}\n\n\t// Start a new thread and watch for changes in etcd\n\tgo s.watchChannel(watchChan, stopChan, watcher)\n\n\treturn stopFunc\n}", "func (s *Service) Watch(ctx context.Context, chConfig *config.CheckerConfig) {\n\tsourcesFile, err := os.Open(chConfig.Source())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer sourcesFile.Close()\n\n\tfor i := 0; i < cap(s.workerPool); i++ {\n\t\tworker := NewWorker(s.store, s.workerPool)\n\t\tworker.Start(ctx)\n\t}\n\n\tvar parallelRun int\n\tscanner := bufio.NewScanner(sourcesFile)\n\tfor scanner.Scan() && parallelRun < parallelRunMaxQty {\n\t\ts.spawnCheck(ctx, scanner.Text(), chConfig.Interval())\n\t\tparallelRun++\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"Service shutdown.\")\n\t\treturn\n\t}\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tvar watchKinds []watches.Watch\n\n\tws, err := watches.Load(r.GatewayOptions.WatchesPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\twatchKinds = append(watchKinds, ws...)\n\n\tfor _, w := range watchKinds {\n\t\t// Register controller with the factory\n\t\treconcilePeriod := time.Minute\n\t\tif w.ReconcilePeriod != nil {\n\t\t\treconcilePeriod = 
w.ReconcilePeriod.Duration\n\t\t}\n\n\t\tmaxConcurrentReconciles := runtime.NumCPU()\n\t\tif w.MaxConcurrentReconciles != nil {\n\t\t\tmaxConcurrentReconciles = *w.MaxConcurrentReconciles\n\t\t}\n\n\t\tr, err := reconciler.New(\n\t\t\treconciler.WithChart(*w.Chart),\n\t\t\treconciler.WithGroupVersionKind(w.GroupVersionKind),\n\t\t\treconciler.WithOverrideValues(r.defaultConfiguration(w.OverrideValues)),\n\t\t\treconciler.SkipDependentWatches(w.WatchDependentResources != nil && !*w.WatchDependentResources),\n\t\t\treconciler.WithMaxConcurrentReconciles(maxConcurrentReconciles),\n\t\t\treconciler.WithReconcilePeriod(reconcilePeriod),\n\t\t\treconciler.WithInstallAnnotations(annotation.DefaultInstallAnnotations...),\n\t\t\treconciler.WithUpgradeAnnotations(annotation.DefaultUpgradeAnnotations...),\n\t\t\treconciler.WithUninstallAnnotations(annotation.DefaultUninstallAnnotations...),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := r.SetupWithManager(mgr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tklog.Infoln(\"configured watch\", \"gvk\", w.GroupVersionKind, \"chartPath\", w.ChartPath, \"maxConcurrentReconciles\", maxConcurrentReconciles, \"reconcilePeriod\", reconcilePeriod)\n\t}\n\treturn nil\n}", "func (w *Listener) Start(events []chan string) {\n\tkey := joinPaths(w.prefix, w.Key)\n\tw.logger.Debug(\"watching '%s'\", key)\n\n\tgo func() {\n\tLoop:\n\t\tfor {\n\t\t\tjoin := make(chan bool)\n\t\t\tresponses := make(chan *etcd.Response)\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tresponse, open := <-responses\n\t\t\t\t\tif !open {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tw.logger.Debug(\"key '%s' changed\", response.Node.Key)\n\t\t\t\t\tevent := strings.Trim(strings.TrimPrefix(response.Node.Key, w.prefix), \"/\")\n\t\t\t\t\tfor _, eventChan := range events {\n\t\t\t\t\t\teventChan <- event\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tjoin <- true\n\t\t\t\tclose(join)\n\t\t\t}()\n\n\t\t\t_, err := w.client.client.Watch(key, 0, true, responses, 
w.stop)\n\t\t\t<-join\n\n\t\t\tif err == etcd.ErrWatchStoppedByUser {\n\t\t\t\tbreak Loop\n\t\t\t} else {\n\t\t\t\tw.logger.Error(\"watch on '%s' failed: %s\", key, err)\n\t\t\t\tw.logger.Info(\"retrying in %ds\", WatchRetry)\n\t\t\t\tselect {\n\t\t\t\tcase <-w.stop:\n\t\t\t\t\tbreak Loop\n\t\t\t\tcase <-time.After(WatchRetry * time.Second):\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor _, eventChan := range events {\n\t\t\tclose(eventChan)\n\t\t}\n\t\tw.join <- true\n\t}()\n}", "func (g *Gulf) Watch(patterns []string, tasks ...string) {}", "func ResolverWatch() {\n\tconf := &naming.Config{\n\t\tNodes: config.Nodes,\n\t\tZone: config.Zone,\n\t\tEnv: config.Env,\n\t}\n\tdis := naming.New(conf)\n\tc := &consumer{\n\t\tconf: conf,\n\t\tappID: config.AppID,\n\t\tdis: dis.Build(config.AppID),\n\t}\n\tConsu = c\n\trsl := dis.Build(c.appID)\n\tch := rsl.Watch()\n\tgo c.getInstances(ch)\n}", "func (c *Controller) changeWatcher() (err error) {\n\tc.tunedMainCfg, err = iniFileLoad(tunedProfilesDirCustom + \"/\" + tunedMainConfFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load global TuneD configuration file: %v\", err)\n\t}\n\n\t// Use less aggressive per-item only exponential rate limiting for both wqKube and wqTuneD.\n\t// Start retrying at 100ms with a maximum of 1800s.\n\tc.wqKube = workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 1800*time.Second))\n\tc.wqTuneD = workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 1800*time.Second))\n\n\tc.clients.Tuned, err = tunedset.NewForConfig(c.kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttunedInformerFactory := tunedinformers.NewSharedInformerFactoryWithOptions(\n\t\tc.clients.Tuned,\n\t\tntoconfig.ResyncPeriod(),\n\t\ttunedinformers.WithNamespace(operandNamespace))\n\n\ttrInformer := tunedInformerFactory.Tuned().V1().Tuneds()\n\tc.listers.TunedResources = 
trInformer.Lister().Tuneds(operandNamespace)\n\ttrInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindTuned}))\n\n\ttpInformer := tunedInformerFactory.Tuned().V1().Profiles()\n\tc.listers.TunedProfiles = tpInformer.Lister().Profiles(operandNamespace)\n\ttpInformer.Informer().AddEventHandler(c.informerEventHandler(wqKey{kind: wqKindProfile}))\n\n\ttunedInformerFactory.Start(c.stopCh) // Tuned/Profile\n\n\t// Wait for the caches to be synced before starting worker(s).\n\tklog.V(1).Info(\"waiting for informer caches to sync\")\n\tok := cache.WaitForCacheSync(c.stopCh,\n\t\ttrInformer.Informer().HasSynced,\n\t\ttpInformer.Informer().HasSynced,\n\t)\n\tif !ok {\n\t\treturn fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\tklog.V(1).Info(\"starting events processors\")\n\tgo wait.Until(c.eventProcessorKube, time.Second, c.stopCh)\n\tdefer c.wqKube.ShutDown()\n\tgo wait.Until(c.eventProcessorTuneD, time.Second, c.stopCh)\n\tdefer c.wqTuneD.ShutDown()\n\tklog.Info(\"started events processors\")\n\n\t// Watch for filesystem changes on the tunedBootcmdlineFile file.\n\twFs, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create filesystem watcher: %v\", err)\n\t}\n\tdefer wFs.Close()\n\n\t// Register fsnotify watchers.\n\tfor _, element := range []string{tunedBootcmdlineFile} {\n\t\terr = wFs.Add(element)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to start watching %q: %v\", element, err)\n\t\t}\n\t}\n\n\tklog.Info(\"started controller\")\n\tfor {\n\t\tselect {\n\t\tcase <-c.stopCh:\n\t\t\tklog.Infof(\"termination signal received, stop\")\n\n\t\t\treturn nil\n\n\t\tcase <-c.tunedExit:\n\t\t\tc.tunedCmd = nil // Cmd.Start() cannot be used more than once\n\t\t\tklog.Infof(\"TuneD process exitted...\")\n\n\t\t\t// Do not be too aggressive about keeping the TuneD daemon around.\n\t\t\t// TuneD daemon might have exitted after receiving SIGTERM during\n\t\t\t// system reboot/shutdown.\n\t\t\treturn 
nil\n\n\t\tcase fsEvent := <-wFs.Events:\n\t\t\tklog.V(2).Infof(\"fsEvent\")\n\t\t\tif fsEvent.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\tklog.V(1).Infof(\"write event on: %s\", fsEvent.Name)\n\t\t\t\tc.change.bootcmdline = true\n\t\t\t\t// Notify the event processor that the TuneD daemon calculated new kernel command-line parameters.\n\t\t\t\tc.wqTuneD.Add(wqKey{kind: wqKindDaemon})\n\t\t\t}\n\n\t\tcase err := <-wFs.Errors:\n\t\t\treturn fmt.Errorf(\"error watching filesystem: %v\", err)\n\n\t\tcase <-c.tunedTicker.C:\n\t\t\tklog.Errorf(\"timeout (%d) to apply TuneD profile; restarting TuneD daemon\", c.tunedTimeout)\n\t\t\terr := c.tunedRestart(true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// TuneD profile application is failing, make this visible in \"oc get profile\" output.\n\t\t\tif err = c.updateTunedProfile(); err != nil {\n\t\t\t\tklog.Error(err.Error())\n\t\t\t}\n\n\t\tcase <-c.changeCh:\n\t\t\tvar synced bool\n\t\t\tklog.V(2).Infof(\"changeCh\")\n\t\t\tif c.tunedTimeout > tunedInitialTimeout {\n\t\t\t\t// TuneD is \"degraded\" as the previous profile application did not succeed in\n\t\t\t\t// tunedInitialTimeout [s]. 
There has been a change we must act upon though\n\t\t\t\t// fairly quickly.\n\t\t\t\tc.tunedTimeout = tunedInitialTimeout\n\t\t\t\tklog.Infof(\"previous application of TuneD profile failed; change detected, scheduling full restart in 1s\")\n\t\t\t\tc.tunedTicker.Reset(time.Second * time.Duration(1))\n\t\t\t\tc.changeChRet <- true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif c.daemon.reloading {\n\t\t\t\t// Do not reload the TuneD daemon unless it finished with the previous reload.\n\t\t\t\tc.changeChRet <- false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsynced, err := c.changeSyncer()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc.changeChRet <- synced\n\t\t}\n\t}\n}", "func (ns *numbers) watcher() {\n\tclient := ns.client_pool.Get().(*etcd.Client)\n\tdefer func() {\n\t\tns.client_pool.Put(client)\n\t}()\n\n\tfor {\n\t\tch := make(chan *etcd.Response, 10)\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tif resp, ok := <-ch; ok {\n\t\t\t\t\tif !resp.Node.Dir {\n\t\t\t\t\t\tns.parse(resp.Node.Key, resp.Node.Value)\n\t\t\t\t\t\tlog.Trace(\"csv change:\", resp.Node.Key)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\t_, err := client.Watch(DEFAULT_NUMBERS_PATH, 0, true, ch, nil)\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t}\n\t\t<-time.After(RETRY_DELAY)\n\t}\n}", "func (w *Watcher) Watch(ctx context.Context) error {\n\tw.mu.Lock()\n\tif w.wasStartedOnce {\n\t\tw.mu.Unlock()\n\t\treturn errors.New(\"Can only start Watcher once per instance\")\n\t}\n\tw.wasStartedOnce = true\n\tw.mu.Unlock()\n\n\t// Create a child context so that we can preemptively cancel if there is an\n\t// error.\n\tinnerCtx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t// A waitgroup lets us wait for all goroutines to exit.\n\twg := &sync.WaitGroup{}\n\n\t// Start four independent goroutines. The main loop, cleanup loop, removed orders\n\t// checker and max expirationTime checker. 
Use four separate channels to communicate errors.\n\tmainLoopErrChan := make(chan error, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tmainLoopErrChan <- w.mainLoop(innerCtx)\n\t}()\n\tcleanupLoopErrChan := make(chan error, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tcleanupLoopErrChan <- w.cleanupLoop(innerCtx)\n\t}()\n\tmaxExpirationTimeLoopErrChan := make(chan error, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tmaxExpirationTimeLoopErrChan <- w.maxExpirationTimeLoop(innerCtx)\n\t}()\n\tremovedCheckerLoopErrChan := make(chan error, 1)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tremovedCheckerLoopErrChan <- w.removedCheckerLoop(innerCtx)\n\t}()\n\n\t// If any error channel returns a non-nil error, we cancel the inner context\n\t// and return the error. Note that this means we only return the first error\n\t// that occurs.\n\tselect {\n\tcase err := <-mainLoopErrChan:\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\tcase err := <-cleanupLoopErrChan:\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\tcase err := <-maxExpirationTimeLoopErrChan:\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\tcase err := <-removedCheckerLoopErrChan:\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Wait for all goroutines to exit. 
If we reached here it means we are done\n\t// and there are no errors.\n\twg.Wait()\n\treturn nil\n}", "func (r *registryRouter) watch() {\n\tvar attempts int\n\tlogger := r.Options().Logger\n\n\tfor {\n\t\tif r.isStopped() {\n\t\t\treturn\n\t\t}\n\n\t\t// watch for changes\n\t\tw, err := r.opts.Registry.Watch()\n\t\tif err != nil {\n\t\t\tattempts++\n\t\t\tlogger.Logf(log.ErrorLevel, \"error watching endpoints: %v\", err)\n\t\t\ttime.Sleep(time.Duration(attempts) * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tch := make(chan bool)\n\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\t\tw.Stop()\n\t\t\tcase <-r.exit:\n\t\t\t\tw.Stop()\n\t\t\t}\n\t\t}()\n\n\t\t// reset if we get here\n\t\tattempts = 0\n\n\t\tfor {\n\t\t\t// process next event\n\t\t\tres, err := w.Next()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Logf(log.ErrorLevel, \"error getting next endoint: %v\", err)\n\t\t\t\tclose(ch)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tr.process(res)\n\t\t}\n\t}\n}", "func (w *WatchManager) run() {\n\tw.pollUpdatesInWasp() // initial pull from WASP\n\trunning := true\n\tfor running {\n\t\tselect {\n\t\tcase <-time.After(1 * time.Minute):\n\t\t\tw.pollUpdatesInWasp()\n\n\t\tcase <-w.stopChannel:\n\t\t\trunning = false\n\t\t}\n\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func TestWatcher(t *testing.T) {\n\tdir := tempMkdir(t, \"crun-watcher\")\n\n\tif err := os.Chdir(dir); err != nil {\n\t\tt.Fatal(\"Failed to change to temp dir\")\n\t}\n\n\tc := make(chan string, 2)\n\twatch(c)\n\n\tnotified := 0\n\n\tgo func() {\n\t\tfor msg := range c {\n\t\t\tif msg == SOURCECHANGED {\n\t\t\t\tnotified++\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Should only do one, since debounced\n\tioutil.WriteFile(\"testfile-1.go\", []byte(\"something\"), 0644)\n\tioutil.WriteFile(\"testfile-2.go\", []byte(\"something\"), 0644)\n\n\ttime.Sleep(1 * time.Second)\n\tif notified != 1 {\n\t\tt.Fatal(\"Watcher should have notified 1 time\", notified)\n\t}\n\n\tioutil.WriteFile(\"testfile-3.go\", []byte(\"something\"), 
0644)\n\ttime.Sleep(1 * time.Second)\n\tif notified != 2 {\n\t\tt.Fatal(\"Watcher should have notified 2 times\", notified)\n\t}\n\n\tioutil.WriteFile(\"testfile-1.md\", []byte(\"something\"), 0644)\n\ttime.Sleep(1 * time.Second)\n\tif notified != 2 {\n\t\tt.Fatal(\"Watcher should only notify on md files\", notified)\n\t}\n\n\tioutil.WriteFile(\"testfile-4.go\", []byte(\"something\"), 0644)\n\tioutil.WriteFile(\"testfile-5.go\", []byte(\"something\"), 0644)\n\n\ttime.Sleep(1 * time.Second)\n\tif notified != 3 {\n\t\tt.Fatal(\"Watcher should have notified 3 times\", notified)\n\t}\n\tclose(c)\n}", "func (w *watcher) Watch() {\n\tfor {\n\t\tfor watchPath := range w.watchItems {\n\t\t\tfileChanged, err := w.scanChange(watchPath)\n\t\t\tif err != nil {\n\t\t\t\tw.errors <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif fileChanged != \"\" {\n\t\t\t\tw.events <- fileChanged\n\t\t\t\tstartTime = time.Now()\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Duration(w.pollInterval) * time.Millisecond)\n\t}\n}", "func (m *Manager) Run(ctx context.Context, wg *sync.WaitGroup) error {\n\tif m.machine == nil {\n\t\treturn fmt.Errorf(\"manager requires a machine to manage\")\n\t}\n\n\terr := m.configureWatchers()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, watcher := range m.watchers {\n\t\twg.Add(1)\n\t\tgo watcher.Run(ctx, wg)\n\n\t\tif watcher.AnnounceInterval() > 0 {\n\t\t\twg.Add(1)\n\t\t\tgo m.announceWatcherState(ctx, wg, watcher)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (w *watcher) start(results <-chan api.WatchEvent) {\n\tlog.WithField(\"Name\", w.name).Info(\"start watcher\")\n\tw.watcherRunningWg.Add(1)\n\tgo w.run(results)\n}", "func EventWatcher() {\n\t// router, err := goczmq.NewRouter(\"tcp://*:5555\")\n\t// if err != nil {\n\t// \tlog.Println(\"Unable to START router\")\n\t// \t// log.Fatal(err)\n\t// \treturn\n\t// }\n\t// defer router.Destroy()\n\n\t// // type MatlabCommand struct {\n\t// // \tID int\n\t// // \tName string\n\t// // \tValues map[string]float64\n\t// // 
}\n\t// log.Println(\"My Identity \", router.Identity())\n\tfor {\n\t\t// // log.Println(\"Waiting for zeromq messages\")\n\t\t// request, err := router.RecvMessage()\n\n\t\t// if err != nil {\n\t\t// \tlog.Println(\"Some Error in Receiving msg from router \", err)\n\t\t// } else {\n\n\t\t// }\n\t\t// if len(request) == 2 {\n\t\t// \t// data := make(map[string]interface{})\n\t\t// \t// str := string(request[2])\n\t\t// \tdataframe := request[1]\n\t\t// \tif len(dataframe) == 0 {\n\t\t// \t\tcontinue\n\t\t// \t}\n\n\t\t// \tlog.Printf(\"SRC Identity %d\", request[0])\n\t\t// \tlog.Printf(\"Received %s\", dataframe)\n\n\t\t// \tjsonData, _ := json.Marshal(plotcmd)\n\t\t// \tn, _ := activePlotter.Write(jsonData)\n\t\t// \tfmt.Printf(\"\\nSent %d bytes is %s\", n, jsonData)\n\n\t}\n\t// }\n}", "func Watcher(ctx context.Context, ch *mp.WatchResponse) error {\n\tmtx.RLock()\n\tfor _, sub := range watchers[ch.Key] {\n\t\tselect {\n\t\tcase sub.next <- ch:\n\t\tcase <-time.After(time.Millisecond * 100):\n\t\t}\n\t}\n\tmtx.RUnlock()\n\treturn nil\n}", "func InitWatcher() *Watcher {\n\twatcher := &Watcher{\n\t\trestartProc: make(chan ProcContainer),\n\t\twatchProcs: make(map[string]*ProcWatcher),\n\t}\n\treturn watcher\n}", "func (mc *NodeWatcher) StartWatcher(quitCh chan struct{}, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor {\n\t\twatcher := cache.NewListWatchFromClient(mc.clientset.CoreV1().RESTClient(), mc.resourceStr, v1.NamespaceAll, fields.Everything())\n\t\tretryWatcher, err := watchClient.NewRetryWatcher(mc.lastRV, watcher)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Could not start watcher for k8s resource: \" + mc.resourceStr)\n\t\t}\n\n\t\tresCh := retryWatcher.ResultChan()\n\t\trunWatcher := true\n\t\tfor runWatcher {\n\t\t\tselect {\n\t\t\tcase <-quitCh:\n\t\t\t\treturn\n\t\t\tcase c := <-resCh:\n\t\t\t\ts, ok := c.Object.(*metav1.Status)\n\t\t\t\tif ok && s.Status == metav1.StatusFailure {\n\t\t\t\t\tif s.Reason == metav1.StatusReasonGone 
{\n\t\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"Requested resource version too old, no longer stored in K8S API\")\n\t\t\t\t\t\trunWatcher = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Ignore and let the retry watcher retry.\n\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).WithField(\"object\", c.Object).Info(\"Failed to read from k8s watcher\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Update the lastRV, so that if the watcher restarts, it starts at the correct resource version.\n\t\t\t\to, ok := c.Object.(*v1.Node)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmc.lastRV = o.ObjectMeta.ResourceVersion\n\n\t\t\t\tpb, err := protoutils.NodeToProto(o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr := &storepb.K8SResource{\n\t\t\t\t\tResource: &storepb.K8SResource_Node{\n\t\t\t\t\t\tNode: pb,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tmsg := &K8sResourceMessage{\n\t\t\t\t\tObject: r,\n\t\t\t\t\tObjectType: mc.resourceStr,\n\t\t\t\t\tEventType: c.Type,\n\t\t\t\t}\n\t\t\t\tmc.updateCh <- msg\n\t\t\t}\n\t\t}\n\n\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"K8s watcher channel closed. 
Retrying\")\n\n\t\t// Wait 5 minutes before retrying, however if stop is called, just return.\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (cmw *SecretWatcher) watch() error {\n\tsec, err := cmw.kubeClient.Secrets(cmw.namespace).Get(cmw.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsel := generic.ObjectMetaFieldsSet(&sec.ObjectMeta, true)\n\tw, err := cmw.kubeClient.Secrets(cmw.namespace).Watch(api.ListOptions{\n\t\tFieldSelector: sel.AsSelector(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmw.w = w\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event, ok := <-w.ResultChan():\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif event.Type != watch.Added {\n\t\t\t\t\tcmw.OnEvent()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}", "func TestInitializeConfigWatcher(t *testing.T) {\n\n\t// Obtain a Test Logger (Required By be InitializeConfigWatcher function)\n\tlogger := logtesting.TestLogger(t)\n\n\t// Setup Environment\n\tcommontesting.SetTestEnvironment(t)\n\n\t// Create A Test Observability ConfigMap For The InitializeObservability() Call To Watch\n\tconfigMap := commontesting.GetTestSaramaConfigMap(commontesting.OldSaramaConfig, commontesting.TestEKConfig)\n\n\t// Create The Fake K8S Client And Add It To The ConfigMap\n\tfakeK8sClient := fake.NewSimpleClientset(configMap)\n\n\t// Add The Fake K8S Client To The Context (Required By InitializeObservability)\n\tctx := context.WithValue(context.TODO(), injectionclient.Key{}, fakeK8sClient)\n\n\t// The configWatcherHandler should change the nil \"watchedConfigMap\" to a valid ConfigMap when the watcher triggers\n\n\ttestConfigMap, err := fakeK8sClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, SettingsConfigMapName, metav1.GetOptions{})\n\tassert.Nil(t, err)\n\tassert.Equal(t, testConfigMap.Data[\"sarama\"], commontesting.OldSaramaConfig)\n\n\t// Perform The Test (Initialize The Observability 
Watcher)\n\terr = InitializeConfigWatcher(ctx, logger, configWatcherHandler, system.Namespace())\n\tassert.Nil(t, err)\n\n\t// Wait for the configWatcherHandler to be called (happens pretty quickly; loop usually only runs once)\n\tfor try := 0; getWatchedMap() == nil && try < 100; try++ {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\n\tassert.Equal(t, getWatchedMap().Data[\"sarama\"], commontesting.OldSaramaConfig)\n\n\t// Change the config map and verify the handler is called\n\ttestConfigMap.Data[\"sarama\"] = commontesting.NewSaramaConfig\n\n\t// The configWatcherHandler should change this back to a valid ConfigMap\n\tsetWatchedMap(nil)\n\n\ttestConfigMap, err = fakeK8sClient.CoreV1().ConfigMaps(system.Namespace()).Update(ctx, testConfigMap, metav1.UpdateOptions{})\n\tassert.Nil(t, err)\n\tassert.Equal(t, testConfigMap.Data[\"sarama\"], commontesting.NewSaramaConfig)\n\n\t// Wait for the configWatcherHandler to be called (happens pretty quickly; loop usually only runs once)\n\tfor try := 0; getWatchedMap() == nil && try < 100; try++ {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\tassert.NotNil(t, getWatchedMap())\n\tassert.Equal(t, getWatchedMap().Data[\"sarama\"], commontesting.NewSaramaConfig)\n}", "func newWatcherSyncerTester(l []watchersyncer.ResourceType) *watcherSyncerTester {\n\t// Create the required watchers. This hs methods that we use to drive\n\t// responses.\n\tlws := map[string]*listWatchSource{}\n\tfor _, r := range l {\n\t\t// We create a watcher for each resource type. 
We'll store these off the\n\t\t// default enumeration path for that resource.\n\t\tname := model.ListOptionsToDefaultPathRoot(r.ListInterface)\n\t\tlws[name] = &listWatchSource{\n\t\t\tname: name,\n\t\t\twatchCallError: make(chan error, 50),\n\t\t\tlistCallResults: make(chan interface{}, 200),\n\t\t\tstopEvents: make(chan struct{}, 200),\n\t\t\tresults: make(chan api.WatchEvent, 200),\n\t\t}\n\t}\n\n\tfc := &fakeClient{\n\t\tlws: lws,\n\t}\n\n\t// Create the syncer tester.\n\tst := testutils.NewSyncerTester()\n\trst := &watcherSyncerTester{\n\t\tSyncerTester: st,\n\t\tfc: fc,\n\t\twatcherSyncer: watchersyncer.New(fc, l, st),\n\t\tlws: lws,\n\t}\n\trst.watcherSyncer.Start()\n\treturn rst\n}", "func (c *Configurer) Watch() {\n\ttick := time.NewTicker(c.refreshInterval)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\tcase event := <-c.w.Events:\n\t\t\tif event.Name != c.filePath {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase err := <-c.w.Errors:\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Error(\"watch error\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\terr := c.load()\n\t\tif err != nil {\n\t\t\treloadErrorsCounter.Inc()\n\t\t\tlog.WithError(err).Error(\"config load error\")\n\t\t}\n\n\t\treloadCounter.Inc()\n\t}\n}", "func (w *GitWatcher) Start() error {\n\tzap.L().Debug(\"git watcher initialising, waiting for first state to be set\")\n\n\t// wait for the first config event to set the initial state\n\tw.__waitpoint__start_wait_init()\n\n\tzap.L().Debug(\"git watcher initialised\", zap.Any(\"initial_state\", w.state))\n\n\tfor {\n\t\terr := w.__waitpoint__start_select_states()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}", "func (db *DB) watch(ctx context.Context, ch chan struct{}) {\n\tdefer func() {\n\t\tdb.notifyChMu.Lock()\n\t\tclose(ch)\n\t\tdb.notifyChMu.Unlock()\n\t}()\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\tdb.doNotifyLoop(ctx, ch)\n\t}()\n\tselect {\n\tcase <-db.closed:\n\tcase 
<-ctx.Done():\n\tcase <-done:\n\t}\n}", "func (w *FileWatch) StartWatcher() {\n\tlog.Trace(\">>>>> StartWatcher\")\n\tdefer log.Trace(\"<<<<< StartWatcher\")\n\tpid := os.Getpid()\n\tlog.Tracef(\"Watcher [%d PID] is successfully started\", pid)\n\n\t// Control the ticker interval, dont want to frequently wakeup\n\t// watcher as it is only needed when there is event notification. So if there is\n\t// event notification, ticker is set to wake up every one minute otherwise sleep\n\t// for 1 hour.\n\tvar delayControlFlag time.Duration = tickerDefaultDelay\n\n\t// This is used to control the flow of events, we dont want to process frequent update\n\t// If there are multiple update within 1 min, only process one event and ignore the rest of the events\n\tisSpuriousUpdate:=false\n\t// forever\n\tfor {\n\t\tselect {\n\t\tcase <-w.watchStop:\n\t\t\tlog.Infof(\"Stopping [%d PID ] csi watcher\", pid)\n\t\t\tw.wg.Done()\n\t\t\tw.watchList.Close()\n\t\t\treturn\n\t\tcase <-w.watchList.Events:\n\t\t\t// There might be spurious update, ignore the event if it occurs within 1 min.\n\t\t\tif !isSpuriousUpdate {\n\t\t\t\tlog.Infof(\"Watcher [%d PID], received notification\", pid)\n\t\t\t\tw.watchRun()\n\t\t\t\tlog.Infof(\"Watcher [%d PID], notification served\", pid)\n\t\t\t\tisSpuriousUpdate = true\n\t\t\t\tdelayControlFlag = 1\n\t\t\t} else {\n\t\t\t\tlog.Warnf(\"Watcher [%d PID], received spurious notification, ignore\", pid)\n\t\t\t}\n\t\tcase <-time.NewTicker(time.Minute * delayControlFlag).C:\n\t\t\tisSpuriousUpdate = false\n\t\t\tdelayControlFlag = tickerDefaultDelay\n\t\t}\n\t}\n}", "func (c *Connection) watchRequests(ch *api.Channel, chMeta *channelMetadata) {\n\tfor {\n\t\tselect {\n\t\tcase req, ok := <-ch.ReqChan:\n\t\t\t// new request on the request channel\n\t\t\tif !ok {\n\t\t\t\t// after closing the request channel, release API channel and return\n\t\t\t\tc.releaseAPIChannel(ch, chMeta)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.processRequest(ch, chMeta, req)\n\n\t\tcase req 
:= <-ch.NotifSubsChan:\n\t\t\t// new request on the notification subscribe channel\n\t\t\tc.processNotifSubscribeRequest(ch, req)\n\t\t}\n\t}\n}", "func main() {\n\t// Load configuration.\n\t// @I Support providing configuration file for Cron component via cli options\n\t// @I Validate Cron component configuration when loading from JSON file\n\tvar cronConfig config.Config\n\terr := util.ReadJSONFile(CronConfigFile, &cronConfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Load Schedules provided in the config, if we run on ephemeral storage mode.\n\tloadEphemeralSchedules(&cronConfig)\n\n\t// Channel that receives IDs of the Watches that are ready to be triggered.\n\ttriggers := make(chan int)\n\n\t// Channel that receives Schedules that are candidate for triggering.\n\tschedules := make(chan schedule.Schedule)\n\n\t// Search for candidate Schedules; it could be from a variety of sources.\n\tgo search(schedules, &cronConfig)\n\n\t// Listen to candidate Schedules and send them for execution as they come. We\n\t// do this in a goroutine so that we don't block the program yet.\n\tgo func() {\n\t\tfor schedule := range schedules {\n\t\t\tgo run(schedule, triggers, &cronConfig)\n\t\t}\n\t}()\n\n\t// Configuration required by the Watch API SDK.\n\t// @I Load Watch API SDK configuration from file or command line\n\tsdkConfig := sdk.Config{\n\t\tcronConfig.WatchAPI.BaseURL,\n\t\tcronConfig.WatchAPI.Version,\n\t}\n\n\t// Listen for IDs of Watches that are ready for triggering, and trigger them\n\t// as they come. 
We keep the channel open and the program stays on perpetual.\n\tfor watchID := range triggers {\n\t\tfmt.Printf(\"triggering Watch with ID \\\"%d\\\"\\n\", watchID)\n\t\terr := sdk.TriggerByID(watchID, sdkConfig)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}", "func startfileWatcherDemo(ch chan bool) {\r\n\tch <- true\r\n\tfor {\r\n\t\td, _ := os.Open(watchedPath)\r\n\t\tfiles, _ := d.Readdir(-1)\r\n\t\tfor _, fi := range files {\r\n\t\t\tfilePath := watchedPath + \"/\" + fi.Name()\r\n\t\t\tf, _ := os.Open(filePath)\r\n\t\t\tdata, _ := ioutil.ReadAll(f)\r\n\t\t\tf.Close()\r\n\t\t\tif data != nil {\r\n\t\t\t\tos.Remove(filePath)\r\n\t\t\t}\r\n\r\n\t\t\tgo func(data string) {\r\n\t\t\t\tfmt.Println(\"Record proccessed - \", data)\r\n\t\t\t}(string(data))\r\n\t\t}\r\n\t}\r\n}", "func (w *Watcher) Watch() {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-w.watcher.Event:\n\t\t\tfor _, handler := range w.modifiedHandlers {\n\t\t\t\tif strings.HasPrefix(ev.Name, handler.path) {\n\t\t\t\t\tfmt.Println(handler)\n\t\t\t\t\thandler.callback(ev.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Println(\"event:\", ev)\n\t\t\tlog.Println(\"handlers:\", w.modifiedHandlers)\n\t\t\t//case addreq :=\n\t\tcase err := <-w.watcher.Error:\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t}\n}", "func watch(ch chan<- struct{}, watchDir string) error {\n\tcmd := exec.Command(\"fswatch\", watchDir,\n\t\t\"--event\", \"Updated\",\n\t\t\"--latency\", \"0.101\",\n\t\t\"--one-per-batch\")\n\tcmd.Dir = watchDir\n\n\toutReader, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\toutScanner := bufio.NewScanner(outReader)\n\n\tgo func() {\n\t\tfor outScanner.Scan() {\n\t\t\t_ = outScanner.Text()\n\t\t\tch <- struct{}{}\n\t\t}\n\t}()\n\n\treturn cmd.Start()\n}", "func (c *cacheSelector) run() {\n\tdefer c.wg.Done()\n\n\t// 除非收到quit信号,否则一直卡在watch上,watch内部也是一个loop\n\tfor {\n\t\t// exit early if already dead\n\t\tif c.quit() {\n\t\t\tlog.Warn(\"(cacheSelector)run() quit 
now\")\n\t\t\treturn\n\t\t}\n\n\t\t// create new watcher\n\t\t// 创建新watch,走到这一步要么第第一次for循环进来,要么是watch函数出错。watch出错说明registry.watch的zk client与zk连接出现了问题\n\t\tw, err := c.so.Registry.Watch()\n\t\tlog.Debug(\"cache.Registry.Watch() = watch{%#v}, error{%#v}\", w, jerrors.ErrorStack(err))\n\t\tif err != nil {\n\t\t\tif c.quit() {\n\t\t\t\tlog.Warn(\"(cacheSelector)run() quit now\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Warn(\"cacheSelector.Registry.Watch() = error{%v}\", jerrors.ErrorStack(err))\n\t\t\ttime.Sleep(common.TimeSecondDuration(registry.REGISTRY_CONN_DELAY))\n\t\t\tcontinue\n\t\t}\n\n\t\t// watch for events\n\t\t// 除非watch遇到错误,否则watch函数内部的for循环一直运行下午,run函数的for循环也会一直卡在watch函数上\n\t\t// watch一旦退出,就会执行registry.Watch().Stop, 相应的client就会无效\n\t\terr = c.watch(w)\n\t\tlog.Debug(\"cache.watch(w) = err{%#+v}\", jerrors.ErrorStack(err))\n\t\tif err != nil {\n\t\t\tlog.Warn(\"cacheSelector.watch() = error{%v}\", jerrors.ErrorStack(err))\n\t\t\ttime.Sleep(common.TimeSecondDuration(registry.REGISTRY_CONN_DELAY))\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (c *Client) startWatcher() error {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.watcher = watcher\n\tif err = c.recursiveAddWatchers(c.path); err != nil {\n\t\twatcher.Close()\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *ProxyMarketClient) StartWatcher(updatedTime time.Duration) {\n\tgo func() {\n\t\tfor {\n\t\t\tres, err := c.GetProxyListAllByNewest()\n\t\t\tif err != nil {\n\t\t\t\ttime.Sleep(updatedTime * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Proxies = res.List.Data\n\t\t\ttime.Sleep(updatedTime * time.Millisecond)\n\t\t}\n\t}()\n}", "func TestWatcher() {\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating Watcher: %s\", err.Error())\n\t}\n\tdefer w.Close()\n\tw.Add(\"/media/peza\")\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tlog.Println(\"Starting watch...\")\n\t\tfor {\n\t\t\tselect 
{\n\t\t\tcase ev := <-w.Events:\n\t\t\t\tlog.Printf(\"Event: %s, %d\", ev.Name, ev.Op)\n\t\t\t\t// case err := <- w.Errors:\n\t\t\t}\n\t\t}\n\n\t}()\n\twg.Wait()\n}", "func (w *watcher) Run() {\n\t_, controller := cache.NewInformer(w.watchList, w.eventHandler.GetResourceObject(), time.Second*0, w.eventHandler)\n\tcontroller.Run(w.stopChan)\n\tclose(w.stopChan)\n}", "func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {\n\tvar hasInitialRev = true\n\n\t// If the initialRev = 0, we first get the current state of the\n\t// store and send ADDED events for each existing object\n\tif wc.initialRev == 0 {\n\t\thasInitialRev = false\n\t\trev, err := wc.sync()\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"failed to sync with latest state: %v\", err)\n\t\t\twc.sendError(err)\n\t\t\treturn\n\t\t}\n\t\twc.initialRev = rev\n\t}\n\n\tvar collectionName string\n\n\t// If it is not recursive, that means we're only going to be watching\n\t// a single object\n\tif !wc.recursive {\n\n\t\t// Get the mongo object info (collection + key) from the provided\n\t\t// key\n\t\tobjInfo, err := getMongoObjectInfo(wc.key)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"failed to get collection info from key: %v\", err)\n\t\t\twc.sendError(err)\n\t\t\treturn\n\t\t}\n\n\t\tcollectionName = objInfo.collection\n\t} else {\n\n\t\t// The watch is recursive, meaning that we're going to watch multiple\n\t\t// objects from the store.\n\t\t// TODO: allow database filtering based on labels or something\n\t\t// Perhaps we don't want to retrieve every single object from the store\n\n\t\t// Get the collection info from the key\n\t\tcollInfo, err := getMongoCollectionInfo(wc.key)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"failed to get collection info from key: %v\", err)\n\t\t\twc.sendError(err)\n\t\t\treturn\n\t\t}\n\t\tcollectionName = collInfo.collectionName\n\t}\n\n\tif hasInitialRev {\n\n\t\tlogrus.Infof(\"querying with rv : %d\", wc.initialRev)\n\n\t\tfilter, err := 
wc.getMongoCollectionWatchFilter(false, &wc.initialRev)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"failed to create collection watch filter: %v\", err)\n\t\t\twc.sendError(err)\n\t\t\treturn\n\t\t}\n\n\t\tvar findOptions []*mongooptions.FindOptions\n\t\tif wc.limit != nil {\n\t\t\twc.sentLock.RLock()\n\t\t\tlimit := *wc.limit - wc.sentCount\n\t\t\twc.sentLock.RUnlock()\n\n\t\t\tif limit < 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfindOptions = append(findOptions, mongooptions.Find().SetLimit(limit))\n\n\t\t}\n\n\t\tfindResult, err := wc.watcher.client.\n\t\t\tDatabase(wc.watcher.database).\n\t\t\tCollection(collectionName, mongooptions.Collection().SetReadConcern(readconcern.Linearizable())).\n\t\t\tFind(wc.ctx, filter, findOptions...)\n\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"watchChannel failed to query collection: %v\", err)\n\t\t\twc.sendError(err)\n\t\t\treturn\n\t\t}\n\n\t\tvar foundQry int64 = 0\n\t\tfor {\n\t\t\tif findResult.Next(wc.ctx) {\n\t\t\t\tcur := findResult.Current\n\t\t\t\tvar mongoRecord MongoRecord\n\t\t\t\tif err := bson.Unmarshal(cur, &mongoRecord); err != nil {\n\t\t\t\t\tlogWatchChannelErr(err)\n\t\t\t\t\twc.sendError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tev, err := convertMongoDocumentToEvent(mongoRecord)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogWatchChannelErr(err)\n\t\t\t\t\twc.sendError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfoundQry++\n\t\t\t\twc.sendEvent(ev)\n\t\t\t\twc.initialRev = ev.rev + 1\n\t\t\t}\n\t\t\tif findResult.Err() != nil {\n\t\t\t\tlogWatchChannelErr(err)\n\t\t\t\twc.sendError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tlogrus.Infof(\"found %d events using query\", foundQry)\n\n\t}\n\n\tif wc.syncOnly {\n\t\tclose(watchClosedCh)\n\t\treturn\n\t}\n\n\t// If the provided initialRev is not 0, then start watching\n\t// events at the given timestamp\n\tvar watchOptions = []*mongooptions.ChangeStreamOptions{\n\t\tmongooptions.ChangeStream().SetFullDocument(mongooptions.UpdateLookup),\n\t}\n\tif 
wc.initialRev != 0 {\n\t\tts := time.Unix(0, wc.initialRev).UTC().Unix()\n\t\twatchOptions = append(watchOptions, mongooptions.ChangeStream().SetStartAtOperationTime(&primitive.Timestamp{\n\t\t\tT: uint32(ts),\n\t\t}))\n\t}\n\n\tfilter, err := wc.getMongoOpLogFilter()\n\tif err != nil {\n\t\tlogrus.Warnf(\"failed to create mongo filter: %v\", err)\n\t\twc.sendError(err)\n\t\treturn\n\t}\n\tvar pipeline interface{}\n\tif filter != nil {\n\t\tpipeline = mongo.Pipeline{bson.D{{\"$match\", filter}}}\n\t} else {\n\t\tpipeline = mongo.Pipeline{}\n\t}\n\n\t// Start watching the mongo ChangeStream\n\tcs, err := wc.watcher.client.\n\t\tDatabase(wc.watcher.database).\n\t\tCollection(collectionName).\n\t\tWatch(wc.ctx, pipeline, watchOptions...)\n\tif err != nil {\n\t\tlogWatchChannelErr(err)\n\t\twc.sendError(err)\n\t\treturn\n\t}\n\n\t// This is a blocking loop. cs.Next will block until the\n\t// next result is available\n\t// TODO: add progressNotify every n seconds by sending\n\t// a BOOKMARK event meaning that the connection is not dead,\n\t// but just not receiving new events. Make sure to test\n\t// that the connection is alive using some kind of PING\n\t// for the BOOKMARK event\n\tfor {\n\t\tif cs.Next(wc.ctx) {\n\n\t\t\t// We convert the mongo ChangeStream event\n\t\t\t// to the *event type\n\t\t\tevt, err := parseEvent(cs.Current)\n\t\t\tif err != nil {\n\t\t\t\tlogWatchChannelErr(err)\n\t\t\t\twc.sendError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif evt.rev < wc.initialRev {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// The returned event might be nil\n\t\t\tif evt == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Send the event to the incoming event channel\n\t\t\t// for further processing\n\t\t\twc.sendEvent(evt)\n\t\t}\n\n\t\t// At this point, either the context was canceled\n\t\t// or we have a mongo error. 
Perhaps the connection\n\t\t// was closed\n\t\tif cs.Err() != nil {\n\t\t\tif errors2.Is(cs.Err(), context.Canceled) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogWatchChannelErr(cs.Err())\n\t\t\twc.sendError(cs.Err())\n\t\t\treturn\n\t\t}\n\t}\n\tclose(watchClosedCh)\n}", "func (h *allocHealthWatcherHook) init() error {\n\t// No need to watch health as it's already set\n\tif h.healthSetter.HasHealth() {\n\t\th.logger.Trace(\"not watching; already has health set\")\n\t\treturn nil\n\t}\n\n\ttg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup)\n\tif tg == nil {\n\t\treturn fmt.Errorf(\"task group %q does not exist in job %q\", h.alloc.TaskGroup, h.alloc.Job.ID)\n\t}\n\n\th.isDeploy = h.alloc.DeploymentID != \"\"\n\n\t// No need to watch allocs for deployments that rely on operators\n\t// manually setting health\n\tif h.isDeploy && (tg.Update.IsEmpty() || tg.Update.HealthCheck == structs.UpdateStrategyHealthCheck_Manual) {\n\t\treturn nil\n\t}\n\n\t// Define the deadline, health method, min healthy time from the\n\t// deployment if this is a deployment; otherwise from the migration\n\t// strategy.\n\tdeadline, useChecks, minHealthyTime := getHealthParams(time.Now(), tg, h.isDeploy)\n\n\t// Create a context that is canceled when the tracker should shutdown.\n\tctx := context.Background()\n\tctx, h.cancelFn = context.WithCancel(ctx)\n\n\th.logger.Trace(\"watching\", \"deadline\", deadline, \"checks\", useChecks, \"min_healthy_time\", minHealthyTime)\n\t// Create a new tracker, start it, and watch for health results.\n\ttracker := allochealth.NewTracker(\n\t\tctx, h.logger, h.alloc, h.listener, h.consul, h.checkStore, minHealthyTime, useChecks,\n\t)\n\ttracker.Start()\n\n\t// Create a new done chan and start watching for health updates\n\th.watchDone = make(chan struct{})\n\tgo h.watchHealth(ctx, deadline, tracker, h.watchDone)\n\treturn nil\n}", "func (mc *EndpointsWatcher) StartWatcher(quitCh chan struct{}, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor {\n\t\twatcher := 
cache.NewListWatchFromClient(mc.clientset.CoreV1().RESTClient(), mc.resourceStr, v1.NamespaceAll, fields.Everything())\n\t\tretryWatcher, err := watchClient.NewRetryWatcher(mc.lastRV, watcher)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Could not start watcher for k8s resource: \" + mc.resourceStr)\n\t\t}\n\n\t\tresCh := retryWatcher.ResultChan()\n\t\trunWatcher := true\n\t\tfor runWatcher {\n\t\t\tselect {\n\t\t\tcase <-quitCh:\n\t\t\t\treturn\n\t\t\tcase c := <-resCh:\n\t\t\t\ts, ok := c.Object.(*metav1.Status)\n\t\t\t\tif ok && s.Status == metav1.StatusFailure {\n\t\t\t\t\tif s.Reason == metav1.StatusReasonGone {\n\t\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"Requested resource version too old, no longer stored in K8S API\")\n\t\t\t\t\t\trunWatcher = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Ignore and let the retry watcher retry.\n\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).WithField(\"object\", c.Object).Info(\"Failed to read from k8s watcher\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Update the lastRV, so that if the watcher restarts, it starts at the correct resource version.\n\t\t\t\to, ok := c.Object.(*v1.Endpoints)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmc.lastRV = o.ObjectMeta.ResourceVersion\n\n\t\t\t\tpb, err := protoutils.EndpointsToProto(o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr := &storepb.K8SResource{\n\t\t\t\t\tResource: &storepb.K8SResource_Endpoints{\n\t\t\t\t\t\tEndpoints: pb,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tmsg := &K8sResourceMessage{\n\t\t\t\t\tObject: r,\n\t\t\t\t\tObjectType: mc.resourceStr,\n\t\t\t\t\tEventType: c.Type,\n\t\t\t\t}\n\t\t\t\tmc.updateCh <- msg\n\t\t\t}\n\t\t}\n\n\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"K8s watcher channel closed. 
Retrying\")\n\n\t\t// Wait 5 minutes before retrying, however if stop is called, just return.\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func watcher(configModel model.Config) {\n\t// Set the client variable\n\tconfig.Client = configModel.Client.Name\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\t\tlogs.INFO.Println(\"Modified file -> \", event.Name)\n\t\t\t\t\t// When the file name has not been defined, it is time to\n\t\t\t\t\t// use the SetFile() method to add a new file to read.\n\t\t\t\t\tif filename == \"\" {\n\t\t\t\t\t\tstore.SetFile(event.Name)\n\t\t\t\t\t\tfilename = event.Name\n\t\t\t\t\t}\n\t\t\t\t\tif filename != \"\" && filename != event.Name {\n\t\t\t\t\t\tlogs.INFO.Println(\"Reset seek\")\n\t\t\t\t\t\tseek = 0\n\t\t\t\t\t}\n\t\t\t\t\treadLines(event.Name)\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tlogs.CRITICAL.Println(\"Error on watcher: \", err)\n\t\t\t}\n\t\t}\n\t}()\n\terr = watcher.Add(configModel.Pathlog.Name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t<-done\n}", "func (w Watcher) Init(opts InitOptions) {\n\tvar err error\n\tif w.kube, err = initializeKubeClient(opts.KubeConfig); err != nil {\n\t\tfmt.Println(\"initializeKubeClient err:\", err)\n\t}\n\n\tw.queue = workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(\n\t\t100*time.Millisecond,\n\t\t5*time.Second,\n\t))\n\n\tw.store, w.controller = cache.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {\n\t\t\t\treturn w.kube.CoreV1().Secrets(core_v1.NamespaceAll).List(options)\n\t\t\t},\n\t\t\tWatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) 
{\n\t\t\t\treturn w.kube.CoreV1().Secrets(core_v1.NamespaceAll).Watch(options)\n\t\t\t},\n\t\t},\n\t\t&core_v1.Secret{},\n\t\t0,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: w.hostsEventHandlerAdd,\n\t\t\tUpdateFunc: w.hostsEventHandlerUpdate,\n\t\t\tDeleteFunc: w.hostsEventHandlerDelete,\n\t\t},\n\t)\n}", "func (pr *consulPipeRouter) watchAll() {\n\tdefer pr.wait.Done()\n\tpr.client.WatchPrefix(pr.prefix, &consulPipe{}, pr.quit, func(key string, value interface{}) bool {\n\t\tcp := *value.(*consulPipe)\n\t\tselect {\n\t\tcase pr.actorChan <- func() { pr.handlePipeUpdate(key, cp) }:\n\t\t\treturn true\n\t\tcase <-pr.quit:\n\t\t\treturn false\n\t\t}\n\t})\n}", "func (mc *PodWatcher) StartWatcher(quitCh chan struct{}, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor {\n\t\twatcher := cache.NewListWatchFromClient(mc.clientset.CoreV1().RESTClient(), mc.resourceStr, v1.NamespaceAll, fields.Everything())\n\t\tretryWatcher, err := watchClient.NewRetryWatcher(mc.lastRV, watcher)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Could not start watcher for k8s resource: \" + mc.resourceStr)\n\t\t}\n\n\t\tresCh := retryWatcher.ResultChan()\n\t\trunWatcher := true\n\t\tfor runWatcher {\n\t\t\tselect {\n\t\t\tcase <-quitCh:\n\t\t\t\treturn\n\t\t\tcase c := <-resCh:\n\t\t\t\ts, ok := c.Object.(*metav1.Status)\n\t\t\t\tif ok && s.Status == metav1.StatusFailure {\n\t\t\t\t\tif s.Reason == metav1.StatusReasonGone {\n\t\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"Requested resource version too old, no longer stored in K8S API\")\n\t\t\t\t\t\trunWatcher = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Ignore and let the retry watcher retry.\n\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).WithField(\"object\", c.Object).Info(\"Failed to read from k8s watcher\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Update the lastRV, so that if the watcher restarts, it starts at the correct resource version.\n\t\t\t\to, ok := 
c.Object.(*v1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmc.lastRV = o.ObjectMeta.ResourceVersion\n\n\t\t\t\tpb, err := protoutils.PodToProto(o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr := &storepb.K8SResource{\n\t\t\t\t\tResource: &storepb.K8SResource_Pod{\n\t\t\t\t\t\tPod: pb,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tmsg := &K8sResourceMessage{\n\t\t\t\t\tObject: r,\n\t\t\t\t\tObjectType: mc.resourceStr,\n\t\t\t\t\tEventType: c.Type,\n\t\t\t\t}\n\t\t\t\tmc.updateCh <- msg\n\t\t\t}\n\t\t}\n\n\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"K8s watcher channel closed. Retrying\")\n\n\t\t// Wait 5 minutes before retrying, however if stop is called, just return.\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (t *FakeObjectTracker) Start() error {\n\tif t.FakeWatcher == nil {\n\t\treturn errors.New(\"tracker has no watch support\")\n\t}\n\n\tfor event := range t.ResultChan() {\n\t\tevent := event.DeepCopy() // passing a deep copy to avoid race.\n\t\tt.dispatch(event)\n\t}\n\n\treturn nil\n}", "func (s) TestWatchCallAnotherWatch(t *testing.T) {\n\tapiClientCh, cleanup := overrideNewAPIClient()\n\tdefer cleanup()\n\n\tclient, err := New(clientOpts(testXDSServer, false))\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tc, err := apiClientCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"timeout when waiting for API client to be created: %v\", err)\n\t}\n\tapiClient := c.(*testAPIClient)\n\n\tclusterUpdateCh := testutils.NewChannel()\n\tfirstTime := true\n\tclient.WatchCluster(testCDSName, func(update ClusterUpdate, err error) {\n\t\tclusterUpdateCh.Send(clusterUpdateErr{u: update, err: err})\n\t\t// Calls another watch inline, to ensure there's 
deadlock.\n\t\tclient.WatchCluster(\"another-random-name\", func(ClusterUpdate, error) {})\n\n\t\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil {\n\t\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t\t}\n\t\tfirstTime = false\n\t})\n\tif _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil {\n\t\tt.Fatalf(\"want new watch to start, got error %v\", err)\n\t}\n\n\twantUpdate := ClusterUpdate{ServiceName: testEDSName}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate})\n\tif err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantUpdate2 := ClusterUpdate{ServiceName: testEDSName + \"2\"}\n\tclient.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2})\n\tif err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (rr *Registry) Watch(ctx context.Context) ([]*WatchEvent, <-chan *WatchEvent, error) {\n\trr.mu.Lock()\n\tdefer rr.mu.Unlock()\n\n\tprefix := rr.prefixPath()\n\n\tgetRes, err := rr.kv.Get(ctx, prefix, etcdv3.WithPrefix())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcurrentEvents := make([]*WatchEvent, 0, len(getRes.Kvs))\n\tfor _, kv := range getRes.Kvs {\n\t\treg, err := rr.unmarshalRegistration(kv)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\twev := &WatchEvent{\n\t\t\tKey: string(kv.Key),\n\t\t\tReg: reg,\n\t\t\tType: Create,\n\t\t}\n\t\tcurrentEvents = append(currentEvents, wev)\n\t}\n\n\t// Channel to publish registry changes.\n\twatchEvents := make(chan *WatchEvent)\n\n\t// Write a change or exit the watcher.\n\tput := func(we *WatchEvent) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase watchEvents <- we:\n\t\t}\n\t}\n\tputTerminalError := func(we *WatchEvent) {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\trecover()\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Minute):\n\t\t\tcase 
watchEvents <- we:\n\t\t\t}\n\t\t}()\n\t}\n\t// Create a watch-event from an event.\n\tcreateWatchEvent := func(ev *etcdv3.Event) *WatchEvent {\n\t\twev := &WatchEvent{Key: string(ev.Kv.Key)}\n\t\tif ev.IsCreate() {\n\t\t\twev.Type = Create\n\t\t} else if ev.IsModify() {\n\t\t\twev.Type = Modify\n\t\t} else {\n\t\t\twev.Type = Delete\n\t\t\t// Create base registration from just key.\n\t\t\treg := &Registration{}\n\t\t\tgraphType, graphName, err := rr.graphTypeAndNameFromKey(string(ev.Kv.Key))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\treg.Type = graphType\n\t\t\treg.Name = graphName\n\t\t\twev.Reg = reg\n\t\t\t// Need to return now because\n\t\t\t// delete events don't contain\n\t\t\t// any data to unmarshal.\n\t\t\treturn wev\n\t\t}\n\t\treg, err := rr.unmarshalRegistration(ev.Kv)\n\t\tif err != nil {\n\t\t\twev.Error = fmt.Errorf(\"%v: failed unmarshaling value: '%s'\", err, ev.Kv.Value)\n\t\t} else {\n\t\t\twev.Reg = reg\n\t\t}\n\t\treturn wev\n\t}\n\n\t// Watch deltas in etcd, with the give prefix, starting\n\t// at the revision of the get call above.\n\tdeltas := rr.client.Watch(ctx, prefix, etcdv3.WithPrefix(), etcdv3.WithRev(getRes.Header.Revision+1))\n\tgo func() {\n\t\tdefer close(watchEvents)\n\t\tfor {\n\t\t\tdelta, open := <-deltas\n\t\t\tif !open {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\tdefault:\n\t\t\t\t\tputTerminalError(&WatchEvent{Error: ErrWatchClosedUnexpectedly})\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif delta.Err() != nil {\n\t\t\t\tputTerminalError(&WatchEvent{Error: delta.Err()})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, event := range delta.Events {\n\t\t\t\tput(createWatchEvent(event))\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn currentEvents, watchEvents, nil\n}", "func (client *Client) Watch() <-chan AvailableServers {\n\tif len(client.discoveryConfigs) == 0 {\n\t\treturn nil\n\t}\n\n\tclient.once.Do(func() {\n\t\tfor _, sdConfig := range client.discoveryConfigs {\n\t\t\tgo func(sdConfig *DiscoveryConfig) 
{\n\t\t\t\tif err := sdConfig.plan.Run(client.consulAddr); err != nil {\n\t\t\t\t\tlog.Printf(\"Consul Watch Err: %+v\\n\", err)\n\t\t\t\t}\n\t\t\t}(sdConfig)\n\t\t}\n\t})\n\n\treturn client.watchChan\n}", "func (mc *ServiceWatcher) StartWatcher(quitCh chan struct{}, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor {\n\t\twatcher := cache.NewListWatchFromClient(mc.clientset.CoreV1().RESTClient(), mc.resourceStr, v1.NamespaceAll, fields.Everything())\n\t\tretryWatcher, err := watchClient.NewRetryWatcher(mc.lastRV, watcher)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Could not start watcher for k8s resource: \" + mc.resourceStr)\n\t\t}\n\n\t\tresCh := retryWatcher.ResultChan()\n\t\trunWatcher := true\n\t\tfor runWatcher {\n\t\t\tselect {\n\t\t\tcase <-quitCh:\n\t\t\t\treturn\n\t\t\tcase c := <-resCh:\n\t\t\t\ts, ok := c.Object.(*metav1.Status)\n\t\t\t\tif ok && s.Status == metav1.StatusFailure {\n\t\t\t\t\tif s.Reason == metav1.StatusReasonGone {\n\t\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"Requested resource version too old, no longer stored in K8S API\")\n\t\t\t\t\t\trunWatcher = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Ignore and let the retry watcher retry.\n\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).WithField(\"object\", c.Object).Info(\"Failed to read from k8s watcher\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Update the lastRV, so that if the watcher restarts, it starts at the correct resource version.\n\t\t\t\to, ok := c.Object.(*v1.Service)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmc.lastRV = o.ObjectMeta.ResourceVersion\n\n\t\t\t\tpb, err := protoutils.ServiceToProto(o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr := &storepb.K8SResource{\n\t\t\t\t\tResource: &storepb.K8SResource_Service{\n\t\t\t\t\t\tService: pb,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tmsg := &K8sResourceMessage{\n\t\t\t\t\tObject: r,\n\t\t\t\t\tObjectType: mc.resourceStr,\n\t\t\t\t\tEventType: 
c.Type,\n\t\t\t\t}\n\t\t\t\tmc.updateCh <- msg\n\t\t\t}\n\t\t}\n\n\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"K8s watcher channel closed. Retrying\")\n\n\t\t// Wait 5 minutes before retrying, however if stop is called, just return.\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func main() {\n\n\t// stores global configuration\n\tconfig := config.New()\n\n\t// utility module for creating sco related files/directories\n\tutil := util.New(config)\n\n\t// stores e.g. mapping between current git commit (HEAD) and sco-session\n\tgitConfig := gitconfig.New(config)\n\n\t// observes current working tree\n\twktreeObserver := wktreeobserver.New(config, gitConfig)\n\n\t// Channel from filewatcher to reporter\n\tfileEventChannel := make(chan *fw.FileEvent)\n\n\t// Channel from reporter to publisher\n\tfileChangedMessageChannel := make(chan *publisher.Message)\n\n\t// file watcher -> reports file event changes into fileEventChannel\n\tfw := fw.New(config, fileEventChannel)\n\n\t// listen on fileEventChannel -> determines the diff and updates the current sco-wktree patch\n\tgitReporter := gitReporter.New(config, gitConfig, util, wktreeObserver, fileEventChannel, fileChangedMessageChannel)\n\n\t// listen\n\tpublisher := publisher.New(config, gitConfig, util, fileChangedMessageChannel)\n\n\tpublisher.Start()\n\n\twktreeObserver.Start()\n\n\tgitReporter.Start()\n\n\tfw.Start()\n\n\t// TODO do not rely on fw.Start() to block use channel ...\n}", "func (manager *Manager) Setup(viewsManager *views.Manager) {\n\tmanager.viewsManager = viewsManager\n\tmanager.Mailbox = makeMailbox(manager)\n\n\tgo func() {\n\t\tt1 := time.Now()\n\n\t\tfor {\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\tt2 := time.Now()\n\t\t\tdt := t2.Sub(t1)\n\t\t\tt1 = t2\n\n\t\t\tmanager.update(dt.Seconds())\n\t\t}\n\t}()\n}", "func (m *Master) watch(taskName, taskType string) {\n\tctx, cancel := 
context.WithCancel(context.Background())\n\t// Start the timer to keep track of the task.\n\tgo func(ctx context.Context, timeout chan struct{ taskName, taskType string }, task string) {\n\t\tt := time.NewTimer(taskTimeout)\n\t\tdefer t.Stop()\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\ttimeout <- struct {\n\t\t\t\ttaskName string\n\t\t\t\ttaskType string\n\t\t\t}{taskName, taskType}\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}(ctx, m.timeout, taskName)\n\n\t// Append to the cancelers slice.\n\tm.cancelerMutex.Lock()\n\tdefer m.cancelerMutex.Unlock()\n\tm.cancelers = append(m.cancelers, cancel)\n}", "func (db *DB) watchLoop(ctx context.Context, ch chan struct{}) {\n\tvar psConn redis.Conn\n\teb := backoff.NewExponentialBackOff()\n\tfor {\n\t\tpsConn = db.pool.Get()\n\t\tpsc := redis.PubSubConn{Conn: psConn}\n\t\tif err := psc.PSubscribe(\"__keyspace*__:\" + db.versionSet); err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"failed to subscribe to version set channel\")\n\t\t\tpsConn.Close()\n\t\t\treturn\n\t\t}\n\t\tif err := db.doNotifyLoop(ctx, ch, &psc, eb); err != nil {\n\t\t\tpsConn.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (mgr bucketsWatcher) Watch() {\n\twatchChan := mgr.client.Watch(context.Background(), mgr.bucketPathPrefix, etcd.WithPrefix(), etcd.WithPrevKV())\n\tgo func() {\n\t\tfor {\n\t\t\tresp, ok := <-watchChan\n\t\t\tif !ok || resp.Err() != nil {\n\t\t\t\tif ok {\n\t\t\t\t\tglog.Errorf(\"Watching channel returns: %v\", resp.Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Warningf(\"Watching channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, evt := range resp.Events {\n\t\t\t\tswitch evt.Type {\n\t\t\t\tcase mvccpb.PUT:\n\t\t\t\t\tbkt, err := model.NewBucketFromBytes((*evt.Kv).Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tmgr.onBucketChanged(*bkt)\n\t\t\t\tcase mvccpb.DELETE:\n\t\t\t\t\tglog.V(3).Infof(\"deleted key: %s\", string((*evt.PrevKv).Key))\n\t\t\t\t\tglog.V(3).Infof(\"deleted value: %s\", 
string((*evt.PrevKv).Value))\n\t\t\t\t\tbkt, err := model.NewBucketFromBytes((*evt.PrevKv).Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(2).Infof(\"Delete ACLs of bucket: %v\", bkt)\n\t\t\t\t\tmgr.onBucketDeleted(*bkt)\n\n\t\t\t\tdefault:\n\t\t\t\t\tglog.V(2).Infof(\"Unknown Etcd event: %v\", *evt)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif glog.V(3) {\n\t\t\t\tglog.Info(\"Bucket store: \")\n\t\t\t\tfor name, bkt := range mgr.bucketMap {\n\t\t\t\t\tglog.Infof(\"%s: %v\", name, bkt)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *ChangeWatcher) Start() {\n\n c.in = make(chan *RoomChange)\n c.out = make(chan *RoomChange)\n c.buffer = nil\n go func() {\n\n for c.in != nil {\n select {\n case in, ok := <-c.in:\n if !ok {\n c.in = nil\n } else {\n c.buffer = append(c.buffer, in)\n }\n\n case c.outC() <- c.curV():\n // Remove element from buffer\n c.buffer = c.buffer[1:]\n }\n }\n\n close(c.out)\n c.buffer = nil\n\n }()\n}", "func configWatcherHandler(logger *zap.SugaredLogger, configMap *corev1.ConfigMap) {\n\t// Set the package variable to indicate that the test watcher was called\n\tsetWatchedMap(configMap)\n}", "func NewWatcher(addr, passwd string, cmd chan<- *Cmd, log chan<- interface{}) (w *Watcher) {\n w = &Watcher{\n addr: addr,\n passwd: passwd,\n cmd: cmd,\n log: log,\n done: make(chan bool),\n }\n go w.run()\n return\n}", "func (o *WatchClient) eventWatcher(\n\tctx context.Context,\n\tparameters WatchParameters,\n\tevaluateChangesHandler evaluateChangesFunc,\n\tprocessEventsHandler processEventsFunc,\n\tcomponentStatus ComponentStatus,\n) error {\n\n\tvar (\n\t\tdevfilePath = odocontext.GetDevfilePath(ctx)\n\t\tpath = filepath.Dir(devfilePath)\n\t\tcomponentName = odocontext.GetComponentName(ctx)\n\t\tappName = odocontext.GetApplication(ctx)\n\t\tout = parameters.StartOptions.Out\n\t)\n\n\tvar events []fsnotify.Event\n\n\t// sourcesTimer helps collect multiple events that happen in a quick succession. 
We start with 1ms as we don't care much\n\t// at this point. In the select block, however, every time we receive an event, we reset the sourcesTimer to watch for\n\t// 100ms since receiving that event. This is done because a single filesystem event by the user triggers multiple\n\t// events for fsnotify. It's a known-issue, but not really bug. For more info look at below issues:\n\t// - https://github.com/fsnotify/fsnotify/issues/122\n\t// - https://github.com/fsnotify/fsnotify/issues/344\n\tsourcesTimer := time.NewTimer(time.Millisecond)\n\t<-sourcesTimer.C\n\n\t// devfileTimer has the same usage as sourcesTimer, for file events coming from devfileWatcher\n\tdevfileTimer := time.NewTimer(time.Millisecond)\n\t<-devfileTimer.C\n\n\t// deployTimer has the same usage as sourcesTimer, for events coming from watching Deployments, from deploymentWatcher\n\tdeployTimer := time.NewTimer(time.Millisecond)\n\t<-deployTimer.C\n\n\tpodsPhases := NewPodPhases()\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-o.sourcesWatcher.Events:\n\t\t\tevents = append(events, event)\n\t\t\t// We are waiting for more events in this interval\n\t\t\tsourcesTimer.Reset(100 * time.Millisecond)\n\n\t\tcase <-sourcesTimer.C:\n\t\t\t// timer has fired\n\t\t\tif !componentCanSyncFile(componentStatus.GetState()) {\n\t\t\t\tklog.V(4).Infof(\"State of component is %q, don't sync sources\", componentStatus.GetState())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar changedFiles, deletedPaths []string\n\t\t\tif !o.forceSync {\n\t\t\t\t// first find the files that have changed (also includes the ones newly created) or deleted\n\t\t\t\tchangedFiles, deletedPaths = evaluateChangesHandler(events, path, parameters.StartOptions.IgnorePaths, o.sourcesWatcher)\n\t\t\t\t// process the changes and sync files with remote pod\n\t\t\t\tif len(changedFiles) == 0 && len(deletedPaths) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcomponentStatus.SetState(StateSyncOutdated)\n\t\t\tfmt.Fprintf(out, \"Pushing 
files...\\n\\n\")\n\t\t\terr := processEventsHandler(ctx, parameters, changedFiles, deletedPaths, &componentStatus)\n\t\t\to.forceSync = false\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// empty the events to receive new events\n\t\t\tif componentStatus.GetState() == StateReady {\n\t\t\t\tevents = []fsnotify.Event{} // empty the events slice to capture new events\n\t\t\t}\n\n\t\tcase watchErr := <-o.sourcesWatcher.Errors:\n\t\t\treturn watchErr\n\n\t\tcase key := <-o.keyWatcher:\n\t\t\tif key == 'p' {\n\t\t\t\to.forceSync = true\n\t\t\t\tsourcesTimer.Reset(100 * time.Millisecond)\n\t\t\t}\n\n\t\tcase <-parameters.StartOptions.PushWatcher:\n\t\t\to.forceSync = true\n\t\t\tsourcesTimer.Reset(100 * time.Millisecond)\n\n\t\tcase ev := <-o.deploymentWatcher.ResultChan():\n\t\t\tswitch obj := ev.Object.(type) {\n\t\t\tcase *appsv1.Deployment:\n\t\t\t\tklog.V(4).Infof(\"deployment watcher Event: Type: %s, name: %s, rv: %s, generation: %d, pods: %d\\n\",\n\t\t\t\t\tev.Type, obj.GetName(), obj.GetResourceVersion(), obj.GetGeneration(), obj.Status.ReadyReplicas)\n\t\t\t\tif obj.GetGeneration() > o.deploymentGeneration || obj.Status.ReadyReplicas != o.readyReplicas {\n\t\t\t\t\to.deploymentGeneration = obj.GetGeneration()\n\t\t\t\t\to.readyReplicas = obj.Status.ReadyReplicas\n\t\t\t\t\tdeployTimer.Reset(300 * time.Millisecond)\n\t\t\t\t}\n\n\t\t\tcase *metav1.Status:\n\t\t\t\tklog.V(4).Infof(\"Status: %+v\\n\", obj)\n\t\t\t}\n\n\t\tcase <-deployTimer.C:\n\t\t\terr := processEventsHandler(ctx, parameters, nil, nil, &componentStatus)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase <-o.devfileWatcher.Events:\n\t\t\tdevfileTimer.Reset(100 * time.Millisecond)\n\n\t\tcase <-devfileTimer.C:\n\t\t\tfmt.Fprintf(out, \"Updating Component...\\n\\n\")\n\t\t\terr := processEventsHandler(ctx, parameters, nil, nil, &componentStatus)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase ev := <-o.podWatcher.ResultChan():\n\t\t\tswitch ev.Type 
{\n\t\t\tcase watch.Deleted:\n\t\t\t\tpod, ok := ev.Object.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn errors.New(\"unable to decode watch event\")\n\t\t\t\t}\n\t\t\t\tpodsPhases.Delete(out, pod)\n\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\tpod, ok := ev.Object.(*corev1.Pod)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn errors.New(\"unable to decode watch event\")\n\t\t\t\t}\n\t\t\t\tpodsPhases.Add(out, pod.GetCreationTimestamp(), pod)\n\t\t\t}\n\n\t\tcase ev := <-o.warningsWatcher.ResultChan():\n\t\t\tswitch kevent := ev.Object.(type) {\n\t\t\tcase *corev1.Event:\n\t\t\t\tpodName := kevent.InvolvedObject.Name\n\t\t\t\tselector := labels.GetSelector(componentName, appName, labels.ComponentDevMode, true)\n\t\t\t\tmatching, err := o.kubeClient.IsPodNameMatchingSelector(ctx, podName, selector)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif matching {\n\t\t\t\t\tlog.Fwarning(out, kevent.Message)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase watchErr := <-o.devfileWatcher.Errors:\n\t\t\treturn watchErr\n\n\t\tcase <-ctx.Done():\n\t\t\tklog.V(2).Info(\"Dev mode interrupted by user\")\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (m *Monitor) Start() {\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-m.ticker.C:\n\t\t\t\tm.StatPB <- m.ReadStat()\n\n\t\t\tcase <-m.Ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (c *QueueController) startUpdateTickers() {\n\tif c.updateCh == nil {\n\t\treturn\n\t}\n\n\tfor t, f := range c.filerMap {\n\t\td := f.Filer.UpdateInterval()\n\t\tif d == snapshot.NoDuration {\n\t\t\tcontinue\n\t\t}\n\n\t\tgo func(rt runner.RunType, td time.Duration) {\n\t\t\tticker := time.NewTicker(td)\n\t\tLoop:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tc.updateLock.Lock()\n\t\t\t\t\tc.updateReq[rt] = true\n\t\t\t\t\tc.updateLock.Unlock()\n\n\t\t\t\t\tc.updateCh <- nil\n\t\t\t\tcase <-c.cancelTimerCh:\n\t\t\t\t\tticker.Stop()\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}(t, d)\n\t}\n}", "func 
InitializeWatcher(job func()) (*FileWatch, error) {\n\tlog.Trace(\">>>>> InitializeWatcher\")\n\tdefer log.Trace(\"<<<<< InitializeWatcher\")\n\twatcher, err := notify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Initialization\n\twatch := &FileWatch{\n\t\twatchStop: make(chan struct{}),\n\t\twatchList: watcher,\n\t\twatchRun: job,\n\t}\n\twatch.wg.Add(1)\n\n\t// Create a channel for OS signal\n\tsigc := make(chan os.Signal)\n\t// List of os signals to monitor.\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGABRT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGKILL,\n\t)\n\t// Create a thread to monitor the os signals.\n\tgo func() {\n\t\tselect {\n\t\tcase sig := <-sigc:\n\t\t\tlog.Infof(\"Received %s os signal. Exiting...\\n\", sig)\n\t\t\t// Call stopWatcher() for graceful exit of watcher.\n\t\t\twatch.stopWatcher()\n\t\t\twatch.wg.Wait()\n\t\t}\n\t}()\n\n\treturn watch, nil\n}", "func StartWatch(tank operator.Tank, name string) {\n\tt, ok := tank.(*mongoTank)\n\tif !ok {\n\t\treturn\n\t}\n\n\tlistenerPoolLock.Lock()\n\tlistener := new(opLogListener).init(t, name)\n\tif listenerPool == nil {\n\t\tlistenerPool = make(map[string]*opLogListener)\n\t}\n\tif listenerPool[name] != nil {\n\t\tblog.Errorf(\"watcherName duplicated: %s\", name)\n\t\tlistenerPoolLock.Unlock()\n\t\treturn\n\t}\n\tlistenerPool[name] = listener\n\tlistenerPoolLock.Unlock()\n\tlistener.listen()\n}", "func (pb *Pubsub) run() {\n\tfor {\n\t\tselect {\n\t\tcase t := <-pb.updateCh.Get():\n\t\t\tpb.updateCh.Load()\n\t\t\tif pb.done.HasFired() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpb.callCallback(t.(*watcherInfoWithUpdate))\n\t\tcase <-pb.done.Done():\n\t\t\treturn\n\t\t}\n\t}\n}", "func init() {\n\tworkc = make(chan image.Rectangle, 4)\n\tfor i := 0; i < 4; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase r := <-workc:\n\t\t\t\t\tgwind.Upload(r.Min, gbuf, r)\n\t\t\t\t\twg.Done()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}", "func startWatchListener(configFilePath 
string) {\n\tconfigFile := filepath.Clean(configFilePath)\n\tconfigDir, _ := filepath.Split(configFile)\n\trealConfigFile, _ := filepath.EvalSymlinks(configFilePath)\n\twatcher.Add(configDir)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event, ok := <-watcher.Events:\n\t\t\t\tif !ok { // 'Events' channel is closed\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tonWatchEvent(event, configFilePath, configFile, realConfigFile)\n\n\t\t\tcase err, ok := <-watcher.Errors:\n\t\t\t\tif ok { // 'Errors' channel is not closed\n\t\t\t\t\tlog.Printf(\"watcher error: %v\\n\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error {\n\tvar err error\n\tretry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)\n\n\tfor {\n\t\t// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors\n\t\tselect {\n\t\tcase <-stopCh:\n\t\t\treturn nil\n\t\tdefault:\n\t\t}\n\n\t\t// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent\n\t\tstart := r.clock.Now()\n\n\t\tif w == nil {\n\t\t\ttimeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))\n\t\t\toptions := metav1.ListOptions{\n\t\t\t\tResourceVersion: r.LastSyncResourceVersion(),\n\t\t\t\t// We want to avoid situations of hanging watchers. 
Stop any watchers that do not\n\t\t\t\t// receive any events within the timeout window.\n\t\t\t\tTimeoutSeconds: &timeoutSeconds,\n\t\t\t\t// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.\n\t\t\t\t// Reflector doesn't assume bookmarks are returned at all (if the server do not support\n\t\t\t\t// watch bookmarks, it will ignore this field).\n\t\t\t\tAllowWatchBookmarks: true,\n\t\t\t}\n\n\t\t\tw, err = r.listerWatcher.Watch(options)\n\t\t\tif err != nil {\n\t\t\t\tif canRetry := isWatchErrorRetriable(err); canRetry {\n\t\t\t\t\tklog.V(4).Infof(\"%s: watch of %v returned %v - backing off\", r.name, r.typeDescription, err)\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopCh:\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase <-r.backoffManager.Backoff().C():\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh)\n\t\t// Ensure that watch will not be reused across iterations.\n\t\tw.Stop()\n\t\tw = nil\n\t\tretry.After(err)\n\t\tif err != nil {\n\t\t\tif err != errorStopRequested {\n\t\t\t\tswitch {\n\t\t\t\tcase isExpiredError(err):\n\t\t\t\t\t// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already\n\t\t\t\t\t// has a semantic that it returns data at least as fresh as provided RV.\n\t\t\t\t\t// So first try to LIST with setting RV to resource version of last observed object.\n\t\t\t\t\tklog.V(4).Infof(\"%s: watch of %v closed with: %v\", r.name, r.typeDescription, err)\n\t\t\t\tcase apierrors.IsTooManyRequests(err):\n\t\t\t\t\tklog.V(2).Infof(\"%s: watch of %v returned 429 - backing off\", r.name, r.typeDescription)\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-stopCh:\n\t\t\t\t\t\treturn nil\n\t\t\t\t\tcase <-r.backoffManager.Backoff().C():\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\tcase apierrors.IsInternalError(err) && 
retry.ShouldRetry():\n\t\t\t\t\tklog.V(2).Infof(\"%s: retrying watch of %v internal error: %v\", r.name, r.typeDescription, err)\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tklog.Warningf(\"%s: watch of %v ended with: %v\", r.name, r.typeDescription, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (w *Watcher) doWatch(d time.Duration) {\n\tticker := time.NewTicker(d)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-w.closed:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tcurrFileList := w.listForAll()\n\t\t\tw.pollEvents(currFileList)\n\n\t\t\t// update file list\n\t\t\tw.mu.Lock()\n\t\t\tw.files = currFileList\n\t\t\tw.mu.Unlock()\n\t\t}\n\t}\n}", "func WatchCommand(args Args, done chan bool) {\n\tfor _, client := range args.ThemeClients {\n\t\tconfig := client.GetConfiguration()\n\t\tclient.Message(\"Spawning %d workers for %s\", config.Concurrency, args.Domain)\n\t\tassetEvents := client.NewFileWatcher(args.Directory, args.NotifyFile)\n\t\tfor i := 0; i < config.Concurrency; i++ {\n\t\t\tgo spawnWorker(assetEvents, client)\n\t\t\tclient.Message(\"%s Worker #%d ready to upload local changes\", config.Domain, i)\n\t\t}\n\t}\n}", "func (di *directoryInfo) watch() {\n\tdi.root.Watch()\n\n\tdi.removeTicker = time.NewTicker(2 * time.Second)\n\n\tdefer func() {\n\t\tdi.root.Close()\n\t}()\n\n\t// event listener\n\tfor {\n\t\tselect {\n\t\tcase e := <-di.root.Ch:\n\t\t\t// file event\n\t\t\tdi.updatePsiMap(e)\n\t\tcase <-di.removeTicker.C:\n\t\t}\n\t}\n}", "func exampleNewWatcher(t *testing.T) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer watcher.Close()\n\n\terr = watcher.Add(\".\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tsigs := make(chan os.Signal)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\nMAIN:\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-watcher.Events:\n\t\t\tif !ok { // when close channel ex) close(watcher.Events)\n\t\t\t\tbreak 
MAIN\n\t\t\t}\n\t\t\tlog.Println(\"event:\", event)\n\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\tlog.Println(\"modified file:\", event.Name)\n\t\t\t}\n\n\t\tcase err, ok := <-watcher.Errors:\n\t\t\tif !ok { // when close channel ex) close(watcher.Errors)\n\t\t\t\tbreak MAIN\n\t\t\t}\n\t\t\tlog.Println(\"error:\", err)\n\n\t\tcase signal := <-sigs:\n\t\t\tif signal == os.Interrupt {\n\t\t\t\tbreak MAIN\n\t\t\t\t// close(watcher.Events) or close(watcher.Errors)\n\t\t\t}\n\t\t}\n\t}\n}", "func (d *Discovery) watchFiles() {\n\tif d.watcher == nil {\n\t\tpanic(\"no watcher configured\")\n\t}\n\tfor _, p := range d.paths {\n\t\tif dir, _ := filepath.Split(p); dir != \"\" {\n\t\t\tp = dir\n\t\t} else {\n\t\t\tp = \"./\"\n\t\t}\n\t\tif err := d.watcher.Add(p); err != nil {\n\t\t\tlevel.Error(d.logger).Log(\"msg\", \"Error adding file watch\", \"path\", p, \"err\", err)\n\t\t}\n\t}\n}", "func (etcd *EtcdSource) watch(watchChan chan Node) {\n\tdefer close(watchChan)\n\n\twatcher := etcd.keysAPI.Watcher(etcd.path(), &client.WatcherOptions{AfterIndex: etcd.syncIndex, Recursive: true})\n\n\tfor {\n\t\tif response, err := watcher.Next(context.Background()); err != nil {\n\t\t\terr = fixupClusterError(err)\n\t\t\tlog.Printf(\"config:EtcdSource.watch: %v\", err)\n\t\t\treturn\n\t\t} else if node, err := etcd.syncNode(response.Action, response.Node); err != nil {\n\t\t\tlog.Printf(\"config:EtcdSource.watch %#v: syncNode: %s\", response, err)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Printf(\"config:EtcdSource.watch: %v %v\", response.Action, node)\n\t\t\twatchChan <- node\n\t\t}\n\t}\n}", "func (w *Watcher) Watch() {\n\tch := make(chan struct{})\n\tgo func(stopCh <-chan struct{}) {\n\t\tw.Informer.Informer().AddEventHandler(w.ResourceEventHandlerFuncs)\n\t\tw.Informer.Informer().Run(stopCh)\n\t}(ch)\n\t<-w.StopChannel\n\tclose(ch)\n\tlogrus.Info(\"stoping watcher for \", w.GroupVersionResource)\n}" ]
[ "0.669185", "0.6640005", "0.6364289", "0.63083565", "0.6291414", "0.6149953", "0.6108959", "0.6095144", "0.6090936", "0.606489", "0.60646594", "0.60448515", "0.6028938", "0.6023171", "0.6018793", "0.6009195", "0.5954062", "0.59414184", "0.59298897", "0.5907481", "0.5880078", "0.5875268", "0.5873275", "0.5859277", "0.5857307", "0.58414674", "0.58396834", "0.5836075", "0.5810858", "0.5800386", "0.57953537", "0.578899", "0.5779406", "0.5777314", "0.57606286", "0.57420784", "0.5740841", "0.5736855", "0.5718782", "0.5713093", "0.57039815", "0.57027644", "0.5701166", "0.569298", "0.5652018", "0.5636739", "0.5632267", "0.5629984", "0.5619972", "0.5612528", "0.5605621", "0.5600877", "0.5598344", "0.5594056", "0.5590264", "0.5588868", "0.55874324", "0.5579902", "0.5578793", "0.55785155", "0.5571988", "0.5569241", "0.55677056", "0.5564567", "0.55625504", "0.556191", "0.5557135", "0.55564016", "0.5545122", "0.5540534", "0.55384135", "0.55349255", "0.5533466", "0.5527412", "0.55252445", "0.5523291", "0.5522113", "0.55202043", "0.55190974", "0.5516404", "0.5504123", "0.5504104", "0.54958606", "0.54850537", "0.54824066", "0.5478208", "0.54767656", "0.54667753", "0.5463037", "0.54559606", "0.5455352", "0.5448874", "0.54320556", "0.5431828", "0.5431357", "0.54294336", "0.542214", "0.5419179", "0.54107606", "0.54094756" ]
0.5834932
28
Take care of ingress events from the ingress watch
func (c *Cluster) handleIngressEvent(event interface{}, action watch.EventType) { eventObj, ok := event.(*v1beta1extensionsapi.Ingress) if !ok { if action != watch.Error { log.WithFields(log.Fields{ "cluster": c.config.Name, }).Error("Got event in ingress handler which contains no ingress") } else { log.WithFields(log.Fields{ "cluster": c.config.Name, "event": event, }).Error("Some other error") } return } c.latestIngressVersion = eventObj.ResourceVersion switch action { case watch.Deleted: event := state.IngressChange{ Ingress: state.K8RouterIngress{ Name: eventObj.Namespace + "-" + eventObj.Name, Hosts: []string{}, }, Created: false, } delete(c.knownIngresses, event.Ingress.Name) c.ingressEvents <- event case watch.Modified: case watch.Added: obj := state.K8RouterIngress{ Name: eventObj.Namespace + "-" + eventObj.Name, Hosts: []string{}, } for _, rule := range eventObj.Spec.Rules { obj.Hosts = append(obj.Hosts, rule.Host) } myEvent := state.IngressChange{ Ingress: obj, Created: false, } val, _ := c.knownIngresses[obj.Name] isEquivalent := ok && state.IsIngressEquivalent(&obj, &val) if action == watch.Modified && !isEquivalent { c.ingressEvents <- myEvent } if !isEquivalent { myEvent.Created = true c.ingressEvents <- myEvent } c.knownIngresses[obj.Name] = obj } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (op *Operator) initIngressCRDWatcher() cache.Controller {\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(apiv1.NamespaceAll).List(metav1.ListOptions{})\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(apiv1.NamespaceAll).Watch(metav1.ListOptions{})\n\t\t},\n\t}\n\t_, informer := cache.NewInformer(lw,\n\t\t&tapi.Ingress{},\n\t\top.Opt.ResyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*tapi.Ingress); ok {\n\t\t\t\t\tlog.Infof(\"%s %s@%s added\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlog.Infof(\"%s %s@%s does not match ingress class\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif err := engress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\t\tengress.ObjectReference(),\n\t\t\t\t\t\t\tapiv1.EventTypeWarning,\n\t\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\top.AddEngress(engress)\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\toldEngress, ok := old.(*tapi.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Errorln(errors.New(\"Invalid Ingress object\").Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnewEngress, ok := new.(*tapi.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Errorln(errors.New(\"Invalid Ingress object\").Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif changed, _ := oldEngress.HasChanged(*newEngress); !changed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"%s %s@%s has changed\", newEngress.GroupVersionKind(), newEngress.Name, newEngress.Namespace)\n\t\t\t\tif err := 
newEngress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\tnewEngress.ObjectReference(),\n\t\t\t\t\t\tapiv1.EventTypeWarning,\n\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\top.UpdateEngress(oldEngress, newEngress)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*tapi.Ingress); ok {\n\t\t\t\t\tlog.Infof(\"%s %s@%s deleted\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlog.Infof(\"%s %s@%s does not match ingress class\", engress.GroupVersionKind(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\top.DeleteEngress(engress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\treturn informer\n}", "func (x *Kubernetes) ProcessIngress(e transistor.Event) {\n\tlog.Println(\"processing ingress\")\n\n\tif e.Matches(\"project:kubernetes:ingress\") {\n\t\tvar err error\n\t\tswitch e.Action {\n\t\tcase transistor.GetAction(\"delete\"):\n\t\t\terr = x.deleteIngress(e)\n\t\tcase transistor.GetAction(\"create\"):\n\t\t\terr = x.createIngress(e)\n\t\tcase transistor.GetAction(\"update\"):\n\t\t\terr = x.createIngress(e)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tx.sendErrorResponse(e, err.Error())\n\t\t}\n\t}\n\n\treturn\n}", "func (op *Operator) initIngressCRDWatcher() cache.Controller {\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(op.Opt.WatchNamespace()).List(metav1.ListOptions{})\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn op.VoyagerClient.Ingresses(op.Opt.WatchNamespace()).Watch(metav1.ListOptions{})\n\t\t},\n\t}\n\t_, informer := 
cache.NewInformer(lw,\n\t\t&api.Ingress{},\n\t\top.Opt.ResyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tctx := etx.Background()\n\t\t\t\tlogger := log.New(ctx)\n\t\t\t\tif engress, ok := obj.(*api.Ingress); ok {\n\t\t\t\t\tengress.Migrate()\n\t\t\t\t\tlogger.Infof(\"%s %s@%s added\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlogger.Infof(\"%s %s@%s does not match ingress class\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif err := engress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\t\tengress.ObjectReference(),\n\t\t\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\top.AddEngress(ctx, engress)\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tctx := etx.Background()\n\t\t\t\tlogger := log.New(ctx)\n\t\t\t\toldEngress, ok := old.(*api.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Errorln(\"Invalid Ingress object\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldEngress.Migrate()\n\t\t\t\tnewEngress, ok := new.(*api.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Errorln(\"Invalid Ingress object\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnewEngress.Migrate()\n\t\t\t\tif changed, _ := oldEngress.HasChanged(*newEngress); !changed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdiff := cmp.Diff(oldEngress, newEngress,\n\t\t\t\t\tcmp.Comparer(func(x, y resource.Quantity) bool {\n\t\t\t\t\t\treturn x.Cmp(y) == 0\n\t\t\t\t\t}),\n\t\t\t\t\tcmp.Comparer(func(x, y *metav1.Time) bool {\n\t\t\t\t\t\tif x == nil && y == nil {\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif x != nil && y != nil {\n\t\t\t\t\t\t\treturn x.Time.Equal(y.Time)\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn 
false\n\t\t\t\t\t}))\n\t\t\t\tlogger.Infof(\"%s %s@%s has changed. Diff: %s\", newEngress.APISchema(), newEngress.Name, newEngress.Namespace, diff)\n\t\t\t\tif err := newEngress.IsValid(op.Opt.CloudProvider); err != nil {\n\t\t\t\t\top.recorder.Eventf(\n\t\t\t\t\t\tnewEngress.ObjectReference(),\n\t\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\t\teventer.EventReasonIngressInvalid,\n\t\t\t\t\t\t\"Reason: %s\",\n\t\t\t\t\t\terr.Error(),\n\t\t\t\t\t)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\top.UpdateEngress(ctx, oldEngress, newEngress)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif engress, ok := obj.(*api.Ingress); ok {\n\t\t\t\t\tengress.Migrate()\n\t\t\t\t\tctx := etx.Background()\n\t\t\t\t\tlogger := log.New(ctx)\n\t\t\t\t\tlogger.Infof(\"%s %s@%s deleted\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\tif !engress.ShouldHandleIngress(op.Opt.IngressClass) {\n\t\t\t\t\t\tlogger.Infof(\"%s %s@%s does not match ingress class\", engress.APISchema(), engress.Name, engress.Namespace)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\top.DeleteEngress(ctx, engress)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\treturn informer\n}", "func WatchIngress(client *kubernetes.Clientset, ic IngressCache, l log.Logger) cache.SharedInformer {\n\tlw := cache.NewListWatchFromClient(client.ExtensionsV1beta1().RESTClient(), \"ingresses\", v1.NamespaceAll, fields.Everything())\n\tiw := cache.NewSharedInformer(lw, new(v1beta1.Ingress), 30*time.Minute)\n\tiw.AddEventHandler(&IngressWatchAdapter{\n\t\tIngressCache: ic,\n\t\tLogger: l.WithPrefix(\"IngressWatchAdapter\"),\n\t})\n\treturn iw\n}", "func CreateIngressHandlers(lbc *controller.LoadBalancerController) cache.ResourceEventHandlerFuncs {\n\treturn cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tingress := obj.(*extensions.Ingress)\n\t\t\tif !lbc.IsNginxIngress(ingress) {\n\t\t\t\tlog.Printf(\"Ignoring Ingress %v based on Annotation %v\\n\", ingress.Name, 
lbc.GetIngressClassKey())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Adding Ingress: %v\", ingress.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tingress, isIng := obj.(*extensions.Ingress)\n\t\t\tif !isIng {\n\t\t\t\tdeletedState, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error received unexpected object: %v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tingress, ok = deletedState.Obj.(*extensions.Ingress)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Printf(\"Error DeletedFinalStateUnknown contained non-Ingress object: %v\", deletedState.Obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !lbc.IsNginxIngress(ingress) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"Removing Ingress: %v\", ingress.Name)\n\t\t\tlbc.AddSyncQueue(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tglog.V(3).Infof(\"Endpoints %v changed, syncing\", cur.(*api_v1.Endpoints).Name)\n\t\t\t\tlbc.AddSyncQueue(cur)\n\t\t\t}\n\t\t},\n\t}\n}", "func ingressName(instance *v1alpha1.Nuxeo, nodeSet v1alpha1.NodeSet) string {\n\treturn instance.Name + \"-\" + nodeSet.Name + \"-ingress\"\n}", "func (i *IngressController) updateIngress(ctx context.Context, ingress *networkingv1.Ingress) error {\n\thost := i.staticAddress\n\tif host == \"\" {\n\t\thosts := make(map[string]uint)\n\t\tfor _, rule := range ingress.Spec.Rules {\n\t\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\t\tsvc, err := i.CoreV1().Services(ingress.Namespace).Get(ctx, path.Backend.Service.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\topts := metav1.ListOptions{\n\t\t\t\t\tLabelSelector: labels.Set(svc.Spec.Selector).String(),\n\t\t\t\t}\n\n\t\t\t\tpods, err := i.CoreV1().Pods(svc.Namespace).List(ctx, opts)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tfor _, pod := range pods.Items {\n\t\t\t\t\tif pod.Status.Phase != v1.PodRunning 
{\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// resolve external IP from node\n\t\t\t\t\tnode, err := i.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tnodeAddress := \"\"\n\t\t\t\t\tfor _, address := range node.Status.Addresses {\n\t\t\t\t\t\tif address.Type == v1.NodeExternalIP {\n\t\t\t\t\t\t\tnodeAddress = address.Address\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\thosts[nodeAddress] += 1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(hosts) == 0 {\n\t\t\tlog.Info(\"No backends found for ingress, can't update ingress host field\")\n\t\t\treturn nil\n\t\t}\n\n\t\tvar max uint\n\t\tfor ip, count := range hosts {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"host\": ip,\n\t\t\t\t\"num\": count,\n\t\t\t}).Debug()\n\t\t\tif count > max {\n\t\t\t\thost = ip\n\t\t\t\tmax = count\n\t\t\t}\n\t\t}\n\t}\n\n\tingress.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{{\n\t\tIP: host,\n\t}}\n\n\t_, err := i.NetworkingV1().Ingresses(ingress.Namespace).UpdateStatus(ctx, ingress, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"ingress\": ingress.Name,\n\t\t\"ip\": host,\n\t}).Info()\n\n\treturn nil\n}", "func (c *clientImpl) WatchIngresses(namespace string, labelSelector labels.Selector, watchCh chan<- interface{}) cache.SharedInformer {\n\tlistWatch := newListWatchFromClientWithLabelSelector(\n\t\tc.clientset.ExtensionsV1beta1().RESTClient(),\n\t\tkindIngresses,\n\t\tnamespace,\n\t\tfields.Everything(),\n\t\tlabelSelector)\n\n\tinformer := loadInformer(listWatch, &v1beta1.Ingress{}, watchCh)\n\tc.ingStores = append(c.ingStores, informer.GetStore())\n\treturn informer\n}", "func (d *FrameReader) SubscribeIngress() <-chan *xivnet.Frame {\n\treturn d.ingressFramesChan\n}", "func (r *reconciler) routeToIngressController(context context.Context, obj client.Object) []reconcile.Request {\n\tvar 
requests []reconcile.Request\n\t// Cast the received object into Route object.\n\troute := obj.(*routev1.Route)\n\n\t// Create the NamespacedName for the Route.\n\trouteNamespacedName := types.NamespacedName{\n\t\tNamespace: route.Namespace,\n\t\tName: route.Name,\n\t}\n\n\t// Create a set of current Ingresses of the Route to easily retrieve them.\n\tcurrentRouteIngresses := sets.NewString()\n\n\t// Iterate through the related Route's Ingresses.\n\tfor _, ri := range route.Status.Ingress {\n\t\t// Check if the Route was admitted by the RouteIngress.\n\t\tfor _, cond := range ri.Conditions {\n\t\t\tif cond.Type == routev1.RouteAdmitted && cond.Status == corev1.ConditionTrue {\n\t\t\t\tlog.Info(\"queueing ingresscontroller\", \"name\", ri.RouterName)\n\t\t\t\t// Create a reconcile.Request for the router named in the RouteIngress.\n\t\t\t\trequest := reconcile.Request{\n\t\t\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\t\t\tName: ri.RouterName,\n\t\t\t\t\t\tNamespace: r.namespace,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\trequests = append(requests, request)\n\n\t\t\t\t// Add the Router Name to the currentIngressSet.\n\t\t\t\tcurrentRouteIngresses.Insert(ri.RouterName)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get the previous set of Ingresses of the Route.\n\tpreviousRouteIngresses := r.routeToIngresses[routeNamespacedName]\n\n\t// Iterate through the previousRouteIngresses.\n\tfor routerName := range previousRouteIngresses {\n\t\t// Check if the currentRouteIngresses contains the Router Name. If it does not,\n\t\t// then the Ingress was removed from the Route Status. 
The reconcile loop is needed\n\t\t// to be run for the corresponding Ingress Controller.\n\t\tif !currentRouteIngresses.Has(routerName) {\n\t\t\tlog.Info(\"queueing ingresscontroller\", \"name\", routerName)\n\t\t\t// Create a reconcile.Request for the router named in the RouteIngress.\n\t\t\trequest := reconcile.Request{\n\t\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\t\tName: routerName,\n\t\t\t\t\tNamespace: r.namespace,\n\t\t\t\t},\n\t\t\t}\n\t\t\trequests = append(requests, request)\n\t\t}\n\t}\n\n\t// Map the currentRouteIngresses to Route's NamespacedName.\n\tr.routeToIngresses[routeNamespacedName] = currentRouteIngresses\n\n\treturn requests\n}", "func (ings *IngressServer) InitIngress(service *spec.Service, port uint32) error {\n\tings.mutex.Lock()\n\tdefer ings.mutex.Unlock()\n\n\tings.applicationPort = port\n\n\tif _, ok := ings.pipelines[service.IngressPipelineName()]; !ok {\n\t\tsuperSpec, err := service.SideCarIngressPipelineSpec(port)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tentity, err := ings.tc.CreateHTTPPipelineForSpec(ings.namespace, superSpec)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create http pipeline %s failed: %v\", superSpec.Name(), err)\n\t\t}\n\t\tings.pipelines[service.IngressPipelineName()] = entity\n\t}\n\n\tif ings.httpServer == nil {\n\t\tsuperSpec, err := service.SideCarIngressHTTPServerSpec()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tentity, err := ings.tc.CreateHTTPServerForSpec(ings.namespace, superSpec)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create http server %s failed: %v\", superSpec.Name(), err)\n\t\t}\n\t\tings.httpServer = entity\n\t}\n\n\tif err := ings.inf.OnPartOfServiceSpec(service.Name, informer.AllParts, ings.reloadTraffic); err != nil {\n\t\t// Only return err when its type is not `AlreadyWatched`\n\t\tif err != informer.ErrAlreadyWatched {\n\t\t\tlogger.Errorf(\"add ingress spec watching service: %s failed: %v\", service.Name, err)\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\treturn nil\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"ingress-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource Ingress\n\terr = c.Watch(&source.Kind{Type: &networkingv1alpha1.Ingress{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to secondary resource Routes and requeue the\n\t// owner Ingress\n\terr = c.Watch(&source.Kind{Type: &routev1.Route{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &networkingv1alpha1.Ingress{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *k8sClient) OnAdd(obj interface{}) {\n\t// if there's an event already in the channel, discard this one,\n\t// this is fine because IngressController always reload everything\n\t// when receiving an event. Same for OnUpdate & OnDelete\n\tselect {\n\tcase c.eventCh <- obj:\n\tdefault:\n\t}\n}", "func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePortManager) {\n\tlbc.ctx.IngressInformer.GetIndexer().Add(ing)\n\tif pm == nil {\n\t\treturn\n\t}\n\tfor _, rule := range ing.Spec.Rules {\n\t\tfor _, path := range rule.HTTP.Paths {\n\t\t\tsvc := &api_v1.Service{\n\t\t\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\t\t\tName: path.Backend.ServiceName,\n\t\t\t\t\tNamespace: ing.Namespace,\n\t\t\t\t},\n\t\t\t}\n\t\t\tvar svcPort api_v1.ServicePort\n\t\t\tswitch path.Backend.ServicePort.Type {\n\t\t\tcase intstr.Int:\n\t\t\t\tsvcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}\n\t\t\tdefault:\n\t\t\t\tsvcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}\n\t\t\t}\n\t\t\tsvcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))\n\t\t\tsvc.Spec.Ports = 
[]api_v1.ServicePort{svcPort}\n\t\t\tlbc.ctx.ServiceInformer.GetIndexer().Add(svc)\n\t\t}\n\t}\n}", "func labelsForEnvoyIngress(crName, eLName string) map[string]string {\n\treturn map[string]string{v1beta1.AppLabelKey: \"envoyingress\", \"eListenerName\": eLName, v1beta1.KafkaCRLabelKey: crName}\n}", "func getIngressUrls(config *rest.Config, namespace string) url {\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tingress, err := clientset.Extensions().Ingresses(namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar urls url\n\n\t//loop through all ingress items in namespace\n\tfor _, i := range ingress.Items {\n\n\t\t//default http protocol to http\n\t\thttpProto := \"http\"\n\t\t//loop through annotations for each ingress object\n\t\tfor k, v := range i.Annotations {\n\t\t\t//if ssl-redirect is set, only check https\n\t\t\tif k == \"ingress.kubernetes.io/ssl-redirect\" && v == \"true\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t} else if k == \"traefik.ingress.kubernetes.io/redirect-entry-point\" && v == \"https\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t} else if k == \"traefik.ingress.kubernetes.io/frontend-entry-points\" && v == \"https\" {\n\t\t\t\thttpProto = \"https\"\n\t\t\t}\n\t\t}\n\t\t//build list of https urls from ingress.Spec.TLS.hosts[]\n\t\t//if ingress.spec.tls.host does not have corresponding ingress.spec.rules.host it is ignored.\n\t\t//end result https://ingress.spec.tls.host/ingress.spec.rules.host.path\n\t\tfor _, t := range i.Spec.TLS {\n\t\t\t//if tls has no host, use rules[0] as the host\n\t\t\tif t.Hosts == nil {\n\t\t\t\tfor _, r := range i.Spec.Rules {\n\t\t\t\t\t//skip wild card hosts\n\t\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\t\turls = append(urls, \"https://\"+r.Host+p.Path)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor _, h := range t.Hosts {\n\t\t\t\t\tfor _, r := 
range i.Spec.Rules {\n\t\t\t\t\t\tif r.Host == h {\n\t\t\t\t\t\t\t//skip wild card hosts\n\t\t\t\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\t\t\t\turls = append(urls, \"https://\"+h+p.Path)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//if ssl redirect annotation isn't set build http urls from ingress.spce.rules.hosts[].paths[]\n\t\tif httpProto != \"https\" {\n\t\t\tfor _, r := range i.Spec.Rules {\n\t\t\t\t//skip wild card hosts\n\t\t\t\tif !strings.Contains(r.Host, \"*\") {\n\t\t\t\t\tfor _, p := range r.HTTP.Paths {\n\t\t\t\t\t\turls = append(urls, \"http://\"+r.Host+p.Path)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t//remove excluded urls in -customizefile file\n\turls = urls.removeExcluded(excluded)\n\turls = urls.addAdditional(additional)\n\treturn urls\n\n}", "func (r *reconciler) secretToIngressController(ctx context.Context, o client.Object) []reconcile.Request {\n\tvar (\n\t\trequests []reconcile.Request\n\t\tlist operatorv1.IngressControllerList\n\t\tlistOpts = client.MatchingFields(map[string]string{\n\t\t\t\"defaultCertificateName\": o.GetName(),\n\t\t})\n\t\tingressConfig configv1.Ingress\n\t)\n\tif err := r.cache.List(ctx, &list, listOpts); err != nil {\n\t\tlog.Error(err, \"failed to list ingresscontrollers for secret\", \"secret\", o.GetName())\n\t\treturn requests\n\t}\n\tif err := r.cache.Get(ctx, controller.IngressClusterConfigName(), &ingressConfig); err != nil {\n\t\tlog.Error(err, \"failed to get ingresses.config.openshift.io\", \"name\", controller.IngressClusterConfigName())\n\t\treturn requests\n\t}\n\tfor _, ic := range list.Items {\n\t\tif ic.Status.Domain != ingressConfig.Spec.Domain {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"queueing ingresscontroller\", \"name\", ic.Name)\n\t\trequest := reconcile.Request{\n\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\tNamespace: ic.Namespace,\n\t\t\t\tName: 
ic.Name,\n\t\t\t},\n\t\t}\n\t\trequests = append(requests, request)\n\t}\n\treturn requests\n}", "func Validate(ingress *networkingv1.Ingress) error {\n\tif supportsTLS(ingress) && containsWildcard(ingress.Spec.TLS[0].Hosts[0]) {\n\t\treturn errors.Errorf(\"ingress TLS host %q contains wildcards\", ingress.Spec.TLS[0].Hosts[0])\n\t}\n\n\tif len(ingress.Spec.Rules) == 0 {\n\t\treturn errors.New(\"ingress does not have any rules\")\n\t}\n\n\tif containsWildcard(ingress.Spec.Rules[0].Host) {\n\t\treturn errors.Errorf(\"ingress host %q contains wildcards\", ingress.Spec.Rules[0].Host)\n\t}\n\n\treturn nil\n}", "func (c *Cluster) watch() error {\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Adding watches\")\n\n\tfactory := informers.NewSharedInformerFactory(c.client, 0)\n\tstopper := make(chan struct{})\n\tdefer close(stopper)\n\n\tpodInformer := factory.Core().V1().Pods().Informer()\n\tpodInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handlePodEvents(new, watch.Modified) },\n\t})\n\tgo podInformer.Run(stopper)\n\n\tingressInformer := factory.Extensions().V1beta1().Ingresses().Informer()\n\tingressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handleIngressEvent(new, watch.Modified) },\n\t})\n\tgo ingressInformer.Run(stopper)\n\n\tLoadBalancerInformer := factory.Core().V1().Services().Informer()\n\tLoadBalancerInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handleLoadBalancerEvent(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { 
c.handleLoadBalancerEvent(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handleLoadBalancerEvent(new, watch.Modified) },\n\t})\n\tgo LoadBalancerInformer.Run(stopper)\n\n\tif c.isFirstConnectionAttempt {\n\t\tc.readinessChannel <- true\n\t\tc.isFirstConnectionAttempt = false\n\t}\n\t<-c.aggregatorStopChannel\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Waiting for watches to exit...\")\n\n\tlog.WithFields(log.Fields{\n\t\t\"cluster\": c.config.Name,\n\t}).Debug(\"Stopping event handlers\")\n\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Event handlers stopped\")\n\treturn nil\n}", "func Start(clientset *kubernetes.Clientset, stopCh <-chan struct{}) {\n\tglog.Info(\"Starting ingress initializer...\")\n\trestClient := clientset.ExtensionsV1beta1().RESTClient()\n\twatchlist := cache.NewListWatchFromClient(restClient, \"ingresses\", corev1.NamespaceAll, fields.Everything())\n\tincludeUninitializedWatchlist := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.List(options)\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\toptions.IncludeUninitialized = true\n\t\t\treturn watchlist.Watch(options)\n\t\t},\n\t}\n\n\tresyncPeriod := 30 * time.Second\n\n\t_, controller := cache.NewInformer(includeUninitializedWatchlist, &extv1beta1.Ingress{}, resyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\terr := initializeIngress(obj.(*extv1beta1.Ingress), clientset)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\tgo controller.Run(stopCh)\n}", "func (sr *serviceRepository) ListIngress(n string) ([]resource.Service, error) {\n\tingressList, err := sr.kubernetes.NetworkingV1().Ingresses(n).List(context.Background(), metav1.ListOptions{})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar services 
[]resource.Service\n\n\tfor _, ing := range ingressList.Items {\n\n\t\tfor _, rules := range ing.Spec.Rules {\n\t\t\tfor _, path := range rules.HTTP.Paths {\n\t\t\t\tsvc := resource.Service{\n\t\t\t\t\tName: path.Backend.Service.Name,\n\t\t\t\t\tAddr: rules.Host,\n\t\t\t\t\tPorts: []resource.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPort: path.Backend.Service.Port.Number,\n\t\t\t\t\t\t\tExposedPort: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tservices = append(services, svc)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn services, nil\n\n}", "func parseIngressConfig(config *netConfigJSON, netConfig *NetConfig) error {\n\tingressListenerToInterceptPortMap := make(map[int]int)\n\tfor _, s := range config.IngressConfig {\n\t\t// verify that the ports are valid\n\t\tif err := vpc.ValidatePortRange(s.ListenerPort); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif s.InterceptPort != 0 {\n\t\t\tif err := vpc.ValidatePortRange(s.InterceptPort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tingressListenerToInterceptPortMap[s.ListenerPort] = s.InterceptPort\n\t\t}\n\t}\n\tnetConfig.IngressListenerToInterceptPortMap = ingressListenerToInterceptPortMap\n\treturn nil\n}", "func Ingress(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Ingress request to simple-service successful!\")\n}", "func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {\n\tvar endpointKey string\n\tif ing.Spec.DefaultBackend != nil { // stream\n\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)\n\t} else { // http\n\tLoop:\n\t\tfor _, rule := range ing.Spec.Rules {\n\t\t\tfor _, path := range rule.IngressRuleValue.HTTP.Paths {\n\t\t\t\tendpointKey = fmt.Sprintf(\"%s/%s\", ing.Namespace, path.Backend.Service.Name)\n\t\t\t\tif endpointKey != \"\" {\n\t\t\t\t\tbreak Loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\titem, exists, err := s.listers.Endpoint.GetByKey(endpointKey)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Can not get endpoint by key(%s): %v\", 
endpointKey, err)\n\t\treturn false\n\t}\n\tif !exists {\n\t\tlogrus.Debugf(\"Endpoint %s does not exist.\", endpointKey)\n\t\treturn false\n\t}\n\tendpoint, ok := item.(*corev1.Endpoints)\n\tif !ok {\n\t\tlogrus.Errorf(\"Cant not convert %v to %v\", reflect.TypeOf(item), reflect.TypeOf(endpoint))\n\t\treturn false\n\t}\n\tif len(endpoint.Subsets) == 0 {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\tif !hasReadyAddresses(endpoint) {\n\t\tlogrus.Debugf(\"Endpoints(%s) is empty, ignore it\", endpointKey)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (ws *WebServer) ObserveControlPoints() {\n\n}", "func (r *Reconciler) PatchIngress(ns, name string, pt types.PatchType, data []byte, subresources ...string) (v1alpha1.IngressAccessor, error) {\n\treturn r.ServingClientSet.NetworkingV1alpha1().ClusterIngresses().Patch(name, pt, data, subresources...)\n}", "func (s *k8sStore) syncIngress(ing *networking.Ingress) {\n\tkey := MetaNamespaceKey(ing)\n\tklog.V(3).Infof(\"updating annotations information for ingress %v\", key)\n\tif !IsValid(ing) {\n\t\treturn\n\t}\n\tcopyIng := &networking.Ingress{}\n\ting.ObjectMeta.DeepCopyInto(&copyIng.ObjectMeta)\n\ting.Spec.DeepCopyInto(&copyIng.Spec)\n\ting.Status.DeepCopyInto(&copyIng.Status)\n\n\tfor ri, rule := range copyIng.Spec.Rules {\n\t\tif rule.HTTP == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor pi, path := range rule.HTTP.Paths {\n\t\t\tif path.Path == \"\" {\n\t\t\t\tcopyIng.Spec.Rules[ri].HTTP.Paths[pi].Path = \"/\"\n\t\t\t}\n\t\t}\n\t}\n\n\tSetDefaultALBPathType(copyIng)\n\n\terr := s.listers.IngressWithAnnotation.Update(&Ingress{\n\t\tIngress: *copyIng,\n\t})\n\tif err != nil {\n\t\tklog.Error(err)\n\t}\n}", "func (op *Operator) initSecretWatcher() cache.Controller {\n\tlw := &cache.ListWatch{\n\t\tListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn 
op.KubeClient.CoreV1().Secrets(op.Opt.WatchNamespace()).List(metav1.ListOptions{})\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn op.KubeClient.CoreV1().Secrets(op.Opt.WatchNamespace()).Watch(metav1.ListOptions{})\n\t\t},\n\t}\n\t_, informer := cache.NewIndexerInformer(lw,\n\t\t&core.Secret{},\n\t\top.Opt.ResyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif oldSecret, ok := old.(*core.Secret); ok {\n\t\t\t\t\tif newSecret, ok := new.(*core.Secret); ok {\n\t\t\t\t\t\tif reflect.DeepEqual(oldSecret.Data, newSecret.Data) {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tctx := etx.Background()\n\t\t\t\t\t\tlogger := log.New(ctx)\n\t\t\t\t\t\t// Secret DataChanged. We need to list all Ingress and check which of\n\t\t\t\t\t\t// those ingress uses this secret as basic auth secret.\n\t\t\t\t\t\titems, err := op.listIngresses()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Errorln(err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor i := range items {\n\t\t\t\t\t\t\tengress := &items[i]\n\t\t\t\t\t\t\tif engress.ShouldHandleIngress(op.Opt.IngressClass) || op.IngressServiceUsesAuthSecret(engress, newSecret) {\n\t\t\t\t\t\t\t\tif engress.UsesAuthSecret(newSecret.Namespace, newSecret.Name) {\n\t\t\t\t\t\t\t\t\tctrl := ingress.NewController(ctx, op.KubeClient, op.CRDClient, op.VoyagerClient, op.PromClient, op.ServiceLister, op.EndpointsLister, op.Opt, engress)\n\t\t\t\t\t\t\t\t\tif ctrl.IsExists() {\n\t\t\t\t\t\t\t\t\t\tcfgErr := ctrl.Update(0, nil)\n\t\t\t\t\t\t\t\t\t\tif cfgErr != nil {\n\t\t\t\t\t\t\t\t\t\t\tlogger.Infof(\"Failed to update offshoots of %s Ingress %s/%s. 
Reason: %s\", engress.APISchema(), engress.Namespace, engress.Name, cfgErr)\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tctrl.Create()\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\tcache.Indexers{},\n\t)\n\treturn informer\n}", "func MakeIngress(dm *servingv1alpha1.DomainMapping, backendServiceName, hostName, ingressClass string, httpOption netv1alpha1.HTTPOption, tls []netv1alpha1.IngressTLS, acmeChallenges ...netv1alpha1.HTTP01Challenge) *netv1alpha1.Ingress {\n\tpaths, hosts := routeresources.MakeACMEIngressPaths(acmeChallenges, sets.NewString(dm.GetName()))\n\treturn &netv1alpha1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: kmeta.ChildName(dm.GetName(), \"\"),\n\t\t\tNamespace: dm.Namespace,\n\t\t\tAnnotations: kmeta.FilterMap(kmeta.UnionMaps(map[string]string{\n\t\t\t\tnetapi.IngressClassAnnotationKey: ingressClass,\n\t\t\t}, dm.GetAnnotations()), routeresources.ExcludedAnnotations.Has),\n\t\t\tLabels: kmeta.UnionMaps(dm.Labels, map[string]string{\n\t\t\t\tserving.DomainMappingUIDLabelKey: string(dm.UID),\n\t\t\t\tserving.DomainMappingNamespaceLabelKey: dm.Namespace,\n\t\t\t}),\n\t\t\tOwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(dm)},\n\t\t},\n\t\tSpec: netv1alpha1.IngressSpec{\n\t\t\tHTTPOption: httpOption,\n\t\t\tTLS: tls,\n\t\t\tRules: []netv1alpha1.IngressRule{{\n\t\t\t\tHosts: append(hosts, dm.Name),\n\t\t\t\tVisibility: netv1alpha1.IngressVisibilityExternalIP,\n\t\t\t\tHTTP: &netv1alpha1.HTTPIngressRuleValue{\n\t\t\t\t\t// The order of the paths is sensitive, always put tls challenge first\n\t\t\t\t\tPaths: append(paths,\n\t\t\t\t\t\t[]netv1alpha1.HTTPIngressPath{{\n\t\t\t\t\t\t\tRewriteHost: hostName,\n\t\t\t\t\t\t\tSplits: []netv1alpha1.IngressBackendSplit{{\n\t\t\t\t\t\t\t\tPercent: 100,\n\t\t\t\t\t\t\t\tAppendHeaders: map[string]string{\n\t\t\t\t\t\t\t\t\tnetheader.OriginalHostKey: 
dm.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tIngressBackend: netv1alpha1.IngressBackend{\n\t\t\t\t\t\t\t\t\tServiceNamespace: dm.Namespace,\n\t\t\t\t\t\t\t\t\tServiceName: backendServiceName,\n\t\t\t\t\t\t\t\t\tServicePort: intstr.FromInt(80),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t}}...),\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n}", "func isCatchAllIngress(spec networking.IngressSpec) bool {\n\treturn spec.DefaultBackend != nil && len(spec.Rules) == 0\n}", "func (s *k8sStore) ListIngresses() []*Ingress {\n\t// filter ingress rules\n\tingresses := make([]*Ingress, 0)\n\tfor _, item := range s.listers.IngressWithAnnotation.List() {\n\t\ting := item.(*Ingress)\n\t\tif IsValid(&ing.Ingress) {\n\t\t\tingresses = append(ingresses, ing)\n\t\t}\n\n\t}\n\n\tsortIngressSlice(ingresses)\n\n\treturn ingresses\n}", "func createIngressLW(kubeClient *kclient.ExtensionsClient) *kcache.ListWatch {\n\treturn kcache.NewListWatchFromClient(kubeClient, \"ingresses\", kapi.NamespaceAll, kSelector.Everything())\n}", "func (r *AppReconciler) ingressForApp(app *cloudv1alpha1.App) (*netv1beta1.Ingress, error) {\n\tprojectName := AppProjectName(app)\n\tlabels := LabelsForApp(projectName, app.Name)\n\n\tport := appPortToExposeExternally(app)\n\n\tingr := &netv1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: app.Name,\n\t\t\tNamespace: app.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: netv1beta1.IngressSpec{\n\t\t\tRules: []netv1beta1.IngressRule{\n\t\t\t\tnetv1beta1.IngressRule{\n\t\t\t\t\tHost: appURLHost(app),\n\t\t\t\t\tIngressRuleValue: netv1beta1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &netv1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\tPaths: []netv1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\tnetv1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\tBackend: netv1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\tServiceName: app.Name,\n\t\t\t\t\t\t\t\t\t\tServicePort: 
intstr.FromInt(int(*port)),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Set app instance as the owner and controller\n\terr := ctrl.SetControllerReference(app, ingr, r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ingr, nil\n}", "func (c *AviController) SetupMultiClusterIngressEventHandlers(numWorkers uint32) {\n\tutils.AviLog.Infof(\"Setting up MultiClusterIngress CRD Event handlers\")\n\n\tmultiClusterIngressEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmci := obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress add event: Namespace: %s didn't qualify filter. 
Not adding multi-cluster ingress\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := c.GetValidator().ValidateMultiClusterIngressObj(key, mci); err != nil {\n\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of MultiClusterIngress failed: %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\toldObj := old.(*akov1alpha1.MultiClusterIngress)\n\t\t\tmci := new.(*akov1alpha1.MultiClusterIngress)\n\t\t\tif !reflect.DeepEqual(oldObj.Spec, mci.Spec) {\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress update event: Namespace: %s didn't qualify filter. 
Not updating multi-cluster ingress\", key, namespace)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err := c.GetValidator().ValidateMultiClusterIngressObj(key, mci); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of MultiClusterIngress failed: %v\", key, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmci, ok := obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmci, ok = tombstone.Obj.(*akov1alpha1.MultiClusterIngress)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not a MultiClusterIngress: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(mci))\n\t\t\tkey := lib.MultiClusterIngress + \"/\" + utils.ObjKey(mci)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Multi-cluster Ingress delete event: Namespace: %s didn't qualify filter. 
Not deleting multi-cluster ingress\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t}\n\tc.informers.MultiClusterIngressInformer.Informer().AddEventHandler(multiClusterIngressEventHandler)\n}", "func ListPushedIngress(client *kclient.Client, componentName string) (URLList, error) {\n\tlabelSelector := fmt.Sprintf(\"%v=%v\", componentlabels.ComponentLabel, componentName)\n\tklog.V(4).Infof(\"Listing ingresses with label selector: %v\", labelSelector)\n\tingresses, err := client.ListIngresses(labelSelector)\n\tif err != nil {\n\t\treturn URLList{}, errors.Wrap(err, \"unable to list ingress names\")\n\t}\n\n\tvar urls []URL\n\tfor _, i := range ingresses {\n\t\ta := getMachineReadableFormatIngress(i)\n\t\turls = append(urls, a)\n\t}\n\n\turlList := getMachineReadableFormatForList(urls)\n\treturn urlList, nil\n}", "func (inf *meshInformer) OnAllIngressSpecs(fn IngressSpecsFunc) error {\n\tstoreKey := layout.IngressPrefix()\n\tsyncerKey := \"prefix-ingress\"\n\n\tspecsFunc := func(kvs map[string]string) bool {\n\t\tingresss := make(map[string]*spec.Ingress)\n\t\tfor k, v := range kvs {\n\t\t\tingressSpec := &spec.Ingress{}\n\t\t\tif err := yaml.Unmarshal([]byte(v), ingressSpec); err != nil {\n\t\t\t\tlogger.Errorf(\"BUG: unmarshal %s to yaml failed: %v\", v, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tingresss[k] = ingressSpec\n\t\t}\n\n\t\treturn fn(ingresss)\n\t}\n\n\treturn inf.onSpecs(storeKey, syncerKey, specsFunc)\n}", "func InitKubServiceWatch(np *plugin.NetPlugin) {\n\n\twatchClient := setUpAPIClient()\n\tif watchClient == nil {\n\t\tlog.Fatalf(\"Could not init kubernetes API client\")\n\t}\n\n\tsvcCh := make(chan SvcWatchResp, 1)\n\tepCh := make(chan EpWatchResp, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase svcEvent := 
<-svcCh:\n\t\t\t\tswitch svcEvent.opcode {\n\t\t\t\tcase \"WARN\":\n\t\t\t\t\tlog.Debugf(\"epWatch : %s\", svcEvent.errStr)\n\t\t\t\t\tbreak\n\t\t\t\tcase \"FATAL\":\n\t\t\t\t\tlog.Errorf(\"epWatch : %s\", svcEvent.errStr)\n\t\t\t\t\tbreak\n\t\t\t\tcase \"ERROR\":\n\t\t\t\t\tlog.Warnf(\"svcWatch : %s\", svcEvent.errStr)\n\t\t\t\t\twatchClient.WatchServices(svcCh)\n\t\t\t\t\tbreak\n\n\t\t\t\tcase \"DELETED\":\n\t\t\t\t\tnp.NetworkDriver.DelSvcSpec(svcEvent.svcName, &svcEvent.svcSpec)\n\t\t\t\t\tbreak\n\t\t\t\tdefault:\n\t\t\t\t\tnp.NetworkDriver.AddSvcSpec(svcEvent.svcName, &svcEvent.svcSpec)\n\t\t\t\t}\n\t\t\tcase epEvent := <-epCh:\n\t\t\t\tswitch epEvent.opcode {\n\t\t\t\tcase \"WARN\":\n\t\t\t\t\tlog.Debugf(\"epWatch : %s\", epEvent.errStr)\n\t\t\t\t\tbreak\n\t\t\t\tcase \"FATAL\":\n\t\t\t\t\tlog.Errorf(\"epWatch : %s\", epEvent.errStr)\n\t\t\t\t\tbreak\n\t\t\t\tcase \"ERROR\":\n\t\t\t\t\tlog.Warnf(\"epWatch : %s\", epEvent.errStr)\n\t\t\t\t\twatchClient.WatchSvcEps(epCh)\n\t\t\t\t\tbreak\n\n\t\t\t\tdefault:\n\t\t\t\t\tnp.NetworkDriver.SvcProviderUpdate(epEvent.svcName, epEvent.providers)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\twatchClient.WatchServices(svcCh)\n\twatchClient.WatchSvcEps(epCh)\n}", "func (s *k8sStore) checkIngress(ing *networkingv1.Ingress) bool {\n\ti, err := l4.NewParser(s).Parse(ing)\n\tif err != nil {\n\t\tlogrus.Warningf(\"Uxpected error with ingress: %v\", err)\n\t\treturn false\n\t}\n\n\tcfg := i.(*l4.Config)\n\tif cfg.L4Enable {\n\t\t_, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port))\n\t\tif err == nil {\n\t\t\tlogrus.Warningf(\"%s, in Ingress(%v), is already in use.\",\n\t\t\t\tfmt.Sprintf(\"%s:%d\", cfg.L4Host, cfg.L4Port), ing)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\treturn true\n}", "func (n *NGINXController) Start() {\n\tglog.Infof(\"starting Ingress controller\")\n\n\tn.store.Run(n.stopCh)\n\n\tif n.syncStatus != nil {\n\t\tgo n.syncStatus.Run()\n\t}\n\n\tgo n.syncQueue.Run(time.Second, 
n.stopCh)\n\t// force initial sync\n\tn.syncQueue.Enqueue(&extensions.Ingress{})\n\n\tfor {\n\t\tselect {\n\t\tcase err := <-n.ngxErrCh:\n\t\t\tif n.isShuttingDown {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tglog.Infof(\"Unexpected error: %v\", err)\n\t\tcase event := <-n.updateCh.Out():\n\t\t\tif n.isShuttingDown {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif evt, ok := event.(store.Event); ok {\n\t\t\t\tglog.V(3).Infof(\"Event %v received - object %v\", evt.Type, evt.Obj)\n\t\t\t\tn.syncQueue.Enqueue(evt.Obj)\n\t\t\t} else {\n\t\t\t\tglog.Warningf(\"unexpected event type received %T\", event)\n\t\t\t}\n\t\tcase <-n.stopCh:\n\t\t\tbreak\n\t\t}\n\t}\n}", "func newDNSController(kubeClient *client.Client,\n\tresyncPeriod time.Duration,\n\tprovider providers.DNSProvider,\n\twatchNamespace string,\n\tpublishServices []string) (*dnsController, error) {\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(kubeClient.Events(\"\"))\n\n\tlbc := dnsController{\n\t\tprovider: provider,\n\t\tpublishServices: publishServices,\n\t\tclient: kubeClient,\n\t\tstopCh: make(chan struct{}),\n\t\trecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: \"loadbalancer-controller\"}),\n\t}\n\n\tlbc.syncQueue = NewTaskQueue(lbc.sync)\n\n\tingEventHandler := framework.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\taddIng := obj.(*extensions.Ingress)\n\t\t\tlbc.recorder.Eventf(addIng, api.EventTypeNormal, \"CREATE\", fmt.Sprintf(\"%s/%s\", addIng.Namespace, addIng.Name))\n\t\t\tlbc.syncQueue.enqueue(obj)\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tupIng := obj.(*extensions.Ingress)\n\t\t\tlbc.recorder.Eventf(upIng, api.EventTypeNormal, \"DELETE\", fmt.Sprintf(\"%s/%s\", upIng.Namespace, upIng.Name))\n\t\t\tlbc.syncQueue.enqueue(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tif !reflect.DeepEqual(old, cur) {\n\t\t\t\tupIng := 
cur.(*extensions.Ingress)\n\t\t\t\tlbc.recorder.Eventf(upIng, api.EventTypeNormal, \"UPDATE\", fmt.Sprintf(\"%s/%s\", upIng.Namespace, upIng.Name))\n\t\t\t\tlbc.syncQueue.enqueue(cur)\n\t\t\t}\n\t\t},\n\t}\n\n\tlbc.ingLister.Store, lbc.ingController = framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: ingressListFunc(lbc.client, watchNamespace),\n\t\t\tWatchFunc: ingressWatchFunc(lbc.client, watchNamespace),\n\t\t},\n\t\t&extensions.Ingress{}, resyncPeriod, ingEventHandler)\n\n\tlbc.svcLister.Store, lbc.svcController = framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: serviceListFunc(lbc.client, watchNamespace),\n\t\t\tWatchFunc: serviceWatchFunc(lbc.client, watchNamespace),\n\t\t},\n\t\t&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})\n\n\treturn &lbc, nil\n}", "func (r *NuxeoReconciler) defaultIngress(instance *v1alpha1.Nuxeo, access v1alpha1.NuxeoAccess, forcePassthrough bool,\n\tingressName string, nodeSet v1alpha1.NodeSet) (*v1beta1.Ingress, error) {\n\tconst nginxPassthroughAnnotation = \"nginx.ingress.kubernetes.io/ssl-passthrough\"\n\ttargetPort := intstr.IntOrString{\n\t\tType: intstr.String,\n\t\tStrVal: \"web\",\n\t}\n\tif access.TargetPort != (intstr.IntOrString{}) {\n\t\ttargetPort = access.TargetPort\n\t}\n\tingress := v1beta1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ingressName,\n\t\t\tNamespace: instance.Namespace,\n\t\t},\n\t\tSpec: v1beta1.IngressSpec{\n\t\t\tRules: []v1beta1.IngressRule{{\n\t\t\t\tHost: access.Hostname,\n\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{{\n\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\tServiceName: serviceName(instance, nodeSet),\n\t\t\t\t\t\t\t\tServicePort: targetPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif access.Termination != \"\" || forcePassthrough {\n\t\tif access.Termination != 
\"\" && access.Termination != routev1.TLSTerminationPassthrough &&\n\t\t\taccess.Termination != routev1.TLSTerminationEdge {\n\t\t\treturn nil, fmt.Errorf(\"only passthrough and edge termination are supported\")\n\t\t}\n\t\tingress.Spec.TLS = []v1beta1.IngressTLS{{\n\t\t\tHosts: []string{access.Hostname},\n\t\t}}\n\t\tif access.Termination == routev1.TLSTerminationPassthrough || forcePassthrough {\n\t\t\tingress.ObjectMeta.Annotations = map[string]string{nginxPassthroughAnnotation: \"true\"}\n\t\t} else {\n\t\t\t// the Ingress will terminate TLS\n\t\t\tif access.TLSSecret == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"the Ingress was configured for TLS termination but no secret was provided\")\n\t\t\t}\n\t\t\t// secret needs keys 'tls.crt' and 'tls.key' and cert must have CN=<access.Hostname>\n\t\t\tingress.Spec.TLS[0].SecretName = access.TLSSecret\n\t\t}\n\t}\n\t_ = controllerutil.SetControllerReference(instance, &ingress, r.Scheme)\n\treturn &ingress, nil\n}", "func (s *k8sStore) ListIngresses() []*networkingv1.Ingress {\n\t// filter ingress rules\n\tvar ingresses []*networkingv1.Ingress\n\tfor _, item := range s.listers.Ingress.List() {\n\t\ting := item.(*networkingv1.Ingress)\n\n\t\tingresses = append(ingresses, ing)\n\t}\n\n\treturn ingresses\n}", "func add(_ manager.Manager, c controller.Controller) error {\n\tvar err error\n\n\t// Watch for changes to primary resource Egress Gateway.\n\terr = c.Watch(&source.Kind{Type: &operatorv1.EgressGateway{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = imageset.AddImageSetWatch(c); err != nil {\n\t\treturn fmt.Errorf(\"egressgateway-controller failed to watch ImageSet: %w\", err)\n\t}\n\n\tif err = utils.AddNetworkWatch(c); err != nil {\n\t\tlog.V(5).Info(\"Failed to create network watch\", \"err\", err)\n\t\treturn fmt.Errorf(\"egressgateway-controller failed to watch Tigera network resource: %v\", err)\n\t}\n\n\t// Watch for changes to FelixConfiguration.\n\terr = 
c.Watch(&source.Kind{Type: &crdv1.FelixConfiguration{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"egressGateway-controller failed to watch FelixConfiguration resource: %w\", err)\n\t}\n\n\treturn nil\n}", "func IngressToGateway(key resource.VersionedKey, i *ingress.IngressSpec) resource.Entry {\n\tnamespace, name := key.FullName.InterpretAsNamespaceAndName()\n\n\tgateway := &v1alpha3.Gateway{\n\t\tSelector: model.IstioIngressWorkloadLabels,\n\t}\n\n\t// FIXME this is a temporary hack until all test templates are updated\n\t//for _, tls := range i.Spec.TLS {\n\tif len(i.TLS) > 0 {\n\t\ttls := i.TLS[0] // FIXME\n\t\t// TODO validation when multiple wildcard tls secrets are given\n\t\tif len(tls.Hosts) == 0 {\n\t\t\ttls.Hosts = []string{\"*\"}\n\t\t}\n\t\tgateway.Servers = append(gateway.Servers, &v1alpha3.Server{\n\t\t\tPort: &v1alpha3.Port{\n\t\t\t\tNumber: 443,\n\t\t\t\tProtocol: string(model.ProtocolHTTPS),\n\t\t\t\tName: fmt.Sprintf(\"https-443-i-%s-%s\", name, namespace),\n\t\t\t},\n\t\t\tHosts: tls.Hosts,\n\t\t\t// While we accept multiple certs, we expect them to be mounted in\n\t\t\t// /etc/istio/certs/namespace/secretname/tls.crt|tls.key\n\t\t\tTls: &v1alpha3.Server_TLSOptions{\n\t\t\t\tHttpsRedirect: false,\n\t\t\t\tMode: v1alpha3.Server_TLSOptions_SIMPLE,\n\t\t\t\t// TODO this is no longer valid for the new v2 stuff\n\t\t\t\tPrivateKey: path.Join(model.IngressCertsPath, model.IngressKeyFilename),\n\t\t\t\tServerCertificate: path.Join(model.IngressCertsPath, model.IngressCertFilename),\n\t\t\t\t// TODO: make sure this is mounted\n\t\t\t\tCaCertificates: path.Join(model.IngressCertsPath, model.RootCertFilename),\n\t\t\t},\n\t\t})\n\t}\n\n\tgateway.Servers = append(gateway.Servers, &v1alpha3.Server{\n\t\tPort: &v1alpha3.Port{\n\t\t\tNumber: 80,\n\t\t\tProtocol: string(model.ProtocolHTTP),\n\t\t\tName: fmt.Sprintf(\"http-80-i-%s-%s\", name, namespace),\n\t\t},\n\t\tHosts: []string{\"*\"},\n\t})\n\n\tnewName := name + 
\"-\" + model.IstioIngressGatewayName\n\tnewNamespace := model.IstioIngressNamespace\n\n\tgw := resource.Entry{\n\t\tID: resource.VersionedKey{\n\t\t\tKey: resource.Key{\n\t\t\t\tFullName: resource.FullNameFromNamespaceAndName(newNamespace, newName),\n\t\t\t\tTypeURL: metadata.VirtualService.TypeURL,\n\t\t\t},\n\t\t\tVersion: key.Version,\n\t\t\tCreateTime: key.CreateTime,\n\t\t},\n\t\tItem: gateway,\n\t}\n\n\treturn gw\n}", "func (r Resource) GetIngress(request *restful.Request, response *restful.Response) {\n\trequestNamespace := utils.GetNamespace(request)\n\n\tingress, err := r.K8sClient.ExtensionsV1beta1().Ingresses(requestNamespace).Get(tektonDashboardIngressName, metav1.GetOptions{})\n\n\tif err != nil || ingress == nil {\n\t\tlogging.Log.Errorf(\"Unable to retrieve any ingresses: %s\", err)\n\t\tutils.RespondError(response, err, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnoRuleError := \"no Ingress rules found labelled \" + tektonDashboardIngressName\n\n\t// Harden this block to avoid Go panics (array index out of range)\n\tif len(ingress.Spec.Rules) > 0 { // Got more than zero entries?\n\t\tif ingress.Spec.Rules[0].Host != \"\" { // For that rule, is there actually a host?\n\t\t\tingressHost := ingress.Spec.Rules[0].Host\n\t\t\tresponse.WriteEntity(ingressHost)\n\t\t\treturn\n\t\t}\n\t\tlogging.Log.Errorf(\"found an empty Ingress rule labelled %s\", tektonDashboardIngressName)\n\t} else {\n\t\tlogging.Log.Error(noRuleError)\n\t}\n\n\tlogging.Log.Error(\"Unable to retrieve any Ingresses\")\n\tutils.RespondError(response, err, http.StatusInternalServerError)\n\treturn\n}", "func (r Resource) GetIngress(request *restful.Request, response *restful.Response) {\n\trequestNamespace := utils.GetNamespace(request)\n\n\tingress, err := r.K8sClient.ExtensionsV1beta1().Ingresses(requestNamespace).Get(tektonDashboardIngressName, metav1.GetOptions{})\n\n\tif err != nil || ingress == nil {\n\t\tlogging.Log.Errorf(\"Unable to retrieve any ingresses: %s\", 
err)\n\t\tutils.RespondError(response, err, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tnoRuleError := \"no Ingress rules found labelled \" + tektonDashboardIngressName\n\n\t// Harden this block to avoid Go panics (array index out of range)\n\tif len(ingress.Spec.Rules) > 0 { // Got more than zero entries?\n\t\tif ingress.Spec.Rules[0].Host != \"\" { // For that rule, is there actually a host?\n\t\t\tingressHost := ingress.Spec.Rules[0].Host\n\t\t\tresponse.WriteEntity(ingressHost)\n\t\t\treturn\n\t\t}\n\t\tlogging.Log.Errorf(\"found an empty Ingress rule labelled %s\", tektonDashboardIngressName)\n\t} else {\n\t\tlogging.Log.Error(noRuleError)\n\t}\n\n\tlogging.Log.Error(\"Unable to retrieve any Ingresses\")\n\tutils.RespondError(response, err, http.StatusInternalServerError)\n\treturn\n}", "func (c *Cluster) aggregateClusterView() {\n\tfor {\n\t\tselect {\n\t\tcase event := <-c.ingressEvents:\n\t\t\tif event.Created {\n\t\t\t\tc.currentClusterState.Ingresses = append(c.currentClusterState.Ingresses, event.Ingress)\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\t\"ingress\": event.Ingress.Name,\n\t\t\t\t}).Info(\"Detected new ingress.\")\n\t\t\t} else {\n\t\t\t\tfor i, ingress := range c.currentClusterState.Ingresses {\n\t\t\t\t\tif ingress.Name == event.Ingress.Name {\n\t\t\t\t\t\tc.currentClusterState.Ingresses[i] = c.currentClusterState.Ingresses[len(c.currentClusterState.Ingresses)-1]\n\t\t\t\t\t\tc.currentClusterState.Ingresses = c.currentClusterState.Ingresses[:len(c.currentClusterState.Ingresses)-1]\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\t\t\t\"ingress\": event.Ingress.Name,\n\t\t\t\t\t\t}).Info(\"Removed old ingress.\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.clusterStateChannel <- c.currentClusterState\n\t\tcase event := <-c.backendEvents:\n\t\t\tif event.Created {\n\t\t\t\tc.currentClusterState.Backends = 
append(c.currentClusterState.Backends, event.Backend)\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\t\"backend\": event.Backend.Name,\n\t\t\t\t\t\"ip\": event.Backend.IP,\n\t\t\t\t}).Info(\"Detected new backend pod.\")\n\t\t\t} else {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\t\"backend\": event.Backend.Name,\n\t\t\t\t\t\"ip\": event.Backend.IP,\n\t\t\t\t}).Debug(\"Detected backend pod removal, searching...\")\n\t\t\t\tfor i, backend := range c.currentClusterState.Backends {\n\t\t\t\t\tif backend.Name == event.Backend.Name {\n\t\t\t\t\t\tc.currentClusterState.Backends[i] = c.currentClusterState.Backends[len(c.currentClusterState.Backends)-1]\n\t\t\t\t\t\tc.currentClusterState.Backends = c.currentClusterState.Backends[:len(c.currentClusterState.Backends)-1]\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t\t\t\t\t\"backend\": event.Backend.Name,\n\t\t\t\t\t\t\t\"ip\": event.Backend.IP,\n\t\t\t\t\t\t}).Info(\"Removed old backend pod.\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.clusterStateChannel <- c.currentClusterState\n\t\tcase _ = <-c.aggregatorStopChannel:\n\t\t\treturn\n\t\tcase _ = <-c.clearChannel:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"cluster\": c.config.Name,\n\t\t\t}).Debug(\"Clearing full cluster state...\")\n\t\t\tc.currentClusterState.Backends = nil\n\t\t\tc.currentClusterState.Ingresses = nil\n\t\t\tc.clusterStateChannel <- c.currentClusterState\n\t\t}\n\t}\n}", "func transformHttpStartStopEvent(cfEvent *events.Envelope, nrEvent map[string]interface{}) {\n\t// event: origin:\"gorouter\" eventType:HttpStartStop timestamp:1497038373295178447 deployment:\"cf\" job:\"router\" index:\"1276dbaa-f5a4-4c48-bcbe-d06ff0dba58d\" ip:\"192.168.16.16\" httpStartStop:<startTimestamp:1497038373206213992 stopTimestamp:1497038373295152451 requestId:<low:7513566559519661218 high:8828490834936076361 > peerType:Client 
method:GET uri:\"http://api.sys.pie-22.cfplatformeng.com/v2/syslog_drain_urls\" remoteAddress:\"130.211.2.63:61939\" userAgent:\"Go-http-client/1.1\" statusCode:200 contentLength:42 instanceId:\"89a53ed9-cf20-404b-5728-33a19c1e13ef\" forwarded:\"104.197.98.14\" forwarded:\"35.186.215.103\" forwarded:\"130.211.2.63\" >\n\thttpEvent := cfEvent.HttpStartStop\n\tprefix := \"http\"\n\tstart := time.Unix(0, httpEvent.GetStartTimestamp())\n\tend := time.Unix(0, httpEvent.GetStopTimestamp())\n\tduration := float64(end.Sub(start)) / float64(time.Millisecond)\n\tnrEvent[prefix+\"StartTimestamp\"] = start\n\tnrEvent[prefix+\"StopTimestamp\"] = end\n\tnrEvent[prefix+\"DurationMs\"] = duration\n\tif httpEvent.RequestId != nil {\n\t\tnrEvent[prefix+\"RequestId\"] = httpEvent.GetRequestId().String()\n\t}\n\tif httpEvent.PeerType != nil {\n\t\tnrEvent[prefix+\"PeerType\"] = httpEvent.GetPeerType().String()\n\t}\n\tif httpEvent.Method != nil {\n\t\tnrEvent[prefix+\"Method\"] = httpEvent.GetMethod().String()\n\t}\n\tif httpEvent.Uri != nil {\n\t\tnrEvent[prefix+\"Uri\"] = httpEvent.GetUri()\n\t}\n\tif httpEvent.RemoteAddress != nil {\n\t\tnrEvent[prefix+\"RemoteAddress\"] = httpEvent.GetRemoteAddress()\n\t}\n\tif httpEvent.UserAgent != nil {\n\t\tnrEvent[prefix+\"UserAgent\"] = httpEvent.GetUserAgent()\n\t}\n\tif httpEvent.StatusCode != nil {\n\t\tnrEvent[prefix+\"StatusCode\"] = httpEvent.GetStatusCode()\n\t}\n\tif httpEvent.ContentLength != nil {\n\t\tnrEvent[prefix+\"ContentLength\"] = httpEvent.GetContentLength()\n\t}\n\tif httpEvent.ApplicationId != nil {\n\t\tnrEvent[prefix+\"ApplicationId\"] = httpEvent.GetApplicationId()\n\t}\n\tif httpEvent.InstanceIndex != nil {\n\t\tnrEvent[prefix+\"InstanceIndex\"] = httpEvent.GetInstanceIndex()\n\t}\n\tif httpEvent.InstanceId != nil {\n\t\tnrEvent[prefix+\"InstanceId\"] = httpEvent.GetInstanceId()\n\t}\n\tfor i, forwardedIp := range httpEvent.Forwarded {\n\t\tindex := strconv.Itoa(i)\n\t\tnrEvent[prefix+\"Forwarded\"+index] = 
forwardedIp\n\t}\n}", "func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tlog.Info(\"reconciling\", \"request\", request)\n\n\t// Fetch the Ingress Controller object.\n\tingressController := &operatorv1.IngressController{}\n\tif err := r.cache.Get(ctx, request.NamespacedName, ingressController); err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\t// This means the Ingress Controller object was already deleted/finalized.\n\t\t\tlog.Info(\"Ingress Controller not found; reconciliation will be skipped\", \"request\", request)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to get Ingress Controller %q: %w\", request, err)\n\t}\n\n\t// If the Ingress Controller is marked to be deleted, then return early. The corresponding RouteMetricsControllerRoutesPerShard metric label\n\t// will be deleted in \"ensureIngressDeleted\" function of ingresscontroller.\n\tif ingressController.DeletionTimestamp != nil {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// NOTE: Even though the route admitted status should reflect validity of the namespace and route labelselectors, we still will validate\n\t// the namespace and route labels as there are still edge scenarios where the route status may be inaccurate.\n\n\t// List all the Namespaces filtered by our ingress's Namespace selector.\n\tnamespaceMatchingLabelsSelector := client.MatchingLabelsSelector{Selector: labels.Everything()}\n\tif ingressController.Spec.NamespaceSelector != nil {\n\t\tnamespaceSelector, err := metav1.LabelSelectorAsSelector(ingressController.Spec.NamespaceSelector)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"ingresscontroller has an invalid namespace selector\", \"ingresscontroller\",\n\t\t\t\tingressController.Name, \"namespaceSelector\", ingressController.Spec.NamespaceSelector)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\tnamespaceMatchingLabelsSelector = client.MatchingLabelsSelector{Selector: 
namespaceSelector}\n\t}\n\n\tnamespaceList := corev1.NamespaceList{}\n\tif err := r.cache.List(ctx, &namespaceList, namespaceMatchingLabelsSelector); err != nil {\n\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to list Namespaces %q: %w\", request, err)\n\t}\n\t// Create a set of Namespaces to easily look up Namespaces that matches the Routes assigned to the Ingress Controller.\n\tnamespacesSet := sets.NewString()\n\tfor i := range namespaceList.Items {\n\t\tnamespacesSet.Insert(namespaceList.Items[i].Name)\n\t}\n\n\t// List routes filtered by our ingress's route selector.\n\trouteMatchingLabelsSelector := client.MatchingLabelsSelector{Selector: labels.Everything()}\n\tif ingressController.Spec.RouteSelector != nil {\n\t\trouteSelector, err := metav1.LabelSelectorAsSelector(ingressController.Spec.RouteSelector)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"ingresscontroller has an invalid route selector\", \"ingresscontroller\",\n\t\t\t\tingressController.Name, \"routeSelector\", ingressController.Spec.RouteSelector)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\trouteMatchingLabelsSelector = client.MatchingLabelsSelector{Selector: routeSelector}\n\t}\n\trouteList := routev1.RouteList{}\n\tif err := r.cache.List(ctx, &routeList, routeMatchingLabelsSelector); err != nil {\n\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to list Routes for the Shard %q: %w\", request, err)\n\t}\n\n\t// Variable to store the number of routes admitted by the Shard (Ingress Controller).\n\troutesAdmitted := 0\n\n\t// Iterate through the list Routes.\n\tfor _, route := range routeList.Items {\n\t\t// Check if the Route's Namespace matches one of the Namespaces in the set namespacesSet and\n\t\t// the Route is admitted by the Ingress Controller.\n\t\tif namespacesSet.Has(route.Namespace) && routeStatusAdmitted(route, ingressController.Name) {\n\t\t\t// If the Route is admitted then, the routesAdmitted should be incremented by 1 for the 
Shard.\n\t\t\troutesAdmitted++\n\t\t}\n\t}\n\n\t// Set the value of the metric to the number of routesAdmitted for the corresponding Shard (Ingress Controller).\n\tSetRouteMetricsControllerRoutesPerShardMetric(request.Name, float64(routesAdmitted))\n\n\treturn reconcile.Result{}, nil\n}", "func (d *DataplaneWatchdog) syncIngress(ctx context.Context, metadata *core_xds.DataplaneMetadata) (SyncResult, error) {\n\tenvoyCtx := &xds_context.Context{\n\t\tControlPlane: d.EnvoyCpCtx,\n\t\tMesh: xds_context.MeshContext{}, // ZoneIngress does not have a mesh!\n\t}\n\n\taggregatedMeshCtxs, err := xds_context.AggregateMeshContexts(ctx, d.ResManager, d.MeshCache.GetMeshContext)\n\tif err != nil {\n\t\treturn SyncResult{}, err\n\t}\n\n\tresult := SyncResult{\n\t\tProxyType: mesh_proto.IngressProxyType,\n\t}\n\tsyncForConfig := aggregatedMeshCtxs.Hash != d.lastHash\n\tif !syncForConfig {\n\t\tresult.Status = SkipStatus\n\t\treturn result, nil\n\t}\n\tif syncForConfig {\n\t\td.log.V(1).Info(\"snapshot hash updated, reconcile\", \"prev\", d.lastHash, \"current\", aggregatedMeshCtxs.Hash)\n\t}\n\n\tproxy, err := d.IngressProxyBuilder.Build(ctx, d.key, aggregatedMeshCtxs)\n\tif err != nil {\n\t\treturn SyncResult{}, err\n\t}\n\tnetworking := proxy.ZoneIngressProxy.ZoneIngressResource.Spec.GetNetworking()\n\tenvoyAdminMTLS, err := d.getEnvoyAdminMTLS(ctx, networking.GetAddress(), networking.GetAdvertisedAddress())\n\tif err != nil {\n\t\treturn SyncResult{}, errors.Wrap(err, \"could not get Envoy Admin mTLS certs\")\n\t}\n\tproxy.EnvoyAdminMTLSCerts = envoyAdminMTLS\n\tproxy.Metadata = metadata\n\tchanged, err := d.IngressReconciler.Reconcile(ctx, *envoyCtx, proxy)\n\tif err != nil {\n\t\treturn SyncResult{}, err\n\t}\n\tif changed {\n\t\tresult.Status = ChangedStatus\n\t} else {\n\t\tresult.Status = GeneratedStatus\n\t}\n\treturn result, nil\n}", "func (c *controller) shouldProcessIngressUpdate(ing *knetworking.Ingress) bool {\n\t// ingress add/update\n\tshouldProcess := 
c.shouldProcessIngress(c.meshWatcher.Mesh(), ing)\n\titem := config.NamespacedName(ing)\n\tif shouldProcess {\n\t\t// record processed ingress\n\t\tc.mutex.Lock()\n\t\tc.ingresses[item] = ing\n\t\tc.mutex.Unlock()\n\t\treturn true\n\t}\n\n\tc.mutex.Lock()\n\t_, preProcessed := c.ingresses[item]\n\t// previous processed but should not currently, delete it\n\tif preProcessed && !shouldProcess {\n\t\tdelete(c.ingresses, item)\n\t} else {\n\t\tc.ingresses[item] = ing\n\t}\n\tc.mutex.Unlock()\n\n\treturn preProcessed\n}", "func (ct *ctrlerCtx) handleHostEventNoResolver(evt *kvstore.WatchEvent) error {\n\tswitch tp := evt.Object.(type) {\n\tcase *cluster.Host:\n\t\teobj := evt.Object.(*cluster.Host)\n\t\tkind := \"Host\"\n\n\t\tlog.Infof(\"Watcher: Got %s watch event(%s): {%+v}\", kind, evt.Type, eobj)\n\n\t\tct.Lock()\n\t\thandler, ok := ct.handlers[kind]\n\t\tct.Unlock()\n\t\tif !ok {\n\t\t\tct.logger.Fatalf(\"Cant find the handler for %s\", kind)\n\t\t}\n\t\thostHandler := handler.(HostHandler)\n\t\t// handle based on event type\n\t\tctrlCtx := &hostCtx{event: evt.Type, obj: &Host{Host: *eobj, ctrler: ct}}\n\t\tswitch evt.Type {\n\t\tcase kvstore.Created:\n\t\t\tfallthrough\n\t\tcase kvstore.Updated:\n\t\t\tfobj, err := ct.getObject(kind, ctrlCtx.GetKey())\n\t\t\tif err != nil {\n\t\t\t\tct.addObject(ctrlCtx)\n\t\t\t\tct.stats.Counter(\"Host_Created_Events\").Inc()\n\n\t\t\t\t// call the event handler\n\t\t\t\tctrlCtx.Lock()\n\t\t\t\terr = hostHandler.OnHostCreate(ctrlCtx.obj)\n\t\t\t\tctrlCtx.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. 
Err: %v\", kind, ctrlCtx.obj.GetObjectMeta(), err)\n\t\t\t\t\tct.delObject(kind, ctrlCtx.GetKey())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfResVer, fErr := strconv.ParseInt(fobj.GetResourceVersion(), 10, 64)\n\t\t\t\teResVer, eErr := strconv.ParseInt(eobj.GetResourceVersion(), 10, 64)\n\t\t\t\tif ct.resolver != nil && fErr == nil && eErr == nil && fResVer >= eResVer {\n\t\t\t\t\t// Event already processed.\n\t\t\t\t\tct.logger.Infof(\"Skipping update due to old resource version\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tctrlCtx := fobj.(*hostCtx)\n\t\t\t\tct.stats.Counter(\"Host_Updated_Events\").Inc()\n\t\t\t\tctrlCtx.Lock()\n\t\t\t\tp := cluster.Host{Spec: eobj.Spec,\n\t\t\t\t\tObjectMeta: eobj.ObjectMeta,\n\t\t\t\t\tTypeMeta: eobj.TypeMeta,\n\t\t\t\t\tStatus: eobj.Status}\n\n\t\t\t\terr = hostHandler.OnHostUpdate(ctrlCtx.obj, &p)\n\t\t\t\tctrlCtx.obj.Host = *eobj\n\t\t\t\tctrlCtx.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. Err: %v\", kind, ctrlCtx.obj.GetObjectMeta(), err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\t\tcase kvstore.Deleted:\n\t\t\tctrlCtx := &hostCtx{event: evt.Type, obj: &Host{Host: *eobj, ctrler: ct}}\n\t\t\tfobj, err := ct.findObject(kind, ctrlCtx.GetKey())\n\t\t\tif err != nil {\n\t\t\t\tct.logger.Errorf(\"Object %s/%s not found durng delete. Err: %v\", kind, eobj.GetKey(), err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tobj := fobj.(*Host)\n\t\t\tct.stats.Counter(\"Host_Deleted_Events\").Inc()\n\t\t\tobj.Lock()\n\t\t\terr = hostHandler.OnHostDelete(obj)\n\t\t\tobj.Unlock()\n\t\t\tif err != nil {\n\t\t\t\tct.logger.Errorf(\"Error deleting %s: %+v. 
Err: %v\", kind, obj.GetObjectMeta(), err)\n\t\t\t}\n\t\t\tct.delObject(kind, ctrlCtx.GetKey())\n\t\t\treturn nil\n\n\t\t}\n\tdefault:\n\t\tct.logger.Fatalf(\"API watcher Found object of invalid type: %v on Host watch channel\", tp)\n\t}\n\n\treturn nil\n}", "func recordIngressCondition(route *routeapi.Route, name, hostName string, condition routeapi.RouteIngressCondition) (changed, created bool, _ time.Time, latest *routeapi.RouteIngress) {\n\tfor i := range route.Status.Ingress {\n\t\texisting := &route.Status.Ingress[i]\n\t\tif existing.RouterName != name {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check whether the ingress is out of date without modifying it\n\t\tchanged := existing.Host != route.Spec.Host ||\n\t\t\texisting.WildcardPolicy != route.Spec.WildcardPolicy ||\n\t\t\texisting.RouterCanonicalHostname != hostName\n\n\t\texistingCondition := findCondition(existing, condition.Type)\n\t\tif existingCondition != nil {\n\t\t\tcondition.LastTransitionTime = existingCondition.LastTransitionTime\n\t\t\tif *existingCondition != condition {\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t}\n\t\tif !changed {\n\t\t\treturn false, false, time.Time{}, existing\n\t\t}\n\n\t\t// generate the correct ingress\n\t\texisting.Host = route.Spec.Host\n\t\texisting.WildcardPolicy = route.Spec.WildcardPolicy\n\t\texisting.RouterCanonicalHostname = hostName\n\t\tif existingCondition == nil {\n\t\t\texisting.Conditions = append(existing.Conditions, condition)\n\t\t\texistingCondition = &existing.Conditions[len(existing.Conditions)-1]\n\t\t} else {\n\t\t\t*existingCondition = condition\n\t\t}\n\t\tnow := nowFn()\n\t\texistingCondition.LastTransitionTime = &now\n\n\t\treturn true, false, now.Time, existing\n\t}\n\n\t// add a new ingress\n\troute.Status.Ingress = append(route.Status.Ingress, routeapi.RouteIngress{\n\t\tRouterName: name,\n\t\tHost: route.Spec.Host,\n\t\tWildcardPolicy: route.Spec.WildcardPolicy,\n\t\tRouterCanonicalHostname: hostName,\n\t\tConditions: 
[]routeapi.RouteIngressCondition{\n\t\t\tcondition,\n\t\t},\n\t})\n\tingress := &route.Status.Ingress[len(route.Status.Ingress)-1]\n\tnow := nowFn()\n\tingress.Conditions[0].LastTransitionTime = &now\n\n\treturn true, true, now.Time, ingress\n}", "func onRequestEvent(e *aah.Event) {\n\te.Data.(*aah.Context).Req.Host = \"TFB-Server:8080\"\n}", "func setIngressManagedAnnotation(rollouts []v1alpha1.Rollout, refResource validation.ReferencedResources) {\n\tfor _, rollout := range rollouts {\n\t\tfor i := range refResource.Ingresses {\n\t\t\tvar serviceName string\n\n\t\t\t// Basic Canary so ingress is only pointing a single service and so no linting is needed for this case.\n\t\t\tif rollout.Spec.Strategy.Canary == nil || rollout.Spec.Strategy.Canary.TrafficRouting == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.Nginx != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t} else if rollout.Spec.Strategy.Canary.TrafficRouting.ALB != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.StableService\n\t\t\t\tif rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService != \"\" {\n\t\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.ALB.RootService\n\t\t\t\t}\n\t\t\t} else if rollout.Spec.Strategy.Canary.TrafficRouting.SMI != nil {\n\t\t\t\tserviceName = rollout.Spec.Strategy.Canary.TrafficRouting.SMI.RootService\n\t\t\t}\n\n\t\t\tif ingressutil.HasRuleWithService(&refResource.Ingresses[i], serviceName) {\n\t\t\t\tannotations := refResource.Ingresses[i].GetAnnotations()\n\t\t\t\tif annotations == nil {\n\t\t\t\t\tannotations = make(map[string]string)\n\t\t\t\t}\n\t\t\t\tannotations[v1alpha1.ManagedByRolloutsKey] = rollout.Name\n\t\t\t\trefResource.Ingresses[i].SetAnnotations(annotations)\n\t\t\t}\n\t\t}\n\t}\n}", "func eventHandler(ctx context.Context, client ce.Client) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != 
http.MethodPost {\n\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\n\t\t// TODO (@mgasch): support inbound rate limiting\n\n\t\tlog := logger.Get(ctx)\n\t\tb, err := io.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Error(\"read body\", zap.Error(err))\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tvar event model.Payload\n\t\tif err = json.Unmarshal(b, &event); err != nil {\n\t\t\tlog.Error(\"could not decode harbor notification event\", zap.Error(err))\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tid := uuid.New().String()\n\t\tlog = log.With(zap.String(\"eventID\", id))\n\n\t\tlog.Debug(\"received request\", zap.String(\"request\", string(b)))\n\n\t\te := ce.NewEvent()\n\t\te.SetID(id)\n\t\te.SetSource(fmt.Sprintf(sourceFormat, os.Getenv(\"K_SERVICE\")))\n\t\te.SetSubject(event.Operator) // might be empty\n\n\t\t// sanity check\n\t\tif event.Type == \"\" {\n\t\t\tlog.Error(\"harbor event type must not be empty\", zap.String(\"type\", event.Type))\n\t\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tt := strings.ToLower(event.Type)\n\t\te.SetType(fmt.Sprintf(eventTypeFormat, t))\n\n\t\tts := time.Unix(event.OccurAt, 0)\n\t\te.SetTime(ts)\n\n\t\tif err = e.SetData(ce.ApplicationJSON, event); err != nil {\n\t\t\tlog.Error(\"could not set cloudevent data\", zap.Error(err))\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tctx = cectx.WithRetriesExponentialBackoff(ctx, retryDelay, retries)\n\t\tif err = client.Send(ctx, e); ce.IsUndelivered(err) || ce.IsNACK(err) {\n\t\t\tlog.Error(\"could not send cloudevent\", zap.Error(err), zap.String(\"event\", e.String()))\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), 
http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debug(\"successfully sent cloudevent\", zap.Any(\"event\", e))\n\t})\n}", "func verifyExternalIngressController(t *testing.T, name types.NamespacedName, hostname, address string) {\n\tt.Helper()\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\t// If we have a DNS as an external IP address, make sure we can resolve it before moving on.\n\t// This just limits the number of \"could not resolve host\" errors which can be confusing.\n\tif net.ParseIP(address) == nil {\n\t\tif err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := net.LookupIP(address)\n\t\t\tif err != nil 
{\n\t\t\t\tt.Logf(\"waiting for loadbalancer domain %s to resolve...\", address)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"loadbalancer domain %s was unable to resolve:\", address)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"http://%s\", address), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build client request: %v\", err)\n\t}\n\t// we use HOST header to map to the domain associated on the ingresscontroller.\n\t// This ensures our http call is routed to the correct router.\n\treq.Host = hostname\n\n\thttpClient := http.Client{Timeout: 5 * time.Second}\n\terr = waitForHTTPClientCondition(t, &httpClient, req, 10*time.Second, 10*time.Minute, func(r *http.Response) bool {\n\t\tif r.StatusCode == http.StatusOK {\n\t\t\tt.Logf(\"verified connectivity with workload with req %v and response %v\", req.URL, r.StatusCode)\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with reqURL %s using external client: %v\", req.URL, err)\n\t}\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource KeepalivedGroup\n\terr = c.Watch(&source.Kind{Type: &redhatcopv1alpha1.KeepalivedGroup{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"KeepalivedGroup\",\n\t\t},\n\t}}, &handler.EnqueueRequestForObject{}, util.ResourceGenerationOrFinalizerChangedPredicate{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// this will filter new secrets and secrets where the content changed\n\t// secret that are actually referenced by routes will be filtered by the handler\n\tisAnnotatedService := predicate.Funcs{\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tservice, ok := 
e.ObjectNew.DeepCopyObject().(*corev1.Service)\n\t\t\tif ok {\n\t\t\t\tif _, ok := service.GetAnnotations()[keepalivedGroupAnnotation]; ok && (service.Spec.Type == corev1.ServiceTypeLoadBalancer || len(service.Spec.ExternalIPs) > 0) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\tservice, ok = e.ObjectOld.DeepCopyObject().(*corev1.Service)\n\t\t\tif ok {\n\t\t\t\tif _, ok := service.GetAnnotations()[keepalivedGroupAnnotation]; ok && (service.Spec.Type == corev1.ServiceTypeLoadBalancer || len(service.Spec.ExternalIPs) > 0) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\tservice, ok := e.Object.DeepCopyObject().(*corev1.Service)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif _, ok := service.GetAnnotations()[keepalivedGroupAnnotation]; ok && (service.Spec.Type == corev1.ServiceTypeLoadBalancer || len(service.Spec.ExternalIPs) > 0) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\tservice, ok := e.Object.DeepCopyObject().(*corev1.Service)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif _, ok := service.GetAnnotations()[keepalivedGroupAnnotation]; ok && (service.Spec.Type == corev1.ServiceTypeLoadBalancer || len(service.Spec.ExternalIPs) > 0) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t}\n\n\t// TODO(user): Modify this to be the types you create that are owned by the primary resource\n\t// Watch for changes to secondary resource Pods and requeue the owner Route\n\terr = c.Watch(&source.Kind{Type: &corev1.Service{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Service\",\n\t\t},\n\t}}, &enqueueRequestForReferredKeepAlivedGroup{\n\t\tClient: mgr.GetClient(),\n\t}, isAnnotatedService)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewIngressController(\n\tlog *logrus.Logger,\n\tingClass string,\n\tkubeClient kubernetes.Interface,\n\tvc 
*varnish.Controller,\n\tinfFactory informers.SharedInformerFactory,\n\tvcrInfFactory vcr_informers.SharedInformerFactory,\n) (*IngressController, error) {\n\n\tingc := IngressController{\n\t\tlog: log,\n\t\tclient: kubeClient,\n\t\tstopCh: make(chan struct{}),\n\t\tvController: vc,\n\t}\n\n\tInitMetrics()\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(ingc.log.Printf)\n\teventBroadcaster.StartRecordingToSink(&core_v1.EventSinkImpl{\n\t\tInterface: ingc.client.CoreV1().Events(\"\"),\n\t})\n\tevtScheme := runtime.NewScheme()\n\tif err := api_v1.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := extensions.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := vcr_v1alpha1.AddToScheme(evtScheme); err != nil {\n\t\treturn nil, err\n\t}\n\tingc.recorder = eventBroadcaster.NewRecorder(evtScheme,\n\t\tapi_v1.EventSource{Component: \"varnish-ingress-controller\"})\n\n\tingc.informers = &infrmrs{\n\t\ting: infFactory.Extensions().V1beta1().Ingresses().Informer(),\n\t\tsvc: infFactory.Core().V1().Services().Informer(),\n\t\tendp: infFactory.Core().V1().Endpoints().Informer(),\n\t\tsecr: infFactory.Core().V1().Secrets().Informer(),\n\t\tvcfg: vcrInfFactory.Ingress().V1alpha1().VarnishConfigs().\n\t\t\tInformer(),\n\t\tbcfg: vcrInfFactory.Ingress().V1alpha1().BackendConfigs().\n\t\t\tInformer(),\n\t}\n\n\tevtFuncs := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: ingc.addObj,\n\t\tDeleteFunc: ingc.deleteObj,\n\t\tUpdateFunc: ingc.updateObj,\n\t}\n\n\tingc.informers.ing.AddEventHandler(evtFuncs)\n\tingc.informers.svc.AddEventHandler(evtFuncs)\n\tingc.informers.endp.AddEventHandler(evtFuncs)\n\tingc.informers.secr.AddEventHandler(evtFuncs)\n\tingc.informers.vcfg.AddEventHandler(evtFuncs)\n\tingc.informers.bcfg.AddEventHandler(evtFuncs)\n\n\tingc.listers = &Listers{\n\t\ting: infFactory.Extensions().V1beta1().Ingresses().Lister(),\n\t\tsvc: infFactory.Core().V1().Services().Lister(),\n\t\tendp: 
infFactory.Core().V1().Endpoints().Lister(),\n\t\tsecr: infFactory.Core().V1().Secrets().Lister(),\n\t\tvcfg: vcrInfFactory.Ingress().V1alpha1().VarnishConfigs().\n\t\t\tLister(),\n\t\tbcfg: vcrInfFactory.Ingress().V1alpha1().BackendConfigs().\n\t\t\tLister(),\n\t}\n\n\tingc.nsQs = NewNamespaceQueues(ingc.log, ingClass, ingc.vController,\n\t\tingc.listers, ingc.client, ingc.recorder)\n\n\treturn &ingc, nil\n}", "func (c *AviController) SetupAKOCRDEventHandlers(numWorkers uint32) {\n\tutils.AviLog.Infof(\"Setting up AKO CRD Event handlers\")\n\tinformer := lib.AKOControlConfig().CRDInformers()\n\n\tif lib.AKOControlConfig().HostRuleEnabled() {\n\t\thostRuleEventHandler := cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thostrule := obj.(*akov1alpha1.HostRule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(hostrule))\n\t\t\t\tkey := lib.HostRule + \"/\" + utils.ObjKey(hostrule)\n\t\t\t\tif err := c.GetValidator().ValidateHostRuleObj(key, hostrule); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Error retrieved during validation of HostRule: %v\", key, err)\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldObj := old.(*akov1alpha1.HostRule)\n\t\t\t\thostrule := new.(*akov1alpha1.HostRule)\n\t\t\t\tif isHostRuleUpdated(oldObj, hostrule) {\n\t\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(hostrule))\n\t\t\t\t\tkey := lib.HostRule + \"/\" + utils.ObjKey(hostrule)\n\t\t\t\t\tif err := c.GetValidator().ValidateHostRuleObj(key, hostrule); err != nil {\n\t\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, Error retrieved during validation of HostRule: %v\", key, 
err)\n\t\t\t\t\t}\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thostrule, ok := obj.(*akov1alpha1.HostRule)\n\t\t\t\tif !ok {\n\t\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thostrule, ok = tombstone.Obj.(*akov1alpha1.HostRule)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not an HostRule: %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(hostrule))\n\t\t\t\tkey := lib.HostRule + \"/\" + utils.ObjKey(hostrule)\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t}\n\n\t\tinformer.HostRuleInformer.Informer().AddEventHandler(hostRuleEventHandler)\n\t}\n\n\tif lib.AKOControlConfig().HttpRuleEnabled() {\n\t\thttpRuleEventHandler := cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttprule := obj.(*akov1alpha1.HTTPRule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(httprule))\n\t\t\t\tkey := lib.HTTPRule + \"/\" + utils.ObjKey(httprule)\n\t\t\t\tif err := c.GetValidator().ValidateHTTPRuleObj(key, httprule); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of HTTPRule: %v\", err)\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, 
numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldObj := old.(*akov1alpha1.HTTPRule)\n\t\t\t\thttprule := new.(*akov1alpha1.HTTPRule)\n\t\t\t\t// reflect.DeepEqual does not work on type []byte,\n\t\t\t\t// unable to capture edits in destinationCA\n\t\t\t\tif isHTTPRuleUpdated(oldObj, httprule) {\n\t\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(httprule))\n\t\t\t\t\tkey := lib.HTTPRule + \"/\" + utils.ObjKey(httprule)\n\t\t\t\t\tif err := c.GetValidator().ValidateHTTPRuleObj(key, httprule); err != nil {\n\t\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of HTTPRule: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttprule, ok := obj.(*akov1alpha1.HTTPRule)\n\t\t\t\tif !ok {\n\t\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\thttprule, ok = tombstone.Obj.(*akov1alpha1.HTTPRule)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not an HTTPRule: %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkey := lib.HTTPRule + \"/\" + utils.ObjKey(httprule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(httprule))\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\t\t// no need to validate for delete handler\n\t\t\t\tbkt := utils.Bkt(namespace, 
numWorkers)\n\t\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t}\n\n\t\tinformer.HTTPRuleInformer.Informer().AddEventHandler(httpRuleEventHandler)\n\t}\n\n\tif lib.AKOControlConfig().AviInfraSettingEnabled() {\n\t\taviInfraEventHandler := cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\taviinfra := obj.(*akov1alpha1.AviInfraSetting)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(aviinfra))\n\t\t\t\tkey := lib.AviInfraSetting + \"/\" + utils.ObjKey(aviinfra)\n\t\t\t\tif err := c.GetValidator().ValidateAviInfraSetting(key, aviinfra); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of AviInfraSetting: %v\", err)\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldObj := old.(*akov1alpha1.AviInfraSetting)\n\t\t\t\taviInfra := new.(*akov1alpha1.AviInfraSetting)\n\t\t\t\tif isAviInfraUpdated(oldObj, aviInfra) {\n\t\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(aviInfra))\n\t\t\t\t\tkey := lib.AviInfraSetting + \"/\" + utils.ObjKey(aviInfra)\n\t\t\t\t\tif err := c.GetValidator().ValidateAviInfraSetting(key, aviInfra); err != nil {\n\t\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of AviInfraSetting: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\taviinfra, ok := obj.(*akov1alpha1.AviInfraSetting)\n\t\t\t\tif !ok 
{\n\t\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\taviinfra, ok = tombstone.Obj.(*akov1alpha1.AviInfraSetting)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not an AviInfraSetting: %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkey := lib.AviInfraSetting + \"/\" + utils.ObjKey(aviinfra)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(aviinfra))\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\t\t// no need to validate for delete handler\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t}\n\n\t\tinformer.AviInfraSettingInformer.Informer().AddEventHandler(aviInfraEventHandler)\n\t}\n\n\tif lib.AKOControlConfig().SsoRuleEnabled() {\n\t\tssoRuleEventHandler := cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tssoRule := obj.(*akov1alpha2.SSORule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(ssoRule))\n\t\t\t\tkey := lib.SSORule + \"/\" + utils.ObjKey(ssoRule)\n\t\t\t\tif err := c.GetValidator().ValidateSSORuleObj(key, ssoRule); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Error retrieved during validation of SSORule: %v\", key, err)\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldObj := old.(*akov1alpha2.SSORule)\n\t\t\t\tssoRule := new.(*akov1alpha2.SSORule)\n\t\t\t\tif isSSORuleUpdated(oldObj, ssoRule) {\n\t\t\t\t\tnamespace, 
_, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(ssoRule))\n\t\t\t\t\tkey := lib.SSORule + \"/\" + utils.ObjKey(ssoRule)\n\t\t\t\t\tif err := c.GetValidator().ValidateSSORuleObj(key, ssoRule); err != nil {\n\t\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, Error retrieved during validation of SSORule: %v\", key, err)\n\t\t\t\t\t}\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tssoRule, ok := obj.(*akov1alpha2.SSORule)\n\t\t\t\tif !ok {\n\t\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tssoRule, ok = tombstone.Obj.(*akov1alpha2.SSORule)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not an SSORule: %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(ssoRule))\n\t\t\t\tkey := lib.SSORule + \"/\" + utils.ObjKey(ssoRule)\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t}\n\t\tinformer.SSORuleInformer.Informer().AddEventHandler(ssoRuleEventHandler)\n\t}\n\n\tif lib.AKOControlConfig().L4RuleEnabled() {\n\t\tl4RuleEventHandler := cache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tl4Rule := obj.(*akov1alpha2.L4Rule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(l4Rule))\n\t\t\t\tkey := lib.L4Rule + \"/\" + utils.ObjKey(l4Rule)\n\t\t\t\tif err := 
c.GetValidator().ValidateL4RuleObj(key, l4Rule); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of L4Rule: %v\", err)\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\toldObj := old.(*akov1alpha2.L4Rule)\n\t\t\t\tl4Rule := new.(*akov1alpha2.L4Rule)\n\t\t\t\tif isL4RuleUpdated(oldObj, l4Rule) {\n\t\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(l4Rule))\n\t\t\t\t\tkey := lib.L4Rule + \"/\" + utils.ObjKey(l4Rule)\n\t\t\t\t\tif err := c.GetValidator().ValidateL4RuleObj(key, l4Rule); err != nil {\n\t\t\t\t\t\tutils.AviLog.Warnf(\"Error retrieved during validation of L4Rule: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tif c.DisableSync {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tl4Rule, ok := obj.(*akov1alpha2.L4Rule)\n\t\t\t\tif !ok {\n\t\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tl4Rule, ok = tombstone.Obj.(*akov1alpha2.L4Rule)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not an L4Rule: %#v\", obj)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkey := lib.L4Rule + \"/\" + utils.ObjKey(l4Rule)\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(l4Rule))\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, 
numWorkers)\n\t\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t},\n\t\t}\n\t\tinformer.L4RuleInformer.Informer().AddEventHandler(l4RuleEventHandler)\n\t}\n\treturn\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"apischeme-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource APIScheme\n\terr = c.Watch(&source.Kind{Type: &cloudingressv1alpha1.APIScheme{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func registerResourceTelemetryPreMitigation(request *libcoap.Pdu, typ reflect.Type, controller controllers.ControllerInterface, session *libcoap.Session,\n context *libcoap.Context, is_unknown bool) (interface{}, string, error) {\n\n hex := hex.Dump(request.Data)\n if request.Code == libcoap.RequestPut && !strings.Contains(hex, string(libcoap.IETF_TELEMETRY_PRE_MITIGATION)) {\n return nil, \"\", errors.New(\"Body data MUST be telemetry pre-mitigation request\")\n }\n body, err := messages.UnmarshalCbor(request, reflect.TypeOf(messages.TelemetryPreMitigationRequest{}))\n if err != nil {\n return nil, \"\", err\n }\n\n var resourcePath string\n\n // Create sub resource to handle observation on behalf of Unknown resource in case of telemetry pre-mitigation PUT\n if is_unknown && request.Code == libcoap.RequestPut {\n p := request.PathString()\n resourcePath = p\n r := libcoap.ResourceInit(&p, 0)\n r.TurnOnResourceObservable()\n r.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPut, toMethodHandler(controller.HandlePut, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestPost, toMethodHandler(controller.HandlePost, typ, controller, !is_unknown))\n r.RegisterHandler(libcoap.RequestDelete, 
toMethodHandler(controller.HandleDelete, typ, controller, !is_unknown))\n context.AddResource(r)\n r.SetSession(session)\n log.Debugf(\"Create sub resource to handle observation later : uri-path=%+v\", p)\n // Create sub resource for handle get all with observe option\n pa := strings.Split(p, \"/tmid\")\n if len(pa) > 1 {\n resourceAll := context.GetResourceByQuery(&pa[0])\n if resourceAll == nil {\n ra := libcoap.ResourceInit(&pa[0], 0)\n ra.TurnOnResourceObservable()\n ra.RegisterHandler(libcoap.RequestGet, toMethodHandler(controller.HandleGet, typ, controller, !is_unknown))\n ra.RegisterHandler(libcoap.RequestDelete, toMethodHandler(controller.HandleDelete, typ, controller, !is_unknown))\n context.AddResource(ra)\n ra.SetSession(session)\n log.Debugf(\"Create observer in sub-resource with query: %+v\", pa[0])\n }\n }\n }\n return body, resourcePath, nil\n}", "func GetIngressEndpoints(ingress *networkingv1.Ingress) []string {\n\tendpointStatuses := ingress.Status.LoadBalancer.Ingress\n\tendpoints := []string{}\n\tfor _, endpointStatus := range endpointStatuses {\n\t\tendpoint := endpointStatus.Hostname\n\t\tif endpoint == \"\" {\n\t\t\tendpoint = endpointStatus.IP\n\t\t}\n\t\tendpoints = append(endpoints, endpoint)\n\t}\n\treturn endpoints\n}", "func verifyInternalIngressController(t *testing.T, name types.NamespacedName, hostname, address, image string) {\n\tkubeConfig, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get kube config: %v\", err)\n\t}\n\tclient, err := kubernetes.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create kube client: %v\", err)\n\t}\n\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", 
echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\textraArgs := []string{\n\t\t\"--header\", \"HOST:\" + echoRoute.Spec.Host,\n\t\t\"-v\",\n\t\t\"--retry-delay\", \"20\",\n\t\t\"--max-time\", \"10\",\n\t}\n\tclientPodName := types.NamespacedName{Namespace: name.Namespace, Name: \"curl-\" + name.Name}\n\tclientPodSpec := buildCurlPod(clientPodName.Name, clientPodName.Namespace, image, address, echoRoute.Spec.Host, extraArgs...)\n\tclientPod := clientPodSpec.DeepCopy()\n\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t}\n\t}()\n\n\tvar curlPodLogs string\n\terr = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil 
{\n\t\t\tt.Logf(\"error getting client pod %q: %v, retrying...\", clientPodName, err)\n\t\t\treturn false, nil\n\t\t}\n\t\t// First check if client curl pod is still starting or not running.\n\t\tif clientPod.Status.Phase == corev1.PodPending {\n\t\t\tt.Logf(\"waiting for client pod %q to start\", clientPodName)\n\t\t\treturn false, nil\n\t\t}\n\t\treadCloser, err := client.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, &corev1.PodLogOptions{\n\t\t\tContainer: \"curl\",\n\t\t\tFollow: false,\n\t\t}).Stream(context.TODO())\n\t\tif err != nil {\n\t\t\tt.Logf(\"failed to read output from pod %s: %v\", clientPod.Name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tdefer func() {\n\t\t\tif err := readCloser.Close(); err != nil {\n\t\t\t\tt.Errorf(\"failed to close reader for pod %s: %v\", clientPod.Name, err)\n\t\t\t}\n\t\t}()\n\t\tcurlPodLogs = \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tcurlPodLogs += line + \"\\n\"\n\t\t\tif strings.Contains(line, \"HTTP/1.0 200 OK\") {\n\t\t\t\tt.Logf(\"verified connectivity with workload with address: %s with response %s\", address, line)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\t// If failed or succeeded, the pod is stopped, but didn't provide us 200 response, let's try again.\n\t\tif clientPod.Status.Phase == corev1.PodFailed || clientPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tt.Logf(\"client pod %q has stopped...restarting. Curl Pod Logs:\\n%s\", clientPodName, curlPodLogs)\n\t\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil && errors.IsNotFound(err) {\n\t\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\t// Wait for deletion to prevent a race condition. 
Use PollInfinite since we are already in a Poll.\n\t\t\twait.PollInfinite(5*time.Second, func() (bool, error) {\n\t\t\t\terr = kclient.Get(context.TODO(), clientPodName, clientPod)\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tt.Logf(\"waiting for %q: to be deleted\", clientPodName)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\tclientPod = clientPodSpec.DeepCopy()\n\t\t\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\t\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with address: %s using internal curl client. Curl Pod Logs:\\n%s\", address, curlPodLogs)\n\t}\n}", "func addAllEventHandlers(\r\n sched *scheduler.Scheduler,\r\n informerFactory informers.SharedInformerFactory,\r\n nodeInformer cache.SharedIndexInformer,\r\n podInformer cache.SharedIndexInformer,\r\n ){\r\n\r\n nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\r\n\r\n // A new node is added\r\n AddFunc: func(obj interface{}) {\r\n\r\n node := obj.(*corev1.Node)\r\n err := sched.SchedulerCache.AddNode(node)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to add node to cache\", err)\r\n }\r\n\r\n },\r\n // One of the nodes got updated information\r\n UpdateFunc: func(oldObj, newObj interface{}) {\r\n oldNode := oldObj.(*corev1.Node)\r\n newNode := newObj.(*corev1.Node)\r\n\r\n err := sched.SchedulerCache.UpdateNode(oldNode,newNode)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to update node to cache\", err)\r\n }\r\n },\r\n // A node is deleted\r\n DeleteFunc: func(obj interface{}) {\r\n node := obj.(*corev1.Node)\r\n err := sched.SchedulerCache.RemoveNode(node)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to remove node from cache\", err)\r\n }\r\n\r\n },\r\n })\r\n\r\n podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\r\n // A new pod is added\r\n 
AddFunc: func(obj interface{}) {\r\n\r\n\r\n pod := obj.(*corev1.Pod)\r\n\r\n err := sched.SchedulerCache.AddPod(pod)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to add pod to cache\", err)\r\n }\r\n },\r\n // One of the pods got updated information\r\n UpdateFunc: func(oldObj, newObj interface{}) {\r\n\r\n oldPod := oldObj.(*corev1.Pod)\r\n newPod := newObj.(*corev1.Pod)\r\n\r\n err := sched.SchedulerCache.UpdatePod(oldPod,newPod)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to update pod to cache\", err)\r\n }\r\n\r\n },\r\n // A pod is deleted\r\n DeleteFunc: func(obj interface{}) {\r\n pod := obj.(*corev1.Pod)\r\n err := sched.SchedulerCache.RemovePod(pod)\r\n\r\n if err != nil{\r\n fmt.Println(\"Fail to remove Pod from cache\", err)\r\n }\r\n\r\n },\r\n })\r\n\r\n\r\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", 
\"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod 
controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func watch(url string, etcdcli *etcdutil.EtcdClient) {\n\n\t// test given host provides a remote api.\n\ttestUrl := url + \"/images/json\"\n\tif _, ret := apiwatch.GetContent(testUrl); ret == false {\n\t\tglog.Errorf(\"cloud not access test endpoint %s. 
It might not provide a docker remote api.\", testUrl)\n\t\tos.Exit(1)\n\t}\n\n\t// watch http streaming on /events.\n\teventUrl := url + \"/events\"\n\tglog.Infof(\"start watching docker api: %s\", eventUrl)\n\n\tapiwatch.ReadStream(eventUrl, func(id string, status string) {\n\t\tinspectUrl := url + \"/containers/\" + id + \"/json\"\n\n\t\tswitch status {\n\t\tcase \"start\":\n\t\t\tglog.Infof(\"inspect: %s\\n\", inspectUrl)\n\t\t\tdata, _ := apiwatch.GetContent(inspectUrl)\n\t\t\tcontainerInfo := apiwatch.JsonToMap(data)\n\t\t\tconfig, _ := containerInfo[\"Config\"].(map[string]interface{})\n\n\t\t\tnetworkSettings, _ := containerInfo[\"NetworkSettings\"].(map[string]interface{})\n\t\t\tregisterIp(config[\"Hostname\"].(string), networkSettings[\"IPAddress\"].(string), etcdcli)\n\t\tcase \"stop\":\n\t\t\tglog.Infof(\"inspect: %s\\n\", inspectUrl)\n\t\t\tdata, _ := apiwatch.GetContent(inspectUrl)\n\t\t\tcontainerInfo := apiwatch.JsonToMap(data)\n\t\t\tconfig, _ := containerInfo[\"Config\"].(map[string]interface{})\n\n\t\t\tunregisterIp(config[\"Hostname\"].(string), etcdcli)\n\t\tdefault:\n\t\t}\n\t})\n}", "func (r *Reconciler) UpdateIngress(ia v1alpha1.IngressAccessor) (v1alpha1.IngressAccessor, error) {\n\treturn r.ServingClientSet.NetworkingV1alpha1().ClusterIngresses().Update(ia.(*v1alpha1.ClusterIngress))\n}", "func main() {\n\tvar (\n\t\t//port = flag.Int(\"port\", 7472, \"HTTP listening port for Prometheus metrics\")\n\t\t//name = flag.String(\"name\", \"lb-ippool\", \"configmap name in default namespace\")\n\t\tpath = flag.String(\"config\", \"\", \"config file\")\n\t\tkubeconfig = flag.String(\"kubeconfig\", \"\", \"absolute path to the kubeconfig file (only needed when running outside of k8s)\")\n\t)\n\n\tflag.Parse()\n\tif len(*path) == 0 {\n\t\tklog.Fatalf(fmt.Sprintf(\"config file is required\"))\n\t}\n\n\trestConfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\tclientset, err := 
kubernetes.NewForConfig(restConfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\tbroadcaster := record.NewBroadcaster()\n\tbroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: corev1.New(clientset.CoreV1().RESTClient()).Events(\"\")})\n\trecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: \"lb-controller\"})\n\tqueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())\n\n\t// INFO: (1) 与 router server 建立 bgp session\n\ts := getSpeaker(*path)\n\n\tsvcWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), \"services\",\n\t\tmetav1.NamespaceAll, fields.Everything())\n\tsvcIndexer, svcInformer := cache.NewIndexerInformer(svcWatcher, &v1.Service{}, 0, cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tkey, err := cache.MetaNamespaceKeyFunc(obj)\n\t\t\tif err == nil {\n\t\t\t\tqueue.Add(svcKey(key))\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old interface{}, new interface{}) {\n\t\t\t//key, err := cache.MetaNamespaceKeyFunc(new)\n\t\t\t//if err == nil {\n\t\t\t//\t//queue.Add(svcKey(key))\n\t\t\t//\tklog.Infof(fmt.Sprintf(\"update %s\", key))\n\t\t\t//}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t//key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n\t\t\t//if err == nil {\n\t\t\t//\t//queue.Add(svcKey(key))\n\t\t\t//\tklog.Infof(fmt.Sprintf(\"delete %s\", key))\n\t\t\t//}\n\t\t},\n\t}, cache.Indexers{})\n\n\tepWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), \"endpoints\",\n\t\tmetav1.NamespaceAll, fields.Everything())\n\tepIndexer, epInformer := cache.NewIndexerInformer(epWatcher, &v1.Endpoints{}, 0, cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\t//key, err := cache.MetaNamespaceKeyFunc(obj)\n\t\t\t//if err == nil {\n\t\t\t//\t//queue.Add(svcKey(key))\n\t\t\t//\tklog.Info(key)\n\t\t\t//}\n\t\t},\n\t\tUpdateFunc: func(old interface{}, new interface{}) {\n\t\t\t//key, err := 
cache.MetaNamespaceKeyFunc(new)\n\t\t\t//if err == nil {\n\t\t\t//\tklog.Info(key)\n\t\t\t//\t//queue.Add(svcKey(key))\n\t\t\t//}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t//key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n\t\t\t//if err == nil {\n\t\t\t//\t//queue.Add(svcKey(key))\n\t\t\t//\tklog.Info(key)\n\t\t\t//}\n\t\t},\n\t}, cache.Indexers{})\n\n\tstopCh := make(chan struct{})\n\tgo svcInformer.Run(stopCh)\n\tgo epInformer.Run(stopCh)\n\tif !cache.WaitForCacheSync(stopCh, svcInformer.HasSynced, epInformer.HasSynced) {\n\t\tklog.Fatalf(fmt.Sprintf(\"time out waiting for cache sync\"))\n\t}\n\n\tsync := func(key interface{}, queue workqueue.RateLimitingInterface) error {\n\t\tdefer queue.Done(key)\n\n\t\tswitch k := key.(type) {\n\t\tcase svcKey:\n\t\t\tsvc, exists, err := svcIndexer.GetByKey(string(k))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !exists {\n\t\t\t\treturn fmt.Errorf(\"not exist\")\n\t\t\t}\n\t\t\tendpoints, exists, err := epIndexer.GetByKey(string(k))\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"failed to get endpoints\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !exists {\n\t\t\t\treturn fmt.Errorf(\"not exist\")\n\t\t\t}\n\n\t\t\tif svc.(*v1.Service).Spec.Type != v1.ServiceTypeLoadBalancer {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\trecorder.Eventf(svc.(*v1.Service), v1.EventTypeNormal, \"SetBalancer\", \"advertise svc ip\")\n\t\t\ts.SetBalancer(string(k), svc.(*v1.Service), endpoints.(*v1.Endpoints))\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unknown key type for %s %T\", key, key))\n\t\t}\n\t}\n\n\tfor {\n\t\tkey, quit := queue.Get()\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\n\t\terr := sync(key, queue)\n\t\tif err != nil {\n\t\t\tklog.Error(err)\n\t\t} else {\n\t\t\tqueue.Forget(key)\n\t\t}\n\t}\n}", "func (a *Client) WatchNetworkingV1beta1NamespacedIngressList(params *WatchNetworkingV1beta1NamespacedIngressListParams) (*WatchNetworkingV1beta1NamespacedIngressListOK, error) {\n\t// TODO: 
Validate the params before sending\n\tif params == nil {\n\t\tparams = NewWatchNetworkingV1beta1NamespacedIngressListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"watchNetworkingV1beta1NamespacedIngressList\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/json;stream=watch\", \"application/vnd.kubernetes.protobuf\", \"application/vnd.kubernetes.protobuf;stream=watch\", \"application/yaml\"},\n\t\tConsumesMediaTypes: []string{\"*/*\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &WatchNetworkingV1beta1NamespacedIngressListReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*WatchNetworkingV1beta1NamespacedIngressListOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for watchNetworkingV1beta1NamespacedIngressList: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\tlog.Debug(\"Adding watched objects for KogitoInfra controller\")\n\t// Create a new controller\n\tc, err := controller.New(\"kogitoinfra-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = c.Watch(&source.Kind{Type: &v1beta1.KogitoInfra{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar watchedObjects []framework.WatchedObjects\n\twatchedObjects = append(watchedObjects, getInfinispanWatchedObjects()...)\n\twatchedObjects = append(watchedObjects, getKafkaWatchedObjects()...)\n\twatchedObjects = append(watchedObjects, getKeycloakWatchedObjects()...)\n\n\tcontrollerWatcher := framework.NewControllerWatcher(r.(*ReconcileKogitoInfra).client, mgr, c, &v1beta1.KogitoInfra{})\n\tif err = controllerWatcher.Watch(watchedObjects...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func createIngress(kubeconfig, ingressFilename string) error {\n\t// TODO(nikhiljindal): Allow users to specify the list of clusters to create the ingress in\n\t// rather than assuming all contexts in kubeconfig.\n\tclusters, err := getClusters(kubeconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createIngressInClusters(kubeconfig, ingressFilename, clusters)\n}", "func run(ctx context.Context, kubeClient kubernetes.Interface, er record.EventRecorder, edgelbManager manager.EdgeLBManager, kubeInformerFactory kubeinformers.SharedInformerFactory, kubeCache dklbcache.KubernetesResourceCache, dcosClient *dcos.APIClient, saConfig dcos.ServiceAccountOptions) {\n\tingressInformer := kubeInformerFactory.Extensions().V1beta1().Ingresses()\n\tserviceInformer := kubeInformerFactory.Core().V1().Services()\n\t// we need to setup the secrets informer so that the kubeCache\n\t// gets populated accordingly\n\tsecretsInformer := 
kubeInformerFactory.Core().V1().Secrets()\n\tsecretsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})\n\tsecretsReflector := secretsreflector.New(dcosClient.Secrets, kubeCache, kubeClient)\n\n\t// Create an instance of the ingress controller.\n\tingressController := controllers.NewIngressController(kubeClient, er, ingressInformer, serviceInformer, kubeCache, edgelbManager, secretsReflector)\n\n\t// Create an instance of the service controller.\n\tserviceController := controllers.NewServiceController(kubeClient, er, serviceInformer, kubeCache, edgelbManager)\n\n\t// Start the shared informer factory.\n\tgo kubeInformerFactory.Start(ctx.Done())\n\n\t// Wait for the caches to be synced before starting workers.\n\tlog.Debug(\"waiting for informer caches to be synced\")\n\tif ok := cache.WaitForCacheSync(ctx.Done(), kubeCache.HasSynced, ingressInformer.Informer().HasSynced, serviceInformer.Informer().HasSynced, secretsInformer.Informer().HasSynced); !ok {\n\t\tlog.Error(\"failed to wait for informer caches to be synced\")\n\t\treturn\n\t}\n\tlog.Debug(\"informer caches are synced\")\n\n\t// Start the ingress and service controllers.\n\tvar wg sync.WaitGroup\n\tfor _, c := range []controllers.Controller{ingressController, serviceController} {\n\t\twg.Add(1)\n\t\tgo func(c controllers.Controller) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := c.Run(ctx); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}(c)\n\t}\n\n\t// Wait for the controllers to stop.\n\twg.Wait()\n\t// Wait for the default backend and admission webhook servers to stop.\n\tsrvWaitGroup.Wait()\n\t// Confirm successful shutdown.\n\tlog.WithField(\"version\", version.Version).Infof(\"%s is shutting down\", constants.ComponentName)\n\t// There is a goroutine in the background trying to renew the leader election lock.\n\t// Hence, we must manually exit now that we know controllers have been shutdown properly.\n\tos.Exit(0)\n}", "func (a *Client) 
WatchNetworkingV1beta1NamespacedIngress(params *WatchNetworkingV1beta1NamespacedIngressParams) (*WatchNetworkingV1beta1NamespacedIngressOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewWatchNetworkingV1beta1NamespacedIngressParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"watchNetworkingV1beta1NamespacedIngress\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/json;stream=watch\", \"application/vnd.kubernetes.protobuf\", \"application/vnd.kubernetes.protobuf;stream=watch\", \"application/yaml\"},\n\t\tConsumesMediaTypes: []string{\"*/*\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &WatchNetworkingV1beta1NamespacedIngressReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*WatchNetworkingV1beta1NamespacedIngressOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for watchNetworkingV1beta1NamespacedIngress: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (r *NuxeoReconciler) reconcileIngress(access v1alpha1.NuxeoAccess, forcePassthrough bool, nodeSet v1alpha1.NodeSet,\n\tinstance *v1alpha1.Nuxeo) error {\n\tingressName := ingressName(instance, nodeSet)\n\tif access != (v1alpha1.NuxeoAccess{}) {\n\t\tif expected, err := r.defaultIngress(instance, access, forcePassthrough, ingressName, nodeSet); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t_, err = r.addOrUpdate(ingressName, instance.Namespace, expected, &v1beta1.Ingress{}, util.IngressComparer)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\treturn r.removeIfPresent(instance, ingressName, instance.Namespace, &v1beta1.Ingress{})\n\t}\n}", "func (s *Server) forwardCacheEvents() {\n for e := range s.listener.C {\n // TODO: socket.io eats serialization errors here - use this to debug until this issue is fixed\n /*_, err := json.Marshal(e)\n if err != nil {\n s.logger.Errorf(\"ENCODING ERROR: %s %v\", err, e)\n } */\n s.logger.Debugf(\"forwarding event of type %T (using key %T) to active clients\", e.Object, e.Key)\n s.io.BroadcastTo(\"ui\", \"cache\", e)\n }\n}", "func (bc *ReconcileJenkinsInstance) newIngress(instanceName types.NamespacedName) (*v1beta1.Ingress, error) {\n\texists := false\n\n\tjenkinsInstance, err := bc.getJenkinsInstance(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif jenkinsInstance.Spec.Ingress == nil {\n\t\treturn nil, nil\n\t}\n\n\tingress, err := bc.getIngress(instanceName)\n\n\t// If the ingress doesn't exist, we'll create it\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the Ingress is not controlled by this JenkinsInstance resource, we should log\n\t\t// a warning to the event recorder and ret\n\t\tif !metav1.IsControlledBy(ingress, jenkinsInstance) {\n\t\t\tmsg := fmt.Sprintf(MessageResourceExists, ingress.GetName())\n\t\t\tbc.Event(jenkinsInstance, corev1.EventTypeWarning, 
ErrResourceExists, msg)\n\t\t\treturn ingress, fmt.Errorf(msg)\n\t\t}\n\n\t\texists = true\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": \"jenkinsci\",\n\t\t\"controller\": jenkinsInstance.GetName(),\n\t\t\"component\": string(jenkinsInstance.UID),\n\t}\n\n\tserviceName := jenkinsInstance.GetName()\n\tif jenkinsInstance.Spec.Service != nil && jenkinsInstance.Spec.Service.Name != \"\" {\n\t\tserviceName = jenkinsInstance.Spec.Service.Name\n\t}\n\tif jenkinsInstance.Spec.Ingress.Service != \"\" {\n\t\tserviceName = jenkinsInstance.Spec.Ingress.Service\n\t}\n\n\tingressPath := jenkinsInstance.Spec.Ingress.Path\n\tif ingressPath == \"\" {\n\t\tingressPath = \"/\"\n\t}\n\n\tif exists {\n\t\tingressCopy := ingress.DeepCopy()\n\t\tingressCopy.Labels = labels\n\t\tingressCopy.Spec.TLS = []v1beta1.IngressTLS{\n\t\t\t{\n\t\t\t\tSecretName: jenkinsInstance.Spec.Ingress.TlsSecret,\n\t\t\t\tHosts: []string{\n\t\t\t\t\tutil.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tingressCopy.Spec.Rules = []v1beta1.IngressRule{\n\t\t\t{\n\t\t\t\tHost: util.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPath: ingressPath,\n\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\tServiceName: serviceName,\n\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\t\t\t\tIntVal: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tif reflect.DeepEqual(ingressCopy.Spec, ingress.Spec) {\n\t\t\treturn ingress, nil\n\t\t}\n\n\t\tglog.Info(\"updating ingress\")\n\t\terr = bc.Client.Update(context.TODO(), ingressCopy)\n\t\treturn ingress, err\n\n\t} else {\n\n\t\tingress = &v1beta1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: 
jenkinsInstance.GetName(),\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tLabels: labels,\n\t\t\t\tAnnotations: jenkinsInstance.Spec.Ingress.Annotations,\n\t\t\t},\n\t\t\tSpec: v1beta1.IngressSpec{\n\t\t\t\tTLS: []v1beta1.IngressTLS{\n\t\t\t\t\t{\n\t\t\t\t\t\tSecretName: jenkinsInstance.Spec.Ingress.TlsSecret,\n\t\t\t\t\t\tHosts: []string{\n\t\t\t\t\t\t\tutil.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRules: []v1beta1.IngressRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tHost: util.GetJenkinsLocationHost(jenkinsInstance),\n\t\t\t\t\t\tIngressRuleValue: v1beta1.IngressRuleValue{\n\t\t\t\t\t\t\tHTTP: &v1beta1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\t\tPaths: []v1beta1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tPath: ingressPath,\n\t\t\t\t\t\t\t\t\t\tBackend: v1beta1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\t\tServiceName: serviceName,\n\t\t\t\t\t\t\t\t\t\t\tServicePort: intstr.IntOrString{\n\t\t\t\t\t\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\t\t\t\t\t\tIntVal: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr = controllerutil.SetControllerReference(jenkinsInstance, ingress, bc.scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = bc.Client.Create(context.TODO(), ingress)\n\t\treturn ingress, err\n\t}\n}", "func ConvertIngressURLToIngress(ingressURL URL, serviceName string) iextensionsv1.Ingress {\n\tport := intstr.IntOrString{\n\t\tType: intstr.Int,\n\t\tIntVal: int32(ingressURL.Spec.Port),\n\t}\n\tingress := iextensionsv1.Ingress{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Ingress\",\n\t\t\tAPIVersion: \"extensions/v1beta1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ingressURL.Name,\n\t\t},\n\t\tSpec: iextensionsv1.IngressSpec{\n\t\t\tRules: []iextensionsv1.IngressRule{\n\t\t\t\t{\n\t\t\t\t\tHost: 
ingressURL.Spec.Host,\n\t\t\t\t\tIngressRuleValue: iextensionsv1.IngressRuleValue{\n\t\t\t\t\t\tHTTP: &iextensionsv1.HTTPIngressRuleValue{\n\t\t\t\t\t\t\tPaths: []iextensionsv1.HTTPIngressPath{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPath: ingressURL.Spec.Path,\n\t\t\t\t\t\t\t\t\tBackend: iextensionsv1.IngressBackend{\n\t\t\t\t\t\t\t\t\t\tServiceName: serviceName,\n\t\t\t\t\t\t\t\t\t\tServicePort: port,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tif len(ingressURL.Spec.TLSSecret) > 0 {\n\t\tingress.Spec.TLS = []iextensionsv1.IngressTLS{\n\t\t\t{\n\t\t\t\tHosts: []string{\n\t\t\t\t\tingressURL.Spec.Host,\n\t\t\t\t},\n\t\t\t\tSecretName: ingressURL.Spec.TLSSecret,\n\t\t\t},\n\t\t}\n\t}\n\treturn ingress\n}", "func EventSubscribeH(w http.ResponseWriter, r *http.Request) {\n\n\tlog.V(logLevel).Debugf(\"%s:subscribe:> subscribe on subscribe\", logPrefix)\n\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tlog.V(logLevel).Debugf(\"%s:subscribe:> watch all events\", logPrefix)\n\n\tvar (\n\t\tsm = distribution.NewServiceModel(r.Context(), envs.Get().GetStorage())\n\t\tnm = distribution.NewNamespaceModel(r.Context(), envs.Get().GetStorage())\n\t\tcm = distribution.NewClusterModel(r.Context(), envs.Get().GetStorage())\n\t\tdone = make(chan bool, 1)\n\t)\n\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.V(logLevel).Debugf(\"%s:subscribe:> set websocket upgrade err: %s\", logPrefix, err.Error())\n\t\treturn\n\t}\n\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\n\tvar serviceEvents = make(chan types.ServiceEvent)\n\tvar namespaceEvents = make(chan types.NamespaceEvent)\n\tvar clusterEvents = make(chan types.ClusterEvent)\n\n\tnotify := w.(http.CloseNotifier).CloseNotify()\n\n\tgo func() {\n\t\t<-notify\n\t\tlog.V(logLevel).Debugf(\"%s:subscribe:> HTTP connection just closed.\", 
logPrefix)\n\t\tdone <- true\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\tclose(serviceEvents)\n\t\t\t\tclose(namespaceEvents)\n\t\t\t\tclose(clusterEvents)\n\t\t\t\treturn\n\t\t\tcase e := <-clusterEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = v1.View().Cluster().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"cluster\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write cluster event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\tcase e := <-serviceEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = v1.View().Service().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"service\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write service event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\tcase e := <-namespaceEvents:\n\n\t\t\t\tvar data interface{}\n\t\t\t\tif e.Data == nil {\n\t\t\t\t\tdata = nil\n\t\t\t\t} else {\n\t\t\t\t\tdata = v1.View().Namespace().New(e.Data)\n\t\t\t\t}\n\n\t\t\t\tevent := Event{\n\t\t\t\t\tEntity: \"namespace\",\n\t\t\t\t\tAction: e.Action,\n\t\t\t\t\tName: e.Name,\n\t\t\t\t\tData: data,\n\t\t\t\t}\n\n\t\t\t\tif err = conn.WriteJSON(event); err != nil {\n\t\t\t\t\tlog.Errorf(\"%s:subscribe:> write namespace event to socket error.\", logPrefix)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo cm.Watch(clusterEvents)\n\tgo sm.Watch(serviceEvents, nil)\n\tgo nm.Watch(namespaceEvents)\n\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\tif err := conn.WriteMessage(websocket.TextMessage, []byte{}); err != nil {\n\t\t\t\tlog.Errorf(\"%s:subscribe:> 
writing to the client websocket err: %s\", logPrefix, err.Error())\n\t\t\t\tdone <- true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-done\n}", "func (k Key) IsIngress() bool {\n\treturn k.TrafficDirection == trafficdirection.Ingress.Uint8()\n}", "func (s *k8sStore) GetIngress(key string) (*networkingv1.Ingress, error) {\n\treturn s.listers.Ingress.ByKey(key)\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(controllerAgentName, mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to KubernetesEventSource\n\terr = c.Watch(&source.Kind{Type: &sourcesv1alpha1.KubernetesEventSource{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to owned ContainerSource\n\terr = c.Watch(&source.Kind{Type: &sourcesv1alpha1.ContainerSource{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &sourcesv1alpha1.KubernetesEventSource{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func createIngress(kubeconfig string, kubeContexts []string, ing *v1beta1.Ingress) ([]string, map[string]kubeclient.Interface, error) {\n\tclients, err := kubeutils.GetClients(kubeconfig, kubeContexts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tclusters, createErr := createIngressInClusters(ing, clients)\n\treturn clusters, clients, createErr\n}", "func EventHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tvar remote string\n\tif tmp := r.Header.Get(\"X-Forwarded-For\"); tmp != \"\" {\n\t\tremote = tmp\n\t} else {\n\t\tremote = r.RemoteAddr\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugln(\"Incomming event from:\", remote, \"With Header:\", r.Header)\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugln(\"Request params:\", r.Form)\n\t// required fields\n\tif len(r.Form[\"appid\"]) < 1 
{\n\t\tErrorAndReturnCode(w, \"Missing Required field: No appid\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"adname\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No adname\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"adid\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No adid\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"device\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No device\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"idfa\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No idfa\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"point\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No point\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"ts\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No ts\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"sign\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No sign\", 400)\n\t\treturn\n\t}\n\tif len(r.Form[\"keyword\"]) < 1 {\n\t\tErrorAndReturnCode(w, \"Missing Required field: No keyword\", 400)\n\t\treturn\n\t}\n\t// set a new avro record\n\tstr := fmt.Sprintf(\"adid=%sadname=%sappid=%sdevice=%sidfa=%spoint=%sts=%skey=%s\", r.Form[\"adid\"][0], r.Form[\"adname\"][0], r.Form[\"appid\"][0], r.Form[\"device\"][0], r.Form[\"idfa\"][0], r.Form[\"point\"][0], r.Form[\"ts\"][0], conf.Extension.Anwo.Key)\n\tcrypted := md5.Sum([]byte(str))\n\tif fmt.Sprintf(\"%x\", crypted) != strings.Split(r.Form[\"sign\"][0], \",\")[0] {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t}).Warnf(\"Sign not matched!: %x :%s\\n, bypass sign check? 
%s\", crypted, r.Form[\"sign\"][0], *sign_check)\n\t\tif !*sign_check {\n\t\t\tErrorAndReturnCode(w, \"Sign mismatched!\", 400)\n\t\t\treturn\n\t\t}\n\t}\n\trecord, err := avro.NewRecord()\n\tif err != nil {\n\t\tErrorAndReturnCode(w, \"Failed to set a new avro record:\"+err.Error(), 500)\n\t\treturn\n\t}\n\t// optional fields\n\tif len(r.Form[\"ip\"]) > 0 {\n\t\trecord.Set(\"ip\", r.Form[\"ip\"][0])\n\t}\n\t// set required fields\n\trecord.Set(\"did\", r.Form[\"idfa\"][0])\n\tnsec, err := strconv.ParseInt(r.Form[\"ts\"][0], 10, 64)\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t}).Errorln(\"Failed to parse ts to int:\", err)\n\t\tErrorAndReturnCode(w, \"Failed to parse ts:\"+err.Error(), 500)\n\t\treturn\n\t}\n\tt := time.Unix(0, nsec*1000000)\n\trecord.Set(\"timestamp\", t.Format(time.RFC3339))\n\trecord.Set(\"id\", r.Form[\"keyword\"][0])\n\trecord.Set(\"event\", \"anwo_postback\")\n\trecord.Set(\"os\", \"ios\")\n\t// extensions fields\n\textension := map[string](interface{}){}\n\tfor k, v := range r.Form {\n\t\tif k != \"ip\" && k != \"aid\" && k != \"idfa\" && k != \"timestamp\" && k != \"keyword\" && k != \"sign\" && k != \"ts\" {\n\t\t\textension[k] = v[0]\n\t\t}\n\t}\n\tif len(extension) != 0 {\n\t\trecord.Set(\"extension\", extension)\n\t}\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t\t\"record\": record.String(),\n\t}).Infoln(\"Recieved post back.\")\n\t// encode avro\n\tbuf := new(bytes.Buffer)\n\tif err = avro.Encode(buf, record); err != nil {\n\t\tErrorAndReturnCode(w, \"Failed to encode avro record:\"+err.Error(), 500)\n\t\treturn\n\t}\n\turl := fmt.Sprintf(\"%s?params=%s\", conf.Extension.Anwo.Td_postback_url, r.Form[\"keyword\"][0])\n\tgo func(url string) {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"module\": \"adwo\",\n\t\t\t\"url\": url,\n\t\t}).Infoln(\"Send postback to adserver with request url.\")\n\n\t\trequest, err := http.NewRequest(\"GET\", url, nil)\n\t\tif err != nil 
{\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Failed to create request:\", err)\n\t\t\treturn\n\t\t}\n\t\trequest.Header.Add(\"Connection\", \"keep-alive\")\n\t\tresp, err := client.Do(request)\n\t\tif err != nil {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Failed to send clk to remote server:\", err)\n\t\t\treturn\n\t\t}\n\t\tif resp.StatusCode != 200 {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Errorln(\"Error when send td_postback:\", resp.Status)\n\t\t\tstr, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"module\": \"adwo\",\n\t\t\t}).Debugln(\"Resp body:\", string(str))\n\t\t\tresp.Body.Close()\n\t\t\treturn\n\t\t}\n\t\tio.Copy(ioutil.Discard, resp.Body)\n\t\tresp.Body.Close()\n\t}(url)\n\n\t// send to kafka\n\tpart, offset, err := kafka.SendByteMessage(buf.Bytes(), \"default\")\n\tif err != nil {\n\t\tfail_safe.Println(\"error:\", err)\n\t\tfail_safe.Println(\"record:\", record)\n\t\tfail_safe.Println(\"data:\", buf.Bytes())\n\t\tErrorAndReturnCode(w, \"Failed to send message to kafka:\"+err.Error()+\"Data has been writen to a backup file. 
Please contact us.\", 200)\n\t\treturn\n\t}\n\t// done\n\tlog.WithFields(logrus.Fields{\n\t\t\"module\": \"adwo\",\n\t}).Debugf(\"New record partition=%d\\toffset=%d\\n\", part, offset)\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"1 messages have been writen.\")\n}", "func TestIngressNoUpdate(t *testing.T) {\n\tingrNoUpdate := &networkingv1.Ingress{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"red-ns\",\n\t\t\tName: \"testingr-noupdate\",\n\t\t},\n\t\tSpec: networkingv1.IngressSpec{\n\t\t\tDefaultBackend: &networkingv1.IngressBackend{\n\t\t\t\tService: &networkingv1.IngressServiceBackend{\n\t\t\t\t\tName: \"testsvc\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := kubeClient.NetworkingV1().Ingresses(\"red-ns\").Create(context.TODO(), ingrNoUpdate, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in adding Ingress: %v\", err)\n\t}\n\twaitAndverify(t, \"Ingress/red-ns/testingr-noupdate\")\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"2\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\tingrNoUpdate.Status = networkingv1.IngressStatus{\n\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\tIngress: []corev1.LoadBalancerIngress{\n\t\t\t\t{\n\t\t\t\t\tIP: \"1.1.1.1\",\n\t\t\t\t\tHostname: \"testingr.avi.internal\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP: \"2.3.4.5\",\n\t\t\t\t\tHostname: \"testingr2.avi.internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tingrNoUpdate.ResourceVersion = \"3\"\n\t_, err = kubeClient.NetworkingV1().Ingresses(\"red-ns\").Update(context.TODO(), ingrNoUpdate, metav1.UpdateOptions{})\n\tif err != nil 
{\n\t\tt.Fatalf(\"error in updating Ingress: %v\", err)\n\t}\n\n\twaitAndverify(t, \"\")\n}", "func monitor(endpointPort, containerPort int, label, dockerHost string) {\n\tresp, err := client.Events(context.Background(), types.EventsOptions{})\n\neventLoop:\n\tfor {\n\t\tselect {\n\t\tcase m := <-resp:\n\t\t\thandleMessage(m, endpointPort, containerPort, label, dockerHost)\n\t\tcase err := <-err:\n\t\t\tlog.Println(err)\n\t\t\tbreak eventLoop\n\t\t}\n\t}\n\n}", "func (ct *ctrlerCtx) handleDSCProfileEventNoResolver(evt *kvstore.WatchEvent) error {\n\tswitch tp := evt.Object.(type) {\n\tcase *cluster.DSCProfile:\n\t\teobj := evt.Object.(*cluster.DSCProfile)\n\t\tkind := \"DSCProfile\"\n\n\t\tlog.Infof(\"Watcher: Got %s watch event(%s): {%+v}\", kind, evt.Type, eobj)\n\n\t\tct.Lock()\n\t\thandler, ok := ct.handlers[kind]\n\t\tct.Unlock()\n\t\tif !ok {\n\t\t\tct.logger.Fatalf(\"Cant find the handler for %s\", kind)\n\t\t}\n\t\tdscprofileHandler := handler.(DSCProfileHandler)\n\t\t// handle based on event type\n\t\tctrlCtx := &dscprofileCtx{event: evt.Type, obj: &DSCProfile{DSCProfile: *eobj, ctrler: ct}}\n\t\tswitch evt.Type {\n\t\tcase kvstore.Created:\n\t\t\tfallthrough\n\t\tcase kvstore.Updated:\n\t\t\tfobj, err := ct.getObject(kind, ctrlCtx.GetKey())\n\t\t\tif err != nil {\n\t\t\t\tct.addObject(ctrlCtx)\n\t\t\t\tct.stats.Counter(\"DSCProfile_Created_Events\").Inc()\n\n\t\t\t\t// call the event handler\n\t\t\t\tctrlCtx.Lock()\n\t\t\t\terr = dscprofileHandler.OnDSCProfileCreate(ctrlCtx.obj)\n\t\t\t\tctrlCtx.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. 
Err: %v\", kind, ctrlCtx.obj.GetObjectMeta(), err)\n\t\t\t\t\tct.delObject(kind, ctrlCtx.GetKey())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfResVer, fErr := strconv.ParseInt(fobj.GetResourceVersion(), 10, 64)\n\t\t\t\teResVer, eErr := strconv.ParseInt(eobj.GetResourceVersion(), 10, 64)\n\t\t\t\tif ct.resolver != nil && fErr == nil && eErr == nil && fResVer >= eResVer {\n\t\t\t\t\t// Event already processed.\n\t\t\t\t\tct.logger.Infof(\"Skipping update due to old resource version\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tctrlCtx := fobj.(*dscprofileCtx)\n\t\t\t\tct.stats.Counter(\"DSCProfile_Updated_Events\").Inc()\n\t\t\t\tctrlCtx.Lock()\n\t\t\t\tp := cluster.DSCProfile{Spec: eobj.Spec,\n\t\t\t\t\tObjectMeta: eobj.ObjectMeta,\n\t\t\t\t\tTypeMeta: eobj.TypeMeta,\n\t\t\t\t\tStatus: eobj.Status}\n\n\t\t\t\terr = dscprofileHandler.OnDSCProfileUpdate(ctrlCtx.obj, &p)\n\t\t\t\tctrlCtx.obj.DSCProfile = *eobj\n\t\t\t\tctrlCtx.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. Err: %v\", kind, ctrlCtx.obj.GetObjectMeta(), err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t}\n\t\tcase kvstore.Deleted:\n\t\t\tctrlCtx := &dscprofileCtx{event: evt.Type, obj: &DSCProfile{DSCProfile: *eobj, ctrler: ct}}\n\t\t\tfobj, err := ct.findObject(kind, ctrlCtx.GetKey())\n\t\t\tif err != nil {\n\t\t\t\tct.logger.Errorf(\"Object %s/%s not found durng delete. Err: %v\", kind, eobj.GetKey(), err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tobj := fobj.(*DSCProfile)\n\t\t\tct.stats.Counter(\"DSCProfile_Deleted_Events\").Inc()\n\t\t\tobj.Lock()\n\t\t\terr = dscprofileHandler.OnDSCProfileDelete(obj)\n\t\t\tobj.Unlock()\n\t\t\tif err != nil {\n\t\t\t\tct.logger.Errorf(\"Error deleting %s: %+v. 
Err: %v\", kind, obj.GetObjectMeta(), err)\n\t\t\t}\n\t\t\tct.delObject(kind, ctrlCtx.GetKey())\n\t\t\treturn nil\n\n\t\t}\n\tdefault:\n\t\tct.logger.Fatalf(\"API watcher Found object of invalid type: %v on DSCProfile watch channel\", tp)\n\t}\n\n\treturn nil\n}", "func (e *EventNotif) activate(client DockerClient) {\n\tdockerEventsCh := make(chan *docker.APIEvents)\n\tif err := client.AddEventListener(dockerEventsCh); err != nil {\n\t\tlog.Fatalf(\"[ERROR] can't add even listener, %v\", err)\n\t}\n\n\tupStatuses := []string{\"start\", \"restart\"}\n\tdownStatuses := []string{\"die\", \"destroy\", \"stop\", \"pause\"}\n\n\tfor dockerEvent := range dockerEventsCh {\n\t\tif dockerEvent.Type != \"container\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !contains(dockerEvent.Status, upStatuses) && !contains(dockerEvent.Status, downStatuses) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] api event %+v\", dockerEvent)\n\t\tcontainerName := strings.TrimPrefix(dockerEvent.Actor.Attributes[\"name\"], \"/\")\n\n\t\tif !e.isAllowed(containerName) {\n\t\t\tlog.Printf(\"[INFO] container %s excluded\", containerName)\n\t\t\tcontinue\n\t\t}\n\n\t\tevent := Event{\n\t\t\tContainerID: dockerEvent.Actor.ID,\n\t\t\tContainerName: containerName,\n\t\t\tStatus: contains(dockerEvent.Status, upStatuses),\n\t\t\tTS: time.Unix(dockerEvent.Time/1000, dockerEvent.TimeNano),\n\t\t\tGroup: e.group(dockerEvent.From),\n\t\t}\n\t\tlog.Printf(\"[INFO] new event %+v\", event)\n\t\te.eventsCh <- event\n\t}\n\tlog.Fatalf(\"[ERROR] event listener failed\")\n}", "func New(\n\tnamespace string,\n\tresyncPeriod time.Duration,\n\tclient clientset.Interface,\n\tupdateCh *channels.RingChannel,\n\tdisableCatchAll bool) Storer {\n\n\tstore := &k8sStore{\n\t\tinformers: &Informer{},\n\t\tlisters: &Lister{},\n\t\tupdateCh: updateCh,\n\t\tsyncSecretMu: &sync.Mutex{},\n\t\tbackendConfigMu: &sync.RWMutex{},\n\t\tsecretIngressMap: NewObjectRefMap(),\n\t}\n\n\teventBroadcaster := 
record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\teventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{\n\t\tInterface: client.CoreV1().Events(namespace),\n\t})\n\trecorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{\n\t\tComponent: \"alb-ingress-controller\",\n\t})\n\n\tstore.listers.IngressWithAnnotation.Store = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)\n\t// create informers factory, enable and assign required informers\n\tinfFactory := informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod,\n\t\tinformers.WithNamespace(namespace),\n\t)\n\n\tstore.informers.Ingress = infFactory.Networking().V1().Ingresses().Informer()\n\tstore.listers.Ingress.Store = store.informers.Ingress.GetStore()\n\n\tstore.informers.Endpoint = infFactory.Core().V1().Endpoints().Informer()\n\tstore.listers.Endpoint.Store = store.informers.Endpoint.GetStore()\n\n\tstore.informers.Service = infFactory.Core().V1().Services().Informer()\n\tstore.listers.Service.Store = store.informers.Service.GetStore()\n\n\tstore.informers.Node = infFactory.Core().V1().Nodes().Informer()\n\tstore.listers.Node.Store = store.informers.Node.GetStore()\n\n\tstore.informers.Pod = infFactory.Core().V1().Pods().Informer()\n\tstore.listers.Pod.Store = store.informers.Pod.GetStore()\n\n\tingDeleteHandler := func(obj interface{}) {\n\t\ting, ok := toIngress(obj)\n\t\tif !ok {\n\t\t\t// If we reached here it means the ingress was deleted but its final state is unrecorded.\n\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\tif !ok {\n\t\t\t\tklog.ErrorS(nil, \"Error obtaining object from tombstone\", \"key\", obj)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ting, ok = tombstone.Obj.(*networking.Ingress)\n\t\t\tif !ok {\n\t\t\t\tklog.Errorf(\"Tombstone contained object that is not an Ingress: %#v\", obj)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif !IsValid(ing) {\n\t\t\treturn\n\t\t}\n\n\t\tif isCatchAllIngress(ing.Spec) && 
disableCatchAll {\n\t\t\tklog.InfoS(\"Ignoring delete for catch-all because of --disable-catch-all\", \"ingress\", klog.KObj(ing))\n\t\t\treturn\n\t\t}\n\n\t\t//store.listers.IngressWithAnnotation.Delete(ing)\n\n\t\tkey := MetaNamespaceKey(ing)\n\t\tstore.secretIngressMap.Delete(key)\n\n\t\tupdateCh.In() <- helper.Event{\n\t\t\tType: helper.IngressDeleteEvent,\n\t\t\tObj: obj,\n\t\t}\n\t}\n\n\tingEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\ting, ok := toIngress(obj)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !IsValid(ing) {\n\t\t\t\tingressClass, _ := annotations.GetStringAnnotation(IngressKey, ing)\n\t\t\t\tklog.InfoS(\"Ignoring ingress\", \"ingress\", klog.KObj(ing), \"kubernetes.io/ingress.class\", ingressClass, \"ingressClassName\", pointer.StringDeref(ing.Spec.IngressClassName, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif isCatchAllIngress(ing.Spec) && disableCatchAll {\n\t\t\t\tklog.InfoS(\"Ignoring add for catch-all ingress because of --disable-catch-all\", \"ingress\", klog.KObj(ing))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trecorder.Eventf(ing, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\n\t\t\tstore.syncIngress(ing)\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.CreateEvent,\n\t\t\t\tObj: obj,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: ingDeleteHandler,\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\toldIng, ok := toIngress(old)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcurIng, ok := toIngress(cur)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvalidOld := IsValid(oldIng)\n\t\t\tvalidCur := IsValid(curIng)\n\t\t\tif !validOld && validCur {\n\t\t\t\tif isCatchAllIngress(curIng.Spec) && disableCatchAll {\n\t\t\t\t\tklog.InfoS(\"ignoring update for catch-all ingress because of --disable-catch-all\", \"ingress\", klog.KObj(curIng))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tklog.InfoS(\"creating ingress\", \"ingress\", klog.KObj(curIng), \"class\", 
IngressKey)\n\t\t\t\trecorder.Eventf(curIng, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\t\t\t} else if validOld && !validCur {\n\t\t\t\tklog.InfoS(\"removing ingress\", \"ingress\", klog.KObj(curIng), \"class\", IngressKey)\n\t\t\t\tingDeleteHandler(old)\n\t\t\t\treturn\n\t\t\t} else if validCur && !reflect.DeepEqual(old, cur) {\n\t\t\t\tif isCatchAllIngress(curIng.Spec) && disableCatchAll {\n\t\t\t\t\tklog.InfoS(\"ignoring update for catch-all ingress and delete old one because of --disable-catch-all\", \"ingress\", klog.KObj(curIng))\n\t\t\t\t\tingDeleteHandler(old)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trecorder.Eventf(curIng, corev1.EventTypeNormal, \"Sync\", \"Scheduled for sync\")\n\t\t\t} else {\n\t\t\t\tklog.V(3).InfoS(\"No changes on ingress. Skipping update\", \"ingress\", klog.KObj(curIng))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstore.syncIngress(curIng)\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.UpdateEvent,\n\t\t\t\tObj: cur,\n\t\t\t}\n\t\t},\n\t}\n\n\tepEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tep1 := obj.(*corev1.Endpoints)\n\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"get service GetByKey by endpoint failed\", \"endpoint\", ep1)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !exist {\n\t\t\t\tklog.Warningf(\"epEventHandler %s\", key)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts := svc.(*corev1.Service)\n\n\t\t\tklog.Info(\"controller: endpoint add event\",\n\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\tObj: s,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tep1 := obj.(*corev1.Endpoints)\n\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"DeleteFunc get service GetByKey by endpoint failed\", 
\"endpoint\", ep1)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !exist {\n\t\t\t\tklog.Warningf(\"DeleteFunc epEventHandler %s\", key)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ts := svc.(*corev1.Service)\n\n\t\t\tklog.Info(\"controller: endpoint delete event\",\n\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\tObj: s,\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tep1 := old.(*corev1.Endpoints)\n\t\t\tep2 := cur.(*corev1.Endpoints)\n\t\t\tif !reflect.DeepEqual(ep1.Subsets, ep2.Subsets) {\n\t\t\t\tkey := MetaNamespaceKey(ep1)\n\t\t\t\tsvc, exist, err := store.listers.Service.GetByKey(key)\n\t\t\t\tif err != nil {\n\t\t\t\t\tklog.Error(err, \"UpdateFunc get service GetByKey by endpoint failed\", \"endpoint\", ep1)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !exist {\n\t\t\t\t\tklog.Warningf(\"UpdateFunc epEventHandler %s\", key)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts := svc.(*corev1.Service)\n\n\t\t\t\tklog.Info(\"controller: endpoint update event\",\n\t\t\t\t\tutil.NamespacedName(ep1).String())\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.EndPointEvent,\n\t\t\t\t\tObj: s,\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tpodEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\terr := store.listers.Pod.Add(obj)\n\t\t\tif err != nil {\n\t\t\t\tklog.Error(err, \"Pod Add failed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t_ = store.listers.Pod.Delete(obj)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t},\n\t}\n\tnodeEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tserviceList := store.listers.Service.List()\n\t\t\tfor _, v := range serviceList {\n\t\t\t\tsvc := v.(*corev1.Service)\n\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\tObj: 
svc,\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\tnodeOld := old.(*corev1.Node)\n\t\t\tnodeNew := cur.(*corev1.Node)\n\n\t\t\tif !reflect.DeepEqual(nodeOld.Labels, nodeNew.Labels) {\n\t\t\t\tserviceList := store.listers.Service.List()\n\t\t\t\tfor _, v := range serviceList {\n\t\t\t\t\tsvc := v.(*corev1.Service)\n\t\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\t\tObj: svc,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tserviceList := store.listers.Service.List()\n\t\t\tfor _, v := range serviceList {\n\t\t\t\tsvc := v.(*corev1.Service)\n\t\t\t\tklog.Info(\"node change: enqueue service\", util.Key(svc))\n\t\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\t\tType: helper.NodeEvent,\n\t\t\t\t\tObj: svc,\n\t\t\t\t}\n\t\t\t}\n\n\t\t},\n\t}\n\n\tserviceHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tcurSvc := obj.(*corev1.Service)\n\t\t\tstore.enqueueImpactedIngresses(updateCh, curSvc)\n\t\t},\n\t\tUpdateFunc: func(old, cur interface{}) {\n\t\t\t// update the server group\n\t\t\toldSvc := old.(*corev1.Service)\n\t\t\tcurSvc := cur.(*corev1.Service)\n\n\t\t\tif reflect.DeepEqual(oldSvc, curSvc) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tupdateCh.In() <- helper.Event{\n\t\t\t\tType: helper.ServiceEvent,\n\t\t\t\tObj: cur,\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t// ingress refer service to delete\n\t\t\tcurSvc := obj.(*corev1.Service)\n\t\t\tstore.enqueueImpactedIngresses(updateCh, curSvc)\n\t\t},\n\t}\n\n\t_, _ = store.informers.Ingress.AddEventHandler(ingEventHandler)\n\t_, _ = store.informers.Endpoint.AddEventHandler(epEventHandler)\n\t_, _ = store.informers.Node.AddEventHandler(podEventHandler)\n\t_, _ = store.informers.Service.AddEventHandler(serviceHandler)\n\t_, _ = 
store.informers.Node.AddEventHandler(nodeEventHandler)\n\treturn store\n}", "func (inf *meshInformer) OnPartOfIngressSpec(ingress string, gjsonPath GJSONPath, fn IngressSpecFunc) error {\n\tstoreKey := layout.IngressSpecKey(ingress)\n\tsyncerKey := fmt.Sprintf(\"ingress-%s\", ingress)\n\n\tspecFunc := func(event Event, value string) bool {\n\t\tingressSpec := &spec.Ingress{}\n\t\tif event.EventType != EventDelete {\n\t\t\tif err := yaml.Unmarshal([]byte(value), ingressSpec); err != nil {\n\t\t\t\tlogger.Errorf(\"BUG: unmarshal %s to yaml failed: %v\", value, err)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn fn(event, ingressSpec)\n\t}\n\n\treturn inf.onSpecPart(storeKey, syncerKey, gjsonPath, specFunc)\n}", "func (ing FakeIngress) UpdateIngress() (*networking.Ingress, error) {\n\n\t//check if resource already exists\n\tingress, err := KubeClient.NetworkingV1().Ingresses(ing.Namespace).Get(context.TODO(), ing.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//increment resource version\n\tnewIngress := ing.IngressMultiPath() //Maybe we should replace Ingress() with IngressMultiPath() completely\n\trv, _ := strconv.Atoi(ingress.ResourceVersion)\n\tnewIngress.ResourceVersion = strconv.Itoa(rv + 1)\n\n\t//update ingress resource\n\tupdatedIngress, err := KubeClient.NetworkingV1().Ingresses(newIngress.Namespace).Update(context.TODO(), newIngress, metav1.UpdateOptions{})\n\treturn updatedIngress, err\n}", "func (c *AviController) SetupServiceImportEventHandlers(numWorkers uint32) {\n\tutils.AviLog.Infof(\"Setting up ServiceImport CRD Event handlers\")\n\n\tserviceImportEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsi := obj.(*akov1alpha1.ServiceImport)\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || 
!utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import add event: Namespace: %s didn't qualify filter. Not adding Service Import\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := c.GetValidator().ValidateServiceImportObj(key, si); err != nil {\n\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of ServiceImport failed: %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\toldObj := old.(*akov1alpha1.ServiceImport)\n\t\t\tsi := new.(*akov1alpha1.ServiceImport)\n\t\t\tif !reflect.DeepEqual(oldObj.Spec, si.Spec) {\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import update event: Namespace: %s didn't qualify filter. 
Not updating Service Import\", key, namespace)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err := c.GetValidator().ValidateServiceImportObj(key, si); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of ServiceImport failed: %v\", key, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsi, ok := obj.(*akov1alpha1.ServiceImport)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsi, ok = tombstone.Obj.(*akov1alpha1.ServiceImport)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not a ServiceImport: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import delete event: Namespace: %s didn't qualify filter. 
Not deleting Service Import\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t}\n\tc.informers.ServiceImportInformer.Informer().AddEventHandler(serviceImportEventHandler)\n}", "func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {\n\tingress := &networkingv1.Ingress{}\n\n\terr := r.Get(ctx, req.NamespacedName, ingress)\n\tif apierrors.IsNotFound(err) {\n\t\t// The ingress was deleted. Construct a metadata-only ingress object\n\t\t// just for monitor deletion.\n\t\tingress = &networkingv1.Ingress{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: req.NamespacedName.Name,\n\t\t\t\tNamespace: req.NamespacedName.Namespace,\n\t\t\t},\n\t\t}\n\n\t\terr = r.monitorService.DeleteMonitor(ingress)\n\t} else if err == nil {\n\t\tif ingress.Annotations[config.AnnotationEnabled] == \"true\" {\n\t\t\tcreateAfter := time.Until(ingress.CreationTimestamp.Add(r.creationDelay))\n\n\t\t\t// If a creation delay was configured, we will requeue the\n\t\t\t// reconciliation until after the creation delay passed.\n\t\t\tif createAfter > 0 {\n\t\t\t\treturn reconcile.Result{RequeueAfter: createAfter}, nil\n\t\t\t}\n\n\t\t\terr = r.handleCreateOrUpdate(ctx, ingress)\n\t\t} else {\n\t\t\terr = r.monitorService.DeleteMonitor(ingress)\n\t\t}\n\t}\n\n\treturn reconcile.Result{}, err\n}", "func (r *IngressReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"ingress\", req.NamespacedName)\n\tlog.Info(\"reconcile\", \"request\", req)\n\n\tingress := &networkingv1beta1.Ingress{}\n\tif err := r.Get(ctx, req.NamespacedName, ingress); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tlog.Info(\"clean up\", \"request\", 
req)\n\t\treturn reconcile.Result{}, r.cleanup(ctx, req)\n\t}\n\tif !ingress.GetDeletionTimestamp().IsZero() {\n\t\tlog.Info(\"clean up\", \"request\", req)\n\t\treturn reconcile.Result{}, r.cleanup(ctx, req)\n\t}\n\tlog.Info(\"reconcile gateway\", \"request\", req)\n\tif err := r.ReconcileGateway(ctx, req); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlog.Info(\"reconcile virtual service\", \"request\", req)\n\tif err := r.ReconcileVirtualService(ctx, ingress); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileLegacyHeader) reconcileIngress(ctx context.Context, instance *operatorsv1alpha1.LegacyHeader, needToRequeue *bool) error {\n\treqLogger := log.WithValues(\"func\", \"reconcileIngress\", \"instance.Name\", instance.Name)\n\t// Define a new Ingress\n\tnewNavIngress := res.IngressForLegacyUI(instance)\n\t// Set instance as the owner and controller of the ingress\n\terr := controllerutil.SetControllerReference(instance, newNavIngress, r.scheme)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to set owner for Nav ingress\")\n\t\treturn nil\n\t}\n\terr = res.ReconcileIngress(ctx, r.client, instance.Namespace, res.LegacyReleaseName, newNavIngress, needToRequeue)\n\tif err != nil {\n\t\treturn err\n\t}\n\treqLogger.Info(\"got legacy header Ingress\")\n\n\treturn nil\n}", "func (c *HAProxyController) updateHAProxy() {\n\tlogger.Trace(\"HAProxy config sync started\")\n\n\terr := c.Client.APIStartTransaction()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tc.Client.APIDisposeTransaction()\n\t}()\n\n\treload, restart := c.handleGlobalConfig()\n\n\tif route.CustomRoutes {\n\t\tlogger.Error(route.RoutesReset(c.Client))\n\t\troute.CustomRoutes = false\n\t}\n\n\tfor _, namespace := range c.Store.Namespaces {\n\t\tif !namespace.Relevant {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ingress := range namespace.Ingresses {\n\t\t\tif ingress.Status == DELETED 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !c.igClassIsSupported(ingress) {\n\t\t\t\tlogger.Debugf(\"ingress '%s/%s' ignored: no matching IngressClass\", ingress.Namespace, ingress.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.PublishService != nil {\n\t\t\t\tlogger.Error(c.k8s.UpdateIngressStatus(ingress, c.PublishService))\n\t\t\t}\n\t\t\tif ingress.DefaultBackend != nil {\n\t\t\t\tif r, errSvc := c.setDefaultService(ingress, []string{c.Cfg.FrontHTTP, c.Cfg.FrontHTTPS}); errSvc != nil {\n\t\t\t\t\tlogger.Errorf(\"Ingress '%s/%s': default backend: %s\", ingress.Namespace, ingress.Name, errSvc)\n\t\t\t\t} else {\n\t\t\t\t\treload = reload || r\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Ingress secrets\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing secrets...\", ingress.Namespace, ingress.Name)\n\t\t\tfor _, tls := range ingress.TLS {\n\t\t\t\tif tls.Status == store.DELETED {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcrt, updated, _ := c.Cfg.Certificates.HandleTLSSecret(c.Store, haproxy.SecretCtx{\n\t\t\t\t\tDefaultNS: ingress.Namespace,\n\t\t\t\t\tSecretPath: tls.SecretName.Value,\n\t\t\t\t\tSecretType: haproxy.FT_CERT,\n\t\t\t\t})\n\t\t\t\tif crt != \"\" && updated {\n\t\t\t\t\treload = true\n\t\t\t\t\tlogger.Debugf(\"Secret '%s' in ingress '%s/%s' was updated, reload required\", tls.SecretName.Value, ingress.Namespace, ingress.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Ingress annotations\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing annotations...\", ingress.Namespace, ingress.Name)\n\t\t\tif len(ingress.Rules) == 0 {\n\t\t\t\tlogger.Debugf(\"Ingress %s/%s: no rules defined\", ingress.Namespace, ingress.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.handleIngressAnnotations(ingress)\n\t\t\t// Ingress rules\n\t\t\tlogger.Tracef(\"ingress '%s/%s': processing rules...\", ingress.Namespace, ingress.Name)\n\t\t\tfor _, rule := range ingress.Rules {\n\t\t\t\tfor _, path := range rule.Paths {\n\t\t\t\t\tif r, errIng := c.handleIngressPath(ingress, rule.Host, path); errIng != nil 
{\n\t\t\t\t\t\tlogger.Errorf(\"Ingress '%s/%s': %s\", ingress.Namespace, ingress.Name, errIng)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treload = reload || r\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, handler := range c.UpdateHandlers {\n\t\tr, errHandler := handler.Update(c.Store, &c.Cfg, c.Client)\n\t\tlogger.Error(errHandler)\n\t\treload = reload || r\n\t}\n\n\terr = c.Client.APICommitTransaction()\n\tif err != nil {\n\t\tlogger.Error(\"unable to Sync HAProxy configuration !!\")\n\t\tlogger.Error(err)\n\t\tc.clean(true)\n\t\treturn\n\t}\n\tc.clean(false)\n\tif !c.ready {\n\t\tc.setToReady()\n\t}\n\tswitch {\n\tcase restart:\n\t\tif err = c.haproxyService(\"restart\"); err != nil {\n\t\t\tlogger.Error(err)\n\t\t} else {\n\t\t\tlogger.Info(\"HAProxy restarted\")\n\t\t}\n\tcase reload:\n\t\tif err = c.haproxyService(\"reload\"); err != nil {\n\t\t\tlogger.Error(err)\n\t\t} else {\n\t\t\tlogger.Info(\"HAProxy reloaded\")\n\t\t}\n\t}\n\n\tlogger.Trace(\"HAProxy config sync ended\")\n}", "func (ct *ctrlerCtx) handleHostEventParallelWithNoResolver(evt *kvstore.WatchEvent) error {\n\tswitch tp := evt.Object.(type) {\n\tcase *cluster.Host:\n\t\teobj := evt.Object.(*cluster.Host)\n\t\tkind := \"Host\"\n\n\t\tlog.Infof(\"Watcher: Got %s watch event(%s): {%+v}\", kind, evt.Type, eobj)\n\n\t\tct.Lock()\n\t\thandler, ok := ct.handlers[kind]\n\t\tct.Unlock()\n\t\tif !ok {\n\t\t\tct.logger.Fatalf(\"Cant find the handler for %s\", kind)\n\t\t}\n\t\thostHandler := handler.(HostHandler)\n\t\t// handle based on event type\n\t\tswitch evt.Type {\n\t\tcase kvstore.Created:\n\t\t\tfallthrough\n\t\tcase kvstore.Updated:\n\t\t\tworkFunc := func(ctx context.Context, ctrlCtx shardworkers.WorkObj) error {\n\t\t\t\tvar err error\n\t\t\t\tworkCtx := ctrlCtx.(*hostCtx)\n\t\t\t\teobj := workCtx.obj\n\t\t\t\tfobj, err := ct.getObject(kind, workCtx.GetKey())\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tct.addObject(workCtx)\n\t\t\t\t\tct.stats.Counter(\"Host_Created_Events\").Inc()\n\t\t\t\t\teobj.Lock()\n\t\t\t\t\terr = hostHandler.OnHostCreate(eobj)\n\t\t\t\t\teobj.Unlock()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. Err: %v\", kind, eobj.GetObjectMeta(), err)\n\t\t\t\t\t\tct.delObject(kind, workCtx.GetKey())\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tworkCtx = fobj.(*hostCtx)\n\t\t\t\t\tobj := workCtx.obj\n\t\t\t\t\tct.stats.Counter(\"Host_Updated_Events\").Inc()\n\t\t\t\t\tobj.Lock()\n\t\t\t\t\tp := cluster.Host{Spec: eobj.Spec,\n\t\t\t\t\t\tObjectMeta: eobj.ObjectMeta,\n\t\t\t\t\t\tTypeMeta: eobj.TypeMeta,\n\t\t\t\t\t\tStatus: eobj.Status}\n\n\t\t\t\t\terr = hostHandler.OnHostUpdate(obj, &p)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tct.logger.Errorf(\"Error creating %s %+v. Err: %v\", kind, obj.GetObjectMeta(), err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tworkCtx.obj.Host = p\n\t\t\t\t\t}\n\t\t\t\t\tobj.Unlock()\n\t\t\t\t}\n\t\t\t\tworkCtx.SetWatchTs(evt.WatchTS)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctrlCtx := &hostCtx{event: evt.Type, obj: &Host{Host: *eobj, ctrler: ct}}\n\t\t\tct.runFunction(\"Host\", ctrlCtx, workFunc)\n\t\tcase kvstore.Deleted:\n\t\t\tworkFunc := func(ctx context.Context, ctrlCtx shardworkers.WorkObj) error {\n\t\t\t\tvar err error\n\t\t\t\tworkCtx := ctrlCtx.(*hostCtx)\n\t\t\t\teobj := workCtx.obj\n\t\t\t\tfobj, err := ct.findObject(kind, workCtx.GetKey())\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Object %s/%s not found durng delete. Err: %v\", kind, eobj.GetKey(), err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tobj := fobj.(*Host)\n\t\t\t\tct.stats.Counter(\"Host_Deleted_Events\").Inc()\n\t\t\t\tobj.Lock()\n\t\t\t\terr = hostHandler.OnHostDelete(obj)\n\t\t\t\tobj.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tct.logger.Errorf(\"Error deleting %s: %+v. 
Err: %v\", kind, obj.GetObjectMeta(), err)\n\t\t\t\t}\n\t\t\t\tct.delObject(kind, workCtx.GetKey())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tctrlCtx := &hostCtx{event: evt.Type, obj: &Host{Host: *eobj, ctrler: ct}}\n\t\t\tct.runFunction(\"Host\", ctrlCtx, workFunc)\n\t\t}\n\tdefault:\n\t\tct.logger.Fatalf(\"API watcher Found object of invalid type: %v on Host watch channel\", tp)\n\t}\n\n\treturn nil\n}", "func ValidateIngress(ingress *extensions.Ingress) field.ErrorList {\n\tallErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath(\"metadata\"))\n\tallErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath(\"spec\"))...)\n\treturn allErrs\n}", "func (c *CaddyController) Run(stopCh chan struct{}) {\n\terr := c.reloadCaddy()\n\tif err != nil {\n\t\tlogrus.Errorf(\"initial caddy config load failed, %v\", err.Error())\n\t}\n\n\tdefer runtime.HandleCrash()\n\tdefer c.syncQueue.ShutDown()\n\tdefer c.statusQueue.ShutDown()\n\n\t// start the ingress informer where we listen to new / updated ingress resources\n\tgo c.informer.Run(stopCh)\n\n\t// wait for all involved caches to be synced, before processing items from the queue is started\n\tif !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {\n\t\truntime.HandleError(fmt.Errorf(\"Timed out waiting for caches to sync\"))\n\t\treturn\n\t}\n\n\t// start processing events for syncing ingress resources\n\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\n\t// start ingress status syncher and run every syncInterval\n\tgo wait.Until(c.dispatchSync, syncInterval, stopCh)\n\n\t// wait for SIGTERM\n\t<-stopCh\n\tlogrus.Info(\"stopping ingress controller\")\n\n\tvar exitCode int\n\terr = c.Shutdown()\n\tif err != nil {\n\t\tlogrus.Errorf(\"could not shutdown ingress controller properly, %v\", err.Error())\n\t\texitCode = 1\n\t}\n\n\tos.Exit(exitCode)\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := 
controller.New(controllerName, mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to Wordpress\n\terr = c.Watch(&source.Kind{Type: &wordpressv1alpha1.Wordpress{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to WordpressRuntime\n\terr = c.Watch(&source.Kind{Type: &wordpressv1alpha1.WordpressRuntime{}}, &handler.EnqueueRequestsFromMapFunc{\n\t\tToRequests: handler.ToRequestsFunc(func(rt handler.MapObject) []reconcile.Request {\n\t\t\trtMap.lock.RLock()\n\t\t\tdefer rtMap.lock.RUnlock()\n\t\t\tvar reconciles = []reconcile.Request{}\n\t\t\tfor key, runtime := range rtMap.m {\n\t\t\t\tif runtime == rt.Meta.GetName() {\n\t\t\t\t\treconciles = append(reconciles, reconcile.Request{NamespacedName: key})\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn reconciles\n\t\t}),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for Deployment changes\n\terr = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &wordpressv1alpha1.Wordpress{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for Service changes\n\terr = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &wordpressv1alpha1.Wordpress{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(calind): watch for PVC, CronJobs, Jobs and Ingresses\n\n\treturn nil\n}" ]
[ "0.6731203", "0.65975434", "0.65572864", "0.6251201", "0.61267525", "0.59292454", "0.58614355", "0.5846055", "0.5822735", "0.58212787", "0.5810285", "0.5738012", "0.57234436", "0.5713945", "0.57090265", "0.5678996", "0.5653033", "0.5629353", "0.56170344", "0.5590557", "0.5521334", "0.55019", "0.54824966", "0.54680574", "0.54630744", "0.5440669", "0.5435963", "0.54175264", "0.53995633", "0.53931385", "0.535756", "0.5326767", "0.5309546", "0.5307032", "0.5288716", "0.5284935", "0.52772635", "0.5271395", "0.5258076", "0.5252535", "0.5245654", "0.523889", "0.52295023", "0.5217418", "0.5202581", "0.5202581", "0.5194501", "0.5190375", "0.5180525", "0.5178024", "0.51566666", "0.5155319", "0.5137306", "0.5135252", "0.5133232", "0.5127191", "0.5105964", "0.51006866", "0.5096888", "0.50889695", "0.50875765", "0.50758713", "0.5043714", "0.5017598", "0.50018346", "0.4995229", "0.4982439", "0.49775037", "0.49606723", "0.49588096", "0.49576774", "0.49418426", "0.49300918", "0.4919738", "0.4917529", "0.49126512", "0.49087015", "0.49082917", "0.4900309", "0.48954314", "0.48909256", "0.48835295", "0.48827976", "0.48717383", "0.4868873", "0.4854175", "0.48397082", "0.48386905", "0.48336628", "0.48309302", "0.48256883", "0.4823142", "0.48185626", "0.48168105", "0.4814808", "0.48067617", "0.48041707", "0.48028684", "0.47815117", "0.47810856" ]
0.728039
0
+kubebuilder:rbac:groups=conjur.cyberark.com,resources=conjurconfigs,verbs=get;list;watch;create;update;patch;delete +kubebuilder:rbac:groups=conjur.cyberark.com,resources=conjurconfigs/status,verbs=get;update;patch +kubebuilder:rbac:groups=conjur.cyberark.com,resources=conjurconfigs/finalizers,verbs=update +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete Reconcile is part of the main kubernetes reconciliation loop which aims to move the current state of the cluster closer to the desired state. TODO(user): Modify the Reconcile function to compare the state specified by the ConjurConfig object against the actual cluster state, and then perform operations to make the cluster state reflect the state specified by the user. For more details, check Reconcile and its Result here:
func (r *ConjurConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { _ = r.Log.WithValues("conjurconfig", req.NamespacedName) // Fetch the ConjurConfig instance conjurConfig := &conjurv1alpha1.ConjurConfig{} err := r.Get(ctx, req.NamespacedName, conjurConfig) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue log.Info("ConjurConfig resource not found. Ignoring since object must be deleted") return ctrl.Result{}, nil } // Error reading the object - requeue the request. log.Error(err, "Failed to get ConjurConfig") return ctrl.Result{}, err } // Check if the ConfigMap already exists, if not create a new one found := &v1.ConfigMap{} cmName := getConfigMapName(conjurConfig) cmNamespace := conjurConfig.Namespace err = r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: cmNamespace}, found) if err != nil && errors.IsNotFound(err) { // Define a new ConfigMap cm := r.configMapForConjurConfig(conjurConfig, cmName) log.Info("Creating a new ConfigMap, ", "ConfigMap.Name: ", cmName, "ConfigMap.Namespace: ", cmNamespace) err = r.Create(ctx, cm) if err != nil { log.Error(err, "Failed to create new ConfigMap, ", "ConfigMap.Name: ", cm.Name, "ConfigMap.Namespace: ", cm.Namespace) return ctrl.Result{}, err } // ConfigMap created successfully - return and requeue return ctrl.Result{Requeue: true}, nil } else if err != nil { log.Error(err, "Failed to get ConfigMap") return ctrl.Result{}, err } // TODO: Ensure ConfigMap has correct content // TODO: Add ConfigMap created and/or timestamp to status? return ctrl.Result{}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Microk8sConfigReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, rerr error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"microk8sconfig\", req.NamespacedName)\n\n\t// your logic here\n\tmicrok8sconfig := &bootstrapv1alpha1.Microk8sConfig{}\n\n\tif err := r.Client.Get(ctx, req.NamespacedName, microk8sconfig); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlog.Error(err, \"failed to get config\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Look up the owner of this KubeConfig if there is one\n\tconfigOwner, err := bsutil.GetConfigOwner(ctx, r.Client, microk8sconfig)\n\tif apierrors.IsNotFound(err) {\n\t\t// Could not find the owner yet, this is not an error and will rereconcile when the owner gets set.\n\t\treturn ctrl.Result{}, nil\n\t}\n\tif err != nil {\n\t\tlog.Error(err, \"failed to get owner\")\n\t\treturn ctrl.Result{}, err\n\t}\n\tif configOwner == nil {\n\t\tlog.Error(err, \"failed to get config-owner\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\tlog = log.WithValues(\"kind\", configOwner.GetKind(), \"version\", configOwner.GetResourceVersion(), \"name\", configOwner.GetName())\n\n\t// Initialize the patch helper\n\tpatchHelper, err := patch.NewHelper(microk8sconfig, r)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif configOwner.IsControlPlaneMachine() {\n\t\t_, err := r.setupControlPlaneBootstrapData(ctx, microk8sconfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\treturn ctrl.Result{}, patchHelper.Patch(ctx, microk8sconfig)\n\t} else {\n\t\t// Worker node\n\t\t_, err := r.setupWorkerBootstrapData(ctx, microk8sconfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\treturn ctrl.Result{}, patchHelper.Patch(ctx, microk8sconfig)\n\t}\n}", "func (r *ClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\tctx := 
context.WithValue(context.Background(), requestId, uuid.New())\n\tlog := log.Logger(ctx, \"controllers\", \"cluster_controller\", \"Reconcile\")\n\tlog.WithValues(\"cluster\", req.NamespacedName)\n\tlog.Info(\"Start of the request\")\n\t//Get the resource\n\tvar cluster managerv1alpha1.Cluster\n\tif err := r.Get(ctx, req.NamespacedName, &cluster); err != nil {\n\t\treturn ctrl.Result{}, ignoreNotFound(err)\n\t}\n\n\t// Retrieve k8s secret\n\t// Get the \"best\" Bearer token\n\t// Get the ManagedCluster k8s client\n\n\tstate := managerv1alpha1.Warning\n\n\tif cluster.Status.RetryCount > 3 {\n\t\tstate = managerv1alpha1.Error\n\t}\n\n\tsecret, err := r.K8sClient.GetK8sSecret(ctx, cluster.Spec.Config.BearerTokenSecret, cluster.ObjectMeta.Namespace)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to retrieve the bearer token for the given cluster\")\n\t\tdesc := fmt.Sprintf(\"unable to retrieve the bearer token for the given cluster due to error %s\", err.Error())\n\t\tr.Recorder.Event(&cluster, v1.EventTypeWarning, string(state), desc)\n\t\treturn r.UpdateStatus(ctx, &cluster, managerv1alpha1.ClusterStatus{RetryCount: cluster.Status.RetryCount + 1, ErrorDescription: desc}, state, errRequeueTime)\n\t}\n\tcfg, err := utils.PrepareK8sRestConfigFromClusterCR(ctx, &cluster, secret)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to prepare the rest config for the target cluster\", \"cluster\", cluster.Spec.Name)\n\t\tdesc := fmt.Sprintf(\"unable to prepare the rest config for the target cluster due to error %s\", err.Error())\n\t\tr.Recorder.Event(&cluster, v1.EventTypeWarning, string(state), desc)\n\t\treturn r.UpdateStatus(ctx, &cluster, managerv1alpha1.ClusterStatus{RetryCount: cluster.Status.RetryCount + 1, ErrorDescription: desc}, state, errRequeueTime)\n\t}\n\n\t// Isit being deleted?\n\tif cluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t//Good. 
This is not Delete use case\n\t\t//Lets check if this is very first time use case\n\t\tif !utils.ContainsString(cluster.ObjectMeta.Finalizers, finalizerName) {\n\t\t\tlog.Info(\"New cluster resource. Adding the finalizer\", \"finalizer\", finalizerName)\n\t\t\tcluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, finalizerName)\n\t\t\tr.UpdateMeta(ctx, &cluster)\n\t\t}\n\t\treturn r.HandleReconcile(ctx, req, &cluster, cfg)\n\n\t} else {\n\t\t//oh oh.. This is delete use case\n\t\t//Lets make sure to clean up the iam role\n\t\tif cluster.Status.RetryCount != 0 {\n\t\t\tcluster.Status.RetryCount = cluster.Status.RetryCount + 1\n\t\t}\n\t\tlog.Info(\"Cluster delete request\")\n\t\tif err := removeRBACInManagedCluster(ctx, cfg); err != nil {\n\t\t\tlog.Error(err, \"Unable to delete the cluster\")\n\t\t\tr.UpdateStatus(ctx, &cluster, managerv1alpha1.ClusterStatus{RetryCount: cluster.Status.RetryCount + 1, ErrorDescription: err.Error()}, managerv1alpha1.Error)\n\t\t\tr.Recorder.Event(&cluster, v1.EventTypeWarning, string(managerv1alpha1.Error), \"unable to delete the cluster due to \"+err.Error())\n\t\t\treturn ctrl.Result{RequeueAfter: 30 * time.Second}, nil\n\t\t}\n\n\t\t// Ok. 
Lets delete the finalizer so controller can delete the custom object\n\t\tlog.Info(\"Removing finalizer from Cluster\")\n\t\tcluster.ObjectMeta.Finalizers = utils.RemoveString(cluster.ObjectMeta.Finalizers, finalizerName)\n\t\tr.UpdateMeta(ctx, &cluster)\n\t\tlog.Info(\"Successfully deleted cluster\")\n\t\tr.Recorder.Event(&cluster, v1.EventTypeNormal, \"Deleted\", \"Successfully deleted cluster\")\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *reconciler) Reconcile(resourceName string) error {\n\tklog.V(4).Infof(\"Reconciling RBAC for %s\", resourceName)\n\n\terr := r.ensureRBACClusterRole(resourceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = r.ensureRBACClusterRoleBinding(resourceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (reconciler *ClusterReconciler) reconcile() (ctrl.Result, error) {\n\tvar err error\n\n\t// Child resources of the cluster CR will be automatically reclaimed by K8S.\n\tif reconciler.observed.cluster == nil {\n\t\treconciler.log.Info(\"The cluster has been deleted, no action to take\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\terr = reconciler.reconcileConfigMap()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerDeployment()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerService()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileJobManagerIngress()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\terr = reconciler.reconcileTaskManagerDeployment()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tresult, err := reconciler.reconcileJob()\n\n\treturn result, nil\n}", "func (crc *clusterReconcileContext) reconcile() error {\n\tif crc.tinkerbellCluster.Spec.ControlPlaneEndpoint.Host == \"\" {\n\t\tif err := crc.populateControlplaneHost(); err != nil {\n\t\t\treturn fmt.Errorf(\"populating controlplane host: %w\", err)\n\t\t}\n\t}\n\n\t// TODO: How can we 
support changing that?\n\tif crc.tinkerbellCluster.Spec.ControlPlaneEndpoint.Port != KubernetesAPIPort {\n\t\tcrc.tinkerbellCluster.Spec.ControlPlaneEndpoint.Port = KubernetesAPIPort\n\t}\n\n\tcrc.tinkerbellCluster.Status.Ready = true\n\n\tcontrollerutil.AddFinalizer(crc.tinkerbellCluster, infrastructurev1alpha3.ClusterFinalizer)\n\n\tcrc.log.Info(\"Setting cluster status to ready\")\n\n\tif err := crc.patchHelper.Patch(crc.ctx, crc.tinkerbellCluster); err != nil {\n\t\treturn fmt.Errorf(\"patching cluster object: %w\", err)\n\t}\n\n\treturn nil\n}", "func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error {\n\tlog := a.log.WithValues(\"cluster-name\", cluster.Name, \"cluster-namespace\", cluster.Namespace)\n\tlog.Info(\"Reconciling Cluster\")\n\n\tscope, err := scope.NewClusterScope(scope.ClusterScopeParams{\n\t\tCluster: cluster,\n\t\tLogger: a.log,\n\t})\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to create scope: %+v\", err)\n\t}\n\n\tdefer scope.Close()\n\n\tec2svc := ec2.NewService(scope)\n\telbsvc := elb.NewService(scope)\n\tcertSvc := certificates.NewService(scope)\n\n\t// Store cert material in spec.\n\tif err := certSvc.ReconcileCertificates(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile certificates for cluster %q\", cluster.Name)\n\t}\n\n\tif err := ec2svc.ReconcileNetwork(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile network for cluster %q\", cluster.Name)\n\t}\n\n\tif err := ec2svc.ReconcileBastion(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile bastion host for cluster %q\", cluster.Name)\n\t}\n\n\tif err := elbsvc.ReconcileLoadbalancers(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile load balancers for cluster %q\", cluster.Name)\n\t}\n\n\tif cluster.Annotations == nil {\n\t\tcluster.Annotations = make(map[string]string)\n\t}\n\tcluster.Annotations[v1alpha2.AnnotationClusterInfrastructureReady] = v1alpha2.ValueReady\n\n\t// Store KubeConfig for 
Cluster API NodeRef controller to use.\n\tkubeConfigSecretName := remote.KubeConfigSecretName(cluster.Name)\n\tsecretClient := a.coreClient.Secrets(cluster.Namespace)\n\tif _, err := secretClient.Get(kubeConfigSecretName, metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) {\n\t\tkubeConfig, err := a.Deployer.GetKubeConfig(cluster, nil)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to get kubeconfig for cluster %q\", cluster.Name)\n\t\t}\n\n\t\tkubeConfigSecret := &apiv1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: kubeConfigSecretName,\n\t\t\t},\n\t\t\tStringData: map[string]string{\n\t\t\t\t\"value\": kubeConfig,\n\t\t\t},\n\t\t}\n\n\t\tif _, err := secretClient.Create(kubeConfigSecret); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create kubeconfig secret for cluster %q\", cluster.Name)\n\t\t}\n\t} else if err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get kubeconfig secret for cluster %q\", cluster.Name)\n\t}\n\n\t// If the control plane is ready, try to delete the control plane configmap lock, if it exists, and return.\n\tif cluster.Annotations[v1alpha2.AnnotationControlPlaneReady] == v1alpha2.ValueReady {\n\t\tconfigMapName := scope.ControlPlaneConfigMapName()\n\t\tlog.Info(\"Checking for existence of control plane configmap lock\", \"configmap-name\", configMapName)\n\n\t\t_, err := a.coreClient.ConfigMaps(cluster.Namespace).Get(configMapName, metav1.GetOptions{})\n\t\tswitch {\n\t\tcase apierrors.IsNotFound(err):\n\t\t\t// It doesn't exist - no-op\n\t\tcase err != nil:\n\t\t\treturn errors.Wrapf(err, \"Error retrieving control plane configmap lock %q\", configMapName)\n\t\tdefault:\n\t\t\tif err := a.coreClient.ConfigMaps(cluster.Namespace).Delete(configMapName, nil); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"Error deleting control plane configmap lock %q\", configMapName)\n\t\t\t}\n\t\t}\n\n\t\t// Nothing more to reconcile - return early.\n\t\treturn nil\n\t}\n\n\tlog.Info(\"Cluster does 
not have ready annotation - checking for ready control plane machines\")\n\n\tmachineList := &clusterv1.MachineList{}\n\tif err := a.List(context.Background(), machineList, scope.ListOptionsLabelSelector()); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to retrieve machines in cluster %q\", cluster.Name)\n\t}\n\n\tcontrolPlaneMachines := util.GetControlPlaneMachinesFromList(machineList)\n\n\tmachineReady := false\n\tfor _, machine := range controlPlaneMachines {\n\t\tif machine.Status.NodeRef != nil {\n\t\t\tmachineReady = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !machineReady {\n\t\tlog.Info(\"No control plane machines are ready - requeuing cluster\")\n\t\treturn &controllerError.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineDuration}\n\t}\n\n\tlog.Info(\"Setting cluster ready annotation\")\n\tcluster.Annotations[v1alpha2.AnnotationControlPlaneReady] = v1alpha2.ValueReady\n\n\treturn nil\n}", "func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {\n\t// Get the VSphereCluster resource for this request.\n\tvsphereCluster := &infrav1.VSphereCluster{}\n\tif err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tr.Logger.V(4).Info(\"VSphereCluster not found, won't reconcile\", \"key\", req.NamespacedName)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Fetch the CAPI Cluster.\n\tcluster, err := clusterutilv1.GetOwnerCluster(r, r.Client, vsphereCluster.ObjectMeta)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tif cluster == nil {\n\t\tr.Logger.Info(\"Waiting for Cluster Controller to set OwnerRef on VSphereCluster\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\tif annotations.IsPaused(cluster, vsphereCluster) {\n\t\tr.Logger.V(4).Info(\"VSphereCluster %s/%s linked to a cluster that is paused\",\n\t\t\tvsphereCluster.Namespace, vsphereCluster.Name)\n\t\treturn reconcile.Result{}, 
nil\n\t}\n\n\t// Create the patch helper.\n\tpatchHelper, err := patch.NewHelper(vsphereCluster, r.Client)\n\tif err != nil {\n\t\treturn reconcile.Result{}, errors.Wrapf(\n\t\t\terr,\n\t\t\t\"failed to init patch helper for %s %s/%s\",\n\t\t\tvsphereCluster.GroupVersionKind(),\n\t\t\tvsphereCluster.Namespace,\n\t\t\tvsphereCluster.Name)\n\t}\n\n\t// Create the cluster context for this request.\n\tclusterContext := &capvcontext.ClusterContext{\n\t\tControllerContext: r.ControllerContext,\n\t\tCluster: cluster,\n\t\tVSphereCluster: vsphereCluster,\n\t\tLogger: r.Logger.WithName(req.Namespace).WithName(req.Name),\n\t\tPatchHelper: patchHelper,\n\t}\n\n\t// Always issue a patch when exiting this function so changes to the\n\t// resource are patched back to the API server.\n\tdefer func() {\n\t\tif err := clusterContext.Patch(); err != nil {\n\t\t\tif reterr == nil {\n\t\t\t\treterr = err\n\t\t\t}\n\t\t\tclusterContext.Logger.Error(err, \"patch failed\", \"cluster\", clusterContext.String())\n\t\t}\n\t}()\n\n\tif err := setOwnerRefsOnVsphereMachines(clusterContext); err != nil {\n\t\treturn reconcile.Result{}, errors.Wrapf(err, \"failed to set owner refs on VSphereMachine objects\")\n\t}\n\n\t// Handle deleted clusters\n\tif !vsphereCluster.DeletionTimestamp.IsZero() {\n\t\treturn r.reconcileDelete(clusterContext)\n\t}\n\n\t// If the VSphereCluster doesn't have our finalizer, add it.\n\t// Requeue immediately after adding finalizer to avoid the race condition between init and delete\n\tif !ctrlutil.ContainsFinalizer(vsphereCluster, infrav1.ClusterFinalizer) {\n\t\tctrlutil.AddFinalizer(vsphereCluster, infrav1.ClusterFinalizer)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// Handle non-deleted clusters\n\treturn r.reconcileNormal(clusterContext)\n}", "func (r *Reconciler) reconcileCRwithConfig(ctx context.Context, service *operatorv1alpha1.ConfigService, namespace string, csv *olmv1alpha1.ClusterServiceVersion) error {\n\tmerr := &util.MultiErr{}\n\n\t// Create k8s 
resources required by service\n\tif service.Resources != nil {\n\t\tfor _, res := range service.Resources {\n\t\t\tif res.APIVersion == \"\" {\n\t\t\t\treturn fmt.Errorf(\"The APIVersion of k8s resource is empty for operator \" + service.Name)\n\t\t\t}\n\n\t\t\tif res.Kind == \"\" {\n\t\t\t\treturn fmt.Errorf(\"The Kind of k8s resource is empty for operator \" + service.Name)\n\t\t\t}\n\t\t\tif res.Name == \"\" {\n\t\t\t\treturn fmt.Errorf(\"The Name of k8s resource is empty for operator \" + service.Name)\n\t\t\t}\n\t\t\tvar k8sResNs string\n\t\t\tif res.Namespace == \"\" {\n\t\t\t\tk8sResNs = namespace\n\t\t\t} else {\n\t\t\t\tk8sResNs = res.Namespace\n\t\t\t}\n\n\t\t\tvar k8sRes unstructured.Unstructured\n\t\t\tk8sRes.SetAPIVersion(res.APIVersion)\n\t\t\tk8sRes.SetKind(res.Kind)\n\t\t\tk8sRes.SetName(res.Name)\n\t\t\tk8sRes.SetNamespace(k8sResNs)\n\n\t\t\terr := r.Client.Get(ctx, types.NamespacedName{\n\t\t\t\tName: res.Name,\n\t\t\t\tNamespace: k8sResNs,\n\t\t\t}, &k8sRes)\n\n\t\t\tif err != nil && !apierrors.IsNotFound(err) {\n\t\t\t\tmerr.Add(errors.Wrapf(err, \"failed to get k8s resource %s/%s\", k8sResNs, res.Name))\n\t\t\t} else if apierrors.IsNotFound(err) {\n\t\t\t\tif err := r.createK8sResource(ctx, k8sRes, res.Data, res.Labels, res.Annotations); err != nil {\n\t\t\t\t\tmerr.Add(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif checkLabel(k8sRes, map[string]string{constant.OpreqLabel: \"true\"}) && res.Force {\n\t\t\t\t\t// Update k8s resource\n\t\t\t\t\tklog.V(3).Info(\"Found existing k8s resource: \" + res.Name)\n\t\t\t\t\tif err := r.updateK8sResource(ctx, k8sRes, res.Data, res.Labels, res.Annotations); err != nil {\n\t\t\t\t\t\tmerr.Add(err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tklog.V(2).Infof(\"Skip the k8s resource %s/%s which is not created by ODLM\", res.Kind, res.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(merr.Errors) != 0 {\n\t\t\treturn merr\n\t\t}\n\t}\n\n\talmExamples := csv.GetAnnotations()[\"alm-examples\"]\n\n\t// Convert CR template 
string to slice\n\tvar almExampleList []interface{}\n\terr := json.Unmarshal([]byte(almExamples), &almExampleList)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to convert alm-examples in the Subscription %s/%s to slice\", namespace, service.Name)\n\t}\n\n\tfoundMap := make(map[string]bool)\n\tfor cr := range service.Spec {\n\t\tfoundMap[cr] = false\n\t}\n\n\t// Merge OperandConfig and ClusterServiceVersion alm-examples\n\tfor _, almExample := range almExampleList {\n\t\t// Create an unstructured object for CR and check its value\n\t\tvar crFromALM unstructured.Unstructured\n\t\tcrFromALM.Object = almExample.(map[string]interface{})\n\n\t\tname := crFromALM.GetName()\n\t\tspec := crFromALM.Object[\"spec\"]\n\t\tif spec == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr := r.Client.Get(ctx, types.NamespacedName{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t}, &crFromALM)\n\n\t\tfor cr := range service.Spec {\n\t\t\tif strings.EqualFold(crFromALM.GetKind(), cr) {\n\t\t\t\tfoundMap[cr] = true\n\t\t\t}\n\t\t}\n\n\t\tif err != nil && !apierrors.IsNotFound(err) {\n\t\t\tmerr.Add(errors.Wrapf(err, \"failed to get the custom resource %s/%s\", namespace, name))\n\t\t\tcontinue\n\t\t} else if apierrors.IsNotFound(err) {\n\t\t\t// Create Custom Resource\n\t\t\tif err := r.compareConfigandExample(ctx, crFromALM, service, namespace); err != nil {\n\t\t\t\tmerr.Add(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tif checkLabel(crFromALM, map[string]string{constant.OpreqLabel: \"true\"}) {\n\t\t\t\t// Update or Delete Custom Resource\n\t\t\t\tif err := r.existingCustomResource(ctx, crFromALM, spec.(map[string]interface{}), service, namespace); err != nil {\n\t\t\t\t\tmerr.Add(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tklog.V(2).Info(\"Skip the custom resource not created by ODLM\")\n\t\t\t}\n\t\t}\n\t}\n\tif len(merr.Errors) != 0 {\n\t\treturn merr\n\t}\n\n\tfor cr, found := range foundMap {\n\t\tif !found {\n\t\t\tklog.Warningf(\"Custom resource 
%v doesn't exist in the alm-example of %v\", cr, csv.GetName())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *RqliteClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\t_ = context.Background()\n\tlog := r.Log.WithValues(\"Reconcile RqliteCluster \", req.Name, \" in namespace \", req.NamespacedName)\n\n\tlog.V(1).Info(\"Get Object Info\")\n\t//objectInfo := new(rqlitev1.RqliteCluster{})\n\tobjectInfo := &rqlitev1.RqliteCluster{}\n\terr := r.Get(context.TODO(), req.NamespacedName, objectInfo)\n\n\tif err != nil {\n\t\tlog.Error(err, \"Error during r.Get\")\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\tlog.Info(\"Dump Object Info\", \"ClusterName\", objectInfo.Spec.Name, \"ClusterSize\", objectInfo.Spec.ClusterSize)\n\n\tlog.V(1).Info(\"Update Object Status\")\n\tlog.V(1).Info(\"Get Object Current Status\", \"NAme\", objectInfo.Spec.Name, \"Status\", objectInfo.Status.CurrentStatus)\n\tif objectInfo.Status.CurrentStatus == \"\" {\n\t\tlog.V(1).Info(\"Creating new RqliteCluster)\n\t\tpod := newRqliteCluster(objectInfo)\n\t\tobjectInfo.Status.CurrentStatus = \"OK\"\n\t}\n\n\tlog.V(1).Info(\"Set Object Target Status : \", \"Name\", objectInfo.Spec.Name, \"Status \", objectInfo.Status.CurrentStatus)\n\n\terr = r.Status().Update(context.TODO(), objectInfo)\n\tif err != nil {\n\t\tlog.Error(err, \"Error during r.Status\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t//if anything else happens\n\treturn ctrl.Result{}, nil\n}", "func (r *reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx, cancel := context.WithCancel(r.ctx)\n\tdefer cancel()\n\n\tconfigMap := &corev1.ConfigMap{}\n\tif err := r.client.Get(ctx, types2.NamespacedName{Namespace: req.Namespace, Name: req.Name}, configMap); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\treturn ctrl.Result{}, nil // do nothing if the ConfigMap does not exist\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\tif 
configMap.Name == constants.BOMMetadataConfigMapName {\n\t\tif err := r.updateConditions(ctx); err != nil {\n\t\t\tif apierrors.IsConflict(errors.Cause(err)) {\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\ttkr, err := tkrFromConfigMap(configMap)\n\tif err != nil {\n\t\tr.log.Error(err, \"Could not create TKR from ConfigMap\", \"ConfigMap\", configMap.Name)\n\t\treturn ctrl.Result{}, nil // no need to retry: if the ConfigMap changes, we'll get called\n\t}\n\tif tkr == nil {\n\t\treturn ctrl.Result{}, nil // no need to retry: no TKR in this ConfigMap\n\t}\n\n\tif err := r.client.Create(ctx, tkr); err != nil {\n\t\tif apierrors.IsAlreadyExists(err) {\n\t\t\treturn ctrl.Result{}, nil // the TKR already exists, we're done.\n\t\t}\n\t\treturn ctrl.Result{}, errors.Wrapf(err, \"could not create TKR: ConfigMap.name='%s'\", configMap.Name)\n\t}\n\tif err := r.client.Status().Update(ctx, tkr); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := r.updateConditions(ctx); err != nil {\n\t\tif apierrors.IsConflict(errors.Cause(err)) {\n\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *KindClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := log.FromContext(ctx).WithValues(\"kindcluster\", req.NamespacedName)\n\n\t// Fetch the KindCluster instance\n\tkindCluster := &infrastructurev1alpha4.KindCluster{}\n\tif err := r.Get(ctx, req.NamespacedName, kindCluster); err != nil {\n\t\tif client.IgnoreNotFound(err) != nil {\n\t\t\tlog.Error(err, \"unable to fetch KindCluster\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\t// Cluster no longer exists so lets stop now\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Fetch the owner Cluster\n\tcluster, err := util.GetOwnerCluster(ctx, r.Client, kindCluster.ObjectMeta)\n\tif err != nil {\n\t\tlog.Error(err, \"failed 
to get owner cluster\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif cluster == nil {\n\t\tlog.Info(\"Cluster Controller has not yet set OwnerRef\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif annotations.IsPaused(cluster, kindCluster) {\n\t\tlog.Info(\"KindCluster or linked Cluster is marked as paused. Won't reconcile\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tlog = log.WithValues(\"cluster\", kindCluster.Name)\n\thelper, err := patch.NewHelper(kindCluster, r.Client)\n\tif err != nil {\n\t\treturn reconcile.Result{}, errors.Wrap(err, \"failed to init patch helper\")\n\t}\n\n\t// Ensure we always patch the resource with the latest changes when exiting function\n\tdefer func() {\n\t\thelper.Patch(\n\t\t\tcontext.TODO(),\n\t\t\tkindCluster,\n\t\t\tpatch.WithOwnedConditions{\n\t\t\t\tConditions: []clusterv1.ConditionType{\n\t\t\t\t\tclusterv1.ReadyCondition,\n\t\t\t\t}},\n\t\t)\n\t}()\n\n\tif !kindCluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The KindCluster is being deleted\n\t\tif controllerutil.ContainsFinalizer(kindCluster, finalizerName) {\n\t\t\tlog.Info(\"Deleting cluster\")\n\n\t\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseDeleting\n\t\t\tkindCluster.Status.Ready = false\n\t\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tif err := kindClient.DeleteCluster(kindCluster.NamespacedName()); err != nil {\n\t\t\t\tlog.Error(err, \"failed to delete cluster\")\n\t\t\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonDeleteFailed\n\t\t\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tcontrollerutil.RemoveFinalizer(kindCluster, finalizerName)\n\t\t\tlog.Info(\"Removed finalizer\")\n\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Ensure our finalizer is 
present\n\tcontrollerutil.AddFinalizer(kindCluster, finalizerName)\n\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif kindCluster.Status.Phase == nil || *kindCluster.Status.Phase == infrastructurev1alpha4.KindClusterPhasePending {\n\t\tlog.Info(\"Creating new cluster in Kind\")\n\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseCreating\n\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tif err := kindClient.CreateCluster(kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to create cluster in kind\")\n\t\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonCreateFailed\n\t\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tkindCluster.Status.Ready = true\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseReady\n\t\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tlog.Info(\"Cluster created\")\n\t}\n\n\t// Ensure ready status is up-to-date\n\tisReady, err := kindClient.IsReady(kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to check status of cluster\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonClusterNotFound\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Status.Ready = isReady\n\tif isReady {\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseReady\n\t} else {\n\t\tkindCluster.Status.Phase = &infrastructurev1alpha4.KindClusterPhaseCreating\n\t}\n\n\t// Ensure kubeconfig is up-to-date\n\tkc, err := kindClient.GetKubeConfig(kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to check status of 
cluster\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonKubeConfig\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Status.KubeConfig = &kc\n\n\t// Populate the server endpoint details\n\tendpoint, err := kubeconfig.ExtractEndpoint(kc, kindCluster.NamespacedName())\n\tif err != nil {\n\t\tlog.Error(err, \"failed to get control plane endpoint\")\n\t\tkindCluster.Status.FailureReason = &v1alpha4.FailureReasonEndpoint\n\t\tkindCluster.Status.FailureMessage = utils.StringPtr(err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\tkindCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{\n\t\tHost: endpoint.Host,\n\t\tPort: endpoint.Port,\n\t}\n\n\tif err := helper.Patch(ctx, kindCluster); err != nil {\n\t\tlog.Error(err, \"failed to update KindCluster status\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (s *Reconciler) Reconcile() (ReconcileResponse, error) {\n\tsyncFns := []func() error{\n\t\ts.validateClusterConfigAndSetDefaultsIfNotSet,\n\t\ts.buildClusterPlan,\n\t}\n\tfor _, syncFn := range syncFns {\n\t\terr := syncFn()\n\t\tif err != nil {\n\t\t\treturn ReconcileResponse{}, err\n\t\t}\n\t}\n\ts.resetClusterConfigReconcileErrorIfAny()\n\treturn s.makeReconcileResponse()\n}", "func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", req.Namespace, \"Request.Name\", req.Name)\n\treqLogger.Info(\"Reconciling MultiClusterObservability\")\n\n\t// Fetch the MultiClusterObservability instance\n\tinstance := &mcov1beta1.MultiClusterObservability{}\n\terr := r.Client.Get(context.TODO(), types.NamespacedName{\n\t\tName: config.GetMonitoringCRName(),\n\t}, instance)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are 
automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Init finalizers\n\tisTerminating, err := r.initFinalization(instance)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t} else if isTerminating {\n\t\treqLogger.Info(\"MCO instance is in Terminating status, skip the reconcile\")\n\t\treturn ctrl.Result{}, err\n\t}\n\t//read image manifest configmap to be used to replace the image for each component.\n\tif _, err = config.ReadImageManifestConfigMap(r.Client); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Do not reconcile objects if this instance of mch is labeled \"paused\"\n\tif config.IsPaused(instance.GetAnnotations()) {\n\t\treqLogger.Info(\"MCO reconciliation is paused. Nothing more to do.\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tstorageClassSelected, err := getStorageClass(instance, r.Client)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t//instance.Namespace = config.GetDefaultNamespace()\n\tinstance.Spec.StorageConfig.StatefulSetStorageClass = storageClassSelected\n\t//Render the templates with a specified CR\n\trenderer := rendering.NewRenderer(instance)\n\ttoDeploy, err := renderer.Render(r.Client)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to render multiClusterMonitoring templates\")\n\t\treturn ctrl.Result{}, err\n\t}\n\tdeployer := deploying.NewDeployer(r.Client)\n\t//Deploy the resources\n\tfor _, res := range toDeploy {\n\t\tif res.GetNamespace() == config.GetDefaultNamespace() {\n\t\t\tif err := controllerutil.SetControllerReference(instance, res, r.Scheme); err != nil {\n\t\t\t\treqLogger.Error(err, \"Failed to set controller reference\")\n\t\t\t}\n\t\t}\n\t\tif err := deployer.Deploy(res); err != nil {\n\t\t\treqLogger.Error(err, fmt.Sprintf(\"Failed to deploy %s %s/%s\",\n\t\t\t\tres.GetKind(), 
config.GetDefaultNamespace(), res.GetName()))\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// expose observatorium api gateway\n\tresult, err := GenerateAPIGatewayRoute(r.Client, r.Scheme, instance)\n\tif result != nil {\n\t\treturn *result, err\n\t}\n\n\t// create the certificates\n\terr = createObservabilityCertificate(r.Client, r.Scheme, instance)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// create an Observatorium CR\n\tresult, err = GenerateObservatoriumCR(r.Client, r.Scheme, instance)\n\tif result != nil {\n\t\treturn *result, err\n\t}\n\n\t// generate grafana datasource to point to observatorium api gateway\n\tresult, err = GenerateGrafanaDataSource(r.Client, r.Scheme, instance)\n\tif result != nil {\n\t\treturn *result, err\n\t}\n\n\tenableManagedCluster, found := os.LookupEnv(\"ENABLE_MANAGED_CLUSTER\")\n\tif !found || enableManagedCluster != \"false\" {\n\t\t// create the placementrule\n\t\terr = createPlacementRule(r.Client, r.Scheme, instance)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tresult, err = r.UpdateStatus(instance)\n\tif result != nil {\n\t\treturn *result, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (tcr *TinkerbellClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\n\tcrc, err := tcr.newReconcileContext(ctx, req.NamespacedName)\n\tif err != nil {\n\t\treturn ctrl.Result{}, fmt.Errorf(\"creating reconciliation context: %w\", err)\n\t}\n\n\tif crc == nil {\n\t\treturn defaultRequeueResult(), nil\n\t}\n\n\tif !crc.tinkerbellCluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tcrc.log.Info(\"Removing cluster\")\n\n\t\treturn ctrl.Result{}, crc.reconcileDelete()\n\t}\n\n\tif crc.cluster == nil {\n\t\treturn defaultRequeueResult(), nil\n\t}\n\n\tif util.IsPaused(crc.cluster, crc.tinkerbellCluster) {\n\t\tcrc.log.Info(\"TinkerbellCluster is marked as paused. 
Won't reconcile\")\n\n\t\treturn defaultRequeueResult(), nil\n\t}\n\n\treturn ctrl.Result{}, crc.reconcile()\n}", "func (s *StatusReconciler) Reconcile(ctx context.Context, req *ctrl.Request) (ctrl.Result, error) {\n\t// We base our status on the pod facts, so ensure our facts are up to date.\n\tif err := s.PFacts.Collect(ctx, s.Vdb); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Use all subclusters, even ones that are scheduled for removal. We keep\n\t// reporting status on the deleted ones until the statefulsets are gone.\n\tfinder := MakeSubclusterFinder(s.Client, s.Vdb)\n\tsubclusters, err := finder.FindSubclusters(ctx, FindAll)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trefreshStatus := func(vdbChg *vapi.VerticaDB) error {\n\t\tvdbChg.Status.Subclusters = []vapi.SubclusterStatus{}\n\t\tfor i := range subclusters {\n\t\t\tif i == len(vdbChg.Status.Subclusters) {\n\t\t\t\tvdbChg.Status.Subclusters = append(vdbChg.Status.Subclusters, vapi.SubclusterStatus{})\n\t\t\t}\n\t\t\tif err := s.calculateSubclusterStatus(ctx, subclusters[i], &vdbChg.Status.Subclusters[i]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to calculate subcluster status %s %w\", subclusters[i].Name, err)\n\t\t\t}\n\t\t}\n\t\ts.calculateClusterStatus(&vdbChg.Status)\n\t\treturn nil\n\t}\n\n\tif err := status.Update(ctx, s.Client, s.Vdb, refreshStatus); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *DiscoveryConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := logr.FromContext(ctx)\n\n\t// Get discovery config. 
Die if there is none\n\tconfig := &discoveryv1.DiscoveryConfig{}\n\tif err := r.Get(ctx, types.NamespacedName{\n\t\tName: req.Name,\n\t\tNamespace: req.Namespace,\n\t}, config); err != nil {\n\t\tlog.Error(err, \"unable to fetch DiscoveryConfig\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Update the DiscoveryConfig status\n\t// config.Status.LastUpdateTime = &metav1.Time{Time: time.Now()}\n\t// if err := r.Status().Update(ctx, config); err != nil {\n\t// \tlog.Error(err, \"unable to update discoveryconfig status\")\n\t// \treturn ctrl.Result{}, err\n\t// }\n\n\t// Get user token from secret provided in config\n\tif len(config.Spec.ProviderConnections) == 0 {\n\t\tlog.Info(\"No provider connections in config. Returning.\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\tsecretName := config.Spec.ProviderConnections[0]\n\tocmSecret := &corev1.Secret{}\n\terr := r.Get(context.TODO(), types.NamespacedName{Name: secretName, Namespace: req.Namespace}, ocmSecret)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tif _, ok := ocmSecret.Data[\"metadata\"]; !ok {\n\t\treturn ctrl.Result{}, fmt.Errorf(\"Secret '%s' does not contain 'metadata' field\", secretName)\n\t}\n\n\tproviderConnection := &CloudRedHatProviderConnection{}\n\terr = yaml.Unmarshal(ocmSecret.Data[\"metadata\"], providerConnection)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tuserToken := providerConnection.OCMApiToken\n\n\t// Request ephemeral access token with user token. 
This will be used for OCM requests\n\tauthRequest := auth_domain.AuthRequest{\n\t\tToken: userToken,\n\t}\n\tif annotations := config.GetAnnotations(); annotations != nil {\n\t\tauthRequest.BaseURL = annotations[\"ocmBaseURL\"]\n\t}\n\taccessToken, err := auth_service.AuthClient.GetToken(authRequest)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// List all already-discovered clusters\n\tvar discoveredList discoveryv1.DiscoveredClusterList\n\tif err := r.List(ctx, &discoveredList, client.InNamespace(req.Namespace)); err != nil {\n\t\tlog.Error(err, \"unable to list discovered clusters\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\texisting := make(map[string]int, len(discoveredList.Items))\n\tfor i, cluster := range discoveredList.Items {\n\t\texisting[cluster.Name] = i\n\t}\n\n\t// List all managed clusters\n\tmanagedClusters := &unstructured.UnstructuredList{}\n\tmanagedClusters.SetGroupVersionKind(managedClusterGVK)\n\tif err := r.List(ctx, managedClusters); err != nil {\n\t\t// Capture case were ManagedClusters resource does not exist\n\t\tif !apimeta.IsNoMatchError(err) {\n\t\t\treturn ctrl.Result{}, errors.Wrapf(err, \"error listing managed clusters\")\n\t\t}\n\t}\n\n\tmanagedClusterIDs := make(map[string]int, len(managedClusters.Items))\n\tfor i, mc := range managedClusters.Items {\n\t\tname := getClusterID(mc)\n\t\tif name != \"\" {\n\t\t\tmanagedClusterIDs[getClusterID(mc)] = i\n\t\t}\n\t}\n\n\tvar createClusters []discoveryv1.DiscoveredCluster\n\tvar updateClusters []discoveryv1.DiscoveredCluster\n\tvar deleteClusters []discoveryv1.DiscoveredCluster\n\tvar unchangedClusters []discoveryv1.DiscoveredCluster\n\n\trequestConfig := cluster_domain.ClusterRequest{\n\t\tToken: accessToken,\n\t\tFilter: config.Spec.Filters,\n\t}\n\tif annotations := config.GetAnnotations(); annotations != nil {\n\t\trequestConfig.BaseURL = annotations[\"ocmBaseURL\"]\n\t}\n\tclusterClient := 
cluster_service.ClusterClientGenerator.NewClient(requestConfig)\n\n\tnewClusters, err := clusterClient.GetClusters()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tfor _, cluster := range newClusters {\n\t\t// Build a DiscoveredCluster object from the cluster information\n\t\tdiscoveredCluster := discoveredCluster(cluster)\n\t\tdiscoveredCluster.SetNamespace(req.Namespace)\n\n\t\t// Assign dummy status\n\t\tdiscoveredCluster.Spec.Subscription = discoveryv1.SubscriptionSpec{\n\t\t\tStatus: \"Active\",\n\t\t\tSupportLevel: \"None\",\n\t\t\tManaged: false,\n\t\t\tCreatorID: \"abc123\",\n\t\t}\n\n\t\t// Assign managed status\n\t\tif _, managed := managedClusterIDs[discoveredCluster.Spec.Name]; managed {\n\t\t\tsetManagedStatus(&discoveredCluster)\n\t\t}\n\n\t\t// Add reference to secret used for authentication\n\t\tdiscoveredCluster.Spec.ProviderConnections = nil\n\t\tsecretRef, err := ref.GetReference(r.Scheme, ocmSecret)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"unable to make reference to secret\", \"secret\", secretRef)\n\t\t}\n\t\tdiscoveredCluster.Spec.ProviderConnections = append(discoveredCluster.Spec.ProviderConnections, *secretRef)\n\n\t\tind, exists := existing[discoveredCluster.Name]\n\t\tif !exists {\n\t\t\t// Newly discovered cluster\n\t\t\tcreateClusters = append(createClusters, discoveredCluster)\n\t\t\tdelete(existing, discoveredCluster.Name)\n\t\t\tcontinue\n\t\t}\n\t\t// Cluster has already been discovered. 
Check for changes.\n\t\tif same(discoveredCluster, discoveredList.Items[ind]) {\n\t\t\tunchangedClusters = append(unchangedClusters, discoveredCluster)\n\t\t\tdelete(existing, discoveredCluster.Name)\n\t\t} else {\n\t\t\tupdated := discoveredList.Items[ind]\n\t\t\tupdated.Spec = discoveredCluster.Spec\n\t\t\tupdateClusters = append(updateClusters, updated)\n\t\t\tdelete(existing, discoveredCluster.Name)\n\t\t}\n\t}\n\n\t// Remaining clusters are no longer found by OCM and should be labeled for delete\n\tfor _, ind := range existing {\n\t\tdeleteClusters = append(deleteClusters, discoveredList.Items[ind])\n\t}\n\n\t// Create new clusters and clean up old ones\n\tfor _, cluster := range createClusters {\n\t\tcluster := cluster\n\t\tif err := ctrl.SetControllerReference(config, &cluster, r.Scheme); err != nil {\n\t\t\tlog.Error(err, \"failed to set controller reference\", \"name\", cluster.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tif err := r.Create(ctx, &cluster); err != nil {\n\t\t\tlog.Error(err, \"unable to create discovered cluster\", \"name\", cluster.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"Created cluster\", \"Name\", cluster.Name)\n\t}\n\tfor _, cluster := range updateClusters {\n\t\tcluster := cluster\n\t\tif err := r.Update(ctx, &cluster); err != nil {\n\t\t\tlog.Error(err, \"unable to update discovered cluster\", \"name\", cluster.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"Updated cluster\", \"Name\", cluster.Name)\n\t}\n\tfor _, cluster := range deleteClusters {\n\t\tcluster := cluster\n\t\tif err := r.Delete(ctx, &cluster); err != nil {\n\t\t\tlog.Error(err, \"unable to delete discovered cluster\", \"name\", cluster.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"Deleted cluster\", \"Name\", cluster.Name)\n\t}\n\n\tlog.Info(\"Cluster categories\", \"Created\", len(createClusters), \"Updated\", len(updateClusters), \"Deleted\", len(deleteClusters), \"Unchanged\", 
len(unchangedClusters))\n\n\treturn ctrl.Result{RequeueAfter: reconciler.RefreshInterval}, nil\n}", "func (r *ProvisioningReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// provisioning.metal3.io is a singleton\n\t// Note: this check is here to make sure that the early startup configuration\n\t// is correct. For day 2 operatations the webhook will validate this.\n\tif req.Name != metal3iov1alpha1.ProvisioningSingletonName {\n\t\tklog.Info(\"ignoring invalid CR\", \"name\", req.Name)\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Make sure ClusterOperator exists\n\terr := r.ensureClusterOperator(nil)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tresult := ctrl.Result{}\n\tif !r.WebHookEnabled {\n\t\tif provisioning.WebhookDependenciesReady(r.OSClient) {\n\t\t\tklog.Info(\"restarting to enable the webhook\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\t// Keep checking for our webhook dependencies to be ready, so we can\n\t\t// enable the webhook.\n\t\tresult.RequeueAfter = 5 * time.Minute\n\t}\n\n\tenabled, err := r.isEnabled()\n\tif err != nil {\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"could not determine whether to run\")\n\t}\n\tif !enabled {\n\t\t// set ClusterOperator status to disabled=true, available=true\n\t\t// We're disabled; don't requeue\n\t\treturn ctrl.Result{}, errors.Wrapf(\n\t\t\tr.updateCOStatus(ReasonUnsupported, \"Nothing to do on this Platform\", \"\"),\n\t\t\t\"unable to put %q ClusterOperator in Disabled state\", clusterOperatorName)\n\t}\n\n\tbaremetalConfig, err := r.readProvisioningCR(ctx)\n\tif err != nil {\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\tif baremetalConfig == nil {\n\t\t// Provisioning configuration not available at this time.\n\t\t// Cannot proceed wtih metal3 deployment.\n\t\tklog.Info(\"Provisioning CR not found\")\n\t\treturn result, nil\n\t}\n\n\t// Make sure ClusterOperator's ownership is updated\n\terr = 
r.ensureClusterOperator(baremetalConfig)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Read container images from Config Map\n\tvar containerImages provisioning.Images\n\tif err := provisioning.GetContainerImages(&containerImages, r.ImagesFilename); err != nil {\n\t\t// Images config map is not valid\n\t\t// Provisioning configuration is not valid.\n\t\t// Requeue request.\n\t\tklog.ErrorS(err, \"invalid contents in images Config Map\")\n\t\tco_err := r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"invalid contents in images Config Map\")\n\t\tif co_err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, co_err)\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Get cluster-wide proxy information\n\tclusterWideProxy, err := r.OSClient.ConfigV1().Proxies().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tinfo := r.provisioningInfo(baremetalConfig, &containerImages, clusterWideProxy)\n\n\t// Check if Provisioning Configuartion is being deleted\n\tdeleted, err := r.checkForCRDeletion(ctx, info)\n\tif err != nil {\n\t\tvar coErr error\n\t\tif deleted {\n\t\t\tcoErr = r.updateCOStatus(ReasonDeployTimedOut, err.Error(), \"Unable to delete a metal3 resource on Provisioning CR deletion\")\n\t\t} else {\n\t\t\tcoErr = r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"Unable to add Finalizer on Provisioning CR\")\n\t\t}\n\t\tif coErr != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, coErr)\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\tif deleted {\n\t\treturn result, errors.Wrapf(\n\t\t\tr.updateCOStatus(ReasonComplete, \"all Metal3 resources deleted\", \"\"),\n\t\t\t\"unable to put %q ClusterOperator in Available state\", clusterOperatorName)\n\t}\n\n\tspecChanged := baremetalConfig.Generation != 
baremetalConfig.Status.ObservedGeneration\n\tif specChanged {\n\t\terr = r.updateCOStatus(ReasonSyncing, \"\", \"Applying metal3 resources\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Syncing state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\tif !r.WebHookEnabled {\n\t\t// Check if provisioning configuration is valid\n\t\tif err := baremetalConfig.ValidateBaremetalProvisioningConfig(); err != nil {\n\t\t\t// Provisioning configuration is not valid.\n\t\t\t// Requeue request.\n\t\t\tklog.Error(err, \"invalid config in Provisioning CR\")\n\t\t\terr = r.updateCOStatus(ReasonInvalidConfiguration, err.Error(), \"Unable to apply Provisioning CR: invalid configuration\")\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %v\", clusterOperatorName, err)\n\t\t\t}\n\t\t\t// Temporarily not requeuing request\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t}\n\n\t//Create Secrets needed for Metal3 deployment\n\tif err := provisioning.CreateAllSecrets(r.KubeClient.CoreV1(), ComponentNamespace, baremetalConfig, r.Scheme); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Check Metal3 Deployment already exists and managed by MAO.\n\tmetal3DeploymentSelector, maoOwned, err := provisioning.CheckExistingMetal3Deployment(r.KubeClient.AppsV1(), ComponentNamespace)\n\tinfo.PodLabelSelector = metal3DeploymentSelector\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to check for existing Metal3 Deployment\")\n\t}\n\n\tif maoOwned {\n\t\tklog.Info(\"Adding annotation for CBO to take ownership of metal3 deployment created by MAO\")\n\t}\n\n\tfor _, ensureResource := range []ensureFunc{\n\t\tprovisioning.EnsureMetal3Deployment,\n\t\tprovisioning.EnsureMetal3StateService,\n\t\tprovisioning.EnsureImageCache,\n\t} {\n\t\tupdated, err := ensureResource(info)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, 
err\n\t\t}\n\t\tif updated {\n\t\t\treturn result, r.Client.Status().Update(ctx, baremetalConfig)\n\t\t}\n\t}\n\n\tif specChanged {\n\t\tbaremetalConfig.Status.ObservedGeneration = baremetalConfig.Generation\n\t\terr = r.Client.Status().Update(ctx, baremetalConfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to update observed generation: %w\", err)\n\t\t}\n\t}\n\n\t// Determine the status of the deployment\n\tdeploymentState, err := provisioning.GetDeploymentState(r.KubeClient.AppsV1(), ComponentNamespace, baremetalConfig)\n\tif err != nil {\n\t\terr = r.updateCOStatus(ReasonNotFound, \"metal3 deployment inaccessible\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to determine state of metal3 deployment\")\n\t}\n\tif deploymentState == appsv1.DeploymentReplicaFailure {\n\t\terr = r.updateCOStatus(ReasonDeployTimedOut, \"metal3 deployment rollout taking too long\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\t// Determine the status of the DaemonSet\n\tdaemonSetState, err := provisioning.GetDaemonSetState(r.KubeClient.AppsV1(), ComponentNamespace, baremetalConfig)\n\tif err != nil {\n\t\terr = r.updateCOStatus(ReasonNotFound, \"metal3 image cache daemonset inaccessible\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"failed to determine state of metal3 image cache daemonset\")\n\t}\n\tif daemonSetState == provisioning.DaemonSetReplicaFailure {\n\t\terr = r.updateCOStatus(ReasonDeployTimedOut, \"metal3 image cache rollout taking too long\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, 
fmt.Errorf(\"unable to put %q ClusterOperator in Degraded state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\tif deploymentState == appsv1.DeploymentAvailable && daemonSetState == provisioning.DaemonSetAvailable {\n\t\terr = r.updateCOStatus(ReasonComplete, \"metal3 pod and image cache are running\", \"\")\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"unable to put %q ClusterOperator in Progressing state: %w\", clusterOperatorName, err)\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (r *ReconcileConfigMap) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\t// Fetch the ConfigMap instance\n\tcminstance := &corev1.ConfigMap{}\n\tif err := r.client.Get(context.TODO(), request.NamespacedName, cminstance); err != nil {\n\t\treturn reconcile.Result{}, IgnoreNotFound(err)\n\t}\n\tpolicyName, ok := cminstance.Labels[\"appName\"]\n\tif !ok {\n\t\treturn reconcile.Result{}, nil\n\t}\n\tpolicyNamespace, ok := cminstance.Labels[\"appNamespace\"]\n\tif !ok {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\treqLogger := log.WithValues(\"SelinuxPolicy.Name\", policyName, \"SelinuxPolicy.Namespace\", policyNamespace)\n\n\tpolicyObjKey := types.NamespacedName{Name: policyName, Namespace: policyNamespace}\n\tpolicy := &spov1alpha1.SelinuxPolicy{}\n\tif err := r.client.Get(context.TODO(), policyObjKey, policy); err != nil {\n\t\treturn reconcile.Result{}, IgnoreNotFound(err)\n\t}\n\n\tif policy.Status.State == \"\" || policy.Status.State == spov1alpha1.PolicyStatePending {\n\t\tpolicyCopy := policy.DeepCopy()\n\t\tpolicyCopy.Status.State = spov1alpha1.PolicyStateInProgress\n\t\tpolicyCopy.Status.SetConditions(rcommonv1.Creating())\n\t\tif err := r.client.Status().Update(context.TODO(), policyCopy); err != nil {\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"Updating policy without status\")\n\t\t}\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\t// object is not being deleted\n\tif cminstance.ObjectMeta.DeletionTimestamp.IsZero() 
{\n\t\treturn r.reconcileInstallerPods(policy, cminstance, reqLogger)\n\t}\n\treturn reconcile.Result{}, nil\n}", "func (r *azureManagedControlPlaneReconciler) Reconcile(ctx context.Context, scope *scope.ManagedControlPlaneScope) error {\n\tdecodedSSHPublicKey, err := base64.StdEncoding.DecodeString(scope.ControlPlane.Spec.SSHPublicKey)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to decode SSHPublicKey\")\n\t}\n\n\tmanagedClusterSpec := &managedclusters.Spec{\n\t\tName: scope.ControlPlane.Name,\n\t\tResourceGroup: scope.ControlPlane.Spec.ResourceGroup,\n\t\tLocation: scope.ControlPlane.Spec.Location,\n\t\tTags: scope.ControlPlane.Spec.AdditionalTags,\n\t\tVersion: strings.TrimPrefix(scope.ControlPlane.Spec.Version, \"v\"),\n\t\tSSHPublicKey: string(decodedSSHPublicKey),\n\t\tDNSServiceIP: scope.ControlPlane.Spec.DNSServiceIP,\n\t}\n\n\tif scope.ControlPlane.Spec.NetworkPlugin != nil {\n\t\tmanagedClusterSpec.NetworkPlugin = *scope.ControlPlane.Spec.NetworkPlugin\n\t}\n\tif scope.ControlPlane.Spec.NetworkPolicy != nil {\n\t\tmanagedClusterSpec.NetworkPolicy = *scope.ControlPlane.Spec.NetworkPolicy\n\t}\n\tif scope.ControlPlane.Spec.LoadBalancerSKU != nil {\n\t\tmanagedClusterSpec.LoadBalancerSKU = *scope.ControlPlane.Spec.LoadBalancerSKU\n\t}\n\n\tscope.V(2).Info(\"Reconciling managed cluster resource group\")\n\tif err := r.groupsSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile managed cluster resource group\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling managed cluster\")\n\tif err := r.reconcileManagedCluster(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile managed cluster\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling endpoint\")\n\tif err := r.reconcileEndpoint(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile control plane endpoint\")\n\t}\n\n\tscope.V(2).Info(\"Reconciling kubeconfig\")\n\tif err := 
r.reconcileKubeconfig(ctx, scope, managedClusterSpec); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile kubeconfig secret\")\n\t}\n\n\treturn nil\n}", "func (r *RedPandaClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\n\t//// Reconcile successful - don't requeue\n\t//return ctrl.Result{}, nil\n\t//// Reconcile failed due to error - requeue\n\t//return ctrl.Result{}, err\n\t//// Requeue for any reason other than an error\n\t//return ctrl.Result{Requeue: true}, nil\n\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"redpandacluster\", req.NamespacedName)\n\n\tredPandaCluster := &eventstreamv1alpha1.RedPandaCluster{}\n\terr := r.Get(ctx, req.NamespacedName, redPandaCluster)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"RedPandaCluster resource not statefulSetFound. Ignoring...\")\n\t\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t\t}\n\t\tlog.Error(err, \"unable to fetch RedPandaCluster\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tserviceFound := &corev1.Service{}\n\terr = r.Get(ctx, types.NamespacedName{Name: redPandaCluster.Name, Namespace: redPandaCluster.Namespace}, serviceFound)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"Service not found. 
Trying to create one...\")\n\t\t\tvar rpService *corev1.Service\n\t\t\trpService = r.serviceFor(redPandaCluster)\n\t\t\tlog.Info(\"creating service \", \"Service.Namespace\", rpService.Namespace, \"Service.Name\", rpService.Name)\n\t\t\terr := r.Create(ctx, rpService)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Failed to create new service\", \"Service.Namespace\", rpService.Namespace, \"Service.Name\", rpService.Name)\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t}\n\t\tlog.Info(\"unable to fetch Service resource\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// ConfigMap\n\tconfigMapFound := &corev1.ConfigMap{}\n\terr = r.Get(ctx, types.NamespacedName{Name: redPandaCluster.Name + \"base-config\", Namespace: redPandaCluster.Namespace}, configMapFound)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"ConfigMap not found. Trying to create one...\")\n\t\t\tvar rpConfigMap *corev1.ConfigMap\n\t\t\trpConfigMap = r.configMapFor(redPandaCluster)\n\t\t\terr := r.Create(ctx, rpConfigMap)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Failed to create ConfigMap resource\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t}\n\t\tlog.Info(\"unable to fetch Service resource\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// StatefulSet\n\tstatefulSetFound := &appsv1.StatefulSet{}\n\terr = r.Get(ctx, types.NamespacedName{Namespace: redPandaCluster.Namespace, Name: redPandaCluster.Name}, statefulSetFound)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"StatefulSet not found \")\n\t\t\tvar rpStatefulSet *appsv1.StatefulSet\n\t\t\trpStatefulSet = r.statefulSetFor(redPandaCluster, configMapFound, serviceFound)\n\t\t\terr := r.Create(ctx, rpStatefulSet)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Failed to create StatefulSet resource\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\treturn ctrl.Result{Requeue: true}, 
nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tobservedPods := &corev1.PodList{}\n\terr = r.List(ctx, observedPods, &client.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(redPandaCluster.Labels),\n\t\tNamespace: redPandaCluster.Namespace,\n\t})\n\tif err != nil {\n\t\tlog.Error(err, \"unable to fetch PodList resource\")\n\t\treturn ctrl.Result{}, err\n\t}\n\tvar observedNodes []string\n\tfor _, item := range observedPods.Items {\n\t\tobservedNodes = append(observedNodes, item.Name)\n\t}\n\tif !reflect.DeepEqual(observedNodes, redPandaCluster.Status.Nodes) {\n\t\tredPandaCluster.Status.Nodes = observedNodes\n\t\terr := r.Status().Update(ctx, redPandaCluster)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to update RedPandaClusterStatus\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\tlog.Info(\"reconcile loop ends\")\n\treturn ctrl.Result{}, nil\n}", "func (r *JoinedClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"joinedcluster\", req.NamespacedName)\n\tvar err error\n\tvar joinedCluster clustermanagerv1alpha1.JoinedCluster\n\tif err = r.Get(ctx, req.NamespacedName, &joinedCluster); err != nil {\n\t\tif apierrs.IsNotFound(err) {\n\t\t\t//handle delete of the JoinedCluster CR\n\t\t\tlog.Error(err, \"Unable to get JoinedCluster from the server\")\n\t\t\treturn ctrl.Result{}, ignoreNotFound(err)\n\t\t}\n\t}\n\n\t// handle finalizer\n\t// register a custom finalizer\n\tjoinedClusterFinalizer := \"storage.finalizers.onprem.openshift.io\"\n\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif joinedCluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. 
This is equivalent\n\t\t// registering our finalizer.\n\t\tif !containsString(joinedCluster.ObjectMeta.Finalizers, joinedClusterFinalizer) {\n\t\t\tjoinedCluster.ObjectMeta.Finalizers = append(joinedCluster.ObjectMeta.Finalizers, joinedClusterFinalizer)\n\t\t\tif err = r.Update(context.Background(), &joinedCluster); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(joinedCluster.ObjectMeta.Finalizers, joinedClusterFinalizer) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\tif err = r.deleteExternalResources(&req, &joinedCluster); err != nil {\n\t\t\t\t// if fail to delete the external dependency here, return with error\n\t\t\t\t// so that it can be retried\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tjoinedCluster.ObjectMeta.Finalizers = removeString(joinedCluster.ObjectMeta.Finalizers, joinedClusterFinalizer)\n\t\t\tif err = r.Update(context.Background(), &joinedCluster); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t//continue with the controller logic\n\tcondition := joinedCluster.IsCondition(clustermanagerv1alpha1.ConditionTypeReadyToJoin)\n\tif joinedCluster.Status.ClusterAgentInfo != nil {\n\t\t//ready to join, check for staleness, disconnects\n\t\tsinceLastUpdate := time.Since(joinedCluster.Status.ClusterAgentInfo.LastUpdateTime.Time)\n\t\tif sinceLastUpdate >= joinedCluster.Spec.StaleDuration.Duration &&\n\t\t\tsinceLastUpdate < joinedCluster.Spec.DisconnectDuration.Duration {\n\t\t\tjoinedCluster.SetCondition(clustermanagerv1alpha1.ConditionTypeAgentStale)\n\t\t} else if sinceLastUpdate > joinedCluster.Spec.DisconnectDuration.Duration {\n\t\t\tjoinedCluster.SetCondition(clustermanagerv1alpha1.ConditionTypeAgentDisconnected)\n\t\t}\n\n\t} else if condition == nil {\n\t\t// not ready to join, create SA, 
rolebinding KubeConfig\n\t\t// set ServiceAccount and JoinCommand status subresource fields.\n\t\tserviceAccount, err := createServiceAccount(r, &req, &joinedCluster, log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tsaSecret, err := getSecret(r, serviceAccount, log)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Error getting the sa secret\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\t_, err = createRoleBinding(r, &req, &joinedCluster, log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\tserverUrl, err := getServerUrl(r, log)\n\t\tif _, exists := saSecret.Data[\"service-ca.crt\"]; exists {\n\t\t\tif _, exists := saSecret.Data[\"token\"]; exists {\n\t\t\t\tjoinSecret, err := createJoinSecret(r, saSecret.Data[\"service-ca.crt\"], saSecret.Data[\"token\"], joinedCluster.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tyamlFile, err := ioutil.ReadFile(yamlFilePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"Cannot read yaml file from the deployment dir\")\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tjoinCommand := fmt.Sprintf(joinCommandTemplate, joinSecret.Name, joinSecret.Namespace,\n\t\t\t\t\tjoinSecret.Name, joinSecret.Namespace, joinedCluster.Name, joinedCluster.Namespace, serverUrl, string(yamlFile))\n\t\t\t\tlog.Info(\"Command output:\", \"joincommand\", joinCommand)\n\t\t\t\tjoinedCluster.Status.JoinCommand = &joinCommand\n\n\t\t\t} else {\n\t\t\t\tlog.Info(\"Couldn't find the token key in the secret\")\n\t\t\t\treturn ctrl.Result{}, errors.New(\"Token key not found for the sa secret\")\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Info(\"Couldn't find the service-ca.crt key in the secret\")\n\t\t\treturn ctrl.Result{}, errors.New(\"service-ca.crt not found in the secret\")\n\t\t}\n\t\t// at this point we have a role binding created, now get the sa token and create\n\t\t// kubeconfig file.\n\t\tsaName := serviceAccount.Name\n\t\tjoinedCluster.Status.ServiceAccountName = 
&saName\n\t\tjoinedCluster.SetCondition(clustermanagerv1alpha1.ConditionTypeReadyToJoin)\n\t}\n\n\t//update the status subresource now on the API server\n\tif err := r.Status().Update(ctx, &joinedCluster); err != nil {\n\t\tlog.Error(err, \"unable to update JoinedCluster status\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {\n\t_ = r.Log.WithValues(\"cluster\", req.NamespacedName)\n\t// Fetch the Cluster instance.\n\tcluster, err := r.resourceFetcher.FetchCluster(ctx, req.NamespacedName)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\t// Initialize the patch helper\n\tpatchHelper, err := patch.NewHelper(cluster, r.Client)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tdefer func() {\n\t\t// Always attempt to patch the object and status after each reconciliation.\n\t\tif err := patchHelper.Patch(ctx, cluster); err != nil {\n\t\t\treterr = kerrors.NewAggregate([]error{reterr, err})\n\t\t}\n\t}()\n\n\t// Ignore deleted Clusters, this can happen when foregroundDeletion\n\t// is enabled\n\tif !cluster.DeletionTimestamp.IsZero() {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// If the external object is paused, return without any further processing.\n\tif cluster.IsReconcilePaused() {\n\t\tr.Log.Info(\"eksa reconciliation is paused\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// dry run\n\tresult, err := r.reconcile(ctx, req.NamespacedName, true)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Dry run failed to reconcile Cluster\")\n\t\treturn result, err\n\t}\n\t// non dry run\n\tresult, err = r.reconcile(ctx, req.NamespacedName, false)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Failed to reconcile Cluster\")\n\t}\n\treturn result, err\n}", "func (r *KcrdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tLog := log.FromContext(ctx)\n\n\t// your logic here\n\n\tkcrd := 
&kapiv1.Kcrd{}\n\terr := r.Client.Get(ctx, req.NamespacedName, kcrd)\n\tfmt.Println(kcrd.Name)\n\n\tgetDep := &v1.Deployment{}\n\terr = r.Client.Get(ctx, req.NamespacedName, getDep)\n\tif errors.IsNotFound(err) {\n\t\tfmt.Println(\"new deployment creating... +++++++\")\n\t\tif err = r.Client.Create(ctx, newDeployment(kcrd)); err != nil {\n\t\t\tLog.Error(err, \"error creating deployments ++++++\")\n\t\t} else {\n\t\t\tfmt.Println(kcrd.Name + \"-dep \" + \"created +++++++++\")\n\t\t}\n\t}\n\n\tgetSvc := &v12.Service{}\n\terr = r.Client.Get(ctx, req.NamespacedName, getSvc)\n\tif errors.IsNotFound(err) {\n\t\tfmt.Println(\"new service creating... +++++++\")\n\t\tif err = r.Client.Create(ctx, newService(kcrd)); err != nil {\n\t\t\tLog.Error(err, \"error creating service ++++++\")\n\t\t} else {\n\t\t\tfmt.Println(kcrd.Name + \"-svc \" + \"created +++++++++\")\n\t\t}\n\t}\n\n\tgetIng := &netv1.Ingress{}\n\terr = r.Client.Get(ctx, req.NamespacedName, getIng)\n\tif errors.IsNotFound(err) {\n\t\tfmt.Println(\"new ingress creating... 
+++++++\")\n\t\tif err = r.Client.Create(ctx, newIngress(kcrd)); err != nil {\n\t\t\tLog.Error(err, \"error creating ingress ++++++\")\n\t\t} else {\n\t\t\tfmt.Println(kcrd.Name + \"-ing \" + \"created +++++++++\")\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileKubemanager) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tvar err error\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Kubemanager\")\n\tinstanceType := \"kubemanager\"\n\tinstance := &v1alpha1.Kubemanager{}\n\tcassandraInstance := v1alpha1.Cassandra{}\n\tzookeeperInstance := v1alpha1.Zookeeper{}\n\trabbitmqInstance := v1alpha1.Rabbitmq{}\n\tconfigInstance := v1alpha1.Config{}\n\n\terr = r.Client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil && errors.IsNotFound(err) {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tcassandraActive := cassandraInstance.IsActive(instance.Spec.ServiceConfiguration.CassandraInstance,\n\t\trequest.Namespace, r.Client)\n\tzookeeperActive := zookeeperInstance.IsActive(instance.Spec.ServiceConfiguration.ZookeeperInstance,\n\t\trequest.Namespace, r.Client)\n\trabbitmqActive := rabbitmqInstance.IsActive(instance.Labels[\"contrail_cluster\"],\n\t\trequest.Namespace, r.Client)\n\tconfigActive := configInstance.IsActive(instance.Labels[\"contrail_cluster\"],\n\t\trequest.Namespace, r.Client)\n\tif !configActive || !cassandraActive || !rabbitmqActive || !zookeeperActive {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tmanagerInstance, err := instance.OwnedByManager(r.Client, request)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tif managerInstance != nil {\n\t\tif managerInstance.Spec.Services.Kubemanagers != nil {\n\t\t\tfor _, kubemanagerManagerInstance := range managerInstance.Spec.Services.Kubemanagers {\n\t\t\t\tif kubemanagerManagerInstance.Name == request.Name {\n\t\t\t\t\tinstance.Spec.CommonConfiguration = 
utils.MergeCommonConfiguration(\n\t\t\t\t\t\tmanagerInstance.Spec.CommonConfiguration,\n\t\t\t\t\t\tkubemanagerManagerInstance.Spec.CommonConfiguration)\n\t\t\t\t\terr = r.Client.Update(context.TODO(), instance)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn reconcile.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tconfigMap, err := instance.CreateConfigMap(request.Name+\"-\"+instanceType+\"-configmap\",\n\t\tr.Client,\n\t\tr.Scheme,\n\t\trequest)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tintendedDeployment, err := instance.PrepareIntendedDeployment(GetDeployment(),\n\t\t&instance.Spec.CommonConfiguration,\n\t\trequest,\n\t\tr.Scheme)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tinstance.AddVolumesToIntendedDeployments(intendedDeployment,\n\t\tmap[string]string{configMap.Name: request.Name + \"-\" + instanceType + \"-volume\"})\n\n\tvar serviceAccountName string\n\tif instance.Spec.ServiceConfiguration.ServiceAccount != \"\" {\n\t\tserviceAccountName = instance.Spec.ServiceConfiguration.ServiceAccount\n\t} else {\n\t\tserviceAccountName = \"contrail-service-account\"\n\t}\n\n\tvar clusterRoleName string\n\tif instance.Spec.ServiceConfiguration.ClusterRole != \"\" {\n\t\tclusterRoleName = instance.Spec.ServiceConfiguration.ClusterRole\n\t} else {\n\t\tclusterRoleName = \"contrail-cluster-role\"\n\t}\n\n\tvar clusterRoleBindingName string\n\tif instance.Spec.ServiceConfiguration.ClusterRoleBinding != \"\" {\n\t\tclusterRoleBindingName = instance.Spec.ServiceConfiguration.ClusterRoleBinding\n\t} else {\n\t\tclusterRoleBindingName = \"contrail-cluster-role-binding\"\n\t}\n\n\texistingServiceAccount := &corev1.ServiceAccount{}\n\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: serviceAccountName, Namespace: instance.Namespace}, existingServiceAccount)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tserviceAccount := &corev1.ServiceAccount{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: 
\"v1\",\n\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: serviceAccountName,\n\t\t\t\tNamespace: instance.Namespace,\n\t\t\t},\n\t\t}\n\t\tcontrollerutil.SetControllerReference(instance, serviceAccount, r.Scheme)\n\t\terr = r.Client.Create(context.TODO(), serviceAccount)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\texistingClusterRole := &rbacv1.ClusterRole{}\n\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: clusterRoleName}, existingClusterRole)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tclusterRole := &rbacv1.ClusterRole{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"rbac/v1\",\n\t\t\t\tKind: \"ClusterRole\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: clusterRoleName,\n\t\t\t\tNamespace: instance.Namespace,\n\t\t\t},\n\t\t\tRules: []rbacv1.PolicyRule{{\n\t\t\t\tVerbs: []string{\n\t\t\t\t\t\"*\",\n\t\t\t\t},\n\t\t\t\tAPIGroups: []string{\n\t\t\t\t\t\"*\",\n\t\t\t\t},\n\t\t\t\tResources: []string{\n\t\t\t\t\t\"*\",\n\t\t\t\t},\n\t\t\t}},\n\t\t}\n\t\tcontrollerutil.SetControllerReference(instance, clusterRole, r.Scheme)\n\t\terr = r.Client.Create(context.TODO(), clusterRole)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\texistingClusterRoleBinding := &rbacv1.ClusterRoleBinding{}\n\terr = r.Client.Get(context.TODO(), types.NamespacedName{Name: clusterRoleBindingName}, existingClusterRoleBinding)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tclusterRoleBinding := &rbacv1.ClusterRoleBinding{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"rbac/v1\",\n\t\t\t\tKind: \"ClusterRoleBinding\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: clusterRoleBindingName,\n\t\t\t\tNamespace: instance.Namespace,\n\t\t\t},\n\t\t\tSubjects: []rbacv1.Subject{{\n\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\tName: serviceAccountName,\n\t\t\t\tNamespace: instance.Namespace,\n\t\t\t}},\n\t\t\tRoleRef: 
rbacv1.RoleRef{\n\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\tKind: \"ClusterRole\",\n\t\t\t\tName: clusterRoleName,\n\t\t\t},\n\t\t}\n\t\tcontrollerutil.SetControllerReference(instance, clusterRoleBinding, r.Scheme)\n\t\terr = r.Client.Create(context.TODO(), clusterRoleBinding)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\tintendedDeployment.Spec.Template.Spec.ServiceAccountName = serviceAccountName\n\tfor idx, container := range intendedDeployment.Spec.Template.Spec.Containers {\n\t\tif container.Name == \"kubemanager\" {\n\t\t\tcommand := []string{\"bash\", \"-c\",\n\t\t\t\t\"/usr/bin/python /usr/bin/contrail-kube-manager -c /etc/mycontrail/kubemanager.${POD_IP}\"}\n\t\t\t//command = []string{\"sh\", \"-c\", \"while true; do echo hello; sleep 10;done\"}\n\t\t\t(&intendedDeployment.Spec.Template.Spec.Containers[idx]).Command = command\n\n\t\t\tvolumeMountList := []corev1.VolumeMount{}\n\t\t\tif len((&intendedDeployment.Spec.Template.Spec.Containers[idx]).VolumeMounts) > 0 {\n\t\t\t\tvolumeMountList = (&intendedDeployment.Spec.Template.Spec.Containers[idx]).VolumeMounts\n\t\t\t}\n\t\t\tvolumeMount := corev1.VolumeMount{\n\t\t\t\tName: request.Name + \"-\" + instanceType + \"-volume\",\n\t\t\t\tMountPath: \"/etc/mycontrail\",\n\t\t\t}\n\t\t\tvolumeMountList = append(volumeMountList, volumeMount)\n\t\t\t(&intendedDeployment.Spec.Template.Spec.Containers[idx]).VolumeMounts = volumeMountList\n\t\t\t(&intendedDeployment.Spec.Template.Spec.Containers[idx]).Image = instance.Spec.ServiceConfiguration.Images[container.Name]\n\t\t}\n\t}\n\n\tfor idx, container := range intendedDeployment.Spec.Template.Spec.InitContainers {\n\t\tfor containerName, image := range instance.Spec.ServiceConfiguration.Images {\n\t\t\tif containerName == container.Name {\n\t\t\t\t(&intendedDeployment.Spec.Template.Spec.InitContainers[idx]).Image = image\n\t\t\t}\n\t\t}\n\t}\n\n\terr = 
instance.CompareIntendedWithCurrentDeployment(intendedDeployment,\n\t\t&instance.Spec.CommonConfiguration,\n\t\trequest,\n\t\tr.Scheme,\n\t\tr.Client,\n\t\tfalse)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tpodIPList, podIPMap, err := instance.PodIPListAndIPMap(request, r.Client)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tif len(podIPList.Items) > 0 {\n\t\terr = instance.InstanceConfiguration(request,\n\t\t\tpodIPList,\n\t\t\tr.Client)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\terr = instance.SetPodsToReady(podIPList, r.Client)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\terr = instance.ManageNodeStatus(podIPMap, r.Client)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\terr = instance.SetInstanceActive(r.Client, &instance.Status, intendedDeployment, request)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *Reconciler) Reconcile(log logr.Logger) error {\n\tlog = log.WithValues(\"component\", envoyutils.ComponentName)\n\n\tlog.V(1).Info(\"Reconciling\")\n\n\tif r.KafkaCluster.Spec.GetIngressController() == envoyutils.IngressControllerName {\n\t\tfor _, eListener := range r.KafkaCluster.Spec.ListenersConfig.ExternalListeners {\n\t\t\tif eListener.GetAccessMethod() == corev1.ServiceTypeLoadBalancer {\n\t\t\t\tingressConfigs, defaultControllerName, err := util.GetIngressConfigs(r.KafkaCluster.Spec, eListener)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tvar externalListernerResources []resources.ResourceWithLogAndExternalListenerSpecificInfos\n\t\t\t\texternalListernerResources = append(externalListernerResources,\n\t\t\t\t\tr.service,\n\t\t\t\t\tr.configMap,\n\t\t\t\t\tr.deployment,\n\t\t\t\t)\n\n\t\t\t\tif r.KafkaCluster.Spec.EnvoyConfig.GetDistruptionBudget().DisruptionBudget.Create {\n\t\t\t\t\texternalListernerResources = append(externalListernerResources, 
r.podDisruptionBudget)\n\t\t\t\t}\n\t\t\t\tfor name, ingressConfig := range ingressConfigs {\n\t\t\t\t\tif !util.IsIngressConfigInUse(name, defaultControllerName, r.KafkaCluster, log) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, res := range externalListernerResources {\n\t\t\t\t\t\to := res(log, eListener, ingressConfig, name, defaultControllerName)\n\t\t\t\t\t\terr := k8sutil.Reconcile(log, r.Client, o, r.KafkaCluster)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.V(1).Info(\"Reconciled\")\n\n\treturn nil\n}", "func (r *Reconciler) reconcile(reqLogger logr.Logger, current *appsv1.DaemonSet) (reconcile.Result, error) {\n\t// Compare labels\n\tobjLabels := current.GetLabels()\n\n\tif !utils.CheckSubset(fbsyncer.Labels, objLabels) {\n\t\treqLogger.Info(\"Is not interesting\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tsyncers := []syncer.Interface{fbsyncer.NewFluentbitSyncer(r.client, r.scheme),\n\t\tfbsyncer.NewFluentbitCfgMapSyncer(r.client, r.scheme),\n\t}\n\n\tfor _, sync := range syncers {\n\t\tif err := syncer.Sync(context.TODO(), sync, r.recorder); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *ReconcileCanary) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Canary\")\n\n\t// Fetch the Canary instance\n\tinstance := &kharonv1alpha1.Canary{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Validate the CR instance\n\tif ok, err := r.IsValid(instance); !ok {\n\t\treturn r.ManageError(instance, err)\n\t}\n\n\t// Search for the target ref\n\tvar target runtime.Object\n\tvar targetRef = instance.Spec.TargetRef\n\tswitch kind := instance.Spec.TargetRef.Kind; kind {\n\tcase \"Deployment\":\n\t\ttarget = &appsv1.Deployment{}\n\t\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: targetRef.Name, Namespace: request.NamespacedName.Namespace}, target)\n\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\tlog.Info(fmt.Sprintf(\"Target Deployment was not found!\"))\n\t\t\treturn r.ManageError(instance, err)\n\t\t} else if err != nil {\n\t\t\treturn r.ManageError(instance, err)\n\t\t}\n\tcase \"DeploymentConfig\":\n\t\ttarget = &oappsv1.DeploymentConfig{}\n\t\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: targetRef.Name, Namespace: request.NamespacedName.Namespace}, target)\n\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\tlog.Info(fmt.Sprintf(\"Target DeploymentConfig was not found!\"))\n\t\t\treturn r.ManageError(instance, err)\n\t\t} else if err != nil {\n\t\t\treturn r.ManageError(instance, err)\n\t\t}\n\tdefault:\n\t\tlog.Info(\"==== isOther ====\" + kind)\n\t}\n\n\t//log.Info(fmt.Sprintf(\"==== target ==== %s\", target))\n\n\t// Now that we have a target let's initialize the CR instance\n\tif initialized, err := r.IsInitialized(instance, target); err == nil && !initialized {\n\t\terr := r.client.Update(context.TODO(), instance)\n\t\tif err != nil {\n\t\t\tlog.Error(err, errorUnableToUpdateInstance, \"instance\", instance)\n\t\t\treturn r.ManageError(instance, err)\n\t\t}\n\t\treturn reconcile.Result{}, nil\n\t} else {\n\t\tif err != nil {\n\t\t\treturn r.ManageError(instance, err)\n\t\t}\n\t}\n\n\t// 
If reentering from a canary rollback\n\tif instance.Status.Status == kharonv1alpha1.CanaryConditionStatusFailure && instance.Status.Reason == errorRolledbackRelease {\n\t\t// If target is already pointing to the previous release, we're fine\n\t\tif instance.Spec.TargetRef == instance.Status.ReleaseHistory[len(instance.Status.ReleaseHistory)-1].Ref {\n\t\t\treturn r.ManageSuccess(instance, 0, kharonv1alpha1.NoAction)\n\t\t}\n\n\t\t// Else... we need to update TargetRef to point to the current release (hence rollback)\n\t\tfromTarget := instance.Spec.TargetRef\n\t\tinstance.Spec.TargetRef = instance.Status.ReleaseHistory[len(instance.Status.ReleaseHistory)-1].Ref\n\t\tif err := r.client.Update(context.TODO(), instance); err != nil {\n\t\t\tlog.Error(err, errorUnableToUpdateInstance, \"instance\", instance)\n\t\t\treturn r.ManageError(instance, err)\n\t\t}\n\t\t// Send notification event\n\t\tr.recorder.Eventf(instance, \"Normal\", string(kharonv1alpha1.RollbackReleaseEnd), \"Instance %s was rollback from %s to %s\", instance.ObjectMeta.Name, fromTarget.Name, instance.Spec.TargetRef.Name)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// Canary is inititialized, target is fine... cotainer, port... 
all OK\n\n\t// First we have to figure out what action to trigger\n\n\t// If there's no Primary\n\tif len(instance.Status.ReleaseHistory) <= 0 {\n\t\t// Then Primary is the TargetRef ==> Action: Create Primary Release (and leave)\n\t\treturn r.CreatePrimaryRelease(instance)\n\t} else {\n\t\t// Else, there's Primary\n\n\t\t// If TargetRef is different\n\t\tif instance.Spec.TargetRef != instance.Status.ReleaseHistory[len(instance.Status.ReleaseHistory)-1].Ref {\n\n\t\t\t// Then TargetRef is a Canary (a Canary IS already running OR starting)\n\n\t\t\t// If Canary metric is not met, increase failedCheck counter\n\t\t\tif metricValue, err := _metrics.ExecuteMetricQuery(instance); err == nil {\n\t\t\t\tcurrentCanaryMetricValue.WithLabelValues(instance.Namespace, instance.Name, instance.Spec.TargetRef.Name).Set(metricValue)\n\t\t\t\tinstance.Status.CanaryMetricValue = metricValue\n\t\t\t\tif !_metrics.ValidateMetricValue(metricValue, instance.Spec.CanaryAnalysis.Metric.Operator, instance.Spec.CanaryAnalysis.Metric.Threshold) {\n\t\t\t\t\tinstance.Status.FailedChecks++\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tlog.Error(err, fmt.Sprintf(\"Error %s\", err))\n\t\t\t}\n\n\t\t\t// If failedCheck threshold is met, rollback\n\t\t\tif instance.Status.FailedChecks > instance.Spec.CanaryAnalysis.Threshold {\n\t\t\t\treturn r.RollbackRelease(instance)\n\t\t\t}\n\n\t\t\t// If it's been more than the interval beween Canary steps\n\t\t\ttimeSinceLastStep := time.Since(instance.Status.LastStepTime.Time)\n\t\t\tif timeSinceLastStep > time.Duration(instance.Spec.CanaryAnalysis.Interval)*time.Second {\n\t\t\t\t// If Progress is < 100 % ==> Action: Progress Canary Release\n\t\t\t\tif instance.Status.CanaryWeight < 100 {\n\t\t\t\t\treturn r.ProgressCanaryRelease(instance)\n\t\t\t\t}\n\t\t\t\t// Else ==> Action: End Canary Release ==> Action Create Primary Release From Canary\n\t\t\t\treturn r.EndCanaryRelease(instance)\n\t\t\t} else {\n\t\t\t\treturn r.ManageSuccess(instance, 
time.Duration(instance.Spec.CanaryAnalysis.Metric.Interval)*time.Second, kharonv1alpha1.RequeueEvent)\n\t\t\t}\n\t\t} else {\n\t\t\t// If TargetRef is the same ==> Action: No Action ==> it means reset status to zero (so to speak) if it's not zero\n\t\t\tlog.Info(\"ACTION {NO_ACTION}\")\n\t\t\treturn r.ManageSuccess(instance, 0, kharonv1alpha1.NoAction)\n\t\t}\n\t}\n}", "func ReconcileRBAC(er record.EventRecorder, k8sClient client.Client, saNamespace string, resNames *factory.ForwarderResourceNames, owner metav1.OwnerReference) error {\n\tdesiredCRB := NewMetaDataReaderClusterRoleBinding(saNamespace, resNames.MetadataReaderClusterRoleBinding, resNames.ServiceAccount, owner)\n\tif err := reconcile.ClusterRoleBinding(k8sClient, resNames.MetadataReaderClusterRoleBinding, func() *rbacv1.ClusterRoleBinding { return desiredCRB }); err != nil {\n\t\treturn err\n\t}\n\tdesiredSCCRole := NewServiceAccountSCCRole(saNamespace, resNames.CommonName, owner)\n\tif err := reconcile.Role(er, k8sClient, desiredSCCRole); err != nil {\n\t\treturn err\n\t}\n\n\tdesiredSCCRoleBinding := NewServiceAccountSCCRoleBinding(saNamespace, resNames.CommonName, resNames.ServiceAccount, owner)\n\treturn reconcile.RoleBinding(er, k8sClient, desiredSCCRoleBinding)\n}", "func (r *HierarchyConfigReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tif EX[req.Namespace] {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tstats.StartHierConfigReconcile()\n\tdefer stats.StopHierConfigReconcile()\n\n\tctx := context.Background()\n\tns := req.NamespacedName.Namespace\n\n\trid := (int)(atomic.AddInt32(&r.reconcileID, 1))\n\tlog := r.Log.WithValues(\"ns\", ns, \"rid\", rid)\n\n\t// TODO remove this log and use the HNSReconcilerEnabled to toggle the behavour of this\n\t// reconciler accordingly. 
See issue: https://github.com/kubernetes-sigs/multi-tenancy/issues/467\n\t// Output a log for testing.\n\tlog.Info(\"HC will be reconciled with\", \"HNSReconcilerEnabled\", r.HNSReconcilerEnabled)\n\n\treturn ctrl.Result{}, r.reconcile(ctx, log, ns)\n}", "func (r *azureClusterReconciler) Reconcile(ctx context.Context) error {\n\tif err := r.createOrUpdateNetworkAPIServerIP(); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create or update network API server IP for cluster %s in location %s\", r.scope.ClusterName(), r.scope.Location())\n\t}\n\n\tif err := r.setFailureDomainsForLocation(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get availability zones\")\n\t}\n\n\tr.scope.SetControlPlaneIngressRules()\n\n\tif err := r.groupsSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile resource group\")\n\t}\n\n\tif err := r.vnetSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile virtual network\")\n\t}\n\n\tif err := r.securityGroupSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile network security group\")\n\t}\n\n\tif err := r.routeTableSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile route table\")\n\t}\n\n\tif err := r.subnetsSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile subnet\")\n\t}\n\n\tif err := r.publicIPSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile public IP\")\n\t}\n\n\tif err := r.loadBalancerSvc.Reconcile(ctx); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to reconcile load balancer\")\n\t}\n\n\treturn nil\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {\n\tlog := r.log.WithValues(strings.ToLower(r.gvk.Kind), req.NamespacedName)\n\tlog.V(1).Info(\"Reconciliation triggered\")\n\n\tobj := &unstructured.Unstructured{}\n\tobj.SetGroupVersionKind(*r.gvk)\n\terr = 
r.client.Get(ctx, req.NamespacedName, obj)\n\tif apierrors.IsNotFound(err) {\n\t\tlog.V(1).Info(\"Resource %s/%s not found, nothing to do\", req.NamespacedName.Namespace, req.NamespacedName.Name)\n\t\treturn ctrl.Result{}, nil\n\t}\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tu := updater.New(r.client)\n\tdefer func() {\n\t\tapplyErr := u.Apply(ctx, obj)\n\t\tif err == nil && !apierrors.IsNotFound(applyErr) {\n\t\t\terr = applyErr\n\t\t}\n\t}()\n\n\tactionClient, err := r.actionClientGetter.ActionClientFor(obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingClient, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeInitialized),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\t// NOTE: If obj has the uninstall finalizer, that means a release WAS deployed at some point\n\t\t// in the past, but we don't know if it still is because we don't have an actionClient to check.\n\t\t// So the question is, what do we do with the finalizer? We could:\n\t\t// - Leave it in place. This would make the CR impossible to delete without either resolving this error, or\n\t\t// manually uninstalling the release, deleting the finalizer, and deleting the CR.\n\t\t// - Remove the finalizer. 
This would make it possible to delete the CR, but it would leave around any\n\t\t// release resources that are not owned by the CR (those in the cluster scope or in other namespaces).\n\t\t//\n\t\t// The decision made for now is to leave the finalizer in place, so that the user can intervene and try to\n\t\t// resolve the issue, instead of the operator silently leaving some dangling resources hanging around after the\n\t\t// CR is deleted.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// As soon as we get the actionClient, lookup the release and\n\t// update the status with this info. We need to do this as\n\t// early as possible in case other irreconcilable errors occur.\n\t//\n\t// We also make sure not to return any errors we encounter so\n\t// we can still attempt an uninstall if the CR is being deleted.\n\trel, err := actionClient.Get(obj.GetName())\n\tif errors.Is(err, driver.ErrReleaseNotFound) {\n\t\tu.UpdateStatus(updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, \"\", \"\")))\n\t} else if err == nil {\n\t\tensureDeployedRelease(&u, rel)\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Initialized(corev1.ConditionTrue, \"\", \"\")))\n\n\tif obj.GetDeletionTimestamp() != nil {\n\t\terr := r.handleDeletion(ctx, actionClient, obj, log)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tvals, err := r.getValues(ctx, obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingValues, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trel, state, err := r.getReleaseState(actionClient, obj, vals.AsMap())\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingReleaseState, 
err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")))\n\n\tfor _, h := range r.preHooks {\n\t\tif err := h.Exec(obj, vals, log); err != nil {\n\t\t\tlog.Error(err, \"pre-release hook failed\")\n\t\t}\n\t}\n\n\tswitch state {\n\tcase stateNeedsInstall:\n\t\trel, err = r.doInstall(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateNeedsUpgrade:\n\t\trel, err = r.doUpgrade(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateUnchanged:\n\t\tif err := r.doReconcile(actionClient, &u, rel, log); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\tdefault:\n\t\treturn ctrl.Result{}, fmt.Errorf(\"unexpected release state: %s\", state)\n\t}\n\n\tfor _, h := range r.postHooks {\n\t\tif err := h.Exec(obj, *rel, log); err != nil {\n\t\t\tlog.Error(err, \"post-release hook failed\", \"name\", rel.Name, \"version\", rel.Version)\n\t\t}\n\t}\n\n\tensureDeployedRelease(&u, rel)\n\tu.UpdateStatus(\n\t\tupdater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, \"\", \"\")),\n\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")),\n\t)\n\n\treturn ctrl.Result{RequeueAfter: r.reconcilePeriod}, nil\n}", "func (r *FoundationDBClusterReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {\n\tcluster := &fdbtypes.FoundationDBCluster{}\n\n\terr := r.Get(ctx, request.NamespacedName, cluster)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn ctrl.Result{}, nil\n\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tclusterLog := 
log.WithValues(\"namespace\", cluster.Namespace, \"cluster\", cluster.Name)\n\n\tif cluster.Spec.Skip {\n\t\tclusterLog.Info(\"Skipping cluster with skip value true\", \"skip\", cluster.Spec.Skip)\n\t\t// Don't requeue\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\terr = internal.NormalizeClusterSpec(cluster, r.DeprecationOptions)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tadminClient, err := r.getDatabaseClientProvider().GetAdminClient(cluster, r)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tdefer adminClient.Close()\n\n\tsupportedVersion, err := adminClient.VersionSupported(cluster.Spec.Version)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tif !supportedVersion {\n\t\treturn ctrl.Result{}, fmt.Errorf(\"version %s is not supported\", cluster.Spec.Version)\n\t}\n\n\tsubReconcilers := []clusterSubReconciler{\n\t\tupdateStatus{},\n\t\tupdateLockConfiguration{},\n\t\tupdateConfigMap{},\n\t\tcheckClientCompatibility{},\n\t\treplaceMisconfiguredProcessGroups{},\n\t\treplaceFailedProcessGroups{},\n\t\tdeletePodsForBuggification{},\n\t\taddProcessGroups{},\n\t\taddServices{},\n\t\taddPVCs{},\n\t\taddPods{},\n\t\tgenerateInitialClusterFile{},\n\t\tupdateSidecarVersions{},\n\t\tupdatePodConfig{},\n\t\tupdateLabels{},\n\t\tupdateDatabaseConfiguration{},\n\t\tchooseRemovals{},\n\t\texcludeInstances{},\n\t\tchangeCoordinators{},\n\t\tbounceProcesses{},\n\t\tupdatePods{},\n\t\tremoveServices{},\n\t\tremoveProcessGroups{},\n\t\tupdateStatus{},\n\t}\n\n\toriginalGeneration := cluster.ObjectMeta.Generation\n\tnormalizedSpec := cluster.Spec.DeepCopy()\n\tdelayedRequeue := false\n\n\tfor _, subReconciler := range subReconcilers {\n\t\t// We have to set the normalized spec here again otherwise any call to Update() for the status of the cluster\n\t\t// will reset all normalized fields...\n\t\tcluster.Spec = *(normalizedSpec.DeepCopy())\n\t\tclusterLog.Info(\"Attempting to run sub-reconciler\", \"subReconciler\", fmt.Sprintf(\"%T\", 
subReconciler))\n\n\t\trequeue := subReconciler.reconcile(r, ctx, cluster)\n\t\tif requeue == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif requeue.delayedRequeue {\n\t\t\tclusterLog.Info(\"Delaying requeue for sub-reconciler\",\n\t\t\t\t\"subReconciler\", fmt.Sprintf(\"%T\", subReconciler),\n\t\t\t\t\"message\", requeue.message)\n\t\t\tdelayedRequeue = true\n\t\t\tcontinue\n\t\t}\n\n\t\treturn processRequeue(requeue, subReconciler, cluster, r.Recorder, clusterLog)\n\t}\n\n\tif cluster.Status.Generations.Reconciled < originalGeneration || delayedRequeue {\n\t\tclusterLog.Info(\"Cluster was not fully reconciled by reconciliation process\", \"status\", cluster.Status.Generations)\n\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t}\n\n\tclusterLog.Info(\"Reconciliation complete\", \"generation\", cluster.Status.Generations.Reconciled)\n\tr.Recorder.Event(cluster, corev1.EventTypeNormal, \"ReconciliationComplete\", fmt.Sprintf(\"Reconciled generation %d\", cluster.Status.Generations.Reconciled))\n\n\treturn ctrl.Result{}, nil\n}", "func (sbc *ClusterBrokerController) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tcsbExists, err := sbc.clusterBrokerFacade.Exist()\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tcacList := v1alpha1.ClusterAddonsConfigurationList{}\n\terr = sbc.cli.List(context.TODO(), &cacList)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tconfigurationsExist := len(cacList.Items) > 0\n\tinstancesExist, err := sbc.instanceChecker.AnyServiceInstanceExistsForClusterServiceBroker()\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif csbExists && (!configurationsExist && !instancesExist) {\n\t\tif err = sbc.clusterBrokerFacade.Delete(); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\tif !csbExists && (configurationsExist || instancesExist) {\n\t\tif err = sbc.clusterBrokerFacade.Create(); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\treturn reconcile.Result{}, 
nil\n}", "func (r *KubeFedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := log.FromContext(ctx)\n\n\tobj := &corev1beta1.KubeFedCluster{}\n\tif err := r.Get(ctx, req.NamespacedName, obj); err != nil {\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\tmgr, err := util.NewManager(obj, r.Namespace, r.Client, r.Scheme)\n\tif err != nil {\n\t\tlogger.Error(err, \"Unable to create manager\", \"cluster\", obj.Name)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tutil.AddclusterClient(obj.Name, mgr.GetClient())\n\n\tif err = (&typescontrollers.FederatedObjectReconciler{\n\t\tClient: mgr.GetClient(),\n\t\tScheme: mgr.GetScheme(),\n\t\tMaxConcurrentReconciles: r.MaxConcurrentReconciles,\n\t\tClusterName: util.FederationClusterName,\n\t\tTargetClusterName: obj.Name,\n\t}).SetupWithManager(mgr); err != nil {\n\t\tlogger.Error(err, \"Unable to create controller\", \"cluster\", obj.Name)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tgo mgr.Start(context.TODO())\n\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {\n\tlog := r.log.WithValues(strings.ToLower(r.gvk.Kind), req.NamespacedName)\n\n\tobj := &unstructured.Unstructured{}\n\tobj.SetGroupVersionKind(*r.gvk)\n\terr = r.client.Get(ctx, req.NamespacedName, obj)\n\tif apierrors.IsNotFound(err) {\n\t\treturn ctrl.Result{}, nil\n\t}\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tu := updater.New(r.client)\n\tdefer func() {\n\t\tapplyErr := u.Apply(ctx, obj)\n\t\tif err == nil && !apierrors.IsNotFound(applyErr) {\n\t\t\terr = applyErr\n\t\t}\n\t}()\n\n\tactionClient, err := r.actionClientGetter.ActionClientFor(obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingClient, 
err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeInitialized),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\t// NOTE: If obj has the uninstall finalizer, that means a release WAS deployed at some point\n\t\t// in the past, but we don't know if it still is because we don't have an actionClient to check.\n\t\t// So the question is, what do we do with the finalizer? We could:\n\t\t// - Leave it in place. This would make the CR impossible to delete without either resolving this error, or\n\t\t// manually uninstalling the release, deleting the finalizer, and deleting the CR.\n\t\t// - Remove the finalizer. This would make it possible to delete the CR, but it would leave around any\n\t\t// release resources that are not owned by the CR (those in the cluster scope or in other namespaces).\n\t\t//\n\t\t// The decision made for now is to leave the finalizer in place, so that the user can intervene and try to\n\t\t// resolve the issue, instead of the operator silently leaving some dangling resources hanging around after the\n\t\t// CR is deleted.\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// As soon as we get the actionClient, lookup the release and\n\t// update the status with this info. 
We need to do this as\n\t// early as possible in case other irreconcilable errors occur.\n\t//\n\t// We also make sure not to return any errors we encounter so\n\t// we can still attempt an uninstall if the CR is being deleted.\n\trel, err := actionClient.Get(obj.GetName())\n\tif errors.Is(err, driver.ErrReleaseNotFound) {\n\t\tu.UpdateStatus(updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, \"\", \"\")))\n\t} else if err == nil {\n\t\tr.ensureDeployedRelease(&u, rel)\n\t}\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Initialized(corev1.ConditionTrue, \"\", \"\")))\n\n\tfor _, ext := range r.preExtensions {\n\t\tif err := ext(ctx, obj, u.UpdateStatusCustom, r.log); err != nil {\n\t\t\tu.UpdateStatus(\n\t\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)),\n\t\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\t)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tif obj.GetDeletionTimestamp() != nil {\n\t\terr := r.handleDeletion(ctx, actionClient, obj, log)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tvals, err := r.getValues(ctx, obj)\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingValues, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trel, state, err := r.getReleaseState(actionClient, obj, vals.AsMap())\n\tif err != nil {\n\t\tu.UpdateStatus(\n\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingReleaseState, err)),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\tupdater.EnsureConditionUnknown(conditions.TypeDeployed),\n\t\t\tupdater.EnsureDeployedRelease(nil),\n\t\t)\n\t\treturn ctrl.Result{}, err\n\t}\n\tif state == statePending {\n\t\treturn r.handlePending(actionClient, rel, &u, 
log)\n\t}\n\n\tu.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")))\n\n\tfor _, h := range r.preHooks {\n\t\tif err := h.Exec(obj, vals, log); err != nil {\n\t\t\tlog.Error(err, \"pre-release hook failed\")\n\t\t}\n\t}\n\n\tswitch state {\n\tcase stateNeedsInstall:\n\t\trel, err = r.doInstall(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateNeedsUpgrade:\n\t\trel, err = r.doUpgrade(actionClient, &u, obj, vals.AsMap(), log)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\tcase stateUnchanged:\n\t\tif err := r.doReconcile(actionClient, &u, rel, log); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\tdefault:\n\t\treturn ctrl.Result{}, fmt.Errorf(\"unexpected release state: %s\", state)\n\t}\n\n\tfor _, h := range r.postHooks {\n\t\tif err := h.Exec(obj, *rel, log); err != nil {\n\t\t\tlog.Error(err, \"post-release hook failed\", \"name\", rel.Name, \"version\", rel.Version)\n\t\t}\n\t}\n\n\tfor _, ext := range r.postExtensions {\n\t\tif err := ext(ctx, obj, u.UpdateStatusCustom, r.log); err != nil {\n\t\t\tu.UpdateStatus(\n\t\t\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)),\n\t\t\t\tupdater.EnsureConditionUnknown(conditions.TypeReleaseFailed),\n\t\t\t)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tr.ensureDeployedRelease(&u, rel)\n\tu.UpdateStatus(\n\t\tupdater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, \"\", \"\")),\n\t\tupdater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, \"\", \"\")),\n\t)\n\n\treturn ctrl.Result{RequeueAfter: r.reconcilePeriod}, nil\n}", "func (r *ReconcileCollectd) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Collectd\")\n\n\t// Fetch the 
Collectd instance\n\tinstance := &collectdmonv1alpha1.Collectd{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treqLogger.Info(\"Request object not found\")\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Assign the generated resource version to the status\n\tif instance.Status.RevNumber == \"\" {\n\t\tinstance.Status.RevNumber = instance.ObjectMeta.ResourceVersion\n\t\tr.UpdateCondition(instance, \"provision spec to desired state\", reqLogger)\n\t}\n\n\t// Check if serviceaccount already exists, if not create a new one\n\treturnValues := r.ReconcileServiceAccount(instance, reqLogger)\n\tif returnValues.err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if returnValues.reQueue {\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\t//currentConfigHash, err := r.ReconcileConfigMap(instance, reqLogger)\n\treturnValues = r.ReconcileConfigMapWithHash(instance, reqLogger)\n\tif returnValues.err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if returnValues.reQueue {\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\t//desiredConfigMap := &corev1.ConfigMap{} // where to ge desired configmap\n\t//eq := reflect.DeepEqual(currentConfigMap, currentConfigMap)\n\t//currentConfigHash\n\treturnValues = r.ReconcileDeployment(instance, returnValues.hash256String, reqLogger)\n\tif returnValues.err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if returnValues.reQueue {\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\t//size := instance.Spec.DeploymentPlan.Size\n\n\t// Pod already exists - don't requeue\n\n\treturn 
reconcile.Result{}, nil\n}", "func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {\n\tlog.V(logging.Debug).Info(\"reconciling\", \"kind\", v1alpha1.ReplicationGroupKindAPIVersion, \"request\", req)\n\n\tctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout)\n\tdefer cancel()\n\n\trd := &v1alpha1.ReplicationGroup{}\n\tif err := r.kube.Get(ctx, req.NamespacedName, rd); err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{Requeue: false}, nil\n\t\t}\n\t\treturn reconcile.Result{Requeue: false}, errors.Wrapf(err, \"cannot get resource %s\", req.NamespacedName)\n\t}\n\n\tclient, err := r.Connect(ctx, rd)\n\tif err != nil {\n\t\trd.Status.SetFailed(reasonFetchingClient, err.Error())\n\t\treturn reconcile.Result{Requeue: true}, errors.Wrapf(r.kube.Update(ctx, rd), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The resource has been deleted from the API server. Delete from AWS.\n\tif rd.DeletionTimestamp != nil {\n\t\treturn reconcile.Result{Requeue: client.Delete(ctx, rd)}, errors.Wrapf(r.kube.Update(ctx, rd), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The group is unnamed. Assume it has not been created in AWS.\n\tif rd.Status.GroupName == \"\" {\n\t\trequeue, authToken := client.Create(ctx, rd)\n\t\tif err := r.upsertSecret(ctx, connectionSecretWithPassword(rd, authToken)); err != nil {\n\t\t\trd.Status.SetFailed(reasonSyncingSecret, err.Error())\n\t\t\trequeue = true\n\t\t}\n\t\treturn reconcile.Result{Requeue: requeue}, errors.Wrapf(r.kube.Update(ctx, rd), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\tif err := r.upsertSecret(ctx, connectionSecret(rd)); err != nil {\n\t\trd.Status.SetFailed(reasonSyncingSecret, err.Error())\n\t\treturn reconcile.Result{Requeue: true}, errors.Wrapf(r.kube.Update(ctx, rd), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The resource exists in the API server and AWS. 
Sync it.\n\treturn reconcile.Result{Requeue: client.Sync(ctx, rd)}, errors.Wrapf(r.kube.Update(ctx, rd), \"cannot update resource %s\", req.NamespacedName)\n}", "func Reconcile(inquirer inquirer.ReconcilerInquirer) error {\n\tklog.V(1).Infof(\"reconciling component %q with role %q\", inquirer.Component().Name, inquirer.Component().Role)\n\tvar componentObj components.Component\n\tswitch inquirer.Component().Role {\n\tcase component.ControlPlaneRole:\n\t\tcomponentObj = &components.ControlPlane{}\n\tcase component.ControlPlaneIngressRole:\n\t\tcomponentObj = &components.ControlPlaneIngress{}\n\t}\n\tinquirer.Component().Conditions.SetCondition(\n\t\tcomponent.ReconcileStarted,\n\t\tconditions.ConditionTrue,\n\t)\n\tres := componentObj.Reconcile(inquirer)\n\tif res == nil {\n\t\tinquirer.Component().Conditions.SetCondition(\n\t\t\tcomponent.ReconcileSucceeded,\n\t\t\tconditions.ConditionTrue,\n\t\t)\n\t} else {\n\t\tinquirer.Component().Conditions.SetCondition(\n\t\t\tcomponent.ReconcileSucceeded,\n\t\t\tconditions.ConditionFalse,\n\t\t)\n\t}\n\treturn res\n}", "func (r *KonfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\treqLogger := log.FromContext(ctx)\n\treconcileStart := time.Now()\n\n\treqLogger.Info(\"Reconciling konfiguration\")\n\n\t// Look up the konfiguration that triggered this request\n\tkonfig := &konfigurationv1.Konfiguration{}\n\tif err := r.Client.Get(ctx, req.NamespacedName, konfig); err != nil {\n\t\t// Check if object was deleted\n\t\tif client.IgnoreNotFound(err) == nil {\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Record suspended status metric\n\tdefer r.recordSuspension(ctx, konfig)\n\n\t// Add our finalizer if it does not exist\n\tif !controllerutil.ContainsFinalizer(konfig, konfigurationv1.KonfigurationFinalizer) {\n\t\treqLogger.Info(\"Registering finalizer to Konfiguration\")\n\t\tcontrollerutil.AddFinalizer(konfig, 
konfigurationv1.KonfigurationFinalizer)\n\t\tif err := r.Update(ctx, konfig); err != nil {\n\t\t\treqLogger.Error(err, \"failed to register finalizer\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// Examine if the object is under deletion\n\tif !konfig.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\treturn r.reconcileDelete(ctx, konfig)\n\t}\n\n\t// Check if the konfiguration is suspended\n\tif konfig.IsSuspended() {\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetInterval(),\n\t\t}, nil\n\t}\n\n\t// Get the revision and the path we are going to operate on\n\trevision, path, clean, err := r.prepareSource(ctx, konfig)\n\tif err != nil {\n\t\tr.recordReadiness(ctx, konfig)\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetRetryInterval(),\n\t\t}, nil\n\t}\n\tdefer clean()\n\n\t// Check if there are any dependencies and that they are all ready\n\tif err := r.checkDependencies(ctx, konfig); err != nil {\n\t\tif statusErr := konfig.SetNotReady(ctx, r.Client, konfigurationv1.NewStatusMeta(\n\t\t\trevision, meta.DependencyNotReadyReason, err.Error(),\n\t\t)); statusErr != nil {\n\t\t\treqLogger.Error(err, \"failed to update status for dependency not ready\")\n\t\t}\n\t\tmsg := fmt.Sprintf(\"Dependencies do not meet ready condition, retrying in %s\", r.dependencyRequeueDuration.String())\n\t\treqLogger.Info(msg)\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityInfo,\n\t\t\tMessage: msg,\n\t\t})\n\t\tr.recordReadiness(ctx, konfig)\n\t\treturn ctrl.Result{RequeueAfter: r.dependencyRequeueDuration}, nil\n\t}\n\n\t// record reconciliation duration\n\tif r.MetricsRecorder != nil {\n\t\tobjRef, err := reference.GetReference(r.Scheme, konfig)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tdefer r.MetricsRecorder.RecordDuration(*objRef, reconcileStart)\n\t}\n\n\t// set the status to progressing\n\tif err := konfig.SetProgressing(ctx, r.Client); err != nil {\n\t\treqLogger.Error(err, \"unable 
to update status to progressing\")\n\t\treturn ctrl.Result{Requeue: true}, err\n\t}\n\tr.recordReadiness(ctx, konfig)\n\n\t// Do reconciliation\n\tsnapshot, err := r.reconcile(ctx, konfig, revision, path)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Error during reconciliation\")\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityError,\n\t\t\tMessage: err.Error(),\n\t\t})\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: konfig.GetRetryInterval(),\n\t\t}, nil\n\t}\n\n\tupdated := konfig.Status.Snapshot == nil || snapshot.Checksum != konfig.Status.Snapshot.Checksum\n\n\t// Set the konfiguration as ready\n\tmsg := fmt.Sprintf(\"Applied revision: %s\", revision)\n\tif err := konfig.SetReady(ctx, r.Client, snapshot, konfigurationv1.NewStatusMeta(\n\t\trevision, meta.ReconciliationSucceededReason, msg),\n\t); err != nil {\n\t\treturn ctrl.Result{Requeue: true}, err\n\t}\n\n\treqLogger.Info(fmt.Sprintf(\"Reconcile finished, next run in %s\", konfig.GetInterval().String()), \"Revision\", revision)\n\n\tif updated {\n\t\tr.event(ctx, konfig, &EventData{\n\t\t\tRevision: revision,\n\t\t\tSeverity: events.EventSeverityInfo,\n\t\t\tMessage: \"Update Complete\",\n\t\t\tMetadata: map[string]string{\n\t\t\t\t\"commit_status\": \"update\",\n\t\t\t},\n\t\t})\n\t}\n\treturn ctrl.Result{\n\t\tRequeueAfter: konfig.GetInterval(),\n\t}, nil\n}", "func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", req.Namespace, \"Request.Name\", req.Name)\n\treqLogger.Info(\"Reconciling RedisCluster\")\n\n\t// Fetch the RedisCluster instance\n\tinstance := &redisv1beta1.RedisCluster{}\n\terr := r.Client.Get(ctx, req.NamespacedName, instance)\n\tif err != nil {\n\t\treqLogger.Info(\"Error when retrieving namespaced name\", \"namespacedName\", req.NamespacedName)\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been 
deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treqLogger.Info(\"RedisCluster delete\")\n\t\t\tinstance.Namespace = req.NamespacedName.Namespace\n\t\t\tinstance.Name = req.NamespacedName.Name\n\t\t\tr.handler.metaCache.Del(instance)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treqLogger.V(5).Info(fmt.Sprintf(\"RedisCluster Spec:\\n %+v\", instance))\n\n\tif err = r.handler.Do(instance); err != nil {\n\t\tif err.Error() == needRequeueMsg {\n\t\t\treturn ctrl.Result{RequeueAfter: 20 * time.Second}, nil\n\t\t}\n\t\treqLogger.Error(err, \"Reconcile handler\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err = r.handler.rcChecker.CheckSentinelReadyReplicas(instance); err != nil {\n\t\treqLogger.Info(err.Error())\n\t\treturn ctrl.Result{RequeueAfter: 20 * time.Second}, nil\n\t}\n\n\treturn ctrl.Result{RequeueAfter: time.Duration(reconcileTime) * time.Second}, nil\n}", "func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tlogger := log.WithValues(\"WebhookConfig\", request.Name)\n\tlogger.Info(\"reconciling MutatingWebhookConfiguration\")\n\t// get current webhook config\n\tcurrentConfig := &v1beta1.MutatingWebhookConfiguration{}\n\terr := r.Client.Get(context.TODO(), request.NamespacedName, currentConfig)\n\tif err != nil {\n\t\tlog.Info(\"MutatingWebhookConfiguration does not exist yet. 
No action taken\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\tnamespace := request.Name[len(webhookConfigNamePrefix):]\n\tcaRoot, err := common.GetRootCertFromSecret(r.Client, namespace, serviceAccountSecretName)\n\tif err != nil {\n\t\tlogger.Info(\"could not get secret: \" + err.Error())\n\t\treturn reconcile.Result{}, nil\n\t}\n\t// update caBundle if it doesn't match what's in the secret\n\tupdated := false\n\tnewConfig := currentConfig.DeepCopyObject().(*v1beta1.MutatingWebhookConfiguration)\n\tfor i := range newConfig.Webhooks {\n\t\tupdated = common.InjectCABundle(&newConfig.Webhooks[i].ClientConfig, caRoot) || updated\n\t}\n\n\tif updated {\n\t\terr := r.Client.Update(context.TODO(), newConfig)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"failed to update CABundle\")\n\t\t}\n\t\tlogger.Info(\"CABundle updated\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tlogger.Info(\"Correct CABundle already present. Ignoring\")\n\treturn reconcile.Result{}, nil\n}", "func (r *ReconcileRethinkDBCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"namespace\", request.Namespace, \"name\", request.Name)\n\treqLogger.Info(\"reconciling RethinkDBCluster\")\n\n\t// Fetch the RethinkDBCluster instance\n\tcluster := &rethinkdbv1alpha1.RethinkDBCluster{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, cluster)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Verify resource defaults have been initialized\n\tif setDefaults(cluster) {\n\t\t// defaults updated, update and requeue\n\t\treqLogger.Info(\"default spec values initialized\")\n\t\treturn reconcile.Result{Requeue: true}, r.client.Update(context.TODO(), 
cluster)\n\t}\n\n\t// Reconcile the cluster CA secret\n\tcaSecret, err := r.reconcileCASecret(cluster)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile ca secret\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the cluster CA configmap\n\terr = r.reconcileCAConfigMap(cluster, caSecret)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile ca configmap\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the admin service\n\terr = r.reconcileAdminService(cluster)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile admin service\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the driver service\n\terr = r.reconcileDriverService(cluster)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile driver service\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the cluster TLS secrets\n\terr = r.reconcileTLSSecrets(cluster, caSecret)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile tls secrets\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the cluster admin secret\n\terr = r.reconcileAdminSecret(cluster)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile admin secret\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Reconcile the cluster persistent volume claims\n\t// err = r.reconcilePersistentVolumeClaims(cluster)\n\t// if err != nil {\n\t// \treqLogger.Error(err, \"unable to reconcile persistent volume claims\")\n\t// \treturn reconcile.Result{}, err\n\t// }\n\n\t// Reconcile the cluster server pods\n\terr = r.reconcileServerPods(cluster)\n\tif err != nil {\n\t\treqLogger.Error(err, \"unable to reconcile server pods\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// No errors, return and don't requeue\n\treturn reconcile.Result{}, nil\n}", "func (r *MilvusClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tif !config.IsDebug() {\n\t\tdefer func() {\n\t\t\tif err := 
recover(); err != nil {\n\t\t\t\tr.logger.Error(err.(error), \"reconcile panic\")\n\t\t\t}\n\t\t}()\n\t}\n\n\tmilvuscluster := &milvusiov1alpha1.MilvusCluster{}\n\tif err := r.Get(ctx, req.NamespacedName, milvuscluster); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// The resource may have be deleted after reconcile request coming in\n\t\t\t// Reconcile is done\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\treturn ctrl.Result{}, fmt.Errorf(\"error get milvus cluster: %w\", err)\n\t}\n\n\t// Finalize\n\tif milvuscluster.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tif !controllerutil.ContainsFinalizer(milvuscluster, MCFinalizerName) {\n\t\t\tcontrollerutil.AddFinalizer(milvuscluster, MCFinalizerName)\n\t\t\tif err := r.Update(ctx, milvuscluster); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tif controllerutil.ContainsFinalizer(milvuscluster, MCFinalizerName) {\n\t\t\tif err := r.Finalize(ctx, *milvuscluster); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tcontrollerutil.RemoveFinalizer(milvuscluster, MCFinalizerName)\n\t\t\tif err := r.Update(ctx, milvuscluster); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t\t// Stop reconciliation as the item is being deleted\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Start reconcile\n\tr.logger.Info(\"start reconcile\")\n\told := milvuscluster.DeepCopy()\n\n\tif err := r.SetDefault(ctx, milvuscluster); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif !IsEqual(old.Spec, milvuscluster.Spec) {\n\t\treturn ctrl.Result{}, r.Update(ctx, milvuscluster)\n\t}\n\n\tif err := r.ReconcileAll(ctx, *milvuscluster); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := r.UpdateStatus(ctx, milvuscluster); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif milvuscluster.Status.Status == v1alpha1.StatusUnHealthy {\n\t\treturn ctrl.Result{RequeueAfter: 30 * time.Second}, nil\n\t}\n\n\tif config.IsDebug() {\n\t\tdiff, err := 
client.MergeFrom(old).Data(milvuscluster)\n\t\tif err != nil {\n\t\t\tr.logger.Info(\"Update diff\", \"diff\", string(diff))\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (c *Controller) Reconcile(ctx context.Context) error {\n\tvar (\n\t\tl = log.WithFields(logrus.Fields{\n\t\t\t\"component\": \"Controller.Reconcile\",\n\t\t})\n\t)\n\n\t// retrieve all CiliumBGPPeeringPolicies\n\tpolicies, err := c.PolicyLister.List()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list CiliumBGPPeeringPolicies\")\n\t}\n\tl.WithField(\"count\", len(policies)).Debug(\"Successfully listed CiliumBGPPeeringPolicies\")\n\n\t// perform policy selection based on node.\n\tlabels, err := c.NodeSpec.Labels()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve labels for Node: %w\", err)\n\t}\n\tpolicy, err := PolicySelection(ctx, labels, policies)\n\tif err != nil {\n\t\tl.WithError(err).Error(\"Policy selection failed\")\n\t\tc.FullWithdrawal(ctx)\n\t\treturn err\n\t}\n\tif policy == nil {\n\t\t// no policy was discovered, tell router manager to withdrawal peers if\n\t\t// they are configured.\n\t\tl.Debug(\"No BGP peering policy applies to this node, any existing BGP sessions will be removed.\")\n\t\tc.FullWithdrawal(ctx)\n\t\treturn nil\n\t}\n\n\t// apply policy defaults to have consistent default config across sub-systems\n\tpolicy = policy.DeepCopy() // deepcopy to not modify the policy object in store\n\tpolicy.SetDefaults()\n\n\terr = c.validatePolicy(policy)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid BGP peering policy %s: %w\", policy.Name, err)\n\t}\n\n\t// parse any virtual router specific attributes defined on this node via\n\t// kubernetes annotations\n\t//\n\t// if we notice one or more malformed annotations report the errors up and\n\t// fail reconciliation.\n\tannotations, err := c.NodeSpec.Annotations()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve Node's annotations: %w\", err)\n\t}\n\n\tannoMap, err := 
NewAnnotationMap(annotations)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse annotations: %w\", err)\n\t}\n\n\tpodCIDRs, err := c.NodeSpec.PodCIDRs()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve Node's pod CIDR ranges: %w\", err)\n\t}\n\n\tcurrentNodeName, err := c.NodeSpec.CurrentNodeName()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve current node's name: %w\", err)\n\t}\n\n\tipv4, _ := ip.AddrFromIP(nodeaddr.GetIPv4())\n\tipv6, _ := ip.AddrFromIP(nodeaddr.GetIPv6())\n\n\t// define our current point-in-time control plane state.\n\tstate := &ControlPlaneState{\n\t\tPodCIDRs: podCIDRs,\n\t\tAnnotations: annoMap,\n\t\tIPv4: ipv4,\n\t\tIPv6: ipv6,\n\t\tCurrentNodeName: currentNodeName,\n\t}\n\n\t// call bgp sub-systems required to apply this policy's BGP topology.\n\tl.Debug(\"Asking configured BGPRouterManager to configure peering\")\n\tif err := c.BGPMgr.ConfigurePeers(ctx, policy, state); err != nil {\n\t\treturn fmt.Errorf(\"failed to configure BGP peers, cannot apply BGP peering policy: %w\", err)\n\t}\n\n\treturn nil\n}", "func (r *RoleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// The TeleportRole OpenAPI spec does not validate typing of Label fields like `node_labels`.\n\t// This means we can receive invalid data, by default it won't be unmarshalled properly and will crash the operator.\n\t// To handle this more gracefully we unmarshall first in an unstructured object.\n\t// The unstructured object will be converted later to a typed one, in r.UpsertExternal.\n\t// See `/operator/crdgen/schemagen.go` and https://github.com/gravitational/teleport/issues/15204 for context.\n\t// TODO: (Check how to handle multiple versions)\n\tobj := GetUnstructuredObjectFromGVK(TeleportRoleGVKV5)\n\treturn ResourceBaseReconciler{\n\t\tClient: r.Client,\n\t\tDeleteExternal: r.Delete,\n\t\tUpsertExternal: r.Upsert,\n\t}.Do(ctx, req, obj)\n}", "func (r *NicClusterPolicyReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\treqLogger := r.Log.WithValues(\"nicclusterpolicy\", req.NamespacedName)\n\treqLogger.V(consts.LogLevelInfo).Info(\"Reconciling NicClusterPolicy\")\n\n\t// Fetch the NicClusterPolicy instance\n\tinstance := &mellanoxv1alpha1.NicClusterPolicy{}\n\terr := r.Get(context.TODO(), req.NamespacedName, instance)\n\tif err != nil {\n\t\tif apiErrors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treqLogger.V(consts.LogLevelError).Info(\"Error occurred on GET CRD request from API server.\", \"error:\", err)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif req.Name != consts.NicClusterPolicyResourceName {\n\t\terr := r.handleUnsupportedInstance(instance, req, reqLogger)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Create a new State service catalog\n\tsc := state.NewInfoCatalog()\n\tif instance.Spec.OFEDDriver != nil || instance.Spec.NVPeerDriver != nil ||\n\t\tinstance.Spec.RdmaSharedDevicePlugin != nil {\n\t\t// Create node infoProvider and add to the service catalog\n\t\treqLogger.V(consts.LogLevelInfo).Info(\"Creating Node info provider\")\n\t\tnodeList := &corev1.NodeList{}\n\t\terr = r.List(context.TODO(), nodeList, nodeinfo.MellanoxNICListOptions...)\n\t\tif err != nil {\n\t\t\t// Failed to get node list\n\t\t\treqLogger.V(consts.LogLevelError).Info(\"Error occurred on LIST nodes request from API server.\", \"error:\", err)\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tnodePtrList := make([]*corev1.Node, len(nodeList.Items))\n\t\tnodeNames := make([]*string, len(nodeList.Items))\n\t\tfor i := range nodePtrList {\n\t\t\tnodePtrList[i] = &nodeList.Items[i]\n\t\t\tnodeNames[i] = 
&nodeList.Items[i].Name\n\t\t}\n\t\treqLogger.V(consts.LogLevelDebug).Info(\"Node info provider with\", \"Nodes:\", nodeNames)\n\t\tinfoProvider := nodeinfo.NewProvider(nodePtrList)\n\t\tsc.Add(state.InfoTypeNodeInfo, infoProvider)\n\t}\n\t// Create manager\n\tmanagerStatus, err := r.stateManager.SyncState(instance, sc)\n\tr.updateCrStatus(instance, managerStatus)\n\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\terr = r.updateNodeLabels(instance)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif managerStatus.Status != state.SyncStateReady {\n\t\treturn reconcile.Result{\n\t\t\tRequeueAfter: time.Duration(config.FromEnv().Controller.RequeueTimeSeconds) * time.Second,\n\t\t}, nil\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *PodRefreshReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t_ = context.Background()\n\t_ = r.Log.WithValues(\"secret\", req.NamespacedName)\n\n\tr.Log.Info(\"Reconciling CertManager TLS Certificates\")\n\n\t// Fetch secret in the cluster.\n\tsecret := &corev1.Secret{}\n\terr := r.Get(context.TODO(), req.NamespacedName, secret)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile req.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the req.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// If secret doesn't have cert-manager annotations, stop reconciliing it. This is the failsafe to prevent\n\t// a bounce on a resource that is not a cert-manager-related secret.\n\t// DEPRECATED(2020-10-23): This exists as a safety precuation, but predicates should be filtering these out. 
Remove in the future.\n\tants := secret.GetAnnotations()\n\tif _, ok := ants[issuerKindAnnotation]; !ok {\n\t\tr.Log.Info(\"Secret is not a cert-manager issued certificate. Disregarding.\", \"Secret.Name\", secret.GetName(), \"Secret.Namespace\", secret.GetNamespace())\n\t\treturn reconcile.Result{}, nil\n\t}\n\tr.Log.Info(\"Secret is a cert-manager issued certificate. Checking deployments/statefulsets/daemonsets using Secret.\", \"Secret.Name\", secret.GetName(), \"Secret.Namespace\", secret.GetNamespace())\n\n\t// If Secret has been updated, try to find deployments in the same namespace that needs to be bounced.\n\tr.Log.V(2).Info(\"Looking for deployments in namespace using certificate\", \"Secret.Name\", secret.GetName(), \"Secret.Namespace\", secret.GetNamespace())\n\tdeployList := appsv1.DeploymentList{}\n\terr = r.List(context.TODO(), &deployList, &client.ListOptions{Namespace: secret.GetNamespace()})\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error listing deployments\", \"req.Namespace\", secret.GetNamespace())\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// If Secret has been updated, try to find daemonsets in the same namespace that needs to be bounced.\n\tr.Log.V(2).Info(\"Looking for daemonsets in namespace using certificate\", \"Secret.Name\", secret.GetName(), \"Secret.Namespace\", secret.GetNamespace())\n\tdsetList := appsv1.DaemonSetList{}\n\terr = r.List(context.TODO(), &dsetList, &client.ListOptions{Namespace: secret.GetNamespace()})\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error listing daemonsets\", \"req.Namespace\", secret.GetNamespace())\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// If Secret has been updated, try to find statefulsets in the same namespace that needs to be bounced.\n\tr.Log.V(2).Info(\"Looking for statefulset in namespace using certificate\", \"Secret.Name\", secret.GetName(), \"Secret.Namespace\", secret.GetNamespace())\n\tstsList := appsv1.StatefulSetList{}\n\terr = r.List(context.TODO(), &stsList, 
&client.ListOptions{Namespace: secret.GetNamespace()})\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error listing statefulsets\", \"req.Namespace\", secret.GetNamespace())\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Since we are not sending a requeue if a refresh fails, we log it instead.\n\trefreshErrors := make([]refreshErrorData, 0)\n\tvar updateFailed bool\n\n\t// Check deployments in the relevant namespace\n\tfor _, deploy := range deployList.Items {\n\t\tr.Log.Info(\"Checking deployment for usage of certificate found in secret\", \"Secret\", secret.GetName(), \"Deployment\", deploy.GetName(), \"Namespace\", secret.GetNamespace()) //debug make higher verbosity level\n\t\tupdatedAt := time.Now().Format(\"2006-1-2.1504\")\n\t\tif hasAllowRestartAnnotation(deploy.ObjectMeta) && usesSecret(secret, deploy.Spec.Template.Spec) && outdatedSecretInUse(secret.GetName(), secret.GetResourceVersion(), deploy.GetObjectMeta().GetAnnotations()) {\n\t\t\tr.Event(&deploy, corev1.EventTypeNormal, refresh.reason, refresh.message)\n\t\t\tr.Log.Info(\"Deployment makes use of secret and has opted-in\", \"Secret\", secret.GetName(), \"Deployment\", deploy.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\tupdatedDeploy := deploy.DeepCopy()\n\t\t\tupdatedDeploy.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdatedDeploy.Spec.Template.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdateSecretRevisionAnnotation(secret.GetName(), secret.GetResourceVersion(), updatedDeploy.GetAnnotations())\n\t\t\tr.Log.Info(\"Initiating refresh\", \"Secret\", secret.GetName(), \"Deployment\", deploy.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\terr := r.Update(context.TODO(), updatedDeploy)\n\t\t\tif err != nil {\n\t\t\t\tr.Event(&deploy, corev1.EventTypeWarning, refreshFailure.reason, refreshFailure.message)\n\t\t\t\tr.Log.Error(err, \"Unable to restart deployment.\", \"Deployment.Name\", deploy.GetName())\n\t\t\t\trefreshErrors = append(refreshErrors, 
refreshErrorData{kind: deploy.Kind, name: deploy.GetName(), namespace: deploy.GetNamespace(), errorMsg: err.Error()})\n\t\t\t\tupdateFailed = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check daemonsets in the relevant namespace\n\tfor _, dset := range dsetList.Items {\n\t\tr.Log.Info(\"Checking Daemonset for usage of certificate found in secret\", \"Secret\", secret.GetName(), \"Daemonset\", dset.GetName(), \"Namespace\", secret.GetNamespace()) //debug make higher verbosity level\n\t\tupdatedAt := time.Now().Format(\"2006-1-2.1504\")\n\t\tif hasAllowRestartAnnotation(dset.ObjectMeta) && usesSecret(secret, dset.Spec.Template.Spec) && outdatedSecretInUse(secret.GetName(), secret.GetResourceVersion(), dset.GetObjectMeta().GetAnnotations()) {\n\t\t\tr.Log.Info(\"Daemonset makes use of secret and has opted-in\", \"Secret\", secret.GetName(), \"Daemonset\", dset.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\tupdatedDset := dset.DeepCopy()\n\t\t\tupdatedDset.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdatedDset.Spec.Template.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdateSecretRevisionAnnotation(secret.GetName(), secret.GetResourceVersion(), updatedDset.GetAnnotations())\n\t\t\tr.Log.Info(\"Initiating refresh\", \"Secret\", secret.GetName(), \"Daemonset\", dset.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\terr := r.Update(context.TODO(), updatedDset)\n\t\t\tif err != nil {\n\t\t\t\tr.Log.Error(err, \"Unable to restart Daemonset.\", \"Daemonset.Name\", dset.GetName())\n\t\t\t\tr.Event(&dset, corev1.EventTypeWarning, refreshFailure.reason, refreshFailure.message)\n\t\t\t\trefreshErrors = append(refreshErrors, refreshErrorData{kind: dset.Kind, name: dset.GetName(), namespace: dset.GetNamespace(), errorMsg: err.Error()})\n\t\t\t\tupdateFailed = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check statefulsets in the relevant namespace\n\tfor _, sts := range stsList.Items {\n\t\tr.Log.Info(\"Checking Statefulset for usage of certificate found in 
secret\", \"Secret\", secret.GetName(), \"Statefulset\", sts.GetName(), \"Namespace\", secret.GetNamespace()) //debug make higher verbosity level\n\t\tupdatedAt := time.Now().Format(\"2006-1-2.1504\")\n\t\tif hasAllowRestartAnnotation(sts.ObjectMeta) && usesSecret(secret, sts.Spec.Template.Spec) && outdatedSecretInUse(secret.GetName(), secret.GetResourceVersion(), sts.GetObjectMeta().GetAnnotations()) {\n\t\t\tr.Log.Info(\"Statefulset makes use of secret and has opted-in\", \"Secret\", secret.GetName(), \"Statefulset\", sts.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\tupdatedsts := sts.DeepCopy()\n\t\t\tupdatedsts.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdatedsts.Spec.Template.ObjectMeta.Labels[timeRestartedLabel] = updatedAt\n\t\t\tupdateSecretRevisionAnnotation(secret.GetName(), secret.GetResourceVersion(), updatedsts.GetAnnotations())\n\t\t\tr.Log.Info(\"Initiating refresh\", \"Secret\", secret.GetName(), \"Statefulset\", sts.GetName(), \"Namespace\", secret.GetNamespace())\n\t\t\terr := r.Update(context.TODO(), updatedsts)\n\t\t\tif err != nil {\n\t\t\t\tr.Log.Error(err, \"Unable to restart Statefulset.\", \"Statefulset.Name\", sts.GetName())\n\t\t\t\tr.Event(&sts, corev1.EventTypeWarning, refreshFailure.reason, refreshFailure.message)\n\t\t\t\trefreshErrors = append(refreshErrors, refreshErrorData{kind: sts.Kind, name: sts.GetName(), namespace: sts.GetNamespace(), errorMsg: err.Error()})\n\t\t\t\tupdateFailed = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// If updating anything failed\n\t// TODO(komish): This requeues if _any_ of the refreshes fail, but this would cause a successful deployment to\n\t// be restarted continuously. 
Need to requeue but with only the failed deployment.\n\tif updateFailed {\n\t\tr.Log.Info(\"Resource(s) that opted-in to refreshes have failed to refresh but the request will not be requeued\",\n\t\t\t\"Secret.Name\", secret.GetName(),\n\t\t\t\"Secret.Namespace\", secret.GetNamespace(),\n\t\t\t\"Error Message\", refreshErrors)\n\t\t// return reconcile.Result{}, err // don't uncomment, see above.\n\t}\n\n\tr.Log.Info(\"Done Reconciling CertManager TLS Certificates\")\n\n\treturn ctrl.Result{}, nil\n}", "func (c *controller) Reconcile(request reconciler.Request) (reconciler.Result, error) {\n\tklog.V(4).Infof(\"reconcile node %s for cluster %s\", request.Name, request.ClusterName)\n\tvExists := true\n\tvNodeObj, err := c.MultiClusterController.Get(request.ClusterName, request.Namespace, request.Name)\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn reconciler.Result{Requeue: true}, err\n\t\t}\n\t\tvExists = false\n\t}\n\n\tif vExists {\n\t\tvNode := vNodeObj.(*v1.Node)\n\t\tif vNode.Labels[constants.LabelVirtualNode] != \"true\" {\n\t\t\t// We only handle virtual nodes created by syncer\n\t\t\treturn reconciler.Result{}, nil\n\t\t}\n\t\tc.Lock()\n\t\tif _, exist := c.nodeNameToCluster[request.Name]; !exist {\n\t\t\tc.nodeNameToCluster[request.Name] = make(map[string]struct{})\n\t\t}\n\t\tc.nodeNameToCluster[request.Name][request.ClusterName] = struct{}{}\n\t\tc.Unlock()\n\t} else {\n\t\tc.Lock()\n\t\tif _, exists := c.nodeNameToCluster[request.Name]; exists {\n\t\t\tdelete(c.nodeNameToCluster[request.Name], request.ClusterName)\n\t\t}\n\t\tc.Unlock()\n\n\t}\n\treturn reconciler.Result{}, nil\n}", "func (r *StatusReconciler) Reconcile() error {\n\n\tlogData := map[string]interface{}{\n\t\t\"PipelineDeployment.Namespace\": r.pipelineDeployment.Spec.DeploymentNamespace,\n\t\t\"PipelineDeployment.Name\": r.pipelineDeployment.Spec.DeploymentName,\n\t}\n\treqLogger := log.WithValues(\"data\", logData)\n\n\tpipelineDeploymentStatus, err := 
r.getStatus(r.pipelineDeployment, r.request)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to get PipelineDeployment status.\")\n\t\treturn err\n\t}\n\n\tnotifMessages := []*algov1beta1.NotifMessage{}\n\n\tif r.pipelineDeployment.Status.Status != pipelineDeploymentStatus.Status {\n\t\tr.pipelineDeployment.Status.Status = pipelineDeploymentStatus.Status\n\n\t\tloglevel := v1beta1.LOGLEVELS_INFO\n\t\tnotifType := v1beta1.NOTIFTYPES_PIPELINE_DEPLOYMENT_STATUS\n\t\tnotifMessage := &algov1beta1.NotifMessage{\n\t\t\tMessageTimestamp: time.Now(),\n\t\t\tLevel: &loglevel,\n\t\t\tType: &notifType,\n\t\t\tDeploymentStatusMessage: &algov1beta1.DeploymentStatusMessage{\n\t\t\t\tDeploymentOwner: r.pipelineDeployment.Spec.DeploymentOwner,\n\t\t\t\tDeploymentName: r.pipelineDeployment.Spec.DeploymentName,\n\t\t\t\tStatus: r.pipelineDeployment.Status.Status,\n\t\t\t},\n\t\t}\n\n\t\tnotifMessages = append(notifMessages, notifMessage)\n\n\t}\n\n\t// Iterate the existing deployment statuses and update if changed\n\tfor _, deplStatus := range r.pipelineDeployment.Status.ComponentStatuses {\n\t\tfor _, newDeplStatus := range pipelineDeploymentStatus.ComponentStatuses {\n\t\t\tif newDeplStatus.DeploymentName == deplStatus.DeploymentName {\n\n\t\t\t\tif !cmp.Equal(deplStatus, newDeplStatus) {\n\t\t\t\t\tdeplStatus = newDeplStatus\n\t\t\t\t\t//reqLogger.Info(\"Deployment Status Differences\", \"Differences\", diff)\n\t\t\t\t\tloglevel := v1beta1.LOGLEVELS_INFO\n\t\t\t\t\tnotifType := v1beta1.NOTIFTYPES_PIPELINE_DEPLOYMENT\n\t\t\t\t\tnotifMessage := &algov1beta1.NotifMessage{\n\t\t\t\t\t\tMessageTimestamp: time.Now(),\n\t\t\t\t\t\tLevel: &loglevel,\n\t\t\t\t\t\tType: &notifType,\n\t\t\t\t\t\tDeploymentStatusMessage: &algov1beta1.DeploymentStatusMessage{\n\t\t\t\t\t\t\tDeploymentOwner: r.pipelineDeployment.Spec.DeploymentOwner,\n\t\t\t\t\t\t\tDeploymentName: r.pipelineDeployment.Spec.DeploymentName,\n\t\t\t\t\t\t\tStatus: 
r.pipelineDeployment.Status.Status,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tnotifMessages = append(notifMessages, notifMessage)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\n\t// Iterate the existing pod statuses and update if changed\n\tfor _, podStatus := range r.pipelineDeployment.Status.PodStatuses {\n\t\tfor _, newPodStatus := range pipelineDeploymentStatus.PodStatuses {\n\t\t\tif newPodStatus.PodName == podStatus.PodName {\n\n\t\t\t\tif !cmp.Equal(podStatus, newPodStatus) {\n\t\t\t\t\tpodStatus = newPodStatus\n\n\t\t\t\t\t// reqLogger.Info(\"Deployment Pod Status Differences\", \"Differences\", diff)\n\t\t\t\t\tloglevel := v1beta1.LOGLEVELS_INFO\n\t\t\t\t\tnotifType := v1beta1.NOTIFTYPES_PIPELINE_DEPLOYMENT_POD\n\t\t\t\t\tnotifMessage := &algov1beta1.NotifMessage{\n\t\t\t\t\t\tMessageTimestamp: time.Now(),\n\t\t\t\t\t\tLevel: &loglevel,\n\t\t\t\t\t\tType: &notifType,\n\t\t\t\t\t\tDeploymentStatusMessage: &algov1beta1.DeploymentStatusMessage{\n\t\t\t\t\t\t\tDeploymentOwner: r.pipelineDeployment.Spec.DeploymentOwner,\n\t\t\t\t\t\t\tDeploymentName: r.pipelineDeployment.Spec.DeploymentName,\n\t\t\t\t\t\t\tStatus: r.pipelineDeployment.Status.Status,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tnotifMessages = append(notifMessages, notifMessage)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}\n\n\tif !cmp.Equal(r.pipelineDeployment.Status, *pipelineDeploymentStatus) {\n\t\t// reqLogger.Info(\"Pipeline Deployment Status Differences\", \"Differences\", diff)\n\n\t\t//r.pipelineDeployment.Status = *pipelineDeploymentStatus\n\t\tpatch := client.MergeFrom(r.pipelineDeployment.DeepCopy())\n\t\tr.pipelineDeployment.Status = *pipelineDeploymentStatus\n\t\terr := r.client.Patch(r.context, r.pipelineDeployment, patch)\n\n\t\t//err = r.client.Status().Update(r.context, r.pipelineDeployment)\n\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to patch PipelineDeployment status.\")\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// Send all notifications\n\tif len(notifMessages) > 0 
{\n\t\tutils.NotifyAll(notifMessages)\n\t}\n\n\treturn nil\n\n}", "func (r *ChartGroupReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\treclog := acglog.WithValues(\"namespace\", request.Namespace, \"acg\", request.Name)\n\treclog.Info(\"Reconciling\")\n\n\tinstance := &av1.ArmadaChartGroup{}\n\tinstance.SetNamespace(request.Namespace)\n\tinstance.SetName(request.Name)\n\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\n\tif apierrors.IsNotFound(err) {\n\t\t// We are working asynchronously. By the time we receive the event,\n\t\t// the object is already gone\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tif err != nil {\n\t\treclog.Error(err, \"Failed to lookup ArmadaChartGroup\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tinstance.Init()\n\tmgr := r.managerFactory.NewArmadaChartGroupManager(instance)\n\treclog = reclog.WithValues(\"acg\", mgr.ResourceName())\n\n\tvar shouldRequeue bool\n\tif shouldRequeue, err = r.updateFinalizers(instance); shouldRequeue {\n\t\t// Need to requeue because finalizer update does not change metadata.generation\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\tif err := r.ensureSynced(mgr, instance); err != nil {\n\t\tif !instance.IsDeleted() {\n\t\t\t// TODO(jeb): Changed the behavior to stop only if we are not\n\t\t\t// in a delete phase.\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tif instance.IsDeleted() {\n\t\tif shouldRequeue, err = r.deleteArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\t// Need to requeue because finalizer update does not change metadata.generation\n\t\t\treturn reconcile.Result{Requeue: true}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif instance.IsTargetStateUninitialized() {\n\t\treclog.Info(\"TargetState uninitialized; skipping\")\n\t\terr = r.updateResource(instance)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\terr = r.client.Status().Update(context.TODO(), 
instance)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\thrc := av1.HelmResourceCondition{\n\t\tType: av1.ConditionInitialized,\n\t\tStatus: av1.ConditionStatusTrue,\n\t}\n\tinstance.Status.SetCondition(hrc, instance.Spec.TargetState)\n\n\tswitch {\n\tcase !mgr.IsInstalled():\n\t\tif shouldRequeue, err = r.installArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\t// we updated the ownership of the charts. Let's wake up\n\t\t\t// one more time later to enable the first chart.\n\t\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\tcase mgr.IsUpdateRequired():\n\t\tif shouldRequeue, err = r.updateArmadaChartGroup(mgr, instance); shouldRequeue {\n\t\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, err\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tforcedRequeue, err := r.reconcileArmadaChartGroup(mgr, instance)\n\tif err != nil {\n\t\t// Let's don't force a requeue.\n\t\treturn reconcile.Result{}, err\n\t}\n\tif forcedRequeue {\n\t\t// We have been waked up out of order ?\n\t\treturn reconcile.Result{RequeueAfter: r.reconcilePeriod}, nil\n\t}\n\n\treclog.Info(\"Reconciled ChartGroup\")\n\tif err = r.updateResourceStatus(instance); err != nil {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\treturn reconcile.Result{}, nil\n}", "func (r *ManagedCAObserver) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tcontrollerLog := r.Log.WithValues(\"configmap\", req.NamespacedName)\n\tctx := context.Background()\n\n\tif req.Namespace != ManagedConfigNamespace {\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tvar err error\n\tswitch req.Name {\n\tcase RouterCAConfigMap:\n\t\terr = r.syncConfigMap(ctx, controllerLog, req.NamespacedName, \"ca-bundle.crt\", \"router-ca\")\n\tcase ServiceCAConfigMap:\n\t\terr = r.syncConfigMap(ctx, controllerLog, req.NamespacedName, \"ca-bundle.crt\", \"service-ca\")\n\t}\n\n\treturn ctrl.Result{}, err\n}", "func (r *ReconcileConfigMap) Reconcile(request 
reconcile.Request) (reconcile.Result, error) {\n\tif request.Namespace != \"openshift-file-integrity\" || request.Name != \"aide-conf\" {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling ConfigMap\")\n\n\t// Fetch the ConfigMap instance\n\tinstance := &corev1.ConfigMap{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// only continue if the configmap received an update through the user-provided config\n\tif _, ok := instance.Annotations[\"fileintegrity.openshift.io/updated\"]; !ok {\n\t\treqLogger.Info(\"DBG: updated annotation not found - removing from queue\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// handling the re-init daemonSets: these are created by the FileIntegrity controller when the AIDE config has been\n\t// updated by the user. They touch a file on the node host and then sleep. The file signals to the AIDE pod\n\t// daemonSets that they need to back up and re-initialize the AIDE database. 
So once we've confirmed that the\n\t// re-init daemonSets have started running we can delete them and continue with the rollout of the AIDE pods.\n\treinitDS := &appsv1.DaemonSet{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: common.ReinitDaemonSetName, Namespace: common.FileIntegrityNamespace}, reinitDS)\n\tif err != nil {\n\t\t// includes notFound, we will requeue here at least once.\n\t\treqLogger.Error(err, \"error getting reinit daemonSet\")\n\t\treturn reconcile.Result{}, err\n\t}\n\t// not ready, requeue\n\tif !daemonSetIsReady(reinitDS) {\n\t\treqLogger.Info(\"DBG: requeue of DS\")\n\t\treturn reconcile.Result{RequeueAfter: time.Duration(5 * time.Second)}, nil // guessing on 5 seconds as acceptable requeue rate\n\t}\n\n\treqLogger.Info(\"reinitDaemonSet statuses\", \"Status\", reinitDS.Status)\n\n\t// reinit daemonSet is ready, so we're finished with it\n\tif err := r.client.Delete(context.TODO(), reinitDS); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tds := &appsv1.DaemonSet{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: common.DaemonSetName, Namespace: common.FileIntegrityNamespace}, ds)\n\tif err != nil {\n\t\treqLogger.Error(err, \"error getting daemonSet\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif err := triggerDaemonSetRollout(r.client, ds); err != nil {\n\t\treqLogger.Error(err, \"error triggering daemonSet rollout\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treqLogger.Info(\"DBG: rollout triggered, clearing update annotation\")\n\t// unset update annotation\n\tconf := instance.DeepCopy()\n\tconf.Annotations = nil\n\tif err := r.client.Update(context.TODO(), conf); err != nil {\n\t\treqLogger.Error(err, \"error clearing configMap annotations\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *RuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\n\t_ = r.Log.WithValues(\"rule\", req.NamespacedName)\n\n\tvar 
rule oathkeeperv1alpha1.Rule\n\n\tskipValidation := false\n\n\tif err := r.Get(ctx, req.NamespacedName, &rule); err != nil {\n\t\tif apierrs.IsNotFound(err) {\n\t\t\t// just return here, the finalizers have already run\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif !skipValidation {\n\t\tif err := rule.ValidateWith(r.ValidationConfig); err != nil {\n\t\t\trule.Status.Validation = &oathkeeperv1alpha1.Validation{}\n\t\t\trule.Status.Validation.Valid = boolPtr(false)\n\t\t\trule.Status.Validation.Error = stringPtr(err.Error())\n\t\t\tr.Log.Info(fmt.Sprintf(\"validation error in Rule %s/%s: \\\"%s\\\"\", rule.Namespace, rule.Name, err.Error()))\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\tr.Log.Error(err, \"unable to update Rule status\")\n\t\t\t\t//Invoke requeue directly without logging error with whole stacktrace\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\t// continue, as validation can't be fixed by requeuing request and we still have to update the configmap\n\t\t} else {\n\t\t\t// rule valid - set the status\n\t\t\trule.Status.Validation = &oathkeeperv1alpha1.Validation{}\n\t\t\trule.Status.Validation.Valid = boolPtr(true)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\tr.Log.Error(err, \"unable to update Rule status\")\n\t\t\t\t//Invoke requeue directly without logging error with whole stacktrace\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tvar rulesList oathkeeperv1alpha1.RuleList\n\n\tif rule.Spec.ConfigMapName != nil {\n\t\tif err := r.List(ctx, &rulesList, client.InNamespace(req.NamespacedName.Namespace)); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t} else {\n\t\tif err := r.List(ctx, &rulesList); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif rule.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does 
not have our finalizer,\n\t\t// then lets add the finalizer and update the object. This is equivalent\n\t\t// registering our finalizer.\n\t\tif !containsString(rule.ObjectMeta.Finalizers, FinalizerName) {\n\t\t\trule.ObjectMeta.Finalizers = append(rule.ObjectMeta.Finalizers, FinalizerName)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(rule.ObjectMeta.Finalizers, FinalizerName) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\trulesList = rulesList.FilterOutRule(rule)\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\trule.ObjectMeta.Finalizers = removeString(rule.ObjectMeta.Finalizers, FinalizerName)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar err error\n\tvar oathkeeperRulesJSON []byte\n\n\tif rule.Spec.ConfigMapName != nil {\n\t\tr.Log.Info(fmt.Sprintf(\"Found ConfigMap definition in Rule %s/%s: Writing data to \\\"%s\\\"\", rule.Namespace, rule.Name, *rule.Spec.ConfigMapName))\n\t\toathkeeperRulesJSON, err = rulesList.FilterNotValid().FilterConfigMapName(rule.Spec.ConfigMapName).ToOathkeeperRules()\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t} else {\n\t\toathkeeperRulesJSON, err = rulesList.FilterNotValid().ToOathkeeperRules()\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tif err := r.OperatorMode.CreateOrUpdate(ctx, oathkeeperRulesJSON, &rule); err != nil {\n\t\tr.Log.Error(err, \"unable to process rules Configmap\")\n\t\tos.Exit(1)\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *NestedEtcdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := r.Log.WithValues(\"nestedetcd\", req.NamespacedName)\n\tlog.Info(\"Reconciling NestedEtcd...\")\n\tvar netcd clusterv1.NestedEtcd\n\tif err := r.Get(ctx, req.NamespacedName, &netcd); err 
!= nil {\n\t\treturn ctrl.Result{}, ctrlcli.IgnoreNotFound(err)\n\t}\n\tlog.Info(\"creating NestedEtcd\",\n\t\t\"namespace\", netcd.GetNamespace(),\n\t\t\"name\", netcd.GetName())\n\n\t// check if the ownerreference has been set by the NestedControlPlane controller.\n\towner := getOwner(netcd.ObjectMeta)\n\tif owner == (metav1.OwnerReference{}) {\n\t\t// requeue the request if the owner NestedControlPlane has\n\t\t// not been set yet.\n\t\tlog.Info(\"the owner has not been set yet, will retry later\",\n\t\t\t\"namespace\", netcd.GetNamespace(),\n\t\t\t\"name\", netcd.GetName())\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t}\n\n\tvar netcdSts appsv1.StatefulSet\n\tif err := r.Get(ctx, types.NamespacedName{\n\t\tNamespace: netcd.GetNamespace(),\n\t\tName: netcd.GetName(),\n\t}, &netcdSts); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// as the statefulset is not found, mark the NestedEtcd as unready\n\t\t\tif IsComponentReady(netcd.Status.CommonStatus) {\n\t\t\t\tnetcd.Status.Phase =\n\t\t\t\t\tstring(clusterv1.Unready)\n\t\t\t\tlog.V(5).Info(\"The corresponding statefulset is not found, \" +\n\t\t\t\t\t\"will mark the NestedEtcd as unready\")\n\t\t\t\tif err := r.Status().Update(ctx, &netcd); err != nil {\n\t\t\t\t\tlog.Error(err, \"fail to update the status of the NestedEtcd Object\")\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// the statefulset is not found, create one\n\t\t\tif err := createNestedComponentSts(ctx,\n\t\t\t\tr.Client, netcd.ObjectMeta,\n\t\t\t\tnetcd.Spec.NestedComponentSpec,\n\t\t\t\tclusterv1.Etcd, owner.Name, log); err != nil {\n\t\t\t\tlog.Error(err, \"fail to create NestedEtcd StatefulSet\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tlog.Info(\"successfully create the NestedEtcd StatefulSet\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlog.Error(err, \"fail to get NestedEtcd StatefulSet\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif netcdSts.Status.ReadyReplicas == netcdSts.Status.Replicas 
{\n\t\tlog.Info(\"The NestedEtcd StatefulSet is ready\")\n\t\tif IsComponentReady(netcd.Status.CommonStatus) {\n\t\t\t// As the NestedEtcd StatefulSet is ready, update NestedEtcd status\n\t\t\tip, err := getNestedEtcdSvcClusterIP(ctx, r.Client, netcd)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"fail to get NestedEtcd Service ClusterIP\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tnetcd.Status.Phase = string(clusterv1.Ready)\n\t\t\tnetcd.Status.Addresses = []clusterv1.NestedEtcdAddress{\n\t\t\t\t{\n\t\t\t\t\tIP: ip,\n\t\t\t\t\tPort: 2379,\n\t\t\t\t},\n\t\t\t}\n\t\t\tlog.V(5).Info(\"The corresponding statefulset is ready, \" +\n\t\t\t\t\"will mark the NestedEtcd as ready\")\n\t\t\tif err := r.Status().Update(ctx, &netcd); err != nil {\n\t\t\t\tlog.Error(err, \"fail to update NestedEtcd Object\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tlog.Info(\"Successfully set the NestedEtcd object to ready\",\n\t\t\t\t\"address\", netcd.Status.Addresses)\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// As the NestedEtcd StatefulSet is unready, mark the NestedEtcd as unready\n\t// if its current status is ready\n\tif IsComponentReady(netcd.Status.CommonStatus) {\n\t\tnetcd.Status.Phase = string(clusterv1.Unready)\n\t\tif err := r.Status().Update(ctx, &netcd); err != nil {\n\t\t\tlog.Error(err, \"fail to update NestedEtcd Object\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"Successfully set the NestedEtcd object to unready\")\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) reconcileKubeDNSConfigMap(log logr.Logger, desiredState k8sutil.DesiredState) error {\n\tvar cm apiv1.ConfigMap\n\n\terr := r.Client.Get(context.Background(), types.NamespacedName{\n\t\tName: \"kube-dns\",\n\t\tNamespace: \"kube-system\",\n\t}, &cm)\n\tif k8serrors.IsNotFound(err) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn emperror.Wrap(err, \"could not get kube-dns configmap\")\n\t}\n\n\tstubDomains := make(map[string][]string, 0)\n\tif 
cm.Data[\"stubDomains\"] != \"\" {\n\t\terr = json.Unmarshal([]byte(cm.Data[\"stubDomains\"]), &stubDomains)\n\t\tif err != nil {\n\t\t\treturn emperror.Wrap(err, \"could not unmarshal stubDomains\")\n\t\t}\n\t}\n\n\tif desiredState == k8sutil.DesiredStatePresent {\n\t\tvar svc apiv1.Service\n\t\terr = r.Client.Get(context.Background(), types.NamespacedName{\n\t\t\tName: serviceName,\n\t\t\tNamespace: r.Config.Namespace,\n\t\t}, &svc)\n\t\tif err != nil {\n\t\t\treturn emperror.Wrap(err, \"could not get Istio coreDNS service\")\n\t\t}\n\t\tstubDomains[\"global\"] = []string{svc.Spec.ClusterIP}\n\t} else if desiredState == k8sutil.DesiredStateAbsent {\n\t\t_, ok := stubDomains[\"global\"]\n\t\tif ok {\n\t\t\tdelete(stubDomains, \"global\")\n\t\t}\n\t}\n\n\tstubDomainsData, err := json.Marshal(&stubDomains)\n\tif err != nil {\n\t\treturn emperror.Wrap(err, \"could not marshal updated stub domains\")\n\t}\n\n\tif cm.Data == nil {\n\t\tcm.Data = make(map[string]string, 0)\n\t}\n\tcm.Data[\"stubDomains\"] = string(stubDomainsData)\n\n\terr = r.Client.Update(context.Background(), &cm)\n\tif err != nil {\n\t\treturn emperror.Wrap(err, \"could not update kube-dns configmap\")\n\t}\n\n\treturn nil\n}", "func (cr *ClonerReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {\n\tlog := pkglog.FromContext(ctx)\n\n\t// Get the Deployents or DaemonSet from the cache\n\tdeployment := &appsv1.Deployment{}\n\tdaemonset := &appsv1.DaemonSet{}\n\n\tkind := \"Deployment\"\n\n\terr := cr.Client.Get(ctx, req.NamespacedName, deployment)\n\tif errors.IsNotFound(err) {\n\t\tlog.Info(\"not a Deployment, checking for Daemonset\")\n\n\t\terr = cr.Client.Get(ctx, req.NamespacedName, daemonset)\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"not a Daemonset\")\n\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\n\t\tkind = \"DaemonSet\"\n\t}\n\n\tif err != nil {\n\t\tlog.Error(err, \"could not fetch Deployment or DaemonSet\")\n\n\t\treturn 
reconcile.Result{}, err\n\t}\n\n\tlog.Info(\"reconciling Deployment\", \"deployment name\", deployment.Name)\n\n\tif kind == \"Deployment\" && isDeploymentReady(deployment) && len(deployment.Spec.Template.Spec.ImagePullSecrets) == 0 {\n\t\treturn cr.reconcileDeployment(ctx, deployment)\n\t}\n\n\tif kind == \"DaemonSet\" && isDaemonSetReady(daemonset) && len(daemonset.Spec.Template.Spec.ImagePullSecrets) == 0 {\n\t\treturn cr.reconcileDaemonSet(ctx, daemonset)\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *IstioControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlogger := r.Log.WithValues(\"istiocontrolplane\", req.NamespacedName)\n\n\ticp := &servicemeshv1alpha1.IstioControlPlane{}\n\terr := r.Get(ctx, req.NamespacedName, icp)\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\t// Object not found, return. Created objects are automatically garbage collected.\n\t\t\t// For additional cleanup logic use finalizers.\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif icp.Spec.Version == \"\" {\n\t\terr = errors.New(\"please set spec.version in your istiocontrolplane CR to be reconciled by this operator\")\n\t\tlogger.Error(err, \"\", \"name\", icp.Name, \"namespace\", icp.Namespace)\n\n\t\treturn reconcile.Result{\n\t\t\tRequeue: false,\n\t\t}, nil\n\t}\n\n\tif !IsIstioVersionSupported(icp.Spec.Version) {\n\t\terr = errors.New(\"intended Istio version is unsupported by this version of the operator\")\n\t\tlogger.Error(err, \"\", \"version\", icp.Spec.Version)\n\n\t\treturn reconcile.Result{\n\t\t\tRequeue: false,\n\t\t}, nil\n\t}\n\n\tif requeueNeeded, err := k8sutil.IsReqeueNeededCosNamespaceTermination(ctx, r.GetClient(), icp); requeueNeeded && err == nil {\n\t\tlogger.Info(\"namespace is terminating, requeue needed\")\n\n\t\treturn ctrl.Result{\n\t\t\tRequeueAfter: nsTerminationRequeueDuration,\n\t\t}, 
nil\n\t} else if err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tresult, err := r.reconcile(ctx, icp, logger)\n\tif err != nil {\n\t\tupdateErr := components.UpdateStatus(ctx, r.Client, icp, components.ConvertConfigStateToReconcileStatus(servicemeshv1alpha1.ConfigState_ReconcileFailed), err.Error())\n\t\tif updateErr != nil {\n\t\t\tlogger.Error(updateErr, \"failed to update state\")\n\n\t\t\treturn result, errors.WithStack(err)\n\t\t}\n\n\t\tif result.Requeue {\n\t\t\treturn result, nil\n\t\t}\n\n\t\treturn result, err\n\t}\n\n\tupdateErr := components.UpdateStatus(ctx, r.Client, icp, components.ConvertConfigStateToReconcileStatus(servicemeshv1alpha1.ConfigState_Available), \"\")\n\tif updateErr != nil && !k8serrors.IsNotFound(updateErr) {\n\t\tlogger.Error(updateErr, \"failed to update state\")\n\n\t\treturn result, errors.WithStack(err)\n\t}\n\n\terr = util.RemoveFinalizer(ctx, r.Client, icp, istioControlPlaneFinalizerID, true)\n\tif err != nil {\n\t\treturn result, errors.WithStack(err)\n\t}\n\n\treturn result, nil\n}", "func (r *Reconciler) reconcile(ctx context.Context, channel *kafkav1alpha1.KafkaChannel) error {\n\n\t// NOTE - The sequential order of reconciliation must be \"Topic\" then \"Channel / Dispatcher\" in order for the\n\t// EventHub Cache to know the dynamically determined EventHub Namespace / Kafka Secret selected for the topic.\n\n\t// Reconcile The KafkaChannel's Kafka Topic\n\terr := r.reconcileTopic(ctx, channel)\n\tif err != nil {\n\t\treturn fmt.Errorf(constants.ReconciliationFailedError)\n\t}\n\n\t// Reconcile The KafkaChannel's Channel & Dispatcher Deployment/Service\n\tchannelError := r.reconcileChannel(channel)\n\tdispatcherError := r.reconcileDispatcher(channel)\n\tif channelError != nil || dispatcherError != nil {\n\t\treturn fmt.Errorf(constants.ReconciliationFailedError)\n\t}\n\n\t// Return Success\n\treturn nil\n}", "func (bmc *Controller) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tll := 
bmc.log.WithFields(logrus.Fields{\n\t\t\"method\": \"Reconcile\",\n\t\t\"name\": req.Name,\n\t})\n\n\tvar err error\n\t// if name in request doesn't start with namePrefix controller tries to read k8s node object at first\n\t// however if it get NotFound error it tries to read Node object as well\n\tif !strings.HasPrefix(req.Name, namePrefix) {\n\t\tk8sNode := new(coreV1.Node)\n\t\terr = bmc.k8sClient.ReadCR(context.Background(), req.Name, \"\", k8sNode)\n\t\tswitch {\n\t\tcase err == nil:\n\t\t\tll.Infof(\"Reconcile k8s node %s\", k8sNode.Name)\n\t\t\treturn bmc.reconcileForK8sNode(k8sNode)\n\t\tcase !k8sError.IsNotFound(err):\n\t\t\tll.Errorf(\"Unable to read node object: %v\", err)\n\t\t\treturn ctrl.Result{Requeue: true}, err\n\t\t}\n\t}\n\n\t// try to read Node\n\tbmNode := new(nodecrd.Node)\n\terr = bmc.k8sClient.ReadCR(context.Background(), req.Name, \"\", bmNode)\n\tswitch {\n\tcase err == nil:\n\t\tll.Infof(\"Reconcile Node %s\", bmNode.Name)\n\t\treturn bmc.reconcileForCSIBMNode(bmNode)\n\tcase !k8sError.IsNotFound(err):\n\t\tll.Errorf(\"Unable to read Node object: %v\", err)\n\t\treturn ctrl.Result{Requeue: true}, err\n\t}\n\n\tll.Warnf(\"unable to detect for which object (%s) that reconcile is. 
The object may have been deleted\", req.String())\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {\n\tlistenersToNotify := []nodeNetworkConfigListener{}\n\tnnc, err := r.nnccli.Get(ctx, req.NamespacedName)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlogger.Printf(\"[cns-rc] CRD not found, ignoring %v\", err)\n\t\t\treturn reconcile.Result{}, errors.Wrapf(client.IgnoreNotFound(err), \"NodeNetworkConfig %v not found\", req.NamespacedName)\n\t\t}\n\t\tlogger.Errorf(\"[cns-rc] Error retrieving CRD from cache : %v\", err)\n\t\treturn reconcile.Result{}, errors.Wrapf(err, \"failed to get NodeNetworkConfig %v\", req.NamespacedName)\n\t}\n\n\tlogger.Printf(\"[cns-rc] CRD Spec: %+v\", nnc.Spec)\n\n\tipAssignments := 0\n\n\t// for each NC, parse it in to a CreateNCRequest and forward it to the appropriate Listener\n\tfor i := range nnc.Status.NetworkContainers {\n\t\t// check if this NC matches the Node IP if we have one to check against\n\t\tif r.nodeIP != \"\" {\n\t\t\tif r.nodeIP != nnc.Status.NetworkContainers[i].NodeIP {\n\t\t\t\t// skip this NC since it was created for a different node\n\t\t\t\tlogger.Printf(\"[cns-rc] skipping network container %s found in NNC because node IP doesn't match, got %s, expected %s\",\n\t\t\t\t\tnnc.Status.NetworkContainers[i].ID, nnc.Status.NetworkContainers[i].NodeIP, r.nodeIP)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tvar req *cns.CreateNetworkContainerRequest\n\t\tvar err error\n\t\tswitch nnc.Status.NetworkContainers[i].AssignmentMode { //nolint:exhaustive // skipping dynamic case\n\t\t// For Overlay and Vnet Scale Scenarios\n\t\tcase v1alpha.Static:\n\t\t\treq, err = CreateNCRequestFromStaticNC(nnc.Status.NetworkContainers[i])\n\t\t// For Pod Subnet scenario\n\t\tdefault: // For backward compatibility, default will be treated as Dynamic too.\n\t\t\treq, err = CreateNCRequestFromDynamicNC(nnc.Status.NetworkContainers[i])\n\t\t\t// 
in dynamic, we will also push this NNC to the IPAM Pool Monitor when we're done.\n\t\t\tlistenersToNotify = append(listenersToNotify, r.ipampoolmonitorcli)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"[cns-rc] failed to generate CreateNCRequest from NC: %v, assignmentMode %s\", err,\n\t\t\t\tnnc.Status.NetworkContainers[i].AssignmentMode)\n\t\t\treturn reconcile.Result{}, errors.Wrapf(err, \"failed to generate CreateNCRequest from NC \"+\n\t\t\t\t\"assignmentMode %s\", nnc.Status.NetworkContainers[i].AssignmentMode)\n\t\t}\n\n\t\tresponseCode := r.cnscli.CreateOrUpdateNetworkContainerInternal(req)\n\t\tif err := restserver.ResponseCodeToError(responseCode); err != nil {\n\t\t\tlogger.Errorf(\"[cns-rc] Error creating or updating NC in reconcile: %v\", err)\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"failed to create or update network container\")\n\t\t}\n\t\tipAssignments += len(req.SecondaryIPConfigs)\n\t}\n\n\t// record assigned IPs metric\n\tallocatedIPs.Set(float64(ipAssignments))\n\n\t// push the NNC to the registered NNC listeners.\n\tfor _, l := range listenersToNotify {\n\t\tif err := l.Update(nnc); err != nil {\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"nnc listener return error during update\")\n\t\t}\n\t}\n\n\t// we have received and pushed an NNC update, we are \"Started\"\n\tr.once.Do(func() {\n\t\tclose(r.started)\n\t\tlogger.Printf(\"[cns-rc] CNS NNC Reconciler Started\")\n\t})\n\treturn reconcile.Result{}, nil\n}", "func (r *BalancingRuleReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"mitigationrule\", req.NamespacedName)\n\n\tlog.Info(\"start Reconcile\")\n\tbalancingRule := &api.BalancingRule{}\n\tif err := r.Get(ctx, req.NamespacedName, balancingRule); err != nil {\n\t\tlog.Error(err, \"unable to get mitigation rule\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// debug\n\tlog.Info(\"succeed to get mitigation rule\", \"mitigation rule\", 
balancingRule)\n\n\tvsList, err := r.IstioClientset.NetworkingV1alpha3().VirtualServices(virtualServiceNamespace).List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get VirtualService list\")\n\t\treturn ctrl.Result{}, err\n\t}\n\tlog.Info(\"succeed to get VirtualService List\", \"VirtualService\", vsList)\n\trm := make(map[Host]*RoutingRate)\n\tfor i := range vsList.Items {\n\t\titem := vsList.Items[i]\n\t\tfor ori := range item.OwnerReferences {\n\t\t\tif item.OwnerReferences[ori].Kind == \"BalancingRule\" {\n\t\t\t\tinternalHost := item.ObjectMeta.GetLabels()[\"InternalHost\"]\n\t\t\t\texternalHost := item.ObjectMeta.GetLabels()[\"ExternalHost\"]\n\t\t\t\tspec := item.Spec\n\t\t\t\thost := Host(spec.GetHosts()[0])\n\t\t\t\tvar iw int32\n\t\t\t\tvar ew int32\n\t\t\t\tfor _, ri := range spec.GetHttp()[0].GetRoute() {\n\t\t\t\t\tdw := ri.GetWeight()\n\t\t\t\t\tdh := ri.Destination.Host\n\t\t\t\t\tif dh == internalHost {\n\t\t\t\t\t\tiw = dw\n\t\t\t\t\t} else if dh == externalHost {\n\t\t\t\t\t\tew = dw\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trm[host] = &RoutingRate{\n\t\t\t\t\tHost: host,\n\t\t\t\t\tInternalWeight: iw,\n\t\t\t\t\tExternalWeight: ew,\n\t\t\t\t\tVersion: item.ObjectMeta.ResourceVersion,\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tcurrentRR := &RoutingRule{RuleMap: rm}\n\n\troutingRule, err := r.Calculator.Calculate(ctx, r.Log, currentRR, balancingRule.Spec)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to calculate routing rule\")\n\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t}\n\tfor _, v := range currentRR.RuleMap {\n\t\tlog.Info(\"current\", \"routing rule\", v)\n\t}\n\tfor _, v := range routingRule.RuleMap {\n\t\tlog.Info(\"latest\", \"routing rule\", v)\n\t}\n\tlog.Info(\"succeed to get routing rule\", \"routing rule\", routingRule)\n\n\t// check and renew authorization token to CloudRun before expired\n\tear := balancingRule.Spec.ExternalAuthorizationRef\n\tsn := 
balancingRule.Spec.SecretNamespace\n\tauthorization, needUpsert, err := r.getAuthorizationToken(sn, ear.Name, ear.Key)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get authorization token from secret\", \"namespace\", sn, \"name\", ear.Name, \"key\", ear.Key)\n\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t}\n\tauthToken := authorization.getValue()\n\tauthTokenVersion := authorization.getVersion()\n\tif needUpsert {\n\t\tserviceURL := fmt.Sprintf(\"https://%s\", balancingRule.Spec.ExternalHost)\n\t\ttokenURL := fmt.Sprintf(\"/instance/service-accounts/default/identity?audience=%s\", serviceURL)\n\t\tauthToken, err = metadata.Get(tokenURL)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"unable to get metadata\", \"tokenURL\", tokenURL)\n\t\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t\t}\n\t\terr = r.upsertSecret(ctx, balancingRule, sn, ear.Name, authorization.getVersion(), ear.Key, authToken)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"unable to upsert secret\", \"namespace\", sn, \"name\", ear.Name, \"version\", authTokenVersion, \"key\", ear.Key)\n\t\t\t// want to continue processing when spike occur so not return\n\t\t}\n\t\tlog.Info(\"succeed to upsert secret\", \"namespace\", sn, \"name\", ear.Name, \"key\", ear.Key)\n\t}\n\n\tif currentRR.Equal(routingRule) {\n\t\treturn ctrl.Result{RequeueAfter: 90 * time.Second}, nil\n\t}\n\n\toakr := balancingRule.Spec.OptionalAuthorization.KeyRef\n\toaKey, err := r.getSecretValue(sn, oakr.Name, oakr.Key)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get secret\")\n\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t}\n\toavr := balancingRule.Spec.OptionalAuthorization.ValueRef\n\toaValue, err := r.getSecretValue(sn, oavr.Name, oavr.Key)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get secret\")\n\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t}\n\thihkr := balancingRule.Spec.HostInfoHeaderKeyRef\n\thostInfoHeaderKey, err := r.getSecretValue(sn, 
hihkr.Name, hihkr.Key)\n\tif err != nil {\n\t\tlog.Error(err, \"unable to get secret\")\n\t\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, nil\n\t}\n\terr = r.apply(balancingRule, currentRR, routingRule, hostInfoHeaderKey.value, balancingRule.Spec.GatewayName, authorization.getValue(), oaKey.value, oaValue.value)\n\n\t// TODO make RequeueAfter to be able change per loop\n\treturn ctrl.Result{RequeueAfter: reconcilePeriod}, err\n}", "func (r *ReconcileDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Deployment\")\n\tkey := request.Namespace + \"/\" + request.Name\n\tstartTime := time.Now()\n\tklog.V(4).Infof(\"Started syncing deployment %q (%v)\", key, startTime)\n\tdefer func() {\n\t\tklog.V(4).Infof(\"Finished syncing deployment %q (%v)\", key, time.Since(startTime))\n\t}()\n\n\t// Fetch the Deployment instance\n\tdeployment := &ketiv1.Deployment{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, deployment)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(2).Infof(\"Deployment %v has been deleted\", key)\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\tr.initDeployment(deployment)\n\td := deployment.DeepCopy()\n\teverything := metav1.LabelSelector{}\n\tif reflect.DeepEqual(d.Spec.Selector, &everything) {\n\t\tif d.Status.ObservedGeneration < d.Generation {\n\t\t\td.Status.ObservedGeneration = d.Generation\n\t\t\tdep, err := r.KetiClient.Deployments(d.Namespace).UpdateStatus(d)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorln(err)\n\t\t\t}\n\t\t\tklog.Infoln(dep.Name, \"Update State Complete\")\n\t\t}\n\t\treturn reconcile.Result{}, nil\n\t}\n\trsList, err := r.getReplicaSetsForDeployment(d)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\t// List all Pods owned by this Deployment, grouped by their ReplicaSet.\n\t// Current uses of the podMap are:\n\t//\n\t// * check if a Pod is labeled correctly with the pod-template-hash label.\n\t// * check that no old Pods are running in the middle of Recreate Deployments.\n\tpodMap, err := r.getPodMapForDeployment(d, rsList)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif d.DeletionTimestamp != nil {\n\t\treturn reconcile.Result{}, r.syncStatusOnly(d, rsList)\n\t}\n\n\t// Update deployment conditions with an Unknown condition when pausing/resuming\n\t// a deployment. 
In this way, we can be sure that we won't timeout when a user\n\t// resumes a Deployment with a set progressDeadlineSeconds.\n\tif err = r.checkPausedConditions(d); err != nil {\n\t\treturn reconcile.Result{},err\n\t}\n\n\tif d.Spec.Paused {\n\t\treturn reconcile.Result{}, r.sync(d, rsList)\n\t}\n\n\t// rollback is not re-entrant in case the underlying replica sets are updated with a new\n\t// revision so we should ensure that we won't proceed to update replica sets until we\n\t// make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.\n\tif getRollbackTo(d) != nil {\n\t\treturn reconcile.Result{}, r.rollback(d, rsList)\n\t}\n\n\tscalingEvent, err := r.isScalingEvent(d, rsList)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tif scalingEvent {\n\t\treturn reconcile.Result{}, r.sync(d, rsList)\n\t}\n\n\tswitch d.Spec.Strategy.Type {\n\tcase ketiv1.RecreateDeploymentStrategyType:\n\t\treturn reconcile.Result{}, r.rolloutRecreate(d, rsList, podMap)\n\tcase ketiv1.RollingUpdateDeploymentStrategyType:\n\t\treturn reconcile.Result{}, r.rolloutRolling(d, rsList)\n\t}\n\treturn reconcile.Result{}, fmt.Errorf(\"unexpected deployment strategy type: %s\", d.Spec.Strategy.Type)\n}", "func (w *worker) reconcileConfigMap(\n\tchi *chop.ClickHouseInstallation,\n\tconfigMap *core.ConfigMap,\n\tupdate bool,\n) error {\n\tw.a.V(2).M(chi).S().P()\n\tdefer w.a.V(2).M(chi).E().P()\n\n\t// Check whether this object already exists in k8s\n\tcurConfigMap, err := w.c.getConfigMap(&configMap.ObjectMeta, false)\n\n\tif curConfigMap != nil {\n\t\t// We have ConfigMap - try to update it\n\t\tif !update {\n\t\t\treturn nil\n\t\t}\n\t\terr = w.updateConfigMap(chi, configMap)\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\t// ConfigMap not found - even during Update process - try to create it\n\t\terr = w.createConfigMap(chi, configMap)\n\t}\n\n\tif err != nil {\n\t\tw.a.WithEvent(chi, eventActionReconcile, 
eventReasonReconcileFailed).\n\t\t\tWithStatusAction(chi).\n\t\t\tWithStatusError(chi).\n\t\t\tM(chi).A().\n\t\t\tError(\"FAILED to reconcile ConfigMap: %s CHI: %s \", configMap.Name, chi.Name)\n\t}\n\n\treturn err\n}", "func (gr *Reconciler) ReconcileCR(namespacedname types.NamespacedName) (reconcile.Result, error) {\n\tvar p time.Duration\n\tperiod := DefaultReconcilePeriod\n\texpected := &resource.Bag{}\n\trsrc := gr.CR.Handle.(runtime.Object).DeepCopyObject().(cr.Handle)\n\tname := reflect.TypeOf(rsrc).String() + \"/\" + namespacedname.String()\n\trm := gr.RsrcMgr.Get(\"k8s\")\n\terr := k8s.Get(rm, namespacedname, rsrc.(runtime.Object))\n\tcrhandle := cr.CustomResource{Handle: rsrc}\n\tif err != nil {\n\t\tif apierror.IsNotFound(err) {\n\t\t\turt.HandleError(fmt.Errorf(\"not found %s. %s\", name, err.Error()))\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{RequeueAfter: CRGetFailureReconcilePeriod}, err\n\t}\n\to := rsrc.(metav1.Object)\n\tlog.Printf(\"%s Validating spec\\n\", name)\n\terr = crhandle.Validate()\n\tif err == nil {\n\t\tlog.Printf(\"%s Applying defaults\\n\", name)\n\t\tcrhandle.ApplyDefaults()\n\t\tcomponents := rsrc.Components()\n\t\tfor _, component := range components {\n\t\t\tif o.GetDeletionTimestamp() == nil {\n\t\t\t\tp, err = gr.ReconcileComponent(name, component, expected)\n\t\t\t} else {\n\t\t\t\terr = gr.FinalizeComponent(name, component, expected)\n\t\t\t\tp = FinalizeReconcilePeriod\n\t\t\t}\n\t\t\tif p != 0 && p < period {\n\t\t\t\tperiod = p\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\turt.HandleError(fmt.Errorf(\"error reconciling %s. %s\", name, err.Error()))\n\t\trsrc.HandleError(err)\n\t}\n\terr = rm.Update(resource.Item{Obj: &k8s.Object{Obj: rsrc.(metav1.Object)}})\n\tif err != nil {\n\t\turt.HandleError(fmt.Errorf(\"error updating %s. 
%s\", name, err.Error()))\n\t}\n\treturn reconcile.Result{RequeueAfter: period}, err\n}", "func (r *PrestoClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\t_ = context.Background()\n\t_ = r.Log.WithValues(\"prestocluster\", req.NamespacedName)\n\tvar log = r.Log.WithValues(\n\t\t\"cluster\", req.NamespacedName)\n\tvar controller = PrestoClusterController{\n\t\tk8sClient: r.Client,\n\t\trequest: req,\n\t\tinspected: InspectedClusterState{},\n\t\tcontext: context.Background(),\n\t\tlog: log,\n\t\trecorder: r.Recorder,\n\t}\n\treturn controller.reconcile(req)\n}", "func (r *ServiceGraphReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"servicegraph\", req.NamespacedName)\n\n\t// your logic here\n\tservicegraph := &onlabv2.ServiceGraph{}\n\terr := r.Get(ctx, req.NamespacedName, servicegraph)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\tlog.Info(\"Servicegraph resource not found. 
Ignoring since object must be deleted\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\tlog.Error(err, \"Failed to get ServiceGraph resource\")\n\t\treturn ctrl.Result{}, err\n\t}\n\t// printServiceGraph(servicegraph)\n\n\tfor _, node := range servicegraph.Spec.Nodes {\n\t\t// Check if the deployment for the node already exists, if not create a new one\n\t\tfound := &appsv1.Deployment{}\n\n\t\terr = r.Get(ctx, types.NamespacedName{Name: node.Name, Namespace: \"default\"}, found)\n\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\t//fmt.Printf(\"######### CREATE: %d node type: %T\\n\", i, node)\n\t\t\t// Define a new deployment for the node\n\t\t\tdep := r.deploymentForNode(node, servicegraph)\n\t\t\tlog.Info(\"Creating a new Deployment\", \"Deployment.Namespace\", dep.Namespace, \"Deployment.Name\", dep.Name)\n\n\t\t\terr = r.Create(ctx, dep)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Failed to create new Deployment\", \"Deployment.Namespace\", dep.Namespace, \"Deployment.Name\", dep.Name)\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\t// Deployment created successfully - return and requeue\n\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t} else if err != nil {\n\t\t\tlog.Error(err, \"Failed to get Deployment\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\n\t\t// Ensure the deployment size is the same as the spec\n\t\tsize := int32(node.Replicas)\n\t\tif *found.Spec.Replicas != size {\n\t\t\tfound.Spec.Replicas = &size\n\t\t\terr = r.Update(ctx, found)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Failed to update Deployment\", \"Deployment.Namespace\", found.Namespace, \"Deployment.Name\", found.Name)\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\t// Spec updated - return and requeue\n\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t}\n\n\t\t// // Update/create services\n\n\t\t// foundSvc := &corev1.Service{}\n\n\t\t// err = r.Get(ctx, client.ObjectKey{Namespace: \"default\", Name: 
\"name\"}, foundSvc)\n\n\t\t// if err != nil && errors.IsNotFound(err) {\n\t\t// \tsvc := r.serviceForNode(node, servicegraph)\n\t\t// \tlog.Info(\"Creating a new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n\t\t// \t// Yes. This is not awesome, but works\n\t\t// \t// \t_ = r.Delete(ctx, svc)\n\t\t// \t// err = r.Create(ctx, svc)\n\t\t// \terr = r.Create(ctx, svc)\n\t\t// \tif err != nil {\n\t\t// \t\tlog.Error(err, \"Failed to create new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n\t\t// \t\treturn ctrl.Result{}, err\n\t\t// \t}\n\t\t// \t// Deployment created successfully - return and requeue\n\t\t// \treturn ctrl.Result{Requeue: true}, nil\n\t\t// } else if err != nil {\n\t\t// \tlog.Error(err, \"Failed to get SVC\")\n\t\t// \treturn ctrl.Result{}, err\n\t\t// }\n\t}\n\n\t// Update/create services\n\tfor _, node := range servicegraph.Spec.Nodes {\n\t\tsvc := r.serviceForNode(node, servicegraph)\n\t\t// Yes. This is not awesome, but works\n\t\t//_ = r.Delete(ctx, svc)\n\t\terr = r.Create(ctx, svc)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to create new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\t// Deployment created successfully - return and requeue\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileRokku) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Rokku\", request)\n\treqLogger.Info(\"Starting Rokku reconciling\")\n\tdefer reqLogger.Info(\"Finishing Rokku reconciling\")\n\n\tctx := context.Background()\n\n\tinstance := &rokkuv1alpha1.Rokku{}\n\terr := r.client.Get(ctx, request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treqLogger.Info(\"Rokku resource not found, skipping reconcile\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\n\t\treqLogger.Error(err, 
\"Unable to get Rokku resource\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif err := r.reconcileRokku(ctx, instance); err != nil {\n\t\treqLogger.Error(err, \"Fail to reconcile\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tif err := r.refreshStatus(ctx, instance); err != nil {\n\t\treqLogger.Error(err, \"Fail to refresh status subresource\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *NacosReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := r.Log.WithValues(\"nacos\", req.NamespacedName)\n\n\tlog.Info(\"this.is.req.info\", \"req.Name\", req.Name, \"req.Namespace\", req.Namespace)\n\n\tnacos := &corev1beta1.Nacos{}\n\terr := r.Get(ctx, req.NamespacedName, nacos)\n\tif err != nil {\n\t\tif isNotFound(err) {\n\n\t\t\tcm := &corev1.ConfigMap{}\n\t\t\tcmErr := r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, cm)\n\t\t\tlog.Info(\"select configmap:\", \"name and namespace\", req.Name+\"#\"+req.Namespace, \"error content:\", cmErr, \"foundSvc\", cm)\n\t\t\tif cmErr == nil {\n\t\t\t\tlog.Info(\"Deleteing a new configmap\", \"configmap.Namespace\", cm.Namespace, \"configmap.Name\", cm.Name)\n\t\t\t\tcmErr = r.Delete(ctx, cm)\n\t\t\t\tif cmErr != nil {\n\t\t\t\t\tlog.Error(cmErr, \"Failed to delete old configmap\", \"configmap.Namespace\", cm.Namespace, \"configmap.Name\", cm.Name)\n\t\t\t\t\treturn ctrl.Result{}, cmErr\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfoundSvc := &corev1.Service{}\n\t\t\terr = r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, foundSvc)\n\t\t\tlog.Info(\"select service:\", \"name and namespace\", req.Name+\"#\"+req.Namespace, \"error content:\", err, \"foundSvc\", foundSvc)\n\t\t\tif err == nil {\n\t\t\t\tlog.Info(\"Deleteing a new Service\", \"Service.Namespace\", foundSvc.Namespace, \"Service.Name\", foundSvc.Name)\n\t\t\t\terr = r.Delete(ctx, foundSvc)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err, \"Failed 
to delete old Service\", \"Service.Namespace\", foundSvc.Namespace, \"Service.Name\", foundSvc.Name)\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Info(\"select service result\", \"is not found\", isNotFound(err))\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tfoundNodeSvc := &corev1.Service{}\n\t\t\terr = r.Get(ctx, types.NamespacedName{Name: req.Name + \"node\", Namespace: req.Namespace}, foundNodeSvc)\n\t\t\tlog.Info(\"select service:\", \"name and namespace\", req.Name+\"node#\"+req.Namespace, \"error content:\", err, \"foundSvc\", foundNodeSvc)\n\t\t\tif err == nil {\n\t\t\t\tlog.Info(\"Deleteing a new Service\", \"Service.Namespace\", foundNodeSvc.Namespace, \"Service.Name\", foundNodeSvc.Name)\n\t\t\t\terr = r.Delete(ctx, foundNodeSvc)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err, \"Failed to delete old Service\", \"Service.Namespace\", foundNodeSvc.Namespace, \"Service.Name\", foundNodeSvc.Name)\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Info(\"select service result\", \"is not found\", isNotFound(err))\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tlog.Info(\"nacos resource not found. 
Ignoring since object must be deleted\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlog.Error(err, \"Failed to get nacos\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t//create configmap\n\tcm := &corev1.ConfigMap{}\n\tcmErr := r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, cm)\n\tif cmErr != nil {\n\t\tif isNotFound(cmErr) {\n\t\t\tcmmeta := metav1.ObjectMeta{\n\t\t\t\tName: req.Name,\n\t\t\t\tNamespace: req.Namespace,\n\t\t\t}\n\t\t\tcm.ObjectMeta = cmmeta\n\t\t\tcm.Data = make(map[string]string)\n\t\t\thandleConfigMapFromFileSources(cm, []string{\"/application.properties\", \"/nacos-logback.xml\", \"/cluster.conf\"})\n\n\t\t\tcmErr = r.Create(ctx, cm)\n\t\t\tif cmErr != nil {\n\t\t\t\tlog.Info(\"create a new configmap failed\", \"Configmap.Namespace\", cm.Namespace, \"Configmap.Name\", cm.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tfound := &appsv1.StatefulSet{}\n\n\tif err = r.Get(ctx, types.NamespacedName{Name: nacos.Name, Namespace: nacos.Namespace}, found); err != nil {\n\t\tif isNotFound(err) {\n\t\t\tdep := r.statefulSetForNacos(nacos)\n\t\t\tlog.Info(\"Creating a new StatefulSet1\", \"StatefulSet.Namespace\", dep.Namespace, \"StatefulSet.Name\", dep.Name)\n\t\t\terr = r.Create(ctx, dep)\n\t\t\tfoundSvc := &corev1.Service{}\n\t\t\terr = r.Get(ctx, types.NamespacedName{Name: nacos.Name, Namespace: nacos.Namespace}, foundSvc)\n\t\t\tif err != nil {\n\t\t\t\tif isNotFound(err) {\n\n\t\t\t\t\tsvc := r.serviceForNacos(nacos, \"None\", corev1.ServiceTypeClusterIP)\n\t\t\t\t\tlog.Info(\"Creating a new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n\t\t\t\t\terr = r.Create(ctx, svc)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err, \"Failed to create new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", dep.Name)\n\t\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t\t}\n\n\t\t\t\t\tsvc = r.serviceForNacos(nacos, \"\", corev1.ServiceTypeNodePort)\n\t\t\t\t\tlog.Info(\"Creating a new Service2\", 
\"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n\t\t\t\t\terr = r.Create(ctx, svc)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(err, \"Failed to create new Service2\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", dep.Name)\n\t\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t//TODO check error for already exist\n\t\t\t\tlog.Info(\"create stateful svc failed!\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Info(\"select.found.err\", \"err string \", err.Error(), \"found:\", found)\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t} else {\n\t\t// update sts\n\t\t// if found.Spec.Replicas != &nacos.Spec.Size {\n\t\t// found.Spec.Replicas = &nacos.Spec.Size\n\t\t// servers := \"\"\n\t\t// for i := 0; i < int(*found.Spec.Replicas); i++ {\n\t\t// \t//pod.svc.ns.svc.cluster.local cluster.local--> cluster domain\n\t\t// \tservers += nacos.Name + \"-\" + strconv.Itoa(i) + \".\" + nacos.Name + \".\" + nacos.Namespace + \".svc.cluster.local\" + \":8848 \"\n\t\t// }\n\n\t\t// newEnv := []corev1.EnvVar{}\n\t\t// for _, v := range found.Spec.Template.Spec.Containers[0].Env {\n\t\t// \tif v.Name == \"NACOS_SERVERS\" {\n\t\t// \t\tv.Value = servers\n\t\t// \t}\n\t\t// \tnewEnv = append(newEnv, v)\n\t\t// }\n\t\t// //TODO we need more graceful for update nacos cluster info\n\t\t// found.Spec.Template.Spec.Containers[0].Env = newEnv\n\t\t// log.Info(\"envs now is \", \"envs\", found.Spec.Template.Spec.Containers[0].Env)\n\t\t// err = r.Update(ctx, found)\n\t\t// if err != nil {\n\t\t// \tlog.Error(err, \"update stateful failed!\")\n\t\t// }\n\t\t// }\n\t}\n\tlog.Info(\"select.found\", \"found:\", found)\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {\n\tlog.V(logging.Debug).Info(\"reconciling\", \"kind\", v1alpha1.ResourceGroupKindAPIVersion, \"request\", req)\n\n\tctx, cancel := 
context.WithTimeout(context.Background(), reconcileTimeout)\n\tdefer cancel()\n\n\trg := &v1alpha1.ResourceGroup{}\n\tif err := r.kube.Get(ctx, req.NamespacedName, rg); err != nil {\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{Requeue: false}, nil\n\t\t}\n\t\treturn reconcile.Result{Requeue: false}, errors.Wrapf(err, \"cannot get resource %s\", req.NamespacedName)\n\t}\n\n\tclient, err := r.Connect(ctx, rg)\n\tif err != nil {\n\t\trg.Status.SetFailed(reasonFetchingClient, err.Error())\n\t\treturn reconcile.Result{Requeue: true}, errors.Wrapf(r.kube.Update(ctx, rg), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The resource has been deleted from the API server. Delete from Azure.\n\tif rg.DeletionTimestamp != nil {\n\t\treturn reconcile.Result{Requeue: client.Delete(ctx, rg)}, errors.Wrapf(r.kube.Update(ctx, rg), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The resource is unnamed. Assume it has not been created in Azure.\n\tif rg.Status.Name == \"\" {\n\t\treturn reconcile.Result{Requeue: client.Create(ctx, rg)}, errors.Wrapf(r.kube.Update(ctx, rg), \"cannot update resource %s\", req.NamespacedName)\n\t}\n\n\t// The resource exists in the API server and Azure. 
Sync it.\n\treturn reconcile.Result{Requeue: client.Sync(ctx, rg)}, errors.Wrapf(r.kube.Update(ctx, rg), \"cannot update resource %s\", req.NamespacedName)\n}", "func (bc *FederatedReplicaSetPlacementController) Reconcile(k types.ReconcileKey) error {\n\t// INSERT YOUR CODE HERE\n\tlog.Printf(\"Implement the Reconcile function on federatedreplicasetplacement.FederatedReplicaSetPlacementController to reconcile %s\\n\", k.Name)\n\treturn nil\n}", "func (r *StatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := log.WithValues(\"Request.Namespace\", req.Namespace, \"Request.Name\", req.Name)\n\tlog.Info(\"Reconciling\")\n\n\t// Fetch the ObservabilityAddon instance in hub cluster\n\thubObsAddon := &oav1beta1.ObservabilityAddon{}\n\terr := r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to get observabilityaddon in hub cluster\", \"namespace\", hubNamespace)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Fetch the ObservabilityAddon instance in local cluster\n\tobsAddon := &oav1beta1.ObservabilityAddon{}\n\terr = r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to get observabilityaddon\", \"namespace\", namespace)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\thubObsAddon.Status = obsAddon.Status\n\n\terr = r.HubClient.Status().Update(ctx, hubObsAddon)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to update status for observabilityaddon in hub cluster\", \"namespace\", hubNamespace)\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func Test_Reconcile(t *testing.T) {\n\t// Fake client is buggy, and it looks like it more or less works for very basic and simple scenarios\n\t// https://github.com/kubernetes-sigs/controller-runtime/issues/348\n\tfakeClientBuilder := fake.NewClientBuilder()\n\n\t//mock registry\n\tmockRegistry := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Fake registry\")\n\t}))\n\tdefer mockRegistry.Close()\n\n\tu,_ := url.Parse(mockRegistry.URL)\n\n\treconc := reconciler{\n\t\tclient: nil,\n\t\tignoredNamespaces: map[string]struct{}{\"kube-system\": {}},\n\t\tbackupRegistry: u.Host+\"/namespace/backup\",\n\t}\n\n\ttests := []struct {\n\t\t// test case short title\n\t\ttitle string\n\t\tobjects []client.Object\n\t\texpetedImage string\n\t\texpectError bool\n\t}{\n\t\t{\n\t\t\ttitle: \"reconcile deployment\",\n\t\t\texpetedImage: reconc.getTargetImage(u.Host+\"/nginx:latest\"),\n\t\t\tobjects: []client.Object{\n\t\t\t\t&appsv1.Deployment{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"test\" + \"-deployment\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: map[string]string{\"deployment\": \"test\" + \"-deployment\"}},\n\t\t\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\t\t\t\tImage: u.Host+\"/nginx:latest\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttitle: \"reconcile daemonset\",\n\t\t\texpetedImage: reconc.getTargetImage(u.Host+\"/nginx:latest\"),\n\t\t\tobjects: []client.Object{\n\t\t\t\t&appsv1.DaemonSet{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: appsv1.DaemonSetSpec{\n\t\t\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"test\" + 
\"-deployment\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: map[string]string{\"deployment\": \"test\" + \"-deployment\"}},\n\t\t\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\t\t\t\tImage: u.Host+\"/nginx:latest\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.title, func(t *testing.T) {\n\t\t\t//Put mock objects to fake client\n\t\t\tfakeClientBuilder.WithObjects(test.objects...)\n\t\t\t//Set fake client to reconciler\n\t\t\treconc.client = fakeClientBuilder.Build()\n\n\t\t\tfor _, o := range test.objects {\n\t\t\t\tkind := \"\"\n\n\t\t\t\tif _, isDeployment := o.(*appsv1.Deployment); isDeployment {\n\t\t\t\t\tkind = \"Deployment\"\n\t\t\t\t}else {\n\t\t\t\t\tkind = \"DaemonSet\"\n\t\t\t\t}\n\n\t\t\t\tr := reconcile.Request{NamespacedName: types.NamespacedName{\n\t\t\t\t\tNamespace: o.GetNamespace(),\n\t\t\t\t\tName: fmt.Sprintf(\"%s:%s\", kind, o.GetName()),\n\t\t\t\t}}\n\t\t\t\t_, e := reconc.Reconcile(context.Background(), r)\n\t\t\t\trequire.Nil(t, e)\n\n\t\t\t\t//Checking if reconciled object has the right image\n\t\t\t\tkey := types.NamespacedName{\n\t\t\t\t\tName: o.GetName(),\n\t\t\t\t\tNamespace: o.GetNamespace(),\n\t\t\t\t}\n\t\t\t\tswitch kind {\n\t\t\t\tcase \"Deployment\":\n\t\t\t\t\tdp := appsv1.Deployment{}\n\t\t\t\t\treconc.client.Get(context.Background(),key,&dp)\n\t\t\t\t\trequire.Equal(t,dp.Spec.Template.Spec.Containers[0].Image,test.expetedImage)\n\n\t\t\t\tcase \"DaemonSet\":\n\t\t\t\t\tds := appsv1.DaemonSet{}\n\t\t\t\t\treconc.client.Get(context.Background(),key,&ds)\n\t\t\t\t\trequire.Equal(t,ds.Spec.Template.Spec.Containers[0].Image,test.expetedImage)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (r *GatekeeperReconciler) Reconcile(req 
ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlogger := r.Log.WithValues(\"gatekeeper\", req.NamespacedName)\n\tlogger.Info(\"Reconciling Gatekeeper\")\n\n\tif req.Name != defaultGatekeeperCrName {\n\t\terr := fmt.Errorf(\"Gatekeeper resource name must be '%s'\", defaultGatekeeperCrName)\n\t\tlogger.Error(err, \"Invalid Gatekeeper resource name\")\n\t\t// Return success to avoid requeue\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tgatekeeper := &operatorv1alpha1.Gatekeeper{}\n\terr := r.Get(ctx, req.NamespacedName, gatekeeper)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tisGatekeeperMarkedToBeDeleted := gatekeeper.GetDeletionTimestamp() != nil\n\tif isGatekeeperMarkedToBeDeleted {\n\t\tif sets.NewString(gatekeeper.GetFinalizers()...).Has(gatekeeperFinalizer) {\n\n\t\t\tif err := r.finalizeGatekeeper(logger, gatekeeper); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tcontrollerutil.RemoveFinalizer(gatekeeper, gatekeeperFinalizer)\n\t\t\terr := r.Update(ctx, gatekeeper)\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif !sets.NewString(gatekeeper.GetFinalizers()...).Has(gatekeeperFinalizer) {\n\t\tif err := r.addFinalizer(logger, gatekeeper); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\terr = r.deployGatekeeperResources(gatekeeper)\n\tif err != nil {\n\t\treturn ctrl.Result{}, errors.Wrap(err, \"Unable to deploy Gatekeeper resources\")\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileProvisioner) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"sfcluster\", req.NamespacedName)\n\n\t// Fetch the SFCluster\n\tclusterInstance := &resourcev1alpha1.SFCluster{}\n\terr := r.Get(ctx, req.NamespacedName, clusterInstance)\n\tif err != nil {\n\t\tif apiErrors.IsNotFound(err) {\n\t\t\t// 
Object not found, return.\n\t\t\terr = removeClusterFromWatch(req.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\tlog.Error(err, \"Failed to get SFCluster...\", \"clusterId\", req.NamespacedName.Name)\n\t\t// Error reading the object - requeue the request.\n\t\treturn ctrl.Result{}, err\n\t}\n\tclusterID := clusterInstance.GetName()\n\tlog.Info(\"reconciling cluster\", \"clusterID\", clusterID)\n\n\t//reconcile primaryClusterID in the configmap\n\terr = r.reconcilePrimaryClusterIDConfig()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Get targetClient for targetCluster\n\ttargetClient, err := r.clusterRegistry.GetClient(clusterID)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 2. Get deploment instance for provisioner\n\tdeplomentInstance := &appsv1.Deployment{}\n\terr = r.Get(ctx, types.NamespacedName{\n\t\tName: constants.ProvisionerTemplateName,\n\t\tNamespace: constants.InteroperatorNamespace,\n\t}, deplomentInstance)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to get provisioner deployment from master cluster\", \"clusterId\", clusterID)\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 3. Register sf CRDs\n\terr = r.registerSFCrds(clusterID, targetClient)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 4. Add watches on resources in target sfcluster. Must be done after\n\t// registering sf crds, since we are trying to watch on sfserviceinstance\n\t// and sfservicebinding.\n\terr = addClusterToWatch(clusterID)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 5. Create/Update Namespace in target cluster for provisioner\n\tnamespace := deplomentInstance.GetNamespace()\n\terr = r.reconcileNamespace(namespace, clusterID, targetClient)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 6. 
Creating/Updating sfcluster in target cluster\n\terr = r.reconcileSfClusterCrd(clusterInstance, clusterID, targetClient)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 7. Creating/Updating kubeconfig secret for sfcluster in target cluster\n\t// Fetch current primary cluster id from configmap\n\tinteroperatorCfg := r.cfgManager.GetConfig()\n\tcurrPrimaryClusterID := interoperatorCfg.PrimaryClusterID\n\n\terr = r.reconcileSecret(namespace, clusterInstance.Spec.SecretRef, clusterID, targetClient)\n\tif err != nil {\n\t\t// Skip if secret not found for leader cluster\n\t\tif !(apiErrors.IsNotFound(err) && clusterID == currPrimaryClusterID) {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.Info(\"Ignoring secret not found error for leader cluster\", \"clusterId\", clusterID,\n\t\t\t\"secretRef\", clusterInstance.Spec.SecretRef)\n\t}\n\n\t// 8. Deploy cluster rolebinding\n\terr = r.reconcileClusterRoleBinding(namespace, clusterID, targetClient)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// 9. Creating/Updating imagepull secrets for provisioner deployment in target cluster\n\tfor _, secretRef := range deplomentInstance.Spec.Template.Spec.ImagePullSecrets {\n\t\terr = r.reconcileSecret(namespace, secretRef.Name, clusterID, targetClient)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// 10. 
Create Deployment in target cluster for provisioner\n\terr = r.reconcileDeployment(deplomentInstance, clusterID, targetClient)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\trequeueAfter, err := time.ParseDuration(interoperatorCfg.ClusterReconcileInterval)\n\tif err != nil {\n\t\tlog.Error(err, \"Failed to parse ClusterReconcileInterval\",\n\t\t\t\"ClusterReconcileInterval\", interoperatorCfg.ClusterReconcileInterval)\n\t\trequeueAfter, _ = time.ParseDuration(constants.DefaultClusterReconcileInterval)\n\t}\n\treturn ctrl.Result{\n\t\tRequeueAfter: requeueAfter,\n\t}, nil\n}", "func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tlog.Info(\"reconciling\", \"request\", request)\n\n\t// Only proceed if we can get the ingress resource.\n\tingress := &configv1.Ingress{}\n\tif err := r.cache.Get(ctx, request.NamespacedName, ingress); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.Info(\"ingress cr not found; reconciliation will be skipped\", \"request\", request)\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to get ingress %q: %w\", request.NamespacedName, err)\n\t}\n\n\t// Get the list of componentRoutes defined in both the spec and status of the ingress resource that require\n\t// roles and roleBindings.\n\tcomponentRoutes := intersectingComponentRoutes(ingress.Spec.ComponentRoutes, ingress.Status.ComponentRoutes)\n\n\t// Ensure role and roleBindings exist for each valid componentRoute.\n\tfor _, componentRoute := range componentRoutes {\n\t\t// Ensure role.\n\t\troleName, err := r.ensureServiceCertKeyPairSecretRole(componentRoute)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to create role: %v\", err)\n\t\t}\n\n\t\t// Get the role just created so the UID is available for the ownerReference on the roleBinding.\n\t\trole := &rbacv1.Role{}\n\t\tif err := r.client.Get(ctx, types.NamespacedName{Namespace: 
r.config.SecretNamespace, Name: roleName}, role); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\t// Ensure roleBinding.\n\t\tif err := r.ensureServiceCertKeyPairSecretRoleBinding(role, componentRoute); err != nil {\n\t\t\treturn reconcile.Result{}, fmt.Errorf(\"failed to create roleBinding: %v\", err)\n\t\t}\n\t}\n\n\texistingHashes := sets.String{}\n\tfor _, cr := range componentRoutes {\n\t\texistingHashes.Insert(cr.Hash)\n\t}\n\n\t// Delete any roles or roleBindings that were generated for componentRoutes that are no longer defined.\n\t// RoleBindings are cleanedup by garbage collector due to owner reference to Role.\n\tif err := utilerrors.NewAggregate(r.deleteOrphanedRoles(componentRoutes, existingHashes)); err != nil {\n\t\treturn reconcile.Result{}, fmt.Errorf(\"error(s) deleting orphaned roles: %v\", err)\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (ncc *NetworkConfigCreator) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// Wait, in case the configuration has not completed yet.\n\tif !ncc.secretWatcher.WaitForConfigured(ctx) || !ncc.serviceWatcher.WaitForConfigured(ctx) {\n\t\treturn ctrl.Result{}, errors.New(\"context expired before initialization completed\")\n\t}\n\n\tklog.V(4).Infof(\"Reconciling ForeignCluster %q\", req.Name)\n\ttracer := trace.New(\"Reconcile\", trace.Field{Key: \"ForeignCluster\", Value: req.Name})\n\tctx = trace.ContextWithTrace(ctx, tracer)\n\tdefer tracer.LogIfLong(traceutils.LongThreshold())\n\n\t// Get the foreign cluster object.\n\tvar fc discoveryv1alpha1.ForeignCluster\n\tif err := ncc.Get(ctx, req.NamespacedName, &fc); err != nil {\n\t\t// Remove the ForeignCluster from the list of known ones.\n\t\tncc.foreignClusters.Remove(req.NamespacedName.Name)\n\n\t\tif !kerrors.IsNotFound(err) {\n\t\t\tklog.Errorf(\"Failed retrieving ForeignCluster: %v\", err)\n\t\t}\n\t\t// Reconcile was triggered by a delete request.\n\t\t// No need to delete anything, as automatically collected 
by the owner reference.\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\tif fc.Spec.ClusterIdentity.ClusterID == \"\" {\n\t\tklog.V(4).Infof(\"ForeignCluster %q not yet associated with a cluster ID\", req.Name)\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif !foreigncluster.IsNetworkingEnabled(&fc) {\n\t\tklog.V(4).Infof(\"Networking for cluster %q is disabled, hence no need to create the networkconfig\", req.Name)\n\t}\n\n\t// Add the ForeignCluster to the list of known ones.\n\tncc.foreignClusters.Add(req.NamespacedName.Name)\n\n\t// A peering is (being) established and networking is enabled, hence we need to ensure the network interconnection.\n\tif fc.GetDeletionTimestamp().IsZero() && foreigncluster.IsNetworkingEnabled(&fc) &&\n\t\t(foreigncluster.IsIncomingJoined(&fc) || foreigncluster.IsOutgoingJoined(&fc)) {\n\t\treturn ctrl.Result{}, ncc.EnforceNetworkConfigPresence(ctx, &fc)\n\t}\n\n\t// A peering is not established or the networking has been disabled, hence we need to tear down the network interconnection.\n\treturn ctrl.Result{}, ncc.EnforceNetworkConfigAbsence(ctx, &fc)\n}", "func (r *ReconcileSdewan) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling Sdewan\")\n\n\t// Fetch the Sdewan instance\n\tinstance := &sdewanv1alpha1.Sdewan{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\tfor i, network := range instance.Spec.Networks {\n\t\tif network.Interface == \"\" {\n\t\t\tinstance.Spec.Networks[i].Interface = fmt.Sprintf(\"net%d\", i)\n\t\t}\n\t}\n\n\tcm := newConfigmapForCR(instance)\n\tif err := controllerutil.SetControllerReference(instance, cm, r.scheme); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tfoundcm := &corev1.ConfigMap{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}, foundcm)\n\tif err != nil && errors.IsNotFound(err) {\n\t\treqLogger.Info(\"Creating a new Configmap\", \"Configmap.Namespace\", cm.Namespace, \"Configmap.Name\", cm.Name)\n\t\terr = r.client.Create(context.TODO(), cm)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else if err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if reflect.DeepEqual(foundcm.Data, cm.Data) {\n\t\treqLogger.Info(\"Updating Configmap\", \"Configmap.Namespace\", cm.Namespace, \"Configmap.Name\", cm.Name)\n\t\terr = r.client.Update(context.TODO(), cm)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else {\n\t\treqLogger.Info(\"Configmap not changed\", \"Configmap.Namespace\", foundcm.Namespace, \"Configmap.Name\", foundcm.Name)\n\t}\n\t// Define a new Pod object\n\tpod := newPodForCR(instance)\n\n\t// Set Sdewan instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Check if this Pod already exists\n\tfoundpod := &corev1.Pod{}\n\terr = r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, foundpod)\n\tif err != nil && errors.IsNotFound(err) {\n\t\treqLogger.Info(\"Creating a new Pod\", 
\"Pod.Namespace\", pod.Namespace, \"Pod.Name\", pod.Name)\n\t\terr = r.client.Create(context.TODO(), pod)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\n\t\t// Pod created successfully - don't requeue\n reqLogger.Info(\"A new Pod created\", \"Pod.Namespace\", pod.Namespace, \"Pod.Name\", pod.Name)\n\t} else if err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else {\n\t\t// Pod already exists - don't requeue\n\t\treqLogger.Info(\"Pod already exists\", \"Pod.Namespace\", foundpod.Namespace, \"Pod.Name\", foundpod.Name)\n\t}\n\n svc := newSvcForCR(instance)\n\t// Set Sdewan instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(instance, svc, r.scheme); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n // Check if this svc already exists\n foundsvc := &corev1.Service{}\n err = r.client.Get(context.TODO(), types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}, foundsvc)\n if err != nil && errors.IsNotFound(err) {\n reqLogger.Info(\"Creating a new Service\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n err = r.client.Create(context.TODO(), svc)\n if err != nil {\n return reconcile.Result{}, err\n }\n reqLogger.Info(\"A new Service created\", \"Service.Namespace\", svc.Namespace, \"Service.Name\", svc.Name)\n } else if err != nil {\n return reconcile.Result{}, err\n } else {\n reqLogger.Info(\"Service already exists\", \"Service.Namespace\", foundsvc.Namespace, \"Service.Name\", foundsvc.Name)\n }\n\n\treturn reconcile.Result{}, nil\n}", "func (r *yandexContainerRegistryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := r.log.WithValues(\"name\", req.NamespacedName)\n\tlog.V(1).Info(\"started reconciliation\")\n\n\t// Try to retrieve object from k8s\n\tvar object connectorsv1.YandexContainerRegistry\n\tif err := r.Get(ctx, req.NamespacedName, &object); err != nil {\n\t\t// It still can be OK if we have not found it, and we do not 
need to reconcile it again\n\n\t\t// This outcome signifies that we just cannot find object, that is ok\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tlog.V(1).Info(\"object not found in k8s, reconciliation not possible\")\n\t\t\treturn config.GetNeverResult()\n\t\t}\n\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to get object from k8s: %w\", err))\n\t}\n\n\t// If object must be currently finalized, do it and quit\n\tif phase.MustBeFinalized(&object.ObjectMeta, ycrconfig.FinalizerName) {\n\t\tif err := r.finalize(ctx, log.WithName(\"finalize\"), &object); err != nil {\n\t\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to finalize object: %w\", err))\n\t\t}\n\t\treturn config.GetNormalResult()\n\t}\n\n\tif err := phase.RegisterFinalizer(\n\t\tctx, r.Client, log, &object.ObjectMeta, &object, ycrconfig.FinalizerName,\n\t); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to register finalizer: %w\", err))\n\t}\n\n\tres, err := r.allocateResource(ctx, log.WithName(\"allocate-resource\"), &object)\n\tif err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to allocate resource: %w\", err))\n\t}\n\n\tif err := r.matchSpec(ctx, log.WithName(\"match-spec\"), &object, res); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to match spec: %w\", err))\n\t}\n\n\tif err := r.updateStatus(ctx, log.WithName(\"update-status\"), &object, res); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to update status: %w\", err))\n\t}\n\n\tif err := phase.ProvideConfigmap(\n\t\tctx,\n\t\tr.Client,\n\t\tlog.WithName(\"provide-configmap\"),\n\t\tobject.Name, ycrconfig.ShortName, object.Namespace,\n\t\tmap[string]string{\"ID\": object.Status.ID},\n\t); err != nil {\n\t\treturn config.GetErroredResult(fmt.Errorf(\"unable to provide configmap: %w\", err))\n\t}\n\n\tlog.V(1).Info(\"finished reconciliation\")\n\treturn config.GetNormalResult()\n}", "func (c *KubernetesDefaultRouter) Reconcile(canary 
*flaggerv1.Canary) error {\n\tapexName, _, _ := canary.GetServiceNames()\n\n\t// main svc\n\terr := c.reconcileService(canary, apexName, fmt.Sprintf(\"%s-primary\", c.labelValue), canary.Spec.Service.Apex)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reconcileService failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func (kor *KubernetesOAMRouter) Reconcile(canary *flaggerv1.Canary) error {\n\tc := kor.innerK8sRouter\n\tapexName, _, _ := canary.GetServiceNames()\n\n\t// main svc also elect all the component pod\n\terr := c.reconcileService(canary, apexName, kor.componentName, canary.Spec.Service.Apex)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reconcileService failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func (r *ReconcileVirtualcluster) Reconcile(request reconcile.Request) (rncilRslt reconcile.Result, err error) {\n\tlog.Info(\"reconciling Virtualcluster...\")\n\tvc := &tenancyv1alpha1.Virtualcluster{}\n\terr = r.Get(context.TODO(), request.NamespacedName, vc)\n\tif err != nil {\n\t\t// set NotFound error as nil\n\t\tif apierrors.IsNotFound(err) {\n\t\t\terr = nil\n\t\t}\n\t\treturn\n\t}\n\n\t// TODO implement the delete logic (finalizer)\n\n\t// reconcile Virtualcluster (vc) based on vc status\n\t// NOTE: vc status is required by other components (e.g. 
syncer need to\n\t// know the vc status in order to setup connection to tenant master)\n\tswitch vc.Status.Phase {\n\tcase \"\":\n\t\t// set vc status as ClusterPending if no status is set\n\t\tlog.Info(\"will create a Virtualcluster\", \"vc\", vc.Name)\n\t\terr = retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tvc.Status.Phase = tenancyv1alpha1.ClusterPending\n\t\t\tvc.Status.Message = \"creating virtual cluster...\"\n\t\t\tvc.Status.Reason = \"ClusterCreating\"\n\t\t\tupdateErr := r.Update(context.TODO(), vc)\n\t\t\tif err = r.Get(context.TODO(), request.NamespacedName, vc); err != nil {\n\t\t\t\tlog.Info(\"fail to get vc on update failure\", \"error\", err.Error())\n\t\t\t}\n\t\t\treturn updateErr\n\t\t})\n\t\treturn\n\tcase tenancyv1alpha1.ClusterPending:\n\t\t// create new virtualcluster when vc is pending\n\t\tlog.Info(\"Virtualcluster is pending\", \"vc\", vc.Name)\n\t\tcvs := &tenancyv1alpha1.ClusterVersionList{}\n\t\terr = r.List(context.TODO(), cvs, client.InNamespace(\"\"))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tcv := getClusterVersion(cvs, vc.Spec.ClusterVersionName)\n\t\tif cv == nil {\n\t\t\terr = fmt.Errorf(\"desired ClusterVersion %s not found\",\n\t\t\t\tvc.Spec.ClusterVersionName)\n\t\t\treturn\n\t\t}\n\t\terr = r.createVirtualcluster(vc, cv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t// all components are ready, update vc status\n\t\terr = retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tvc.Status.Phase = \"Running\"\n\t\t\tvc.Status.Message = \"tenant master is running\"\n\t\t\tvc.Status.Reason = \"TenantMasterRunning\"\n\t\t\tupdateErr := r.Update(context.TODO(), vc)\n\t\t\tif err = r.Get(context.TODO(), request.NamespacedName, vc); err != nil {\n\t\t\t\tlog.Info(\"fail to get vc on update failure\", \"error\", err.Error())\n\t\t\t}\n\t\t\treturn updateErr\n\t\t})\n\t\treturn\n\tcase tenancyv1alpha1.ClusterRunning:\n\t\tlog.Info(\"Virtualcluster is running\", \"vc\", 
vc.Name)\n\t\treturn\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown vc phase: %s\", vc.Status.Phase)\n\t\treturn\n\t}\n}", "func (r *ReconcileSyncSet) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\t// Fetch the ClusterDeployment instance\n\tcd := &hivev1.ClusterDeployment{}\n\n\terr := r.Get(context.TODO(), request.NamespacedName, cd)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Object not found, return\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request\n\t\tr.logger.WithError(err).WithField(\"clusterDeployment\", request.NamespacedName).Error(\"error looking up cluster deployment\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tcdLog := r.logger.WithFields(log.Fields{\n\t\t\"clusterDeployment\": request.NamespacedName,\n\t})\n\n\t// If the clusterdeployment is deleted, do not reconcile.\n\tif cd.DeletionTimestamp != nil {\n\t\tcdLog.Debug(\"clusterdeployment is being deleted, nothing to do\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tif !cd.Spec.Installed {\n\t\t// Cluster isn't installed yet, return\n\t\tcdLog.Debug(\"cluster installation is not complete\")\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tcdLog.Info(\"reconciling sync sets for cluster deployment\")\n\n\t// get all sync sets that apply to cd\n\tsyncSets, err := r.getRelatedSyncSets(cd)\n\tif err != nil {\n\t\tcdLog.WithError(err).Error(\"unable to list related sync sets for cluster deployment\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// get all selector sync sets that apply to cd\n\tselectorSyncSets, err := r.getRelatedSelectorSyncSets(cd)\n\tif err != nil {\n\t\tcdLog.WithError(err).Error(\"unable to list related sync sets for cluster deployment\")\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tsyncSetInstances, err := r.getRelatedSyncSetInstances(cd)\n\tif err != nil {\n\t\tcdLog.WithError(err).Error(\"unable to list related sync set instances for cluster deployment\")\n\t\treturn reconcile.Result{}, 
err\n\t}\n\n\ttoAdd, toUpdate, toDelete, err := r.reconcileSyncSetInstances(cd, syncSets, selectorSyncSets, syncSetInstances)\n\tif err != nil {\n\t\tcdLog.WithError(err).Error(\"unable to reconcile sync set instances for cluster deployment\")\n\t}\n\n\tfor _, syncSetInstance := range toUpdate {\n\t\terr := r.Update(context.TODO(), syncSetInstance)\n\t\tif err != nil {\n\t\t\tname := fmt.Sprintf(\"%s/%s\", syncSetInstance.Namespace, syncSetInstance.Name)\n\t\t\tcdLog.WithError(err).WithField(\"syncSetInstance\", name).Log(controllerutils.LogLevel(err), \"cannot update sync set instance\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tfor _, syncSetInstance := range toDelete {\n\t\terr := r.Delete(context.TODO(), syncSetInstance)\n\t\tif err != nil && !errors.IsNotFound(err) {\n\t\t\tname := fmt.Sprintf(\"%s/%s\", syncSetInstance.Namespace, syncSetInstance.Name)\n\t\t\tcdLog.WithError(err).WithField(\"syncSetInstance\", name).Log(controllerutils.LogLevel(err), \"cannot delete sync set instance\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tfor _, syncSetInstance := range toAdd {\n\t\terr := r.Create(context.TODO(), syncSetInstance)\n\t\tif err != nil {\n\t\t\tname := fmt.Sprintf(\"%s/%s\", syncSetInstance.Namespace, syncSetInstance.Name)\n\t\t\tcdLog.WithError(err).WithField(\"syncSetInstance\", name).Log(controllerutils.LogLevel(err), \"cannot create sync set instance\")\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\tcdLog.Info(\"done reconciling sync sets for cluster deployment\")\n\treturn reconcile.Result{}, nil\n}", "func (r *AlertsConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t_ = r.Log.WithValues(\"alertsconfig\", req.NamespacedName)\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\tctx = context.WithValue(ctx, requestId, uuid.New())\n\tlog := log.Logger(ctx, \"controllers\", \"alertconfig_controller\", \"Reconcile\")\n\tlog = 
log.WithValues(\"alertconfig_cr\", req.NamespacedName)\n\tlog.Info(\"Start of the request\")\n\n\t// Get the CR\n\tvar alertsConfig alertmanagerv1alpha1.AlertsConfig\n\tif err := r.Get(ctx, req.NamespacedName, &alertsConfig); err != nil {\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\t// Check if it is delete request\n\tif !alertsConfig.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\trequeueFlag := false\n\t\t// Delete use case\n\t\tif err := r.HandleDelete(ctx, &alertsConfig); err != nil {\n\t\t\tlog.Error(err, \"unable to delete the alert\")\n\t\t\trequeueFlag = true\n\t\t}\n\t\treturn ctrl.Result{Requeue: requeueFlag}, nil\n\t}\n\n\t//First time use case\n\tif !utils.ContainsString(alertsConfig.ObjectMeta.Finalizers, alertsConfigFinalizerName) {\n\t\tlog.Info(\"New alerts config resource. Adding the finalizer\", \"finalizer\", alertsConfigFinalizerName)\n\n\t\talertsConfig.ObjectMeta.Finalizers = append(alertsConfig.ObjectMeta.Finalizers, alertsConfigFinalizerName)\n\t\tr.CommonClient.UpdateMeta(ctx, &alertsConfig)\n\t\t//That's fine- Let it come for requeue and we can create the alert\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\talertHashMap := alertsConfig.Status.AlertsStatus\n\tglobalMap := alertsConfig.Spec.GlobalParams\n\t// Handle create/update here\n\tfor alertName, config := range alertsConfig.Spec.Alerts {\n\n\t\t// Calculate checksum and compare it with the status checksum\n\t\texist, reqChecksum := utils.CalculateAlertConfigChecksum(ctx, config, globalMap)\n\t\t// if request and status checksum matches then there is NO change in this specific alert config\n\t\tif exist && alertHashMap[alertName].LastChangeChecksum == reqChecksum {\n\t\t\tlog.V(1).Info(\"checksum is equal so there is no change. 
skipping\", \"alertName\", alertName)\n\t\t\t//skip it\n\t\t\tcontinue\n\t\t}\n\t\t// if there is a diff\n\t\t// Get Alert CR\n\n\t\tvar wfAlert alertmanagerv1alpha1.WavefrontAlert\n\t\twfAlertNamespacedName := types.NamespacedName{Namespace: req.Namespace, Name: alertName}\n\t\tif err := r.Get(ctx, wfAlertNamespacedName, &wfAlert); err != nil {\n\t\t\tlog.Error(err, \"unable to get the wavefront alert details for the requested name\", \"wfAlertName\", alertName)\n\t\t\t// This means wavefront alert itself is not created.\n\t\t\t// There could be 2 use cases\n\t\t\t// 1. There was a race condition if wavefrontalert and alerts config got created 'almost at the same time'\n\t\t\t// 2. Wrong alert name and user is going to correct\n\t\t\t// Ideal way to handle this is to make the alert config status to error and requeue it once in 5 mins or so instead of standard kube builder requeue time\n\t\t\t// Update the status and retry it\n\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, alertmanagerv1alpha1.Error, err)\n\t\t}\n\t\tvar alert wf.Alert\n\t\t//Get the processed wf alert\n\t\t//merge the alerts config global params and individual params\n\t\tparams := utils.MergeMaps(ctx, globalMap, config.Params)\n\n\t\tif err := controllercommon.GetProcessedWFAlert(ctx, &wfAlert, params, &alert); err != nil {\n\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, alertmanagerv1alpha1.Error, err)\n\t\t}\n\t\t// Create/Update Alert\n\t\tif alertHashMap[alertName].ID == \"\" {\n\t\t\t// Create use case\n\t\t\tif err := r.WavefrontClient.CreateAlert(ctx, &alert); err != nil {\n\t\t\t\tr.Recorder.Event(&alertsConfig, v1.EventTypeWarning, err.Error(), \"unable to create the alert\")\n\t\t\t\tstate := alertmanagerv1alpha1.Error\n\t\t\t\tif strings.Contains(err.Error(), \"Exceeded limit setting\") {\n\t\t\t\t\t// For ex: error is \"Exceeded limit setting: 100 alerts allowed per customer\"\n\t\t\t\t\tstate = 
alertmanagerv1alpha1.ClientExceededLimit\n\t\t\t\t}\n\t\t\t\tlog.Error(err, \"unable to create the alert\")\n\n\t\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, state, err)\n\t\t\t}\n\t\t\talertStatus := alertmanagerv1alpha1.AlertStatus{\n\t\t\t\tID: *alert.ID,\n\t\t\t\tName: alert.Name,\n\t\t\t\tLastChangeChecksum: reqChecksum,\n\t\t\t\tLink: fmt.Sprintf(\"https://%s/alerts/%s\", internalconfig.Props.WavefrontAPIUrl(), *alert.ID),\n\t\t\t\tState: alertmanagerv1alpha1.Ready,\n\t\t\t\tAssociatedAlert: alertmanagerv1alpha1.AssociatedAlert{\n\t\t\t\t\tCR: alertName,\n\t\t\t\t},\n\t\t\t\tAssociatedAlertsConfig: alertmanagerv1alpha1.AssociatedAlertsConfig{\n\t\t\t\t\tCR: alertsConfig.Name,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif err := r.CommonClient.PatchWfAlertAndAlertsConfigStatus(ctx, alertmanagerv1alpha1.Ready, &wfAlert, &alertsConfig, alertStatus); err != nil {\n\t\t\t\tlog.Error(err, \"unable to patch wfalert and alertsconfig status objects\")\n\t\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, alertmanagerv1alpha1.Error, err)\n\t\t\t}\n\t\t\tlog.Info(\"alert successfully got created\", \"alertID\", alert.ID)\n\n\t\t} else {\n\t\t\talertID := alertHashMap[alertName].ID\n\t\t\talert.ID = &alertID\n\t\t\t//TODO: Move this to common so it can be used for both wavefront and alerts config\n\t\t\t//Update use case\n\t\t\tif err := r.WavefrontClient.UpdateAlert(ctx, &alert); err != nil {\n\t\t\t\tr.Recorder.Event(&alertsConfig, v1.EventTypeWarning, err.Error(), \"unable to update the alert\")\n\t\t\t\tstate := alertmanagerv1alpha1.Error\n\t\t\t\tif strings.Contains(err.Error(), \"Exceeded limit setting\") {\n\t\t\t\t\t// For ex: error is \"Exceeded limit setting: 100 alerts allowed per customer\"\n\t\t\t\t\tstate = alertmanagerv1alpha1.ClientExceededLimit\n\t\t\t\t}\n\t\t\t\tlog.Error(err, \"unable to create the alert\")\n\n\t\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, state, 
err)\n\t\t\t}\n\n\t\t\talertStatus := alertHashMap[alertName]\n\t\t\talertStatus.LastChangeChecksum = reqChecksum\n\n\t\t\tif err := r.CommonClient.PatchWfAlertAndAlertsConfigStatus(ctx, alertmanagerv1alpha1.Ready, &wfAlert, &alertsConfig, alertStatus); err != nil {\n\t\t\t\tlog.Error(err, \"unable to patch wfalert and alertsconfig status objects\")\n\t\t\t\treturn r.PatchIndividualAlertsConfigError(ctx, &alertsConfig, alertName, alertmanagerv1alpha1.Error, err)\n\t\t\t}\n\t\t\tlog.Info(\"alert successfully got updated\", \"alertID\", alert.ID)\n\t\t}\n\t}\n\n\t// Now - lets see if there is any config is removed compared to the status\n\t// If there is any, we need to make a call to delete the alert\n\treturn r.HandleIndividalAlertConfigRemoval(ctx, req.NamespacedName)\n}", "func (r *Reconciler) Reconcile() (reconcile.Result, error) {\n\n\tres := reconcile.Result{}\n\tlog := r.Logger\n\tlog.Infof(\"Start ...\")\n\n\tutil.KubeCheck(r.NooBaaAccount)\n\n\tif r.NooBaaAccount.UID == \"\" {\n\t\tlog.Infof(\"NooBaaAccount %q not found or deleted. 
Skip reconcile.\", r.NooBaaAccount.Name)\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tif util.EnsureCommonMetaFields(r.NooBaaAccount, nbv1.Finalizer) {\n\t\tif !util.KubeUpdate(r.NooBaaAccount) {\n\t\t\tlog.Errorf(\"❌ NooBaaAccount %q failed to add mandatory meta fields\", r.NooBaaAccount.Name)\n\n\t\t\tres.RequeueAfter = 3 * time.Second\n\t\t\treturn res, nil\n\t\t}\n\t}\n\n\tsystem.CheckSystem(r.NooBaa)\n\n\tvar err error\n\tif r.NooBaaAccount.DeletionTimestamp != nil {\n\t\terr = r.ReconcileDeletion()\n\t} else {\n\t\terr = r.ReconcilePhases()\n\t}\n\tif err != nil {\n\t\tif perr, isPERR := err.(*util.PersistentError); isPERR {\n\t\t\tr.SetPhase(nbv1.NooBaaAccountPhaseRejected, perr.Reason, perr.Message)\n\t\t\tlog.Errorf(\"❌ Persistent Error: %s\", err)\n\t\t\tif r.Recorder != nil {\n\t\t\t\tr.Recorder.Eventf(r.NooBaaAccount, corev1.EventTypeWarning, perr.Reason, perr.Message)\n\t\t\t}\n\t\t} else {\n\t\t\tres.RequeueAfter = 3 * time.Second\n\t\t\t// leave current phase as is\n\t\t\tr.SetPhase(\"\", \"TemporaryError\", err.Error())\n\t\t\tlog.Warnf(\"⏳ Temporary Error: %s\", err)\n\t\t}\n\t} else {\n\t\tr.SetPhase(\n\t\t\tnbv1.NooBaaAccountPhaseReady,\n\t\t\t\"NooBaaAccountPhaseReady\",\n\t\t\t\"noobaa operator completed reconcile - noobaa account is ready\",\n\t\t)\n\t\tlog.Infof(\"✅ Done\")\n\t}\n\n\terr = r.UpdateStatus()\n\t// if updateStatus will fail to update the CR for any reason we will continue to requeue the reconcile\n\t// until the spec status will reflect the actual status of the bucketclass\n\tif err != nil {\n\t\tres.RequeueAfter = 3 * time.Second\n\t\tlog.Warnf(\"⏳ Temporary Error: %s\", err)\n\t}\n\treturn res, nil\n}", "func (r *TenantReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tlog := clog.WithName(\"reconcile\").WithValues(\"tenant\", req.NamespacedName)\n\n\t// get tenant info\n\ttenant := tenantv1.Tenant{}\n\terr := r.Client.Get(ctx, req.NamespacedName, &tenant)\n\tif err != nil 
{\n\t\tlog.Warn(\"get tenant fail, %v\", err)\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// if .spec.namespace not equal the standard name\n\tnsName := \"kubecube-tenant-\" + req.Name\n\tif tenant.Spec.Namespace != nsName {\n\t\ttenant.Spec.Namespace = nsName\n\t\terr = r.Client.Update(ctx, &tenant)\n\t\tif err != nil {\n\t\t\tlog.Error(\"update tenant .spec.namespace fail, %v\", err)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// if annotation not content kubecube.io/sync, add it\n\tano := tenant.Annotations\n\tif ano == nil {\n\t\tano = make(map[string]string)\n\t}\n\tif _, ok := ano[\"kubecube.io/sync\"]; !ok {\n\t\tano[\"kubecube.io/sync\"] = \"1\"\n\t\ttenant.Annotations = ano\n\t\terr = r.Client.Update(ctx, &tenant)\n\t\tif err != nil {\n\t\t\tlog.Error(\"update tenant .metadata.annotations fail, %v\", err)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// weather namespace exist, create one\n\tnamespace := corev1.Namespace{}\n\terr = r.Client.Get(ctx, types.NamespacedName{Name: nsName}, &namespace)\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\tlog.Warn(\"get tenant namespaces fail, %v\", err)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tnamespace := corev1.Namespace{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tKind: \"Namespace\",\n\t\t\t\tAPIVersion: \"v1\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: nsName,\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"kubecube.io/sync\": \"1\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\terr = r.Client.Create(ctx, &namespace)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"create tenant namespaces fail, %v\", err)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tr.log.Debug(\"Reconciling\")\n\tctx, cancel := context.WithTimeout(context.Background(), reconcileTimeout)\n\tdefer cancel()\n\t// fetch the app context\n\tappContext := &v1alpha2.ApplicationContext{}\n\tif err := 
r.client.Get(ctx, request.NamespacedName, appContext); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// stop processing this resource\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, errors.Wrap(err, errGetAppContex)\n\t}\n\n\tctx = util.SetNamespaceInCtx(ctx, appContext.Namespace)\n\tdm, err := discoverymapper.New(r.mgr.GetConfig())\n\tif err != nil {\n\t\treturn reconcile.Result{}, fmt.Errorf(\"create discovery dm fail %w\", err)\n\t}\n\t// fetch the appRevision it points to\n\tappRevision := &v1alpha2.ApplicationRevision{}\n\tkey := types.NamespacedName{Namespace: appContext.Namespace, Name: appContext.Spec.ApplicationRevisionName}\n\tif err := r.client.Get(ctx, key, appRevision); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// stop processing this resource\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, errors.Wrap(err, errGetAppRevision)\n\t}\n\n\t// copy the status\n\tacRaw := appRevision.Spec.ApplicationConfiguration\n\tappConfig, err := ConvertRawExtention2AppConfig(acRaw)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\tappConfig.Status = appContext.Status\n\t// the name of the appConfig has to be the same as the appContext\n\tappConfig.ObjectMeta = metav1.ObjectMeta{Namespace: appContext.Namespace, Name: appContext.Name, UID: appContext.UID}\n\t// call into the old ac Reconciler and copy the status back\n\tacReconciler := ac.NewReconciler(r.mgr, dm, r.log, ac.WithRecorder(r.record), ac.WithApplyOnceOnlyMode(r.applyMode))\n\treconResult := acReconciler.ACReconcile(ctx, appConfig, r.log)\n\tappContext.Status = appConfig.Status\n\t// always update ac status and set the error\n\terr = errors.Wrap(r.client.Status().Update(ctx, appContext), errUpdateAppContextStatus)\n\t// use the controller build-in backoff mechanism if an error occurs\n\tif err != nil {\n\t\treconResult.RequeueAfter = 0\n\t}\n\treturn reconResult, err\n}", "func (r *CustomResourceDiscoverySetReconciler) 
Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tvar (\n\t\tctx = context.Background()\n\t\tresult ctrl.Result\n\t)\n\n\tcrDiscoverySet := &corev1alpha1.CustomResourceDiscoverySet{}\n\tif err := r.Get(ctx, req.NamespacedName, crDiscoverySet); err != nil {\n\t\treturn result, client.IgnoreNotFound(err)\n\t}\n\n\tif util.AddFinalizer(crDiscoverySet, metav1.FinalizerDeleteDependents) {\n\t\tif err := r.Client.Update(ctx, crDiscoverySet); err != nil {\n\t\t\treturn ctrl.Result{}, fmt.Errorf(\"updating CustomResourceDiscoverySet finalizers: %w\", err)\n\t\t}\n\t}\n\tif !crDiscoverySet.DeletionTimestamp.IsZero() {\n\t\t// nothing to do, let kube controller-manager foregroundDeletion wait until every created object is deleted\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// List ServiceClusters\n\tserviceClusterSelector, err := metav1.LabelSelectorAsSelector(&crDiscoverySet.Spec.ServiceClusterSelector)\n\tif err != nil {\n\t\treturn result, fmt.Errorf(\"parsing ServiceCluster selector: %w\", err)\n\t}\n\tserviceClusterList := &corev1alpha1.ServiceClusterList{}\n\tif err := r.List(ctx, serviceClusterList,\n\t\tclient.InNamespace(crDiscoverySet.Namespace),\n\t\tclient.MatchingLabelsSelector{Selector: serviceClusterSelector},\n\t); err != nil {\n\t\treturn result, fmt.Errorf(\"listing ServiceClusters: %w\", err)\n\t}\n\n\t// Reconcile CRDiscoveries\n\tvar unreadyCRDiscoveryNames []string\n\tvar readyCRDReferences []corev1alpha1.CustomResourceDiscoverySetCRDReference\n\texistingCRDiscoveryNames := map[string]struct{}{}\n\tfor _, serviceCluster := range serviceClusterList.Items {\n\t\tcurrentCRDiscovery, err := r.reconcileCRDiscovery(ctx, &serviceCluster, crDiscoverySet)\n\t\tif err != nil {\n\t\t\treturn result, fmt.Errorf(\n\t\t\t\t\"reconciling CustomResourceDiscovery for ServiceCluster %s: %w\", serviceCluster.Name, err)\n\t\t}\n\t\texistingCRDiscoveryNames[currentCRDiscovery.Name] = struct{}{}\n\n\t\tif currentCRDiscovery.IsReady() {\n\t\t\treadyCRDReferences = 
append(readyCRDReferences,\n\t\t\t\tcorev1alpha1.CustomResourceDiscoverySetCRDReference{\n\t\t\t\t\tCRD: corev1alpha1.ObjectReference{\n\t\t\t\t\t\tName: currentCRDiscovery.Status.ManagementClusterCRD.Name,\n\t\t\t\t\t},\n\t\t\t\t\tServiceCluster: corev1alpha1.ObjectReference{\n\t\t\t\t\t\tName: serviceCluster.Name,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t} else {\n\t\t\tunreadyCRDiscoveryNames = append(unreadyCRDiscoveryNames, currentCRDiscovery.Name)\n\t\t}\n\t}\n\n\t// Cleanup uncontrolled CRDiscoveries\n\tcrDiscoveryList := &corev1alpha1.CustomResourceDiscoveryList{}\n\tif err := r.List(ctx, crDiscoveryList, client.MatchingLabels{\n\t\tcrDiscoveriesLabel: crDiscoverySet.Namespace + \".\" + crDiscoverySet.Name,\n\t},\n\t\tclient.InNamespace(crDiscoverySet.Namespace),\n\t); err != nil {\n\t\treturn result, fmt.Errorf(\n\t\t\t\"listing all CustomResourceDiscovery for this Set: %w\", err)\n\t}\n\tfor _, crDiscovery := range crDiscoveryList.Items {\n\t\t_, ok := existingCRDiscoveryNames[crDiscovery.Name]\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// delete crDiscovery that should no longer exist\n\t\tif err := r.Delete(ctx, &crDiscovery); err != nil {\n\t\t\treturn result, fmt.Errorf(\"deleting CustomResourceDiscovery: %w\", err)\n\t\t}\n\t}\n\n\t// Report status\n\tcrDiscoverySet.Status.ManagementClusterCRDs = readyCRDReferences\n\tcrDiscoverySet.Status.ObservedGeneration = crDiscoverySet.Generation\n\tif len(unreadyCRDiscoveryNames) > 0 {\n\t\t// Unready\n\t\tcrDiscoverySet.Status.SetCondition(corev1alpha1.CustomResourceDiscoverySetCondition{\n\t\t\tType: corev1alpha1.CustomResourceDiscoverySetReady,\n\t\t\tStatus: corev1alpha1.ConditionFalse,\n\t\t\tReason: \"ComponentsUnready\",\n\t\t\tMessage: fmt.Sprintf(\n\t\t\t\t\"Some CustomResourceDiscovery objects are unready [%s]\", strings.Join(unreadyCRDiscoveryNames, \",\")),\n\t\t})\n\t} else {\n\t\t// Ready\n\t\tcrDiscoverySet.Status.SetCondition(corev1alpha1.CustomResourceDiscoverySetCondition{\n\t\t\tType: 
corev1alpha1.CustomResourceDiscoverySetReady,\n\t\t\tStatus: corev1alpha1.ConditionTrue,\n\t\t\tReason: \"ComponentsReady\",\n\t\t\tMessage: \"All CustomResourceDiscovery objects are ready.\",\n\t\t})\n\t}\n\n\tif err := r.Status().Update(ctx, crDiscoverySet); err != nil {\n\t\treturn result, fmt.Errorf(\"updating Status: %w\", err)\n\t}\n\treturn result, nil\n}", "func (r *BackupLocationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\tctx := context.Background()\n\tlog := r.Log.WithValues(\"backuplocation\", req.NamespacedName)\n\n\tvar backupLoc kubedrv1alpha1.BackupLocation\n\tif err := r.Get(ctx, req.NamespacedName, &backupLoc); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// we'll ignore not-found errors, since they can't be fixed by an immediate\n\t\t\t// requeue (we'll need to wait for a new notification).\n\t\t\tlog.Info(\"BackupLocation (\" + req.NamespacedName.Name + \") is not found\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\n\t\tlog.Error(err, \"unable to fetch BackupLocation\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Skip if spec hasn't changed. This check prevents reconcile on status\n\t// updates.\n\tif backupLoc.Status.ObservedGeneration == backupLoc.ObjectMeta.Generation {\n\t\tr.Log.Info(\"Skipping reconcile as generation number hasn't changed\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tfinalizer := \"backuplocation.finalizers.kubedr.catalogicsoftware.com\"\n\n\tif backupLoc.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. 
This is equivalent\n\t\t// to registering our finalizer.\n\t\tif !containsString(backupLoc.ObjectMeta.Finalizers, finalizer) {\n\t\t\tbackupLoc.ObjectMeta.Finalizers = append(backupLoc.ObjectMeta.Finalizers, finalizer)\n\t\t\tif err := r.Update(context.Background(), &backupLoc); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(backupLoc.ObjectMeta.Finalizers, finalizer) {\n\t\t\t// our finalizer is present, handle any pre-deletion logic here.\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tbackupLoc.ObjectMeta.Finalizers = removeString(backupLoc.ObjectMeta.Finalizers, finalizer)\n\n\t\t\tif err := r.Update(context.Background(), &backupLoc); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\n\t\t// Nothing more to do for DELETE.\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Check annotations to see if repo is already initialized.\n\t// Ideally, we should check the repo itself to confirm that it is\n\t// initialized, instead of depending on annotation.\n\tinitAnnotation := \"initialized.annotations.kubedr.catalogicsoftware.com\"\n\n\tinitialized, exists := backupLoc.ObjectMeta.Annotations[initAnnotation]\n\tif exists && (initialized == \"true\") {\n\t\t// No need to initialize the repo.\n\t\tlog.Info(\"Repo is already initialized\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\t// Annotation doesn't exist so we need to initialize the repo.\n\n\tinitPodName := backupLoc.Name + \"-init-pod\"\n\n\t// Since we don't generate a unique name for the pod that initializes the repo,\n\t// we need to explicitly check and delete the pod if it exists. 
We may eventually\n\t// use a unique name but that will also require cleanup of old pods.\n\tvar pod corev1.Pod\n\tif err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: initPodName}, &pod); err == nil {\n\t\tlog.Info(\"Found init pod, will delete it and continue...\")\n\t\tif err := r.Delete(ctx, &pod); ignoreNotFound(err) != nil {\n\t\t\tlog.Error(err, \"Error in deleting init pod\")\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\tr.setStatus(&backupLoc, \"Initializing\", \"\")\n\n\t// Initialize the repo.\n\tinitPod, err := buildResticRepoInitPod(&backupLoc, log)\n\tif err != nil {\n\t\tlog.Error(err, \"Error in creating init pod\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif err := ctrl.SetControllerReference(&backupLoc, initPod, r.Scheme); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tlog.Info(\"Starting a new Pod\", \"Pod.Namespace\", initPod.Namespace, \"Pod.Name\", initPod.Name)\n\terr = r.Create(ctx, initPod)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Error in starting init pod\")\n\t\tr.setStatus(&backupLoc, \"Failed\", err.Error())\n\t\treturn ctrl.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *ReconcileCanary) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\t// Fetch the Canary instance\n\tinstance := &canaryv1beta1.Canary{}\n\terr := r.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// TODO(munisystem): Set a status into the Canary resource if the target deployment doesn't exist\n\ttarget, err := r.getDeployment(instance.Spec.TargetDeploymentName, instance.Namespace)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\treturn reconcile.Result{}, err\n\t}\n\tcopied := target.DeepCopy()\n\n\t// Inject data into Canary's Deployment\n\tlabels := make(map[string]string, 
len(copied.GetLabels())+1)\n\tlabels[\"canary\"] = \"true\"\n\tfor key, value := range copied.GetLabels() {\n\t\tlabels[key] = value\n\t}\n\n\tspec := copied.Spec\n\tspec.Template.Spec.Hostname = \"canary\"\n\tspec.Selector.MatchLabels[\"canary\"] = \"true\"\n\tspec.Template.Labels[\"canary\"] = \"true\"\n\n\tcontainers := make(map[string]canaryv1beta1.CanaryContainer, 0)\n\tfor _, container := range instance.Spec.TargetContainers {\n\t\tcontainers[container.Name] = container\n\t}\n\tfor i := range spec.Template.Spec.Containers {\n\t\tif container, ok := containers[spec.Template.Spec.Containers[i].Name]; ok {\n\t\t\tspec.Template.Spec.Containers[i].Image = container.Image\n\t\t}\n\t\tspec.Template.Spec.Containers[i].Env = append(spec.Template.Spec.Containers[i].Env, corev1.EnvVar{\n\t\t\tName: \"CANARY_ENABLED\",\n\t\t\tValue: \"1\",\n\t\t})\n\t}\n\tcanary := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: copied.ObjectMeta.Name + \"-canary\",\n\t\t\tNamespace: instance.Namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: copied.ObjectMeta.Annotations,\n\t\t},\n\t\tSpec: spec,\n\t}\n\n\tif err := controllerutil.SetControllerReference(instance, canary, r.scheme); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\t_, err = r.getDeployment(canary.Name, canary.Namespace)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlog.Info(\"Creating Deployment for Canary server\", \"namespace\", canary.Namespace, \"name\", canary.Name)\n\t\terr = r.Create(context.TODO(), canary)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else if err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (r *ConfigAuditReportReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t// your logic here\n\t// Fetch the ConfigAuditReport instance\n\tinstance := &aquasecurityv1alpha1.ConfigAuditReport{}\n\terr := r.Client.Get(context.TODO(), req.NamespacedName, instance)\n\tif 
err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\tworkloadInfo, err := r.getWorkloadInfo(instance)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// if !instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t// \t// The object is not being deleted, so do nothing\n\t// \tlogger.Info(\"Remove the original report\")\n\t// \tlogger.Info(\"workloadInfo\")\n\t// \terr := r.removeReport(workloadInfo)\n\t// \tif err != nil {\n\t// \t\treturn reconcile.Result{}, err\n\t// \t}\n\t// \treturn reconcile.Result{}, nil\n\t// }\n\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. 
This is equivalent\n\t\t// registering our finalizer.\n\t\tif !containsString(instance.GetFinalizers(), finalizerName) {\n\t\t\taddFinalizer(instance, finalizerName)\n\t\t\tif err := r.Client.Update(context.TODO(), instance); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(instance.GetFinalizers(), finalizerName) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\tif err := r.removeReport(workloadInfo); err != nil {\n\t\t\t\t// if fail to delete the external dependency here, return with error\n\t\t\t\t// so that it can be retried\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tremoveFinalizer(instance, finalizerName)\n\t\t\tif err := r.Client.Update(context.TODO(), instance); err != nil {\n\t\t\t\treturn reconcile.Result{}, err\n\t\t\t}\n\t\t}\n\n\t\t// Stop reconciliation as the item is being deleted\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\terr = r.generateReport(ctx, workloadInfo)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tctx := context.TODO()\n\tr.logger.Info(\"reconciling ClusterChannelProvisioner\", zap.Any(\"request\", request))\n\n\t// Workaround until https://github.com/kubernetes-sigs/controller-runtime/issues/214 is fixed.\n\t// The reconcile requests triggered because of objects owned by this ClusterChannelProvisioner (e.g k8s service)\n\t// will contain the namespace of that object. 
Since ClusterChannelProvisioner is cluster-scoped we need to unset the\n\t// namespace or otherwise the provisioner object cannot be found.\n\trequest.NamespacedName.Namespace = \"\"\n\n\tprovisioner := &v1alpha1.ClusterChannelProvisioner{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, provisioner)\n\n\tif errors.IsNotFound(err) {\n\t\tr.logger.Info(\"could not find ClusterChannelProvisioner\", zap.Any(\"request\", request))\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tif err != nil {\n\t\tr.logger.Error(\"could not fetch ClusterChannelProvisioner\", zap.Error(err))\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Skip channel provisioners that we don't manage\n\tif provisioner.Name != Name {\n\t\tr.logger.Info(\"not reconciling ClusterChannelProvisioner, it is not controlled by this Controller\", zap.Any(\"request\", request))\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\tnewProvisioner := provisioner.DeepCopy()\n\n\t// Reconcile this copy of the Provisioner and then write back any status\n\t// updates regardless of whether the reconcile error out.\n\terr = r.reconcile(ctx, newProvisioner)\n\tif err != nil {\n\t\tr.logger.Info(\"error reconciling ClusterProvisioner\", zap.Error(err))\n\t\t// Note that we do not return the error here, because we want to update the Status\n\t\t// regardless of the error.\n\t}\n\tif updateStatusErr := util.UpdateClusterChannelProvisionerStatus(ctx, r.client, newProvisioner); updateStatusErr != nil {\n\t\tr.logger.Info(\"error updating ClusterChannelProvisioner Status\", zap.Error(updateStatusErr))\n\t\treturn reconcile.Result{}, updateStatusErr\n\t}\n\n\t// Requeue if the resource is not ready:\n\treturn reconcile.Result{}, err\n}", "func (r *RuleReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\n\tctx := context.Background()\n\t_ = r.Log.WithValues(\"rule\", req.NamespacedName)\n\n\tvar rule oathkeeperv1alpha1.Rule\n\tskipValidation := false\n\n\tif err := r.Get(ctx, req.NamespacedName, &rule); err 
!= nil {\n\t\tif apierrs.IsNotFound(err) {\n\t\t\t// just return here, the finalizers have already run\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif !skipValidation {\n\t\tif err := rule.ValidateWith(r.ValidationConfig); err != nil {\n\t\t\trule.Status.Validation = &oathkeeperv1alpha1.Validation{}\n\t\t\trule.Status.Validation.Valid = boolPtr(false)\n\t\t\trule.Status.Validation.Error = stringPtr(err.Error())\n\t\t\tr.Log.Info(fmt.Sprintf(\"validation error in Rule %s/%s: \\\"%s\\\"\", rule.Namespace, rule.Name, err.Error()))\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\tr.Log.Error(err, \"unable to update Rule status\")\n\t\t\t\t//Invoke requeue directly without logging error with whole stacktrace\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t\t// continue, as validation can't be fixed by requeuing request and we still have to update the configmap\n\t\t} else {\n\t\t\t// rule valid - set the status\n\t\t\trule.Status.Validation = &oathkeeperv1alpha1.Validation{}\n\t\t\trule.Status.Validation.Valid = boolPtr(true)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\tr.Log.Error(err, \"unable to update Rule status\")\n\t\t\t\t//Invoke requeue directly without logging error with whole stacktrace\n\t\t\t\treturn ctrl.Result{Requeue: true}, nil\n\t\t\t}\n\t\t}\n\t}\n\n\tvar rulesList oathkeeperv1alpha1.RuleList\n\n\tif err := r.List(ctx, &rulesList, client.InNamespace(req.NamespacedName.Namespace)); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// examine DeletionTimestamp to determine if object is under deletion\n\tif rule.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\t// The object is not being deleted, so if it does not have our finalizer,\n\t\t// then lets add the finalizer and update the object. 
This is equivalent\n\t\t// registering our finalizer.\n\t\tif !containsString(rule.ObjectMeta.Finalizers, FinalizerName) {\n\t\t\trule.ObjectMeta.Finalizers = append(rule.ObjectMeta.Finalizers, FinalizerName)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// The object is being deleted\n\t\tif containsString(rule.ObjectMeta.Finalizers, FinalizerName) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\trulesList = rulesList.FilterOutRule(rule)\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\trule.ObjectMeta.Finalizers = removeString(rule.ObjectMeta.Finalizers, FinalizerName)\n\t\t\tif err := r.Update(ctx, &rule); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\toathkeeperRulesJSON, err := rulesList.FilterNotValid().\n\t\tFilterConfigMapName(rule.Spec.ConfigMapName).\n\t\tToOathkeeperRules()\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tconfigMap := r.RuleConfigmap\n\tif rule.Spec.ConfigMapName != nil {\n\t\tconfigMap = types.NamespacedName{\n\t\t\tName: *rule.Spec.ConfigMapName,\n\t\t\tNamespace: req.NamespacedName.Namespace,\n\t\t}\n\t}\n\tif err := r.updateOrCreateRulesConfigmap(ctx, configMap, string(oathkeeperRulesJSON)); err != nil {\n\t\tr.Log.Error(err, \"unable to process rules Configmap\")\n\t\tos.Exit(1)\n\t}\n\n\treturn ctrl.Result{}, nil\n}", "func (r *otherNamespaceReconciler) Reconcile(ctx context.Context, in *v2.CatalogSourceConfig) (out *v2.CatalogSourceConfig, nextPhase *shared.Phase, err error) {\n\t// Do nothing as this object has already been placed in the failed phase.\n\tif in.Status.CurrentPhase.Name == phase.Failed {\n\t\treturn\n\t}\n\n\terr = fmt.Errorf(\"Will only reconcile resources in the operator's namespace\")\n\tr.log.Error(err)\n\tout = in\n\tnextPhase = phase.GetNextWithMessage(phase.Failed, err.Error())\n\treturn\n}", "func (r *Reconciler) Reconcile(req ctrl.Request) 
(res reconcile.Result, retErr error) {\n\tvar appRollout oamv1alpha2.AppRollout\n\tctx, cancel := context.WithTimeout(context.TODO(), reconcileTimeOut)\n\tdefer cancel()\n\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tif retErr == nil {\n\t\t\tif res.Requeue || res.RequeueAfter > 0 {\n\t\t\t\tklog.InfoS(\"Finished reconciling appRollout\", \"controller request\", req, \"time spent\",\n\t\t\t\t\ttime.Since(startTime), \"result\", res)\n\t\t\t} else {\n\t\t\t\tklog.InfoS(\"Finished reconcile appRollout\", \"controller request\", req, \"time spent\",\n\t\t\t\t\ttime.Since(startTime))\n\t\t\t}\n\t\t} else {\n\t\t\tklog.Errorf(\"Failed to reconcile appRollout %s: %v\", req, retErr)\n\t\t}\n\t}()\n\n\tif err := r.Get(ctx, req.NamespacedName, &appRollout); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tklog.InfoS(\"appRollout does not exist\", \"appRollout\", klog.KRef(req.Namespace, req.Name))\n\t\t}\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\tklog.InfoS(\"Start to reconcile \", \"appRollout\", klog.KObj(&appRollout))\n\n\tr.handleFinalizer(&appRollout)\n\ttargetAppName := appRollout.Spec.TargetAppRevisionName\n\tsourceAppName := appRollout.Spec.SourceAppRevisionName\n\n\tctx = oamutil.SetNamespaceInCtx(ctx, appRollout.Namespace)\n\t// handle rollout target/source change\n\tif appRollout.Status.RollingState == v1alpha1.RolloutSucceedState ||\n\t\tappRollout.Status.RollingState == v1alpha1.RolloutFailedState {\n\t\tif appRollout.Status.LastUpgradedTargetAppRevision == targetAppName &&\n\t\t\tappRollout.Status.LastSourceAppRevision == sourceAppName {\n\t\t\tklog.InfoS(\"rollout terminated, no need to reconcile\", \"source\", sourceAppName,\n\t\t\t\t\"target\", targetAppName)\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t}\n\tif appRollout.Status.LastUpgradedTargetAppRevision != targetAppName ||\n\t\tappRollout.Status.LastSourceAppRevision != sourceAppName {\n\t\tklog.InfoS(\"rollout target changed, restart the rollout\", \"new source\", 
sourceAppName,\n\t\t\t\"new target\", targetAppName)\n\t\tappRollout.Status.RolloutModified()\n\t}\n\n\t// Get the target application\n\tvar targetApp oamv1alpha2.ApplicationConfiguration\n\tsourceApp := &oamv1alpha2.ApplicationConfiguration{}\n\tif err := r.Get(ctx, ktypes.NamespacedName{Namespace: req.Namespace, Name: targetAppName},\n\t\t&targetApp); err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tklog.ErrorS(err, \"target application revision not exist\", \"target application revision\",\n\t\t\t\tklog.KRef(req.Namespace, targetAppName))\n\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t\t}\n\t\tklog.ErrorS(err, \"cannot locate target application revision\", \"target application revision\",\n\t\t\tklog.KRef(req.Namespace, targetAppName))\n\t\treturn ctrl.Result{}, err\n\t}\n\t// check if the app is templated\n\tif targetApp.Status.RollingStatus != oamv1alpha2.RollingTemplated {\n\t\tklog.Info(\"target app revision is not ready for rolling yet\", \"application revision\", targetAppName)\n\t\tr.record.Event(&appRollout, event.Normal(\"Rollout Paused\",\n\t\t\t\"target app revision is not ready for rolling yet\", \"application revision\", targetAppName))\n\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t}\n\t// Get the source application\n\tif sourceAppName == \"\" {\n\t\tklog.Info(\"source app fields not filled, we assume it is deployed for the first time\")\n\t\tsourceApp = nil\n\t} else {\n\t\tif err := r.Get(ctx, ktypes.NamespacedName{Namespace: req.Namespace, Name: sourceAppName}, sourceApp); err != nil {\n\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\tklog.ErrorS(err, \"target application revision not exist\", \"source application revision\",\n\t\t\t\t\tklog.KRef(req.Namespace, sourceAppName))\n\t\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t\t\t}\n\t\t\tklog.ErrorS(err, \"cannot locate source application revision\", \"source application revision\",\n\t\t\t\tklog.KRef(req.Namespace, 
sourceAppName))\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\t// check if the app is templated\n\t\tif sourceApp.Status.RollingStatus != oamv1alpha2.RollingTemplated {\n\t\t\tklog.Info(\"source app revision is not ready for rolling yet\", \"application revision\", sourceAppName)\n\t\t\tr.record.Event(&appRollout, event.Normal(\"Rollout Paused\",\n\t\t\t\t\"source app revision is not ready for rolling yet\", \"application revision\", sourceAppName))\n\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t\t}\n\t}\n\n\ttargetWorkload, sourceWorkload, err := r.extractWorkloads(ctx, appRollout.Spec.ComponentList, &targetApp, sourceApp)\n\tif err != nil {\n\t\tklog.ErrorS(err, \"cannot fetch the workloads to upgrade\", \"target application\",\n\t\t\tklog.KRef(req.Namespace, targetAppName), \"source application\", klog.KRef(req.Namespace, sourceAppName),\n\t\t\t\"commonComponent\", appRollout.Spec.ComponentList)\n\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, client.IgnoreNotFound(err)\n\t}\n\tklog.InfoS(\"get the target workload we need to work on\", \"targetWorkload\", klog.KObj(targetWorkload))\n\n\tif sourceWorkload != nil {\n\t\tklog.InfoS(\"get the source workload we need to work on\", \"sourceWorkload\", klog.KObj(sourceWorkload))\n\t}\n\n\t// reconcile the rollout part of the spec given the target and source workload\n\trolloutPlanController := rollout.NewRolloutPlanController(r, &appRollout, r.record,\n\t\t&appRollout.Spec.RolloutPlan, &appRollout.Status.RolloutStatus, targetWorkload, sourceWorkload)\n\tresult, rolloutStatus := rolloutPlanController.Reconcile(ctx)\n\t// make sure that the new status is copied back\n\tappRollout.Status.RolloutStatus = *rolloutStatus\n\tappRollout.Status.LastUpgradedTargetAppRevision = targetAppName\n\tappRollout.Status.LastSourceAppRevision = sourceAppName\n\tif rolloutStatus.RollingState == v1alpha1.RolloutSucceedState {\n\t\tif sourceApp != nil {\n\t\t\t// mark the source app as an application revision only so 
that it stop being reconciled\n\t\t\toamutil.RemoveAnnotations(sourceApp, []string{oam.AnnotationAppRollout})\n\t\t\toamutil.AddAnnotations(sourceApp, map[string]string{oam.AnnotationAppRevision: strconv.FormatBool(true)})\n\t\t\tif err := r.Update(ctx, sourceApp); err != nil {\n\t\t\t\tklog.ErrorS(err, \"cannot add the app revision annotation\", \"source application\",\n\t\t\t\t\tklog.KRef(req.Namespace, sourceAppName))\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t\t// remove the rollout annotation so that the target appConfig controller can take over the rest of the work\n\t\toamutil.RemoveAnnotations(&targetApp, []string{oam.AnnotationAppRollout})\n\t\tif err := r.Update(ctx, &targetApp); err != nil {\n\t\t\tklog.ErrorS(err, \"cannot remove the rollout annotation\", \"target application\",\n\t\t\t\tklog.KRef(req.Namespace, targetAppName))\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tklog.InfoS(\"rollout succeeded, record the source and target app revision\", \"source\", sourceAppName,\n\t\t\t\"target\", targetAppName)\n\t}\n\t// update the appRollout status\n\treturn result, r.updateStatus(ctx, &appRollout)\n}", "func (r *updateReconciler) Reconcile(ctx context.Context, in *v2.CatalogSourceConfig) (out *v2.CatalogSourceConfig, nextPhase *shared.Phase, err error) {\n\tout = in.DeepCopy()\n\n\t// The TargetNamespace of the CatalogSourceConfig object has changed\n\tif r.targetChanged {\n\t\t// Best case attempt at deleting the objects in the old TargetNamespace\n\t\t// If the csc is not cached we don't want to fail because there are\n\t\t// cases where we won't be able to find the objects.\n\t\tr.deleteObjects(in)\n\t}\n\n\t// Remove it from the cache so that it does not get picked up during\n\t// the \"Configuring\" phase\n\tr.cache.Evict(in)\n\n\t// Drop existing Status field so that reconciliation can start anew.\n\tout.Status = v2.CatalogSourceConfigStatus{}\n\tnextPhase = phase.GetNext(phase.Configuring)\n\n\tr.log.Info(\"Spec has changed, scheduling 
for configuring\")\n\n\treturn\n}", "func (r *ReconcileRedisClusterBackup) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\treqLogger := log.WithValues(\"Request.Namespace\", request.Namespace, \"Request.Name\", request.Name)\n\treqLogger.Info(\"Reconciling RedisClusterBackup\")\n\n\t// Fetch the RedisClusterBackup instance\n\tinstance := &redisv1alpha1.RedisClusterBackup{}\n\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t//// Check if the RedisClusterBackup instance is marked to be deleted, which is\n\t//// indicated by the deletion timestamp being set.\n\t//isBackupMarkedToBeDeleted := instance.GetDeletionTimestamp() != nil\n\t//if isBackupMarkedToBeDeleted {\n\t//\tif contains(instance.GetFinalizers(), backupFinalizer) {\n\t//\t\t// Run finalization logic for backupFinalizer. If the\n\t//\t\t// finalization logic fails, don't remove the finalizer so\n\t//\t\t// that we can retry during the next reconciliation.\n\t//\t\tif err := r.finalizeBackup(reqLogger, instance); err != nil {\n\t//\t\t\treturn reconcile.Result{}, err\n\t//\t\t}\n\t//\n\t//\t\t// Remove backupFinalizer. 
Once all finalizers have been\n\t//\t\t// removed, the object will be deleted.\n\t//\t\tinstance.SetFinalizers(remove(instance.GetFinalizers(), backupFinalizer))\n\t//\t\terr := r.client.Update(context.TODO(), instance)\n\t//\t\tif err != nil {\n\t//\t\t\treturn reconcile.Result{}, err\n\t//\t\t}\n\t//\t}\n\t//\treturn reconcile.Result{}, nil\n\t//}\n\n\t//// Add finalizer for this CR\n\t//if !contains(instance.GetFinalizers(), backupFinalizer) {\n\t//\tif err := r.addFinalizer(reqLogger, instance); err != nil {\n\t//\t\treturn reconcile.Result{}, err\n\t//\t}\n\t//}\n\n\tif err := r.create(reqLogger, instance); err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\treturn reconcile.Result{}, nil\n}", "func (a *agent) reconcile() {\n\treconcileStartTime := time.Now()\n\tr := reconciler.New(a.registry)\n\tstatus := r.Reconcile(a.ctx, a.currentState, a.intendedState)\n\ta.currentState = status.NewCurrentState\n\n\t// Update variables needed to resume reconciliation\n\t// after async operation(s).\n\tif status.AsyncOpsInProgress {\n\t\tlog.Debug(\"Some config operations continue in the background\")\n\t}\n\ta.cancelAsyncOps = status.CancelAsyncOps\n\ta.resumeReconciliation = status.ReadyToResume\n\ta.waitForAsyncOps = status.WaitForAsyncOps\n\n\t// Log every executed operation.\n\tfor _, opLog := range status.OperationLog {\n\t\tvar withErr string\n\t\tif opLog.Err != nil {\n\t\t\twithErr = fmt.Sprintf(\" with error: %v\", opLog.Err)\n\t\t}\n\t\tvar verb string\n\t\tif opLog.InProgress {\n\t\t\tverb = \"started async execution of\"\n\t\t} else {\n\t\t\tif opLog.StartTime.Before(reconcileStartTime) {\n\t\t\t\tverb = \"finalized async execution of\"\n\t\t\t} else {\n\t\t\t\t// synchronous operation\n\t\t\t\tverb = \"executed\"\n\t\t\t}\n\t\t}\n\t\tlog.Infof(\"State Reconciler %s %v for %v%s, content: %s\",\n\t\t\tverb, opLog.Operation, dg.Reference(opLog.Item),\n\t\t\twithErr, opLog.Item.String())\n\t}\n\n\t// Log transitions from no-error to error and 
vice-versa.\n\tvar failed, fixed []string\n\tfor _, opLog := range status.OperationLog {\n\t\titemRef := dg.Reference(opLog.Item)\n\t\tif opLog.Err != nil {\n\t\t\ta.failingItems[itemRef] = opLog.Err\n\t\t} else {\n\t\t\tdelete(a.failingItems, itemRef)\n\t\t}\n\t\tif opLog.PrevErr == nil && opLog.Err != nil {\n\t\t\tfailed = append(failed, fmt.Sprintf(\"%v (err: %v)\", itemRef, opLog.Err))\n\t\t}\n\t\tif opLog.PrevErr != nil && opLog.Err == nil {\n\t\t\tfixed = append(fixed, itemRef.String())\n\t\t}\n\t}\n\tif len(failed) > 0 {\n\t\tlog.Errorf(\"Newly failed config items: %s\",\n\t\t\tstrings.Join(failed, \", \"))\n\t}\n\tif len(fixed) > 0 {\n\t\tlog.Infof(\"Fixed config items: %s\",\n\t\t\tstrings.Join(fixed, \", \"))\n\t}\n}", "func (c *Controller) Reconcile(ctx context.Context) error {\n\tvar (\n\t\tl = log.WithFields(logrus.Fields{\n\t\t\t\"component\": \"Controller.Reconcile\",\n\t\t})\n\t)\n\n\tlocalNode, err := c.LocalNodeStore.Get(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve local node: %w\", err)\n\t}\n\n\t// retrieve all CiliumBGPPeeringPolicies\n\tpolicies, err := c.PolicyLister.List()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list CiliumBGPPeeringPolicies\")\n\t}\n\tl.WithField(\"count\", len(policies)).Debug(\"Successfully listed CiliumBGPPeeringPolicies\")\n\n\t// perform policy selection based on node.\n\tlabels := localNode.Labels\n\tpolicy, err := PolicySelection(ctx, labels, policies)\n\tif err != nil {\n\t\tl.WithError(err).Error(\"Policy selection failed\")\n\t\tc.FullWithdrawal(ctx)\n\t\treturn err\n\t}\n\tif policy == nil {\n\t\t// no policy was discovered, tell router manager to withdrawal peers if\n\t\t// they are configured.\n\t\tl.Debug(\"No BGP peering policy applies to this node, any existing BGP sessions will be removed.\")\n\t\tc.FullWithdrawal(ctx)\n\t\treturn nil\n\t}\n\n\t// apply policy defaults to have consistent default config across sub-systems\n\tpolicy = policy.DeepCopy() // deepcopy to not 
modify the policy object in store\n\tpolicy.SetDefaults()\n\n\terr = c.validatePolicy(policy)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid BGP peering policy %s: %w\", policy.Name, err)\n\t}\n\n\t// call bgp sub-systems required to apply this policy's BGP topology.\n\tl.Debug(\"Asking configured BGPRouterManager to configure peering\")\n\tif err := c.BGPMgr.ConfigurePeers(ctx, policy, &localNode); err != nil {\n\t\treturn fmt.Errorf(\"failed to configure BGP peers, cannot apply BGP peering policy: %w\", err)\n\t}\n\n\treturn nil\n}", "func (r *ChartReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {\n\n\tlog := r.Log.WithValues(\"chart\", req.NamespacedName)\n\tinstance := &stablev1.Chart{}\n\tfinalizer := \"helm.operator.finalizer.io\"\n\tforGroundFinalizer := \"foregroundDeletion\"\n\t// your logic here\n\n\tif err := r.Get(ctx, req.NamespacedName, instance); err != nil {\n\t\treturn ctrl.Result{}, ignoreNotFound(err)\n\t}\n\tif instance.ObjectMeta.DeletionTimestamp.IsZero() {\n\t\tif !containsString(instance.ObjectMeta.Finalizers, finalizer) {\n\t\t\tinstance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizer)\n\t\t\tif err := r.Update(context.Background(), instance); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t\tif err := getChart(instance); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tyamlString, err := templateChart(instance)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tresources := bytes.Split(yamlString, []byte(`---`))\n\t\t// your logic here\n\t\tfor _, resource := range resources {\n\t\t\t// Helm sometimes templates just comments so skip these\n\t\t\tif !strings.Contains(string(resource), \"kind\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Decode the YAML to an object.\n\t\t\tu := &unstructured.Unstructured{Object: map[string]interface{}{}}\n\t\t\tif err := yaml.Unmarshal(resource, &u.Object); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\t// set 
controller reference\n\t\t\tif err := ctrl.SetControllerReference(instance, u, r.Scheme); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\t// set namespace of the resource (by default helm does not template this out)\n\t\t\tu.SetNamespace(instance.Spec.NameSpaceSelector)\n\t\t\t// Get the reference of the resource to attach to the chart instance\n\t\t\tobjRef, err := ref.GetReference(r.Scheme, u)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"unable to make reference\", \"Object\", u.GetName())\n\t\t\t}\n\t\t\t// Get Key to fetch resource if exists\n\t\t\tkey, err := client.ObjectKeyFromObject(u)\n\t\t\tif err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\t// Get resource\n\t\t\tif err := r.Client.Get(ctx, key, u); err != nil {\n\t\t\t\t// if error is anything but is not found, return error\n\t\t\t\tif !apierrs.IsNotFound(err) {\n\t\t\t\t\tlog.Error(err, \"unable to get object, unknown error occured\")\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\n\t\t\t\t// set finalizer of resource\n\t\t\t\tu.SetFinalizers([]string{forGroundFinalizer})\n\n\t\t\t\t// Create Object\n\t\t\t\tif err := r.Create(ctx, u); err != nil {\n\t\t\t\t\tlog.Error(err, fmt.Sprintf(\"unable to apply %v\", u.GroupVersionKind()))\n\t\t\t\t\tinstance.Status.Status = \"Failed\"\n\t\t\t\t\tif err := r.UpdateStatus(instance); err != nil {\n\t\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tlog.V(1).Info(fmt.Sprintf(\"Applying: %v\", u.GroupVersionKind()))\n\n\t\t\t\t// Check if resource reference is attached to instance, if not add it\n\t\t\t\tif !refInSlice(*objRef, instance.Status.Resource) {\n\t\t\t\t\tinstance.Status.Resource = append(instance.Status.Resource, *objRef)\n\t\t\t\t\tif err := r.UpdateStatus(instance); err != nil {\n\t\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontinue\n\t\t\t// Implement Patch if resource already 
exist\n\t\t\t//log.V(1).Info(fmt.Sprintf(\"Updating: %v\", u.GroupVersionKind()))\n\t\t}\n\n\t\tinstance.Status.Status = \"Deployed\"\n\t\tif err := r.UpdateStatus(instance); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tlog.V(1).Info(\"reconciling the Chart\")\n\t\treturn ctrl.Result{}, nil\n\t} else {\n\t\tif containsString(instance.ObjectMeta.Finalizers, finalizer) {\n\t\t\t// our finalizer is present, so lets handle any external dependency\n\t\t\tif err := r.deleteExternalResources(instance); err != nil {\n\t\t\t\t// if fail to delete the external dependency here, return with error\n\t\t\t\t// so that it can be retried\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\t// remove our finalizer from the list and update it.\n\t\t\tinstance.ObjectMeta.Finalizers = removeString(instance.ObjectMeta.Finalizers, finalizer)\n\t\t\tif err := r.Update(context.Background(), instance); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t}\n\t}\n\treturn ctrl.Result{}, nil\n}", "func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\tnamespace := &corev1.Namespace{}\n\tif err := r.Get(context.TODO(), req.NamespacedName, namespace); err != nil {\n\t\tklog.Errorf(\"%s --> Unable to get namespace '%s'\", err, req.Name)\n\t\treturn ctrl.Result{}, client.IgnoreNotFound(err)\n\t}\n\n\tnamespaceMaps := &mapsv1alpha1.NamespaceMapList{}\n\tif err := r.List(context.TODO(), namespaceMaps); err != nil {\n\t\tklog.Error(err, \" --> Unable to List NamespaceMaps\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\tif len(namespaceMaps.Items) == 0 {\n\t\tklog.Info(\" No namespaceMaps at the moment in the cluster\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tremoveMappings := make(map[string]*mapsv1alpha1.NamespaceMap)\n\tfor i := range namespaceMaps.Items {\n\t\tremoveMappings[namespaceMaps.Items[i].GetLabels()[liqoconst.RemoteClusterID]] = &namespaceMaps.Items[i]\n\t}\n\n\tif !namespace.GetDeletionTimestamp().IsZero() 
{\n\t\tklog.Infof(\"The namespace '%s' is requested to be deleted\", namespace.GetName())\n\t\tif err := r.removeDesiredMappings(namespace.GetName(), removeMappings); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tctrlutils.RemoveFinalizer(namespace, namespaceControllerFinalizer)\n\n\t\tif err := r.Update(context.TODO(), namespace); err != nil {\n\t\t\tklog.Errorf(\"%s --> Unable to remove finalizer from namespace '%s'\", err, namespace.GetName())\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tklog.Infof(\"Finalizer is correctly removed from namespace'%s'\", namespace.GetName())\n\n\t\treturn ctrl.Result{}, nil\n\t}\n\n\tif !ctrlutils.ContainsFinalizer(namespace, namespaceControllerFinalizer) {\n\t\tctrlutils.AddFinalizer(namespace, namespaceControllerFinalizer)\n\t\tif err := r.Patch(context.TODO(), namespace, client.Merge); err != nil {\n\t\t\tklog.Errorf(\" %s --> Unable to add finalizer on namespace '%s'\", err, namespace.GetName())\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\t// 1. 
If mapping.liqo.io label is not present there are no remote namespaces associated\n\t// with this namespace, removeMappings is full\n\tif remoteNamespaceName, ok := namespace.GetLabels()[mappingLabel]; ok {\n\t\t// 2.a If offloading.liqo.io is present there are remote namespaces on all virtual nodes\n\t\tif _, ok = namespace.GetLabels()[offloadingLabel]; ok {\n\t\t\tklog.Infof(\" Offload namespace '%s' on all remote clusters\", namespace.GetName())\n\t\t\tif err := r.addDesiredMappings(namespace, remoteNamespaceName, removeMappings); err != nil {\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tfor k := range removeMappings {\n\t\t\t\tdelete(removeMappings, k)\n\t\t\t}\n\t\t} else {\n\t\t\t// 2.b Iterate on all virtual nodes' labels, if the namespace has all the requested labels, is necessary to\n\t\t\t// offload it onto remote cluster associated with the virtual node\n\t\t\tnodes := &corev1.NodeList{}\n\t\t\tif err := r.List(context.TODO(), nodes,\n\t\t\t\tclient.MatchingLabels{liqoconst.TypeLabel: liqoconst.TypeNode}); err != nil {\n\t\t\t\tklog.Error(err, \" --> Unable to List all virtual nodes\")\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\n\t\t\tif len(nodes.Items) == 0 {\n\t\t\t\tklog.Info(\" No VirtualNode at the moment\")\n\t\t\t\treturn ctrl.Result{}, nil\n\t\t\t}\n\n\t\t\tfor i := range nodes.Items {\n\t\t\t\tif checkOffloadingLabels(namespace, &nodes.Items[i]) {\n\t\t\t\t\tif err := r.addDesiredMapping(namespace, remoteNamespaceName,\n\t\t\t\t\t\tremoveMappings[nodes.Items[i].Annotations[liqoconst.RemoteClusterID]]); err != nil {\n\t\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t\t}\n\t\t\t\t\tdelete(removeMappings, nodes.Items[i].Annotations[liqoconst.RemoteClusterID])\n\t\t\t\t\tklog.Infof(\" Offload namespace '%s' on remote cluster: %s\", namespace.GetName(),\n\t\t\t\t\t\tnodes.Items[i].Annotations[liqoconst.RemoteClusterID])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(removeMappings) > 0 {\n\t\tklog.Info(\" Delete all unnecessary entries in 
NamespaceMaps\")\n\t\tif err := r.removeDesiredMappings(namespace.GetName(), removeMappings); err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t}\n\n\treturn ctrl.Result{}, nil\n}" ]
[ "0.69418895", "0.673754", "0.67340815", "0.6717705", "0.66167885", "0.6560468", "0.65270925", "0.651194", "0.65093696", "0.65086573", "0.6498707", "0.6473226", "0.64207655", "0.6415937", "0.6399798", "0.6377271", "0.6375372", "0.63494164", "0.6330805", "0.6324661", "0.62523454", "0.6231509", "0.62309504", "0.62287545", "0.62124735", "0.6205258", "0.62043023", "0.61886185", "0.61813694", "0.6155894", "0.6152539", "0.6127649", "0.6122857", "0.6099629", "0.60921705", "0.6081899", "0.6080456", "0.6079341", "0.60790795", "0.6078423", "0.60622704", "0.60613495", "0.6045061", "0.6041767", "0.6030745", "0.60174745", "0.6010221", "0.60098565", "0.5999609", "0.5991656", "0.59867215", "0.59788346", "0.5973433", "0.5972173", "0.596986", "0.59392864", "0.59370196", "0.5930564", "0.59194887", "0.5918152", "0.59171647", "0.5915965", "0.591427", "0.59140474", "0.5907012", "0.590384", "0.5899814", "0.5892689", "0.5874371", "0.5874173", "0.58645886", "0.5863861", "0.5858951", "0.5852755", "0.5848718", "0.584703", "0.5835825", "0.58341646", "0.58305633", "0.58287966", "0.58248687", "0.58157283", "0.5809241", "0.58075017", "0.58065224", "0.57870096", "0.57852906", "0.57840526", "0.5778994", "0.57657737", "0.57617164", "0.57531697", "0.57440555", "0.574308", "0.5738342", "0.5737087", "0.5732109", "0.5730478", "0.572669", "0.57253534" ]
0.6687816
4
configMapForConjurConfig returns a Conjur connect ConfigMap object
func (r *ConjurConfigReconciler) configMapForConjurConfig( c *conjurv1alpha1.ConjurConfig, name string) *v1.ConfigMap { ls := labelsForConjurConfig(c.Name) conjurAccount := os.Getenv("conjurAccount") b, err := ioutil.ReadFile("/etc/conjur/conjurAccount") if err == nil { log.Info("Found conjurAccount file, using instead of env var") conjurAccount = string(b) } conjurSslCertificate := os.Getenv("conjurSslCertificate") b, err = ioutil.ReadFile("/etc/conjur/conjurSslCertificate") if err == nil { log.Info("Found conjurSslCertificate file, using instead of env var") conjurSslCertificate = string(b) } configMap := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: c.Namespace, Labels: ls, }, Data: map[string]string{ "CONJUR_ACCOUNT": conjurAccount, "CONJUR_APPLIANCE_URL": os.Getenv("conjurApplianceUrl"), "CONJUR_AUTHN_URL": fmt.Sprintf("%s/authn-k8s/%s", os.Getenv("conjurApplianceUrl"), os.Getenv("authnK8sAuthenticatorID")), "CONJUR_SSL_CERTIFICATE": conjurSslCertificate, }, } // Set ConjurConfig instance as the owner and controller ctrl.SetControllerReference(c, configMap, r.Scheme) return configMap }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetConfigmapConfig(configmap *v1.ConfigMap) Config {\n\treturn Config{\n\t\tNamespace: configmap.Namespace,\n\t\tResourceName: configmap.Name,\n\t\tResourceAnnotations: configmap.Annotations,\n\t\tAnnotation: options.ConfigmapUpdateOnChangeAnnotation,\n\t\tSHAValue: GetSHAfromConfigmap(configmap),\n\t\tType: constants.ConfigmapEnvVarPostfix,\n\t}\n}", "func (o IopingSpecVolumeVolumeSourceOutput) ConfigMap() IopingSpecVolumeVolumeSourceConfigMapPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceConfigMap { return v.ConfigMap }).(IopingSpecVolumeVolumeSourceConfigMapPtrOutput)\n}", "func ConfigMap(\n\tinCluster *v1beta1.PostgresCluster,\n\toutConfigMap *corev1.ConfigMap,\n) error {\n\tif inCluster.Spec.UserInterface == nil || inCluster.Spec.UserInterface.PGAdmin == nil {\n\t\t// pgAdmin is disabled; there is nothing to do.\n\t\treturn nil\n\t}\n\n\tinitialize.StringMap(&outConfigMap.Data)\n\n\t// To avoid spurious reconciles, the following value must not change when\n\t// the spec does not change. [json.Encoder] and [json.Marshal] do this by\n\t// emitting map keys in sorted order. 
Indent so the value is not rendered\n\t// as one long line by `kubectl`.\n\tbuffer := new(bytes.Buffer)\n\tencoder := json.NewEncoder(buffer)\n\tencoder.SetEscapeHTML(false)\n\tencoder.SetIndent(\"\", \" \")\n\terr := encoder.Encode(systemSettings(inCluster.Spec.UserInterface.PGAdmin))\n\tif err == nil {\n\t\toutConfigMap.Data[settingsConfigMapKey] = buffer.String()\n\t}\n\treturn err\n}", "func GetConfigmap(namespace string, configmapName string, testData string) *v1.ConfigMap {\n\treturn &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configmapName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\"firstLabel\": \"temp\"},\n\t\t},\n\t\tData: map[string]string{\"test.url\": testData},\n\t}\n}", "func (r *HookRunner) configMap() (mp *core.ConfigMap, err error) {\n\tworkload, err := r.workload()\n\tif err != nil {\n\t\treturn\n\t}\n\tplaybook, err := r.playbook()\n\tif err != nil {\n\t\treturn\n\t}\n\tplan, err := r.plan()\n\tif err != nil {\n\t\treturn\n\t}\n\tmp = &core.ConfigMap{\n\t\tObjectMeta: meta.ObjectMeta{\n\t\t\tLabels: r.labels(),\n\t\t\tNamespace: r.Plan.Namespace,\n\t\t\tGenerateName: strings.ToLower(\n\t\t\t\tstrings.Join([]string{\n\t\t\t\t\tr.Plan.Name,\n\t\t\t\t\tr.vm.ID,\n\t\t\t\t\tr.vm.Phase},\n\t\t\t\t\t\"-\")) + \"-\",\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"workload.yml\": workload,\n\t\t\t\"playbook.yml\": playbook,\n\t\t\t\"plan.yml\": plan,\n\t\t},\n\t}\n\n\treturn\n}", "func ConfigMapName() string {\n\treturn configName\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesOutput) ConfigMap() IopingSpecVolumeVolumeSourceProjectedSourcesConfigMapPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSources) *IopingSpecVolumeVolumeSourceProjectedSourcesConfigMap {\n\t\treturn v.ConfigMap\n\t}).(IopingSpecVolumeVolumeSourceProjectedSourcesConfigMapPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesOutput) ConfigMap() 
FioSpecVolumeVolumeSourceProjectedSourcesConfigMapPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSources) *FioSpecVolumeVolumeSourceProjectedSourcesConfigMap {\n\t\treturn v.ConfigMap\n\t}).(FioSpecVolumeVolumeSourceProjectedSourcesConfigMapPtrOutput)\n}", "func (c Component) ConfigurationAsMap() map[string]interface{} {\n\tresult := make(map[string]interface{}, len(c.Configuration))\n\tfor _, cfg := range c.Configuration {\n\t\tresult[cfg.Key] = cfg.Value\n\t}\n\treturn result\n}", "func (o IopingSpecVolumeVolumeSourcePtrOutput) ConfigMap() IopingSpecVolumeVolumeSourceConfigMapPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceConfigMap {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ConfigMap\n\t}).(IopingSpecVolumeVolumeSourceConfigMapPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceOutput) ConfigMap() FioSpecVolumeVolumeSourceConfigMapPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceConfigMap { return v.ConfigMap }).(FioSpecVolumeVolumeSourceConfigMapPtrOutput)\n}", "func (c Config) toMap() map[string]string {\n\tm := make(map[string]string)\n\tm[chefsolo.NAME] = meta.MC.Name\n\tm[chefsolo.CHEFREPO_GIT] = c.ChefRepoGit\n\tm[chefsolo.CHEFREPO_TARBALL] = c.ChefRepoTarball\n\tm[chefsolo.CHEFREPO_COOKBOOK] = c.Cookbook\n\treturn m\n}", "func ConnectionMap(ctx context.Context) map[string]*interfaces.Interface {\n\tconnectionMap := ctx.Value(connectionMapKey)\n\tif connectionMap != nil {\n\t\treturn connectionMap.(map[string]*interfaces.Interface)\n\t}\n\treturn nil\n}", "func (manager Manager) websocketMapConfig() map[string]interface{} {\n\treturn manager.viperConfig.GetStringMap(\"websocket\")\n}", "func ConfigFromMap(m map[string]interface{}) (ServerConfig, error) {\n\tconfig := defaultServerConfig\n\tif err := gconv.Struct(m, &config); err != nil {\n\t\treturn config, err\n\t}\n\treturn config, nil\n}", "func (c *configuration) 
ConfigMaps(clientSet ClientSet) *ConfigMaps {\n\tif clientSet != nil {\n\t\treturn NewConfigMaps(clientSet)\n\t}\n\treturn nil\n}", "func (o FioSpecVolumeVolumeSourcePtrOutput) ConfigMap() FioSpecVolumeVolumeSourceConfigMapPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceConfigMap {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ConfigMap\n\t}).(FioSpecVolumeVolumeSourceConfigMapPtrOutput)\n}", "func (o ArgoCDSpecTlsCaOutput) ConfigMapName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecTlsCa) *string { return v.ConfigMapName }).(pulumi.StringPtrOutput)\n}", "func (c *Creater) GetConfigmaps() []*components.ConfigMap {\n\n\tvar configMaps []*components.ConfigMap\n\n\thubConfig := components.NewConfigMap(horizonapi.ConfigMapConfig{Namespace: c.blackDuck.Spec.Namespace, Name: util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"config\")})\n\n\thubData := map[string]string{\n\t\t\"RUN_SECRETS_DIR\": \"/tmp/secrets\",\n\t\t\"HUB_VERSION\": c.blackDuck.Spec.Version,\n\t}\n\n\tblackduckServiceData := map[string]string{\n\t\t\"HUB_AUTHENTICATION_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"authentication\"),\n\t\t\"AUTHENTICATION_HOST\": fmt.Sprintf(\"%s:%d\", util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"authentication\"), authenticationPort),\n\t\t\"CLIENT_CERT_CN\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"binaryscanner\"),\n\t\t\"CFSSL\": fmt.Sprintf(\"%s:8888\", util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"cfssl\")),\n\t\t\"HUB_CFSSL_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"cfssl\"),\n\t\t\"BLACKDUCK_CFSSL_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"cfssl\"),\n\t\t\"BLACKDUCK_CFSSL_PORT\": \"8888\",\n\t\t\"HUB_DOC_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"documentation\"),\n\t\t\"HUB_JOBRUNNER_HOST\": util.GetResourceName(c.blackDuck.Name, 
util.BlackDuckName, \"jobrunner\"),\n\t\t\"HUB_LOGSTASH_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"logstash\"),\n\t\t\"RABBIT_MQ_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"rabbitmq\"),\n\t\t\"BROKER_URL\": fmt.Sprintf(\"amqps://%s/protecodesc\", util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"rabbitmq\")),\n\t\t\"HUB_REGISTRATION_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"registration\"),\n\t\t\"HUB_SCAN_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"scan\"),\n\t\t\"BLACKDUCK_UPLOAD_CACHE_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"uploadcache\"),\n\t\t\"HUB_UPLOAD_CACHE_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"uploadcache\"),\n\t\t\"HUB_WEBAPP_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"webapp\"),\n\t\t\"HUB_WEBSERVER_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"webserver\"),\n\t\t\"HUB_ZOOKEEPER_HOST\": util.GetResourceName(c.blackDuck.Name, util.BlackDuckName, \"zookeeper\"),\n\t}\n\n\tif c.config.IsOpenshift {\n\t\tblackduckServiceData[\"BLACKDUCK_ORCHESTRATION_TYPE\"] = \"OPENSHIFT\"\n\t} else {\n\t\tblackduckServiceData[\"BLACKDUCK_ORCHESTRATION_TYPE\"] = \"KUBERNETES\"\n\t}\n\n\thubData = util.MergeEnvMaps(blackduckServiceData, hubData)\n\n\tfor _, value := range c.blackDuck.Spec.Environs {\n\t\tvalues := strings.SplitN(value, \":\", 2)\n\t\tif len(values) == 2 {\n\t\t\tmapKey := strings.TrimSpace(values[0])\n\t\t\tmapValue := strings.TrimSpace(values[1])\n\t\t\tif len(mapKey) > 0 && len(mapValue) > 0 {\n\t\t\t\thubData[mapKey] = mapValue\n\t\t\t}\n\t\t}\n\t}\n\n\t// merge default and input environs\n\tenvirons := GetHubKnobs(c.blackDuck.Spec.Version)\n\n\thubData = util.MergeEnvMaps(hubData, environs)\n\n\thubConfig.AddData(hubData)\n\thubConfig.AddLabels(c.GetVersionLabel(\"configmap\"))\n\tconfigMaps = append(configMaps, hubConfig)\n\n\treturn 
configMaps\n}", "func (o ArgoCDSpecTlsCaPtrOutput) ConfigMapName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecTlsCa) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ConfigMapName\n\t}).(pulumi.StringPtrOutput)\n}", "func ConfigMapName() string {\n\tcm := os.Getenv(configMapNameEnv)\n\tif cm == \"\" {\n\t\treturn \"config-redis\"\n\t}\n\treturn cm\n}", "func (c *Controller) ConfigMaps() typedv1.ConfigMapInterface {\n\treturn c.client.ConfigMaps(c.namespace.Name)\n}", "func GetConfigMap(client client.Client, parentNamespace string, configMapRef *corev1.ObjectReference) (configMap *corev1.ConfigMap, err error) {\n\tsrLogger := log.WithValues(\"package\", \"utils\", \"method\", \"getConfigMap\")\n\tif configMapRef != nil {\n\t\tsrLogger.Info(\"Retrieve configMap \", \"parentNamespace\", parentNamespace, \"configMapRef.Name\", configMapRef.Name)\n\t\tns := configMapRef.Namespace\n\t\tif ns == \"\" {\n\t\t\tns = parentNamespace\n\t\t}\n\t\tconfigMap = &corev1.ConfigMap{}\n\t\terr = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: configMapRef.Name}, configMap)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tsrLogger.Error(err, \"ConfigMap not found \", \"Name:\", configMapRef.Name, \" on namespace: \", ns)\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\tsrLogger.Error(err, \"Failed to get configMap \", \"Name:\", configMapRef.Name, \" on namespace: \", ns)\n\t\t\treturn nil, err\n\t\t}\n\t\tsrLogger.Info(\"ConfigMap found \", \"Name:\", configMapRef.Name, \" on namespace: \", ns)\n\t} else {\n\t\tsrLogger.Info(\"no configMapRef defined \", \"parentNamespace\", parentNamespace)\n\t}\n\treturn configMap, err\n}", "func (e *Signer) getConfigMap() *v1.ConfigMap {\n\tconfigMap, err := e.configMapLister.ConfigMaps(e.configMapNamespace).Get(e.configMapName)\n\n\t// If we can't get the configmap just return nil. 
The resync will eventually\n\t// sync things up.\n\tif err != nil {\n\t\tif !apierrors.IsNotFound(err) {\n\t\t\tutilruntime.HandleError(err)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn configMap\n}", "func (c *Controller) getConfigMap(obj *meta.ObjectMeta) (*core.ConfigMap, error) {\n\t// Check whether object with such name already exists in k8s\n\tres, err := c.configMapLister.ConfigMaps(obj.Namespace).Get(obj.Name)\n\n\tif res != nil {\n\t\t// Object found by name\n\t\treturn res, nil\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\t// Object with such name not found\n\t\t// Try to find by labels\n\t\tif set, err := chopmodel.GetSelectorHostFromObjectMeta(obj); err == nil {\n\t\t\tselector := labels.SelectorFromSet(set)\n\t\t\tobjects, err := c.configMapLister.ConfigMaps(obj.Namespace).List(selector)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif len(objects) == 1 {\n\t\t\t\t// Object found by labels\n\t\t\t\treturn objects[0], nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// Object not found\n\treturn nil, err\n}", "func (c *ConfigMapConfig) ToConfigMap() corev1.ConfigMap {\n\treturn corev1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: ApiVersion,\n\t\t\tKind: KindConfigMap,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: c.Name,\n\t\t\tNamespace: c.Namespace,\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"csi.json\": c.JsonData,\n\t\t},\n\t}\n}", "func NewConfigFromMap(configMap map[string]string) (*Config, error) {\n\tnc := defaultConfig()\n\n\tif err := cm.Parse(configMap,\n\t\tcm.AsString(QueueSidecarImageKey, &nc.QueueSidecarImage),\n\t\tcm.AsDuration(ProgressDeadlineKey, &nc.ProgressDeadline),\n\t\tcm.AsStringSet(registriesSkippingTagResolvingKey, &nc.RegistriesSkippingTagResolving),\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif nc.QueueSidecarImage == \"\" {\n\t\treturn nil, errors.New(\"queueSidecarImage cannot be empty or unset\")\n\t}\n\n\tif nc.ProgressDeadline <= 0 {\n\t\treturn nil, fmt.Errorf(\"progressDeadline cannot 
be a non-positive duration, was %v\", nc.ProgressDeadline)\n\t}\n\n\treturn nc, nil\n}", "func (b *BackendConfiguration) ToConfigMapConfig() (ConfigMapConfig, error) {\n\tconfig := struct {\n\t\tBackends BackendConfiguration `json:\"backends\"`\n\t}{*b}\n\n\toutput, err := json.MarshalIndent(&config, \"\", \" \")\n\tif err != nil {\n\t\treturn ConfigMapConfig{}, helper.LogErrorf(\" json.MarshalIndent failed: %v\", err)\n\t}\n\n\treturn ConfigMapConfig{\n\t\tName: b.Name,\n\t\tNamespace: b.NameSpace,\n\t\tJsonData: string(output),\n\t}, nil\n}", "func newConfigmap(customConfigmap *customConfigMapv1alpha1.CustomConfigMap) *corev1.ConfigMap {\n\tlabels := map[string]string{\n\t\t\"name\": customConfigmap.Spec.ConfigMapName,\n\t\t\"customConfigName\": customConfigmap.Name,\n\t\t\"latest\": \"true\",\n\t}\n\tname := fmt.Sprintf(\"%s-%s\", customConfigmap.Spec.ConfigMapName, RandomSequence(5))\n\tconfigName := NameValidation(name)\n\treturn &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configName,\n\t\t\tNamespace: customConfigmap.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(customConfigmap, customConfigMapv1alpha1.SchemeGroupVersion.WithKind(\"CustomConfigMap\")),\n\t\t\t},\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: customConfigmap.Spec.Data,\n\t\tBinaryData: customConfigmap.Spec.BinaryData,\n\t}\n}", "func NewConfigFromMap(configMap map[string]string) (*Config, error) {\n\tnc := defaultConfig()\n\n\tif err := cm.Parse(configMap,\n\t\tasRequiredString(QueueSidecarImageKey, &nc.QueueSidecarImage),\n\t\tcm.AsDuration(ProgressDeadlineKey, &nc.ProgressDeadline),\n\t\tasStringSet(registriesSkippingTagResolvingKey, &nc.RegistriesSkippingTagResolving),\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif nc.ProgressDeadline <= 0 {\n\t\treturn nil, fmt.Errorf(\"ProgressDeadline cannot be a non-positive duration, was %v\", nc.ProgressDeadline)\n\t}\n\n\treturn nc, nil\n}", "func (r *ConjurConfigReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {\n\t_ = r.Log.WithValues(\"conjurconfig\", req.NamespacedName)\n\n\t// Fetch the ConjurConfig instance\n\tconjurConfig := &conjurv1alpha1.ConjurConfig{}\n\terr := r.Get(ctx, req.NamespacedName, conjurConfig)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\tlog.Info(\"ConjurConfig resource not found. Ignoring since object must be deleted\")\n\t\t\treturn ctrl.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\tlog.Error(err, \"Failed to get ConjurConfig\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// Check if the ConfigMap already exists, if not create a new one\n\tfound := &v1.ConfigMap{}\n\tcmName := getConfigMapName(conjurConfig)\n\tcmNamespace := conjurConfig.Namespace\n\terr = r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: cmNamespace}, found)\n\tif err != nil && errors.IsNotFound(err) {\n\t\t// Define a new ConfigMap\n\t\tcm := r.configMapForConjurConfig(conjurConfig, cmName)\n\t\tlog.Info(\"Creating a new ConfigMap, \", \"ConfigMap.Name: \", cmName,\n\t\t\t\"ConfigMap.Namespace: \", cmNamespace)\n\t\terr = r.Create(ctx, cm)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to create new ConfigMap, \", \"ConfigMap.Name: \", cm.Name, \"ConfigMap.Namespace: \", cm.Namespace)\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\t// ConfigMap created successfully - return and requeue\n\t\treturn ctrl.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\tlog.Error(err, \"Failed to get ConfigMap\")\n\t\treturn ctrl.Result{}, err\n\t}\n\n\t// TODO: Ensure ConfigMap has correct content\n\n\t// TODO: Add ConfigMap created and/or timestamp to status?\n\n\treturn ctrl.Result{}, nil\n}", "func Map() map[string]interface{} {\n\treturn 
DefaultConfig.Map()\n}", "func NewConfigFromMap(data map[string]string) (*RedisConfig, error) {\n\trc := defaultConfig()\n\tif numC, ok := data[redisConfigKey]; ok {\n\t\trc.NumConsumers = numC\n\t}\n\treturn rc, nil\n}", "func labelsForConjurConfig(name string) map[string]string {\n\treturn map[string]string{\"app\": \"conjur-config\", \"conjur-config-cr\": name}\n}", "func (o ClusterBuildStrategySpecBuildStepsEnvFromOutput) ConfigMapRef() ClusterBuildStrategySpecBuildStepsEnvFromConfigMapRefPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsEnvFrom) *ClusterBuildStrategySpecBuildStepsEnvFromConfigMapRef {\n\t\treturn v.ConfigMapRef\n\t}).(ClusterBuildStrategySpecBuildStepsEnvFromConfigMapRefPtrOutput)\n}", "func (o BuildStrategySpecBuildStepsEnvFromOutput) ConfigMapRef() BuildStrategySpecBuildStepsEnvFromConfigMapRefPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsEnvFrom) *BuildStrategySpecBuildStepsEnvFromConfigMapRef {\n\t\treturn v.ConfigMapRef\n\t}).(BuildStrategySpecBuildStepsEnvFromConfigMapRefPtrOutput)\n}", "func (o ServiceOutput) ConvergeConfig() ServiceConvergeConfigPtrOutput {\n\treturn o.ApplyT(func(v *Service) ServiceConvergeConfigPtrOutput { return v.ConvergeConfig }).(ServiceConvergeConfigPtrOutput)\n}", "func mapServerConf(conf Config) server.Config {\n\tvar c server.Config\n\tvar err error\n\n\tif conf.Webserver.LogFile != \"\" {\n\t\tif c.LogFile, err = os.OpenFile(logFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666); err != nil {\n\t\t\tlog.Printf(\"Unable to open logfile (%v) for writing: %v\", logFile, err)\n\t\t\tos.Exit(2)\n\t\t}\n\n\t}\n\tif conf.Webserver.LogFormat == \"\" {\n\t\tconf.Webserver.LogFormat = server.DefaultLogFormat\n\t}\n\tc.LogTemplate = template.New(\"logfile\")\n\tif _, err := c.LogTemplate.Parse(conf.Webserver.LogFormat); err != nil {\n\t\tlog.Printf(\"Could not parse log template: %v error: %v\", conf.Webserver.LogFormat, err)\n\t\tos.Exit(3)\n\t}\n\n\t//\titerate providers\n\tfor 
_, provider := range conf.Providers {\n\t\tc.Providers = append(c.Providers, server.Provider{\n\t\t\tName: provider.Name,\n\t\t\tType: provider.Type,\n\t\t\tHost: provider.Host,\n\t\t\tPort: provider.Port,\n\t\t\tDatabase: provider.Database,\n\t\t\tUser: provider.User,\n\t\t\tPassword: provider.Password,\n\t\t})\n\t}\n\n\t//\titerate maps\n\tfor _, m := range conf.Maps {\n\t\tserverMap := server.Map{\n\t\t\tName: m.Name,\n\t\t}\n\n\t\t//\titerate layers\n\t\tfor _, l := range m.Layers {\n\t\t\tserverMap.Layers = append(serverMap.Layers, server.Layer{\n\t\t\t\tName: l.Name,\n\t\t\t\tProvider: l.Provider,\n\t\t\t\tMinzoom: l.Minzoom,\n\t\t\t\tMaxzoom: l.Maxzoom,\n\t\t\t\tTableName: l.TableName,\n\t\t\t\tSQL: l.SQL,\n\t\t\t})\n\t\t}\n\n\t\tc.Maps = append(c.Maps, serverMap)\n\t}\n\n\treturn c\n}", "func GetConfigMaps(contextName string, namespace string) []corev1.ConfigMap {\n\n\tctxReceiver, ok := contextReceivers[contextName]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tnsReceiver, ok := ctxReceiver.namespaceReceivers[namespace]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treceiver := nsReceiver.configMapEventReceiver\n\tif receiver == nil {\n\t\treturn nil\n\t}\n\n\treturn receiver.getConfigMaps()\n}", "func GetConcertoConfig() (*Config, error) {\n\tif cachedConfig == nil {\n\t\treturn nil, fmt.Errorf(\"configuration hasn't been initialized\")\n\t}\n\treturn cachedConfig, nil\n}", "func NewConfigFromMap(configMap map[string]string) (*Config, error) {\n\tnc := defaultConfig()\n\tqsideCarImage, ok := configMap[QueueSidecarImageKey]\n\tif !ok {\n\t\treturn nil, errors.New(\"queue sidecar image is missing\")\n\t}\n\tnc.QueueSidecarImage = qsideCarImage\n\n\tif pd, ok := configMap[ProgressDeadlineKey]; ok {\n\t\tv, err := time.ParseDuration(pd)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing %s=%s as duration, %w\", ProgressDeadlineKey, pd, err)\n\t\t} else if v <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"%s cannot be non-positive duration, was %v\", 
ProgressDeadlineKey, v)\n\t\t}\n\t\tnc.ProgressDeadline = v\n\t}\n\n\tif registries, ok := configMap[registriesSkippingTagResolvingKey]; ok {\n\t\tnc.RegistriesSkippingTagResolving = sets.NewString(strings.Split(registries, \",\")...)\n\t}\n\treturn nc, nil\n}", "func GetCredentialsAsConfigMap(credentials []byte) *v1.ConfigMap {\n\treturn &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: constants.ConfigMapName,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"builder\": \"kaniko\",\n\t\t\t},\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"config.json\": string(credentials),\n\t\t},\n\t}\n}", "func getWebConsoleConfigMap(config *restclient.Config) (*corev1.ConfigMap, error) {\n\tmyScheme := runtime.NewScheme()\n\tcl, _ := client.New(config, client.Options{Scheme: myScheme})\n\tcorev1.AddToScheme(myScheme)\n\tconfigmap := &corev1.ConfigMap{}\n\terr := cl.Get(context.TODO(), types.NamespacedName{\n\t\tNamespace: \"openshift-web-console\", Name: \"webconsole-config\"}, configmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmCopy := configmap.DeepCopy()\n\tif cmCopy == nil {\n\t\terr = errors.New(\"getWebConsoleConfigMap: Failed to copy web-console configuration data\")\n\t}\n\n\treturn cmCopy, err\n}", "func (f *FakeConfigMapsNamespacer) ConfigMaps(ns string) v1.ConfigMapInterface {\n\tiface, ok := f.ToReturn[ns]\n\tif !ok {\n\t\tiface = &FakeConfigMapsInterface{}\n\t}\n\tf.Returned[ns] = iface\n\treturn iface\n}", "func ConnectionConfig() *ConnectionConfigApplyConfiguration {\n\treturn &ConnectionConfigApplyConfiguration{}\n}", "func ConfigMapKey(pandaCluster *vectorizedv1alpha1.Cluster) types.NamespacedName {\n\treturn types.NamespacedName{Name: resourceNameTrim(pandaCluster.Name, baseSuffix), Namespace: pandaCluster.Namespace}\n}", "func (c *Cluster) GetConfigMap(ctx context.Context, namespace, name string) (model.ConfigMap, error) {\n\tresult := model.ConfigMap{}\n\n\terr := c.Config()\n\n\tif err != nil {\n\t\treturn result, 
err\n\t}\n\n\tconfigmap, err := c.ClientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tresult.Name = configmap.ObjectMeta.Name\n\tresult.Namespace = configmap.ObjectMeta.Namespace\n\tresult.UID = string(configmap.ObjectMeta.UID)\n\tresult.CreationTimestamp = configmap.ObjectMeta.CreationTimestamp.String()\n\tresult.Data = configmap.Data\n\tresult.Labels = configmap.ObjectMeta.Labels\n\n\treturn result, nil\n}", "func (em *envMap) Config() (map[string]interface{}, error) {\n\tif em.emap == nil {\n\t\tem.emap = em.getenvironment(os.Environ(), func(item string) (key, val string) {\n\t\t\tsplits := strings.Split(item, \"=\")\n\n\t\t\t// allow dot representation, eg \"sever__port\" => \"server.port\"\n\t\t\tkey = strings.Replace(splits[0], em.dotAlias, \".\", -1)\n\t\t\tval = strings.Join(splits[1:], \"=\")\n\t\t\treturn\n\t\t})\n\t}\n\treturn em.emap, nil\n}", "func Map() map[string]interface{} {\n\treturn conf.Map()\n}", "func (k *KafkaConsumer) buildConfigMap(config map[string]string) (*kafka.ConfigMap, error) {\n\t// outbound channel buffer size\n\tbufsize, err := strconv.Atoi(config[\"buffersize\"])\n\tif err != nil {\n\t\treturn nil, errors.New(\"kafkaconsumer: failed to convert config 'buffersize' to integer\")\n\t}\n\n\t// default kafka consumer config\n\tconfigMap := &kafka.ConfigMap{\n\t\t\"bootstrap.servers\": config[\"brokers\"],\n\t\t\"group.id\": config[\"consumergroup\"],\n\t\t\"session.timeout.ms\": 10000,\n\t\t\"enable.auto.commit\": true,\n\t\t\"auto.commit.interval.ms\": 5000,\n\t\t\"statistics.interval.ms\": 60000,\n\t\t\"go.events.channel.enable\": true,\n\t\t\"go.events.channel.size\": bufsize,\n\t\t\"go.application.rebalance.enable\": true,\n\t\t\"default.topic.config\": kafka.ConfigMap{\"auto.offset.reset\": \"earliest\"},\n\t\t\"socket.keepalive.enable\": true,\n\t\t\"log.connection.close\": false,\n\t}\n\n\terr = util.ApplyLibrdkafkaConf(config, configMap)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\treturn configMap, nil\n}", "func (adm Admin) GetConfig(cluster string, scope string, keys []string) map[string]interface{} {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn nil\n\t}\n\tdefer conn.Disconnect()\n\n\tresult := make(map[string]interface{})\n\n\tswitch scope {\n\tcase \"CLUSTER\":\n\t\tkb := KeyBuilder{cluster}\n\t\tpath := kb.clusterConfig()\n\n\t\tfor _, k := range keys {\n\t\t\tresult[k] = conn.GetSimpleFieldValueByKey(path, k)\n\t\t}\n\tcase \"CONSTRAINT\":\n\tcase \"PARTICIPANT\":\n\tcase \"PARTITION\":\n\tcase \"RESOURCE\":\n\t}\n\n\treturn result\n}", "func mapToConfig(cfgMap map[string]interface{}) (*FSConfig, error) {\n\tif cfgMap == nil {\n\t\treturn DefaultFSConfig(), nil\n\t}\n\tcfg := &FSConfig{}\n\tif err := mapstructure.Decode(cfgMap, cfg); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg, nil\n}", "func (c *Config) Map() map[string]string {\n\treturn c.v\n}", "func (k *K8S) Config(map[string]string, string, string) (map[string]interface{}, error) {\n\treturn nil, nil\n}", "func ConfigToMap(in interface{}, tag string) (map[string]string, error) {\n\tout := make(map[string]string)\n\n\tv := reflect.ValueOf(in)\n\tif v.Kind() == reflect.Ptr {\n\t\tv = v.Elem()\n\t}\n\n\tif v.Kind() != reflect.Struct {\n\t\treturn nil, fmt.Errorf(\"ConfigToMap only accepts structs got %T\", v)\n\t}\n\n\ttyp := v.Type()\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfi := typ.Field(i)\n\t\tif mtag := fi.Tag.Get(tag); mtag != \"\" {\n\t\t\tout[mtag] = fmt.Sprint(v.Field(i).Interface())\n\t\t}\n\t}\n\treturn out, nil\n}", "func (s S) ConfigMaps() []v1.ConfigMap {\n\treturn s.configMaps\n}", "func InitializeConfig(k8sClientset kubernetes.Interface, configMapName string) (*Config, error) {\n\tconfigMapCluster, err := k8sClientset.CoreV1().ConfigMaps(defaults.Namespace()).Get(context.Background(), configMapName, metav1.GetOptions{})\n\tif err != nil {\n\t\tif 
k8errors.IsNotFound(err) {\n\t\t\tconfigMemoryCache = &Config{} // We create an empty config so that we don't try to initialize again\n\t\t\t// If the configmap is not found, we return\n\t\t\treturn configMemoryCache, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"failed to get configmap %s/%s: %w\", defaults.Namespace(), configMapName, err)\n\t}\n\n\tvar trafficRouterPlugins []types.PluginItem\n\tif err = yaml.Unmarshal([]byte(configMapCluster.Data[\"trafficRouterPlugins\"]), &trafficRouterPlugins); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal traffic router plugins while initializing: %w\", err)\n\t}\n\n\tvar metricProviderPlugins []types.PluginItem\n\tif err = yaml.Unmarshal([]byte(configMapCluster.Data[\"metricProviderPlugins\"]), &metricProviderPlugins); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal metric provider plugins while initializing: %w\", err)\n\t}\n\n\tmutex.Lock()\n\tconfigMemoryCache = &Config{\n\t\tconfigMap: configMapCluster,\n\t\tplugins: append(trafficRouterPlugins, metricProviderPlugins...),\n\t}\n\tmutex.Unlock()\n\n\terr = configMemoryCache.ValidateConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"validation of config due to (%w)\", err)\n\t}\n\n\treturn configMemoryCache, nil\n}", "func (rc *RemoteConfig) getUrlMap() url.Values {\n\turlMap := url.Values{}\n\turlMap.Set(constant.ConfigUsernameKey, rc.Username)\n\turlMap.Set(constant.ConfigPasswordKey, rc.Password)\n\turlMap.Set(constant.ConfigTimeoutKey, rc.Timeout)\n\turlMap.Set(constant.ClientNameKey, clientNameID(rc, rc.Protocol, rc.Protocol))\n\n\tfor key, val := range rc.Params {\n\t\turlMap.Set(key, val)\n\t}\n\treturn urlMap\n}", "func (conf *Config) ToMap() map[string]bool {\n\tresult := make(map[string]bool, 0)\n\n\tif conf == nil {\n\t\treturn result\n\t}\n\n\tc := reflect.Indirect(reflect.ValueOf(conf))\n\n\tfor i := 0; i < c.NumField(); i++ {\n\t\tname := strings.Split(c.Type().Field(i).Tag.Get(\"json\"), \",\")[0]\n\t\tif 
strings.HasPrefix(name, \"-\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.Field(i).IsNil() {\n\t\t\tresult[name] = false\n\t\t} else {\n\t\t\tresult[name] = true\n\t\t}\n\t}\n\n\treturn result\n}", "func configMapObserver(logger *zap.SugaredLogger, configMap *v1.ConfigMap) {\n\tif configMap == nil {\n\t\tlogger.Warn(\"Nil ConfigMap passed to configMapObserver; ignoring\")\n\t\treturn\n\t}\n\n\tif dispatcher == nil {\n\t\t// This typically happens during startup\n\t\tlogger.Info(\"Dispatcher is nil during call to configMapObserver; ignoring changes\")\n\t\treturn\n\t}\n\n\t// Toss the new config map to the dispatcher for inspection and action\n\tnewDispatcher := dispatcher.ConfigChanged(configMap)\n\tif newDispatcher != nil {\n\t\t// The configuration change caused a new dispatcher to be created, so switch to that one\n\t\tdispatcher = newDispatcher\n\t}\n}", "func (t *Topic) Config() (map[string]string, error) {\n\tvalue, _, err := t.kz.conn.Get(fmt.Sprintf(\"%s/config/topics/%s\", t.kz.conf.Chroot, t.Name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar topicConfig struct {\n\t\tConfigMap map[string]string `json:\"config\"`\n\t}\n\n\tif err := json.Unmarshal(value, &topicConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn topicConfig.ConfigMap, nil\n}", "func (r *InstanceReconciler) configmapForInstance(m *terraformv1alpha1.Instance, input util.TerraVars) *corev1.ConfigMap {\n\tconfigMapData := make(map[string]string, 0)\n\n\te := reflect.ValueOf(&input).Elem()\n\n\tfor i := 0; i < e.NumField(); i++ {\n\t\tvarName := e.Type().Field(i).Name\n\t\t//varType := e.Type().Field(i).Type\n\t\tvarValue := fmt.Sprintf(\"%v\", e.Field(i).Interface())\n\n\t\tconfigMapData[varName] = varValue\n\t}\n\n\tcm := &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tNamespace: m.Namespace,\n\t\t},\n\t\tData: configMapData,\n\t}\n\treturn cm\n}", "func (o KafkaConnectorOutput) Config() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v 
*KafkaConnector) pulumi.StringMapOutput { return v.Config }).(pulumi.StringMapOutput)\n}", "func newConfigMap(configMapName, namespace string, labels map[string]string,\n\tkibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount string) *v1.ConfigMap {\n\n\terr, data := renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, rootLogger)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &v1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"ConfigMap\",\n\t\t\tAPIVersion: v1.SchemeGroupVersion.String(),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configMapName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tData: data,\n\t}\n}", "func (c BaseConfig) GetBaseConfig() BaseConfig { return c }", "func GetConfigs() map[string]interface{} {\n\treturn DefaultConf.ConfigFactory.GetConfigurations()\n}", "func (tp *TemplateProvider) TemplateToConfig(path string, mapping map[string]interface{}) (*api.Config, error) {\n\tb, err := tp.getMappedTemplate(path, mapping)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn convertToConfig(b)\n}", "func (c *controller) ApplyConfigMap(namespace string, configMap *ConfigMap) error {\n\tcm := apicorev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: configMap.Name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tData: map[string]string{\n\t\t\tconfigMap.FileName: configMap.Data,\n\t\t},\n\t}\n\t_, err := c.k8sCoreClient.ConfigMaps(namespace).Get(cm.Name, metav1.GetOptions{})\n\tif err == nil {\n\t\t// exists, we update instead\n\t\t_, err = c.k8sCoreClient.ConfigMaps(namespace).Update(&cm)\n\t\treturn err\n\t}\n\t_, err = c.k8sCoreClient.ConfigMaps(namespace).Create(&cm)\n\treturn err\n}", "func (w *worker) reconcileConfigMap(\n\tchi *chop.ClickHouseInstallation,\n\tconfigMap *core.ConfigMap,\n\tupdate bool,\n) error {\n\tw.a.V(2).M(chi).S().P()\n\tdefer 
w.a.V(2).M(chi).E().P()\n\n\t// Check whether this object already exists in k8s\n\tcurConfigMap, err := w.c.getConfigMap(&configMap.ObjectMeta, false)\n\n\tif curConfigMap != nil {\n\t\t// We have ConfigMap - try to update it\n\t\tif !update {\n\t\t\treturn nil\n\t\t}\n\t\terr = w.updateConfigMap(chi, configMap)\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\t// ConfigMap not found - even during Update process - try to create it\n\t\terr = w.createConfigMap(chi, configMap)\n\t}\n\n\tif err != nil {\n\t\tw.a.WithEvent(chi, eventActionReconcile, eventReasonReconcileFailed).\n\t\t\tWithStatusAction(chi).\n\t\t\tWithStatusError(chi).\n\t\t\tM(chi).A().\n\t\t\tError(\"FAILED to reconcile ConfigMap: %s CHI: %s \", configMap.Name, chi.Name)\n\t}\n\n\treturn err\n}", "func GetBackendConfigmap(ctx context.Context, configmapMeta string) (*coreV1.ConfigMap, error) {\n\tnamespace, name, err := pkgUtils.SplitMetaNamespaceKey(configmapMeta)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"split configmap meta %s namespace failed, error: %v\", configmapMeta, err)\n\t}\n\n\tconfigmap, err := app.GetGlobalConfig().K8sUtils.GetConfigmap(ctx, name, namespace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get configmap for [%s] failed, error: %v\", configmapMeta, err)\n\t}\n\n\treturn configmap, nil\n}", "func newConfigFromMap(cfgMap map[string]string) (*configstore, error) {\n\tdata, ok := cfgMap[configdatakey]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"config data not present\")\n\t}\n\treturn &configstore{data}, nil\n}", "func (c client) GetConfigMap(objectKey k8sClient.ObjectKey) (corev1.ConfigMap, error) {\n\tcm := corev1.ConfigMap{}\n\tif err := c.Get(context.TODO(), objectKey, &cm); err != nil {\n\t\treturn corev1.ConfigMap{}, err\n\t}\n\treturn cm, nil\n}", "func configMapAPIPath(cm *apiv1.ConfigMap) string {\n\treturn fmt.Sprintf(\"/api/v1/namespaces/%s/configmaps/%s\", cm.Namespace, cm.Name)\n}", "func createConfigMap() *v1.ConfigMap {\n\treturn &v1.ConfigMap{\n\t\tTypeMeta: 
metav1.TypeMeta{\n\t\t\tKind: \"ConfigMap\",\n\t\t\tAPIVersion: \"v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"descheduler-policy-configmap\",\n\t\t\tNamespace: \"kube-system\",\n\t\t},\n\t\t// strategies:\\n \\\"RemoveDuplicates\\\":\\n enabled: true\n\t\tData: map[string]string{\n\t\t\t\"policy.yaml\": \"apiVersion: \\\"descheduler/v1alpha1\\\"\\nkind: \\\"DeschedulerPolicy\\\"\\nstrategies:\\n \\\"RemoveDuplicates\\\":\\n enabled: true\\n\",\n\t\t},\n\t}\n}", "func NewConfig(cfg map[string]interface{}) *Config {\n\tif cfg == nil {\n\t\tcfg = make(map[string]interface{})\n\t}\n\treturn &Config{\n\t\tm: cfg,\n\t}\n}", "func getconf() (map[string]string, error) {\n\tif _, err := os.Stat(config.Conf()); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tf, err := os.Open(config.Conf())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tret := make(map[string]string)\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tkv := strings.SplitN(scanner.Text(), \":\", 2)\n\t\tif len(kv) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tret[kv[0]] = kv[1]\n\t}\n\treturn ret, nil\n}", "func (c *ConfigImpl) GetConfig(key string) (GoConfig, error) {\n\tkeys := strings.Split(key, \".\")\n\tvalues := subMap(&c.values, keys, false)\n\tif nil == values {\n\t\treturn nil, errors.New(\"Key '\" + key + \"' does not exsists\")\n\t}\n\treturn &ConfigImpl{values: *values, parent: c, def: c.def}, nil\n}", "func newConfigMapForCR(cr *storagev1.CSIPowerMaxRevProxy) (*v1.ConfigMap, error) {\n\tconfig := cr.Spec.RevProxy\n\tif config.Mode == \"\" {\n\t\tconfig.Mode = DefaultMode\n\t}\n\tif config.Port == 0 {\n\t\tconfig.Port = DefaultPort\n\t}\n\tout, err := yaml.Marshal(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfigMapData := make(map[string]string)\n\tconfigMapData[ConfigFileName] = string(out)\n\tlabels := map[string]string{\n\t\t\"name\": ReverseProxyName,\n\t}\n\treturn 
&v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: ConfigMapName,\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t\tOwnerReferences: getOwnerReferences(cr),\n\t\t},\n\t\tData: configMapData,\n\t}, nil\n}", "func (c *Conn) Config() *ConnConfig { return c.config.Copy() }", "func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) {\n\ttmplContent, err := p.getTemplateContent(defaultTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.CreateConfiguration(tmplContent, funcMap, templateObjects)\n}", "func (r *ReconcileRethinkDBCluster) reconcileCAConfigMap(cr *rethinkdbv1alpha1.RethinkDBCluster, caSecret *corev1.Secret) error {\n\tname := fmt.Sprintf(\"%s-ca\", cr.Name)\n\tfound := &corev1.ConfigMap{}\n\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: cr.Namespace}, found)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlog.Info(\"creating new configmap\", \"configmap\", name)\n\t\tcm, err := newCAConfigMap(cr, caSecret)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Set RethinkDBCluster instance as the owner and controller\n\t\tif err = controllerutil.SetControllerReference(cr, cm, r.scheme); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn r.client.Create(context.TODO(), cm)\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"configmap exists\", \"configmap\", found.Name)\n\treturn nil\n}", "func (conn *Conn) Config() *Config {\n\treturn conn.cfg\n}", "func (adm Admin) GetConfig(\n\tcluster string, scope string, builder []string) (map[string]interface{}, error) {\n\tresult := make(map[string]interface{})\n\n\tswitch scope {\n\tcase \"CLUSTER\":\n\t\tkb := KeyBuilder{cluster}\n\t\tpath := kb.clusterConfig()\n\n\t\tfor _, k := range builder {\n\t\t\tvar err error\n\t\t\tval, err := adm.zkClient.GetSimpleFieldValueByKey(path, k)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\tresult[k] = val\n\t\t}\n\tcase \"CONSTRAINT\":\n\tcase \"PARTICIPANT\":\n\tcase \"PARTITION\":\n\tcase \"RESOURCE\":\n\t}\n\n\treturn result, nil\n}", "func FromConfigMap(ctx context.Context, clientSet kubernetes.Interface, namespace string) (*Config, error) {\n\tconfig, err := clientSet.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8errors.IsNotFound(err) {\n\t\t\tglog.Infof(\"cannot find launcher configmap: name=%q namespace=%q, will use default config\", configMapName, namespace)\n\t\t\t// LauncherConfig is optional, so ignore not found error.\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn &Config{data: config.Data}, nil\n}", "func (c *ConfigParser) GetConfigMap() (confmap map[string]map[string]string, err error) {\n\tconfmap = make(map[string]map[string]string)\n\terr = c.Parse()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor section, secdata := range c.sections {\n\t\tconfmap[section] = make(map[string]string)\n\t\tfor option, value := range secdata.options {\n\t\t\tconfmap[section][option] = value\n\t\t}\n\t}\n\treturn\n}", "func GetConfig(c *caddy.Controller) *Config {\r\n\tctx := c.Context().(*netContext)\r\n\tkey := strings.Join(c.ServerBlockKeys, \"~\")\r\n\r\n\t//only check for config if the value is proxy or echo\r\n\t//we need to do this because we specify the ports in the server block\r\n\t//and those values need to be ignored as they are also sent from caddy main process.\r\n\tif strings.Contains(key, \"echo\") || strings.Contains(key, \"proxy\") {\r\n\t\tif cfg, ok := ctx.keysToConfigs[key]; ok {\r\n\t\t\treturn cfg\r\n\t\t}\r\n\t}\r\n\r\n\t// we should only get here if value of key in server block\r\n\t// is not echo or proxy i.e port number :12017\r\n\t// we can't return a nil because caddytls.RegisterConfigGetter will panic\r\n\t// so we return a default (blank) config value\r\n\tcaddytlsConfig, err := caddytls.NewConfig(ctx.instance)\r\n\tif err != nil 
{\r\n\t\tlog.Printf(\"[ERROR] Making new TLS configuration: %v\", err)\r\n\t\treturn new(Config)\r\n\t}\r\n\r\n\treturn &Config{TLS: caddytlsConfig}\r\n}", "func Map(src config.Cfg) {\n\tcfg.Map(src)\n}", "func configMapOperations(t *testing.T, kubeclient clientset.Interface, namespace string) {\n\t// create, get, watch, update, patch, list and delete configmap.\n\tconfigMap := &apiv1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"audit-configmap\",\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"map-key\": \"map-value\",\n\t\t},\n\t}\n\t// add admission label to config maps that are to be sent to webhook\n\tif namespace != nonAdmissionWebhookNamespace {\n\t\tconfigMap.Labels = map[string]string{\n\t\t\t\"admission\": \"true\",\n\t\t}\n\t}\n\n\t_, err := kubeclient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{})\n\texpectNoError(t, err, \"failed to create audit-configmap\")\n\n\t_, err = kubeclient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{})\n\texpectNoError(t, err, \"failed to get audit-configmap\")\n\n\tconfigMapChan, err := kubeclient.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), watchOptions)\n\texpectNoError(t, err, \"failed to create watch for config maps\")\n\tfor range configMapChan.ResultChan() {\n\t\t// Block until watchOptions.TimeoutSeconds expires.\n\t\t// If the test finishes before watchOptions.TimeoutSeconds expires, the watch audit\n\t\t// event at stage ResponseComplete will not be generated.\n\t}\n\n\t_, err = kubeclient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})\n\texpectNoError(t, err, \"failed to update audit-configmap\")\n\n\t_, err = kubeclient.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch, metav1.PatchOptions{})\n\texpectNoError(t, err, \"failed to patch configmap\")\n\n\t_, err = 
kubeclient.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{})\n\texpectNoError(t, err, \"failed to list config maps\")\n\n\terr = kubeclient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, metav1.DeleteOptions{})\n\texpectNoError(t, err, \"failed to delete audit-configmap\")\n}", "func (c Config) TopicMap() (m map[string]TopicConfig) {\n\tm = make(map[string]TopicConfig)\n\tfor i, t := range c.Topics {\n\t\tm[t.Name] = c.Topics[i]\n\t}\n\treturn\n}", "func (o Origins) GetConfig(originName string) *oo.Options {\n\tif c, ok := o[originName]; ok {\n\t\treturn c.Configuration()\n\t}\n\treturn nil\n}", "func (a *SpecConfig) getAlertConfigMap() *components.ConfigMap {\n\tconfigMap := components.NewConfigMap(horizonapi.ConfigMapConfig{\n\t\tName: util.GetResourceName(a.alert.Name, util.AlertName, \"blackduck-config\"),\n\t\tNamespace: a.alert.Spec.Namespace,\n\t})\n\n\tconfigMapData := map[string]string{}\n\t// Add Black Duck CFSSL host\n\tif *a.alert.Spec.StandAlone {\n\t\tconfigMapData[\"HUB_CFSSL_HOST\"] = util.GetResourceName(a.alert.Name, util.AlertName, \"cfssl\")\n\t}\n\n\t// Add Environs\n\tfor _, environ := range a.alert.Spec.Environs {\n\t\tvals := strings.SplitN(environ, \":\", 2)\n\t\tif len(vals) != 2 {\n\t\t\tlog.Errorf(\"Could not split environ '%s' on ':'\", environ)\n\t\t\tcontinue\n\t\t}\n\t\tenvironKey := strings.TrimSpace(vals[0])\n\t\tenvironVal := strings.TrimSpace(vals[1])\n\t\tif len(environKey) > 0 && len(environVal) > 0 {\n\t\t\tconfigMapData[environKey] = environVal\n\t\t}\n\t}\n\n\t// Add data to the ConfigMap\n\tconfigMap.AddData(configMapData)\n\n\tconfigMap.AddLabels(map[string]string{\"app\": util.AlertName, \"name\": a.alert.Name, \"component\": \"alert\"})\n\n\treturn configMap\n}", "func (regionEnv *RegionEnv) porter2k8sConfigMap() {\n\tgvk := schema.GroupVersionKind{Version: \"v1\", Kind: \"ConfigMap\"}\n\tmapping, _ := regionEnv.Mapper.RESTMapping(gvk.GroupKind(), 
gvk.Version)\n\tdynamicInterface := regionEnv.DynamicClient.Resource(mapping.Resource).Namespace(regionEnv.Cfg.Namespace)\n\tconfigMap, configMapErr := dynamicInterface.Get(regionEnv.Context, \"porter2k8s\", metav1.GetOptions{})\n\t// Fall back to the porter2k8s configmap if it exists\n\tif errors.IsNotFound(configMapErr) {\n\t\tregionEnv.Logger.Info(\"No cluster specific porter2k8s config found. Looking for porter2k8s config instead.\")\n\t\tconfigMap, configMapErr = dynamicInterface.Get(regionEnv.Context, \"porter2k8s\", metav1.GetOptions{})\n\t\tif errors.IsNotFound(configMapErr) {\n\t\t\tregionEnv.Logger.Info(\"No cluster specific porter2k8s config found.\")\n\t\t}\n\t}\n\tif configMapErr == nil {\n\t\tdata, _, _ := unstructured.NestedStringMap(configMap.Object, \"data\")\n\t\t// Overwrite cluster settings from the namespace with those from the service's configuration.\n\t\tfor key, value := range regionEnv.ClusterSettings {\n\t\t\tdata[key] = value\n\t\t}\n\t\tregionEnv.ClusterSettings = data\n\t\treturn\n\t}\n\tregionEnv.Logger.Errorf(\"unexpected Error retrieving porter2k8s configMap.\\n%v\", configMapErr)\n\treturn\n}", "func (tr *MongoCollection) GetConnectionDetailsMapping() map[string]string {\n\treturn nil\n}", "func NewConfig() Config {\n\treturn make(map[string]interface{})\n}", "func ConfigMapNameReference() *ConfigMapNameReferenceApplyConfiguration {\n\treturn &ConfigMapNameReferenceApplyConfiguration{}\n}", "func GetBackendConfigmapMap(ctx context.Context, configmapMeta string) (map[string]interface{}, error) {\n\tconfigmap, err := GetBackendConfigmap(ctx, configmapMeta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ConvertConfigmapToMap(ctx, configmap)\n}", "func (d *Dao) Configs(c context.Context) (res map[string]string, err error) {\n\tvar rows *sql.Rows\n\tif rows, err = d.db.Query(c, _getAllConfigSQL); err != nil {\n\t\tlog.Error(\"d.getAllConfigSQL.Query error(%v)\", err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tres = 
map[string]string{}\n\tfor rows.Next() {\n\t\tvar r model.Config\n\t\tif err = rows.Scan(&r.ID, &r.Property, &r.Name, &r.Val, &r.Ctime); err != nil {\n\t\t\tlog.Error(\"row.Scan() error(%v)\", err)\n\t\t\tres = nil\n\t\t\treturn\n\t\t}\n\t\tres[r.Property] = r.Val\n\t}\n\terr = rows.Err()\n\treturn\n}", "func (r *NodeReconciler) specConfigmap(node *filecoinv1alpha1.Node, configmap *corev1.ConfigMap, configToml string) {\n\tlabels := map[string]string{\n\t\t\"name\": \"node\",\n\t\t\"instance\": node.Name,\n\t}\n\n\tconfigmap.ObjectMeta.Labels = labels\n\n\tif configmap.Data == nil {\n\t\tconfigmap.Data = map[string]string{}\n\t}\n\n\tconfigmap.Data[\"config.toml\"] = configToml\n\n}", "func (cop *ConnPool) GetConf() config.Config {\n\tconf := *cop.conf\n\treturn conf\n}", "func Map(configMap config.Map) Option {\n\treturn func(options *options) {\n\t\toptions.configMap = configMap\n\t}\n}" ]
[ "0.6452948", "0.6371874", "0.63398594", "0.63267", "0.63186795", "0.6278472", "0.62564194", "0.62310785", "0.6168107", "0.61330783", "0.612575", "0.6114168", "0.60752857", "0.60731614", "0.6038828", "0.6006406", "0.59711593", "0.5966193", "0.59642905", "0.59300566", "0.5902356", "0.5900611", "0.5898247", "0.58857596", "0.5851891", "0.5819966", "0.5783119", "0.5746463", "0.57195026", "0.569831", "0.56819636", "0.56778497", "0.56773275", "0.5620537", "0.56194866", "0.55821073", "0.5577274", "0.5567309", "0.55498964", "0.55475754", "0.5547291", "0.5514071", "0.5508502", "0.54988706", "0.54866844", "0.547302", "0.5459073", "0.5448974", "0.5422223", "0.5411137", "0.539127", "0.5388513", "0.5379686", "0.5369619", "0.5357966", "0.5356585", "0.5329789", "0.53284967", "0.5326956", "0.5318815", "0.53138155", "0.53133446", "0.52967477", "0.5281995", "0.5273469", "0.52723587", "0.5271911", "0.5269946", "0.52593386", "0.5256634", "0.52455217", "0.5240882", "0.5238056", "0.5218547", "0.5218304", "0.5209833", "0.5204764", "0.5201682", "0.52015257", "0.51905787", "0.51767755", "0.5174944", "0.5164612", "0.5162212", "0.5161423", "0.5157008", "0.51522344", "0.51494867", "0.5137249", "0.5124037", "0.51200616", "0.5119205", "0.5115817", "0.5109659", "0.510765", "0.51038796", "0.50920004", "0.50906616", "0.5079034", "0.50743955" ]
0.7840895
0
labelsForConjurConfig returns the labels for selecting the resources belonging to the given ConjurConfig CR name.
func labelsForConjurConfig(name string) map[string]string { return map[string]string{"app": "conjur-config", "conjur-config-cr": name} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) ConfigLabels() map[string]string {\n\t// We have to make a copy to preserve the privacy of g.image.Config.\n\tcopy := map[string]string{}\n\tfor k, v := range g.image.Config.Labels {\n\t\tcopy[k] = v\n\t}\n\treturn copy\n}", "func getResourceLabelsForModel(namespace, name string) map[string]string {\n\tprojectId, _ := gce.ProjectID()\n\tlocation, _ := gce.InstanceAttributeValue(\"cluster-location\")\n\tlocation = strings.TrimSpace(location)\n\tclusterName, _ := gce.InstanceAttributeValue(\"cluster-name\")\n\tclusterName = strings.TrimSpace(clusterName)\n\treturn map[string]string{\n\t\t\"project_id\": projectId,\n\t\t\"location\": location,\n\t\t\"cluster_name\": clusterName,\n\t\t\"namespace_name\": namespace,\n\t\t\"pod_name\": name,\n\t}\n}", "func GetClanLabels(qparms rest.QParms) ([]Label, error) {\n\tvar sb strings.Builder\n\tsb.Grow(100)\n\tsb.WriteString(config.Data.BaseURL)\n\tsb.WriteString(\"/labels/clans/\")\n\n\tbody, err := get(sb.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse into an array of clans\n\ttype respType struct {\n\t\tLabels []Label `json:\"items\"`\n\t}\n\tvar resp respType\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\tlog.Debug(\"failed to parse the json response\")\n\t\treturn nil, err\n\t}\n\n\treturn resp.Labels, nil\n}", "func GetLabels(component constants.ComponentName, cr_name string) map[string]string {\n\treturn generateComponentLabels(component, cr_name)\n}", "func (r *ConjurConfigReconciler) configMapForConjurConfig(\n\tc *conjurv1alpha1.ConjurConfig, name string) *v1.ConfigMap {\n\n\tls := labelsForConjurConfig(c.Name)\n\n\tconjurAccount := os.Getenv(\"conjurAccount\")\n\tb, err := ioutil.ReadFile(\"/etc/conjur/conjurAccount\")\n\tif err == nil {\n\t\tlog.Info(\"Found conjurAccount file, using instead of env var\")\n\t\tconjurAccount = string(b)\n\t}\n\tconjurSslCertificate := os.Getenv(\"conjurSslCertificate\")\n\tb, err = 
ioutil.ReadFile(\"/etc/conjur/conjurSslCertificate\")\n\tif err == nil {\n\t\tlog.Info(\"Found conjurSslCertificate file, using instead of env var\")\n\t\tconjurSslCertificate = string(b)\n\t}\n\n\tconfigMap := &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: c.Namespace,\n\t\t\tLabels: ls,\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"CONJUR_ACCOUNT\": conjurAccount,\n\t\t\t\"CONJUR_APPLIANCE_URL\": os.Getenv(\"conjurApplianceUrl\"),\n\t\t\t\"CONJUR_AUTHN_URL\": fmt.Sprintf(\"%s/authn-k8s/%s\",\n\t\t\t\tos.Getenv(\"conjurApplianceUrl\"),\n\t\t\t\tos.Getenv(\"authnK8sAuthenticatorID\")),\n\t\t\t\"CONJUR_SSL_CERTIFICATE\": conjurSslCertificate,\n\t\t},\n\t}\n\t// Set ConjurConfig instance as the owner and controller\n\tctrl.SetControllerReference(c, configMap, r.Scheme)\n\treturn configMap\n}", "func RsrcLabels(cr, name, component string) map[string]string {\n\treturn map[string]string{\n\t\tLabelAirflowCR: cr,\n\t\tLabelAirflowCRName: name,\n\t\tLabelAirflowComponent: component,\n\t}\n}", "func (drc *DummyRegistryClient) LabelsForImageName(in string) (labels map[string]string, err error) {\n\tres := drc.Called(in)\n\treturn res.Get(0).(map[string]string), res.Error(1)\n}", "func (o TrustConfigOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *TrustConfig) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func deploymentLabels(ctx *core.DeploymentContext) map[string]string {\n\treturn map[string]string{\n\t\triserLabel(\"deployment\"): ctx.DeploymentConfig.Name,\n\t\triserLabel(\"environment\"): ctx.DeploymentConfig.EnvironmentName,\n\t\triserLabel(\"app\"): string(ctx.DeploymentConfig.App.Name),\n\t}\n}", "func (c *Config) Labels() map[string]string {\n\treturn c.labels\n}", "func (nv *NetView) ConfigLabels(labs []string) bool {\n\tvs := nv.Scene()\n\tlgp, err := vs.ChildByNameTry(\"Labels\", 1)\n\tif err != nil {\n\t\tlgp = gi3d.AddNewGroup(vs, vs, \"Labels\")\n\t}\n\n\tlbConfig := 
kit.TypeAndNameList{}\n\tfor _, ls := range labs {\n\t\tlbConfig.Add(gi3d.KiT_Text2D, ls)\n\t}\n\tmods, updt := lgp.ConfigChildren(lbConfig)\n\tif mods {\n\t\tfor i, ls := range labs {\n\t\t\tlb := lgp.ChildByName(ls, i).(*gi3d.Text2D)\n\t\t\tlb.Defaults(vs)\n\t\t\tlb.SetText(vs, ls)\n\t\t\tlb.SetProp(\"text-align\", gist.AlignLeft)\n\t\t\tlb.SetProp(\"vertical-align\", gist.AlignTop)\n\t\t\tlb.SetProp(\"white-space\", gist.WhiteSpacePre)\n\t\t}\n\t}\n\tlgp.UpdateEnd(updt)\n\treturn mods\n}", "func (r *RedisFailoverHandler) getLabels(rf *redisfailoverv1.RedisFailover) map[string]string {\n\tdynLabels := map[string]string{\n\t\trfLabelNameKey: rf.Name,\n\t}\n\n\t// Filter the labels based on the whitelist\n\tfilteredCustomLabels := make(map[string]string)\n\tif rf.Spec.LabelWhitelist != nil && len(rf.Spec.LabelWhitelist) != 0 {\n\t\tfor _, regex := range rf.Spec.LabelWhitelist {\n\t\t\tcompiledRegexp, err := regexp.Compile(regex)\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Errorf(\"Unable to compile label whitelist regex '%s', ignoring it.\", regex)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor labelKey, labelValue := range rf.Labels {\n\t\t\t\tif match := compiledRegexp.MatchString(labelKey); match {\n\t\t\t\t\tfilteredCustomLabels[labelKey] = labelValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// If no whitelist is specified then don't filter the labels.\n\t\tfilteredCustomLabels = rf.Labels\n\t}\n\treturn util.MergeLabels(defaultLabels, dynLabels, filteredCustomLabels)\n}", "func (c *Configuration) LabelFor(org, repo string) *TiCommunityLabel {\n\tfullName := fmt.Sprintf(\"%s/%s\", org, repo)\n\tfor _, label := range c.TiCommunityLabel {\n\t\tif !sets.NewString(label.Repos...).Has(fullName) {\n\t\t\tcontinue\n\t\t}\n\t\treturn &label\n\t}\n\t// If you don't find anything, loop again looking for an org config\n\tfor _, label := range c.TiCommunityLabel {\n\t\tif !sets.NewString(label.Repos...).Has(org) {\n\t\t\tcontinue\n\t\t}\n\t\treturn &label\n\t}\n\treturn 
&TiCommunityLabel{}\n}", "func (l RegistrationLabels) Labels() []string {\n\treturn []string{\"result\", \"type\", \"src\"}\n}", "func (c *Config) LabelsFilter() []string {\n\treturn c.labelsFilter\n}", "func (r *Reconciler) deploymentLabels() map[string]string {\n\treturn map[string]string{\n\t\t\"app.kubernetes.io/name\": componentName,\n\t\t\"app.kubernetes.io/part-of\": \"Linkerd\",\n\t\t\"app.kubernetes.io/version\": string(r.Config.Spec.Version),\n\t}\n}", "func (o ClusterOutput) ResourceLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Cluster) pulumi.StringMapOutput { return v.ResourceLabels }).(pulumi.StringMapOutput)\n}", "func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {\n\tcfg, err := getAPIConfig(sdc, baseDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get API config: %w\", err)\n\t}\n\tms := getServiceLabels(cfg)\n\treturn ms, nil\n}", "func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string]string, error) {\n\t// Set the default namespace if unset\n\tctx = ensureNamespace(ctx)\n\n\tmanifest, err := r.getManifest(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timageConfig, err := r.getImage(ctx, *manifest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn imageConfig.Config.Labels, nil\n}", "func getBuildLabels(name string) map[string]string {\n\treturn map[string]string{\n\t\t\"build\": name,\n\t\t\"component_cr\": name,\n\t}\n}", "func buildLabels(params CRDCreationParameters) (map[string]string, error) {\n\tlabels := map[string]string{}\n\tlabels[tekton.LabelOwner] = params.GitInfo.Organisation\n\tlabels[tekton.LabelRepo] = params.GitInfo.Name\n\tlabels[tekton.LabelBranch] = params.BranchIdentifier\n\tif params.Context != \"\" {\n\t\tlabels[tekton.LabelContext] = params.Context\n\t}\n\tlabels[tekton.LabelBuild] = params.BuildNumber\n\n\t// add any custom labels\n\tcustomLabels, err := util.ExtractKeyValuePairs(params.Labels, \"=\")\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn util.MergeMaps(labels, customLabels), nil\n}", "func GetLabels() []string {\n\tvar res []string\n\tlabelsURL := \"https://raw.githubusercontent.com/googlecreativelab/quickdraw-dataset/master/categories.txt\"\n\tresp, err := http.Get(labelsURL)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to fetch labels\")\n\t}\n\tres = strings.Split(string(body), \"\\n\")\n\treturn res\n}", "func (c *Container) Labels() map[string]string {\n\tlabels := make(map[string]string)\n\tfor key, value := range c.config.Labels {\n\t\tlabels[key] = value\n\t}\n\treturn labels\n}", "func (c *Client) Labels(containerID string) (map[string]string, error) {\n\tr, err := c.http.Get(fmt.Sprintf(\"%scontainers/%s/json\", baseAddr, containerID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = statusCode(r.StatusCode, http.StatusOK); err != nil {\n\t\treturn nil, err\n\t}\n\n\tinspect := struct {\n\t\tConfig struct {\n\t\t\tLabels map[string]string `json:\"Labels\"`\n\t\t} `json:\"Config\"`\n\t}{}\n\n\treturn inspect.Config.Labels, json.NewDecoder(r.Body).Decode(&inspect)\n}", "func (c *Component) Labels() map[string]string {\n\treturn Labels(c.CR, c.Name)\n}", "func labelsForIntegration(name string) map[string]string {\n\treturn map[string]string{\"app\": \"integration\", \"integration_cr\": name}\n}", "func (c *ContainerContext) Labels() string {\n\tif c.c.Labels == nil {\n\t\treturn \"\"\n\t}\n\n\tvar joinLabels []string\n\tfor k, v := range c.c.Labels {\n\t\tjoinLabels = append(joinLabels, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\treturn strings.Join(joinLabels, \",\")\n}", "func GetLabels(component, name, identifier string) map[string]string {\n\t// see https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels\n\treturn map[string]string{\n\t\t\"app.kubernetes.io/managed-by\": \"splunk-operator\",\n\t\t\"app.kubernetes.io/component\": component,\n\t\t\"app.kubernetes.io/name\": 
name,\n\t\t\"app.kubernetes.io/part-of\": fmt.Sprintf(\"splunk-%s-%s\", identifier, component),\n\t\t\"app.kubernetes.io/instance\": fmt.Sprintf(\"splunk-%s-%s\", identifier, name),\n\t}\n}", "func labelsForPlex(name string) map[string]string {\n\treturn map[string]string{\"app\": \"plex\", \"tier\": \"frontend\", \"environment\": \"prod\"}\n\t}", "func RackLabels(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) map[string]string {\n\trecLabels := recommendedLabels()\n\trackLabels := DatacenterLabels(c)\n\trackLabels[constants.RackNameLabel] = r.Name\n\n\treturn mergeLabels(rackLabels, recLabels)\n}", "func (opts CreateOptions) Labels() map[string]string {\n\treturn opts.toConfig().Labels\n}", "func (m *Manager) Labels() map[string]string {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\treturn m.Config.Labels\n}", "func LabelsForApp(projectName, appName string) map[string]string {\n\treturn map[string]string{\n\t\t\"app\": appName,\n\t\tprojectCRKey: projectName,\n\t}\n}", "func getRegistryDeploymentLabels(pkgName string) map[string]string {\n\tlabels := makeRegistryLabels(pkgName)\n\tlabels[\"server-name\"] = getRegistryServerName(pkgName)\n\treturn labels\n}", "func labelsForWebApp(name string) map[string]string {\n\treturn map[string]string{\"app\": \"WebApp\", \"WebApp_cr\": name}\n}", "func (o ConnectionProfileOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *ConnectionProfile) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o LookupClientTlsPolicyResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupClientTlsPolicyResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func getSelectorLabels(component string) map[string]string {\n\tlabels := map[string]string{}\n\tswitch component {\n\tcase \"imc-controller\":\n\t\tlabels[\"messaging.knative.dev/channel\"] = \"in-memory-channel\"\n\t\tlabels[\"messaging.knative.dev/role\"] = \"controller\"\n\tcase 
\"imc-dispatcher\":\n\t\tlabels[\"messaging.knative.dev/channel\"] = \"in-memory-channel\"\n\t\tlabels[\"messaging.knative.dev/role\"] = \"dispatcher\"\n\tcase \"mt-broker-filter\":\n\t\tlabels[\"eventing.knative.dev/brokerRole\"] = \"filter\"\n\tcase \"mt-broker-ingress\":\n\t\tlabels[\"eventing.knative.dev/brokerRole\"] = \"ingress\"\n\tcase \"kafka-controller-manager\":\n\t\tlabels[\"control-plane\"] = \"kafka-controller-manager\"\n\tdefault:\n\t\tlabels[\"app\"] = component\n\t}\n\treturn labels\n}", "func (o ProjectOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Project) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func labelsForHybrisBase(name string) map[string]string {\n\treturn map[string]string{\n\t\t\"app\": \"hybrisBase\",\n\t\t\"hybrisBase_cr\": name,\n\t}\n}", "func defaultLabels(j *v1alpha1.Jira) map[string]string {\n\treturn map[string]string{\n\t\t\"app\": \"jira\",\n\t\t\"cluster\": j.Name,\n\t}\n}", "func validateLabelsConfig(c LabelsConfig) error {\n\tif c == nil {\n\t\treturn errors.New(ErrEmptyLabelStageConfig)\n\t}\n\tfor labelName, labelSrc := range c {\n\t\tif !model.LabelName(labelName).IsValid() {\n\t\t\treturn fmt.Errorf(ErrInvalidLabelName, labelName)\n\t\t}\n\t\t// If no label source was specified, use the key name\n\t\tif labelSrc == nil || *labelSrc == \"\" {\n\t\t\tlName := labelName\n\t\t\tc[labelName] = &lName\n\t\t}\n\t}\n\treturn nil\n}", "func getReplicationControllersForLabels(c client.ReplicationControllerInterface, labelsToMatch labels.Labels) string {\n\t// Get all replication controllers.\n\t// TODO this needs a namespace scope as argument\n\trcs, err := c.List(labels.Everything())\n\tif err != nil {\n\t\tglog.Fatalf(\"Error getting replication controllers: %v\\n\", err)\n\t}\n\n\t// Find the ones that match labelsToMatch.\n\tvar matchingRCs []api.ReplicationController\n\tfor _, controller := range rcs.Items {\n\t\tselector := 
labels.SelectorFromSet(controller.Spec.Selector)\n\t\tif selector.Matches(labelsToMatch) {\n\t\t\tmatchingRCs = append(matchingRCs, controller)\n\t\t}\n\t}\n\n\t// Format the matching RC's into strings.\n\tvar rcStrings []string\n\tfor _, controller := range matchingRCs {\n\t\trcStrings = append(rcStrings, fmt.Sprintf(\"%s (%d/%d replicas created)\", controller.Name, controller.Status.Replicas, controller.Spec.Replicas))\n\t}\n\n\tlist := strings.Join(rcStrings, \", \")\n\tif list == \"\" {\n\t\treturn \"<none>\"\n\t}\n\treturn list\n}", "func getLabels(\n docker *client.Client,\n containerId string) (labels map[string]string, err error) {\n\n inspect, err := docker.ContainerInspect(context.Background(), containerId)\n if err != nil {\n return\n }\n\n labels = inspect.Config.Labels\n return\n}", "func getLabelToMatchForKind(kind string) []string {\n\tswitch kind {\n\tcase \"apiservice\": // API Services are not namespaced\n\t\treturn []string{\"apiservice\"}\n\tcase \"customresourcedefinition\": // CRD are not namespaced\n\t\treturn []string{\"customresourcedefinition\"}\n\tcase \"job\": // job metrics use specific label\n\t\treturn []string{\"job_name\", \"namespace\"}\n\tcase \"node\": // persistent nodes are not namespaced\n\t\treturn []string{\"node\"}\n\tcase \"persistentvolume\": // persistent volumes are not namespaced\n\t\treturn []string{\"persistentvolume\"}\n\tdefault:\n\t\treturn []string{kind, \"namespace\"}\n\t}\n}", "func (o QperfSpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v QperfSpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func labelsForInfluxdb(name string) map[string]string {\n\treturn map[string]string{\"app\": \"influxdb\", \"influxdb_cr\": name}\n}", "func (o LookupCloudCredentialResultOutput) Labels() pulumi.MapOutput {\n\treturn o.ApplyT(func(v LookupCloudCredentialResult) map[string]interface{} { return v.Labels }).(pulumi.MapOutput)\n}", 
"func selectorTaskLabels(responseObject map[string]interface{}) map[string]interface{} {\n\ttaskSpecObject := utils.GetJSONObject(responseObject, \"Spec\")\n\tif taskSpecObject != nil {\n\t\tcontainerSpecObject := utils.GetJSONObject(taskSpecObject, \"ContainerSpec\")\n\t\tif containerSpecObject != nil {\n\t\t\treturn utils.GetJSONObject(containerSpecObject, \"Labels\")\n\t\t}\n\t}\n\treturn nil\n}", "func (v *Volume) Labels() map[string]string {\n\tlabels := make(map[string]string)\n\tfor key, value := range v.config.Labels {\n\t\tlabels[key] = value\n\t}\n\treturn labels\n}", "func (l *labeler) Labels(otherLabels map[string]string) map[string]string {\n\tlabels := map[string]string{\n\t\tdockerLabelPrefix + \".job.id\": strconv.FormatInt(l.build.ID, 10),\n\t\tdockerLabelPrefix + \".job.url\": l.build.JobURL(),\n\t\tdockerLabelPrefix + \".job.sha\": l.build.GitInfo.Sha,\n\t\tdockerLabelPrefix + \".job.before_sha\": l.build.GitInfo.BeforeSha,\n\t\tdockerLabelPrefix + \".job.ref\": l.build.GitInfo.Ref,\n\t\tdockerLabelPrefix + \".project.id\": strconv.FormatInt(l.build.JobInfo.ProjectID, 10),\n\t\tdockerLabelPrefix + \".pipeline.id\": l.build.GetAllVariables().Value(\"CI_PIPELINE_ID\"),\n\t\tdockerLabelPrefix + \".runner.id\": l.build.Runner.ShortDescription(),\n\t\tdockerLabelPrefix + \".runner.local_id\": strconv.Itoa(l.build.RunnerID),\n\t\tdockerLabelPrefix + \".managed\": \"true\",\n\t}\n\n\tfor k, v := range otherLabels {\n\t\tlabels[fmt.Sprintf(\"%s.%s\", dockerLabelPrefix, k)] = v\n\t}\n\n\treturn labels\n}", "func (o ProjectRoleTemplateBindingOutput) Labels() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *ProjectRoleTemplateBinding) pulumi.MapOutput { return v.Labels }).(pulumi.MapOutput)\n}", "func (b *Bot) labels(ctx context.Context, files []github.PullRequestFile) ([]string, error) {\n\tvar labels []string\n\n\t// The branch name is unsafe, but here we are simply adding a label.\n\tif isReleaseBranch(b.c.Environment.UnsafeBase) {\n\t\tlog.Println(\"Label: 
Found backport branch.\")\n\t\tlabels = append(labels, \"backport\")\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasPrefix(file.Name, \"vendor/\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k, v := range prefixes {\n\t\t\tif strings.HasPrefix(file.Name, k) {\n\t\t\t\tlog.Printf(\"Label: Found prefix %v, attaching labels: %v.\", k, v)\n\t\t\t\tlabels = append(labels, v...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn deduplicate(labels), nil\n}", "func (o BuildRunStatusBuildSpecRuntimeOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecRuntime) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o CatalogOutput) Labels() pulumi.MapOutput {\n\treturn o.ApplyT(func(v *Catalog) pulumi.MapOutput { return v.Labels }).(pulumi.MapOutput)\n}", "func CreateResourceLabels(resourceID string) map[string]string {\n\tlabels := make(map[string]string)\n\tlabels[\"resource_type\"] = strings.Split(resourceID, \"/\")[6]\n\tlabels[\"resource_group\"] = strings.Split(resourceID, \"/\")[4]\n\tlabels[\"resource_name\"] = strings.Split(resourceID, \"/\")[8]\n\treturn labels\n}", "func (c *FromCommand) Labels() string {\n\treturn ExpandEnv(c.args[kubegetParams.labels])\n}", "func (o LookupConnectivityTestResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupConnectivityTestResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (l CurrentLabels) Labels() []string {\n\treturn []string{\"type\"}\n}", "func labelsForCascade(name string) map[string]string {\n\treturn map[string]string{selectorKey: name, appKey: appValue}\n}", "func GetNameStatusRegistRequests(statusRegistRequest int) string {\n\tstatusName := \"\"\n\n\tswitch statusRegistRequest {\n\tcase cf.PendingRequestStatus:\n\t\tstatusName = \"Pending\"\n\tcase cf.DenyRequestStatus:\n\t\tstatusName = \"Deny\"\n\tcase cf.AcceptRequestStatus:\n\t\tstatusName = \"Accepted\"\n\tcase cf.RegisteredRequestStatus:\n\t\tstatusName = 
\"Registered\"\n\t}\n\n\treturn statusName\n}", "func SelectorLabels(name, instance string) map[string]string {\n\treturn map[string]string{\n\t\tApplicationNameLabelKey: name,\n\t\tApplicationInstanceLabelKey: instance,\n\t}\n}", "func (c *Config) ResourceConfig(name string) (*ResourceConfig, bool) {\n\trc, ok := c.Resources[name]\n\treturn &rc, ok\n}", "func (o *ConnectorTypeAllOf) GetLabels() []string {\n\tif o == nil || o.Labels == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.Labels\n}", "func (a ProblemAdapter) GetLabels() map[string]string {\n\treturn nil\n}", "func LabelList(project string) ([]*gitlab.Label, error) {\n\tp, err := FindProject(project)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlist, _, err := lab.Labels.ListLabels(p.ID, &gitlab.ListLabelsOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn list, nil\n}", "func labelsForNacos(name string) map[string]string {\n\treturn map[string]string{\"app\": \"nacos\", \"release\": name}\n}", "func (o KubernetesNodePoolOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *KubernetesNodePool) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func ResourcePoolsToConfig(pools []*resourcepoolv1.ResourcePool,\n) []config.ResourcePoolConfig {\n\trpConfigs := make([]config.ResourcePoolConfig, len(pools))\n\tfor i, rp := range pools {\n\t\trpConfigs[i] = config.ResourcePoolConfig{\n\t\t\tPoolName: rp.Name,\n\t\t}\n\t}\n\n\treturn rpConfigs\n}", "func getLabels(cos map[string]*common.Comment, rows [][]string) map[string]string {\n\tlbls := map[string]string{}\n\tfor _, row := range rows {\n\t\tfor _, co := range cos {\n\t\t\tif co.GetAuthor() == row[0] && co.GetText() == row[2] {\n\t\t\t\tlbls[co.GetName()] = row[3]\n\t\t\t}\n\t\t}\n\t}\n\treturn lbls\n}", "func getLabels (labelsStr string) (map[string]string, derrors.Error) {\n\n\tlabels := make (map[string]string, 0)\n\tif labelsStr == \"\" {\n\t\treturn labels, nil\n\t}\n\n\tif 
labelsStr != \"\" {\n\t\tlabelsList := strings.Split(labelsStr, \",\")\n\t\tfor _, paramStr := range labelsList {\n\t\t\tparam := strings.Split(paramStr, \":\")\n\t\t\tif len(param) != 2 {\n\t\t\t\treturn nil, derrors.NewInvalidArgumentError(\"invalid labels format.\").WithParams(labelsStr)\n\t\t\t}\n\t\t\tlabels[param[0]] = param[1]\n\t\t}\n\t}\n\n\treturn labels, nil\n}", "func (o *CreateOptions) GetLabels() map[string]string {\n\tif o.Labels == nil {\n\t\tvar z map[string]string\n\t\treturn z\n\t}\n\treturn o.Labels\n}", "func (o ConnectorOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Connector) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (c *Collection) Label() {\n\tfor _, resource := range c.Items {\n\t\tresource.Label(c.ResourceLabel)\n\t}\n}", "func (o MonitoredResourceDescriptorOutput) Labels() LabelDescriptorArrayOutput {\n\treturn o.ApplyT(func(v MonitoredResourceDescriptor) []LabelDescriptor { return v.Labels }).(LabelDescriptorArrayOutput)\n}", "func (o Iperf3SpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func getLabelsByContainerID(containerID string, frameworks []frameworkInfo, log *logrus.Entry) map[string]string {\n\tlabels := map[string]string{}\n\tfor _, framework := range frameworks {\n\t\tlog.Debugf(\"Attempting to add labels to %v framework\", framework)\n\t\tfor _, executor := range framework.Executors {\n\t\t\tlog.Debugf(\"Found executor %v for framework %v\", framework, executor)\n\t\t\tif executor.Container == containerID {\n\t\t\t\tlog.Debugf(\"ContainerID %v for executor %v is a match, adding labels\", containerID, executor)\n\t\t\t\tfor _, pair := range executor.Labels {\n\t\t\t\t\tif _, inSlice := cosmosLabels[pair.Key]; inSlice {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif len(pair.Value) > maxLabelLength 
{\n\t\t\t\t\t\tlog.Warnf(\"Label %s is longer than %d chars; discarding label\", pair.Key, maxLabelLength)\n\t\t\t\t\t\tlog.Debugf(\"Discarded label value: %s\", pair.Value)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tlog.Debugf(\"Adding label for containerID %v: %v = %+v\", containerID, pair.Key, pair.Value)\n\t\t\t\t\tlabels[pair.Key] = pair.Value\n\t\t\t\t}\n\t\t\t\treturn labels\n\t\t\t}\n\t\t}\n\t}\n\treturn labels\n}", "func (c *Client) GetCronWorkflowLabels(namespace, name, prefix string) (labels map[string]string, err error) {\n\tcwf, err := c.ArgoprojV1alpha1().CronWorkflows(namespace).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"Namespace\": namespace,\n\t\t\t\"Name\": name,\n\t\t\t\"Error\": err.Error(),\n\t\t}).Error(\"CronWorkflow not found.\")\n\t\treturn nil, util.NewUserError(codes.NotFound, \"CronWorkflow not found.\")\n\t}\n\n\tlabels = label.FilterByPrefix(prefix, cwf.Labels)\n\tlabels = label.RemovePrefix(prefix, labels)\n\n\treturn\n}", "func (o LookupClusterRoleTemplateBindingResultOutput) Labels() pulumi.MapOutput {\n\treturn o.ApplyT(func(v LookupClusterRoleTemplateBindingResult) map[string]interface{} { return v.Labels }).(pulumi.MapOutput)\n}", "func (r *HookRunner) labels() map[string]string {\n\treturn map[string]string{\n\t\t\"plan\": string(r.Plan.UID),\n\t\t\"migration\": string(r.Plan.UID),\n\t\t\"step\": r.vm.Phase,\n\t\t\"vm\": r.vm.ID,\n\t}\n}", "func (o BuildSpecRuntimeOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v BuildSpecRuntime) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func labelsForApplication(name string) map[string]string {\n\treturn map[string]string{\"app\": \"application\", \"application_cr\": name}\n}", "func jiraLabels(j *v1alpha1.Jira) map[string]string {\n\tlabels := defaultLabels(j)\n\tfor key, val := range j.ObjectMeta.Labels {\n\t\tlabels[key] = val\n\t}\n\treturn labels\n}", "func (o *TemplateSummaryResources) 
GetLabels() []TemplateSummaryLabel {\n\tif o == nil {\n\t\tvar ret []TemplateSummaryLabel\n\t\treturn ret\n\t}\n\n\treturn o.Labels\n}", "func (o QperfSpecClientConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *QperfSpecClientConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o BuildRunStatusBuildSpecRuntimePtrOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecRuntime) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Labels\n\t}).(pulumi.StringMapOutput)\n}", "func GetPlayerLabels(qparms rest.QParms) ([]Label, error) {\n\tvar sb strings.Builder\n\tsb.Grow(100)\n\tsb.WriteString(config.Data.BaseURL)\n\tsb.WriteString(\"/labels/players/\")\n\n\tbody, err := get(sb.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse into an array of clans\n\ttype respType struct {\n\t\tLabels []Label `json:\"items\"`\n\t}\n\tvar resp respType\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\tlog.Debug(\"failed to parse the json response\")\n\t\treturn nil, err\n\t}\n\n\treturn resp.Labels, nil\n}", "func getResourcesForCrd(ctx context.Context, dynClient dynamic.Interface, crd *unstructured.Unstructured, log logr.Logger) ([]byte, error) {\n\tversions := getList(crd.Object, \"spec\", \"versions\")\n\tif len(versions) == 0 {\n\t\tlog.V(INFO).Info(\"crd has no version, skipping\", \"crd\", crd.GetName())\n\n\t\treturn nil, nil\n\t}\n\n\tgroup := getString(crd.Object, \"spec\", \"group\")\n\tresource := getString(crd.Object, \"spec\", \"names\", \"plural\")\n\tversion := getString(versions[0], \"name\")\n\n\tres := schema.GroupVersionResource{Group: group, Resource: resource, Version: version}\n\n\tresources, err := dynClient.Resource(res).List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load resources for CRD %s: 
%w\", res, err)\n\t}\n\n\tresourceBuffer := bytes.Buffer{}\n\n\tfor j := range resources.Items {\n\t\tresourceYaml, err := ToCleanedK8sResourceYAML(&resources.Items[j])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresourceBuffer.WriteString(\"---\\n\")\n\t\tresourceBuffer.Write(resourceYaml)\n\t}\n\n\treturn resourceBuffer.Bytes(), nil\n}", "func (s *cpuSource) GetLabels() (source.FeatureLabels, error) {\n\tlabels := source.FeatureLabels{}\n\tfeatures := s.GetFeatures()\n\n\t// CPUID\n\tfor f := range features.Flags[CpuidFeature].Elements {\n\t\tif s.cpuidFilter.unmask(f) {\n\t\t\tlabels[\"cpuid.\"+f] = true\n\t\t}\n\t}\n\n\t// CPU model\n\tfor k, v := range features.Attributes[Cpumodel].Elements {\n\t\tlabels[\"model.\"+k] = v\n\t}\n\n\t// Cstate\n\tfor k, v := range features.Attributes[CstateFeature].Elements {\n\t\tlabels[\"cstate.\"+k] = v\n\t}\n\n\t// Pstate\n\tfor k, v := range features.Attributes[PstateFeature].Elements {\n\t\tlabels[\"pstate.\"+k] = v\n\t}\n\n\t// RDT\n\tfor k, v := range features.Attributes[RdtFeature].Elements {\n\t\tif k == \"RDTL3CA_NUM_CLOSID\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tlabels[\"rdt.\"+k] = v\n\t}\n\n\t// Security\n\t// skipLabel lists features that will not have labels created but are only made available for\n\t// NodeFeatureRules (e.g. 
to be published via extended resources instead)\n\tskipLabel := sets.NewString(\n\t\t\"tdx.total_keys\",\n\t\t\"sgx.epc\",\n\t\t\"sev.encrypted_state_ids\",\n\t\t\"sev.asids\")\n\tfor k, v := range features.Attributes[SecurityFeature].Elements {\n\t\tif !skipLabel.Has(k) {\n\t\t\tlabels[\"security.\"+k] = v\n\t\t}\n\t}\n\n\t// SGX\n\tfor k, v := range features.Attributes[SgxFeature].Elements {\n\t\tlabels[\"sgx.\"+k] = v\n\t}\n\n\t// Secure Execution\n\tfor k, v := range features.Attributes[SeFeature].Elements {\n\t\tlabels[\"se.\"+k] = v\n\t}\n\n\t// SST\n\tfor k, v := range features.Attributes[SstFeature].Elements {\n\t\tlabels[\"power.sst_\"+k] = v\n\t}\n\n\t// Hyperthreading\n\tif v, ok := features.Attributes[TopologyFeature].Elements[\"hardware_multithreading\"]; ok {\n\t\tlabels[\"hardware_multithreading\"] = v\n\t}\n\n\t// NX\n\tif v, ok := features.Attributes[CoprocessorFeature].Elements[\"nx_gzip\"]; ok {\n\t\tlabels[\"coprocessor.nx_gzip\"] = v\n\t}\n\n\treturn labels, nil\n}", "func labelsForInstance(name string) map[string]string {\n\treturn map[string]string{\"app\": \"instance\", \"instance_cr\": name}\n}", "func (c *BcsMonitorClient) Labels(selectors []string, startTime, endTime time.Time) (*LabelResponse, error) {\n\tvar queryString string\n\tvar err error\n\tif len(selectors) != 0 {\n\t\tqueryString = c.setSelectors(queryString, selectors)\n\t}\n\tif !startTime.IsZero() {\n\t\tqueryString = c.setQuery(queryString, \"start\", fmt.Sprintf(\"%d\", startTime.Unix()))\n\t}\n\tif !endTime.IsZero() {\n\t\tqueryString = c.setQuery(queryString, \"end\", fmt.Sprintf(\"%d\", endTime.Unix()))\n\t}\n\turl := fmt.Sprintf(\"%s%s\", c.opts.Endpoint, LabelsPath)\n\tif queryString != \"\" {\n\t\turl = fmt.Sprintf(\"%s?%s\", url, queryString)\n\t}\n\turl = c.addAppMessage(url)\n\tstart := time.Now()\n\tdefer func() {\n\t\tprom.ReportLibRequestMetric(prom.BkBcsMonitor, \"Labels\", \"GET\", err, start)\n\t}()\n\trsp, err := c.requestClient.DoRequest(url, \"GET\", 
c.defaultHeader, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := &LabelResponse{}\n\terr = json.Unmarshal(rsp, result)\n\tif err != nil {\n\t\tblog.Errorf(\"json unmarshal error:%v\", err)\n\t\treturn nil, fmt.Errorf(\"do request error, url: %s, error:%v\", url, err)\n\t}\n\treturn result, nil\n}", "func (sm SchedulerModel) getLabels(group string, instance InstanceID) map[string]string {\n\tlabels := map[string]string{\n\t\t\"group\": group,\n\t\t\"instance\": string(instance),\n\t}\n\n\treturn labels\n}", "func (d *DefaultLabelStrategy) LabelNames(p *protocol.Protocol) []string {\n\tres := []string{\"name\", \"proto\", \"ip_version\", \"import_filter\", \"export_filter\"}\n\tif d.descriptionLabels && p.Description != \"\" {\n\t\tres = append(res, labelKeysFromDescription(p.Description)...)\n\t}\n\n\treturn res\n}", "func (c *BcsMonitorClient) Labels(selectors []string, startTime, endTime time.Time) (*LabelResponse, error) {\n\tvar queryString string\n\tvar err error\n\tif len(selectors) != 0 {\n\t\tqueryString = c.setSelectors(queryString, selectors)\n\t}\n\tif !startTime.IsZero() {\n\t\tqueryString = c.setQuery(queryString, \"start\", fmt.Sprintf(\"%d\", startTime.Unix()))\n\t}\n\tif !endTime.IsZero() {\n\t\tqueryString = c.setQuery(queryString, \"end\", fmt.Sprintf(\"%d\", endTime.Unix()))\n\t}\n\turl := fmt.Sprintf(\"%s%s\", c.completeEndpoint, LabelsPath)\n\tif queryString != \"\" {\n\t\turl = fmt.Sprintf(\"%s?%s\", url, queryString)\n\t}\n\turl = c.addAppMessage(url)\n\tstart := time.Now()\n\tdefer func() {\n\t\tprom.ReportLibRequestMetric(prom.BkBcsMonitor, \"Labels\", \"GET\", err, start)\n\t}()\n\trsp, err := c.requestClient.DoRequest(url, \"GET\", c.defaultHeader, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := &LabelResponse{}\n\terr = json.Unmarshal(rsp, result)\n\tif err != nil {\n\t\tblog.Errorf(\"json unmarshal error:%v\", err)\n\t\treturn nil, fmt.Errorf(\"do request error, url: %s, error:%v\", url, err)\n\t}\n\treturn 
result, nil\n}", "func (b *Bundle) ConsortiumsConfig() (oldchannelconfig.Consortiums, bool) {\n\tresult := b.rootConfig.Consortiums()\n\treturn result, result != nil\n}", "func (o CertificateOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Certificate) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func GetLabelsForStackImage(stackID string, buildImage string, stackYaml StackYaml, config *RootCommandConfig) (map[string]string, error) {\n\tvar labels = make(map[string]string)\n\n\tgitLabels, err := getGitLabels(config)\n\tif err != nil {\n\t\tconfig.Warning.log(\"Not all labels will be set. \", err.Error())\n\t} else {\n\t\tif branchURL, ok := gitLabels[ociKeyPrefix+\"source\"]; ok {\n\t\t\tif contextDir, ok := gitLabels[appsodyImageCommitKeyPrefix+\"contextDir\"]; ok {\n\t\t\t\tbranchURL += contextDir\n\t\t\t\tgitLabels[ociKeyPrefix+\"url\"] = branchURL\n\t\t\t}\n\t\t\t// These are enforced by the stack lint so they should exist\n\t\t\tgitLabels[ociKeyPrefix+\"documentation\"] = branchURL + \"/README.md\"\n\t\t\tgitLabels[ociKeyPrefix+\"source\"] = branchURL + \"/image\"\n\t\t}\n\n\t\tfor key, value := range gitLabels {\n\t\t\tlabels[key] = value\n\t\t}\n\t}\n\n\t// build a ProjectConfig struct from the stackyaml so we can reuse getConfigLabels() func\n\tprojectConfig := ProjectConfig{\n\t\tProjectName: stackYaml.Name,\n\t\tVersion: stackYaml.Version,\n\t\tDescription: stackYaml.Description,\n\t\tLicense: stackYaml.License,\n\t\tMaintainers: stackYaml.Maintainers,\n\t}\n\tconfigLabels, err := getConfigLabels(projectConfig, \"stack.yaml\", config.LoggingConfig)\n\tif err != nil {\n\t\treturn labels, err\n\t}\n\tconfigLabels[appsodyStackKeyPrefix+\"id\"] = stackID\n\tconfigLabels[appsodyStackKeyPrefix+\"tag\"] = buildImage\n\n\tfor key, value := range configLabels {\n\t\tlabels[key] = value\n\t}\n\n\treturn labels, nil\n}", "func BuildConfigSelector(name string) labels.Selector {\n\treturn 
labels.Set{buildapi.BuildConfigLabel: buildapihelpers.LabelValue(name)}.AsSelector()\n}", "func (q *QuestionnaireT) LabelsByInputNames() (lblsByNames map[string]string, keys, lbls []string) {\n\n\tlblsByNames = map[string]string{} // init return\n\n\t// helpers\n\tkeysByPage := make([][]string, len(q.Pages))\n\tlblsByPage := make([][]string, len(q.Pages))\n\tlabelsPerRadio := map[string][]string{}\n\n\tfor i1 := 0; i1 < len(q.Pages); i1++ {\n\t\tfor i2 := 0; i2 < len(q.Pages[i1].Groups); i2++ {\n\t\t\tfor i3 := 0; i3 < len(q.Pages[i1].Groups[i2].Inputs); i3++ {\n\n\t\t\t\tinp := q.Pages[i1].Groups[i2].Inputs[i3]\n\n\t\t\t\tif inp.IsLayout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// going up/back until we have found a label\n\t\t\t\tlbl := \"\"\n\t\t\tnestedLoop:\n\t\t\t\tfor grUp := i2; grUp > -1; grUp-- {\n\n\t\t\t\t\tcountDownInputsFrom := i3\n\t\t\t\t\tif grUp != i2 {\n\t\t\t\t\t\tcountDownInputsFrom = len(q.Pages[i1].Groups[grUp].Inputs) - 1\n\t\t\t\t\t}\n\n\t\t\t\t\tfor inpUp := countDownInputsFrom; inpUp > -1; inpUp-- {\n\t\t\t\t\t\tlb := q.Pages[i1].Groups[grUp].Inputs[inpUp].Label.TrSilent(\"en\")\n\t\t\t\t\t\tlb = q.LabelCleanse(lb)\n\t\t\t\t\t\tif lb != \"\" {\n\t\t\t\t\t\t\tif lbl != \"\" {\n\t\t\t\t\t\t\t\t// slow, create a string buffer someday:\n\t\t\t\t\t\t\t\tlbl = lb + \" -- \" + lbl\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlbl = lb\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif q.LabelIsOutline(lb) {\n\t\t\t\t\t\t\t// log.Printf(\"\\t\\t\\tfound lb at gr%02v.inp%02v: '%v'\", grUp, inpUp, q.LabelCleanse(lb))\n\t\t\t\t\t\t\tbreak nestedLoop\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tlblsByPage[i1] = append(lblsByPage[i1], lbl)\n\t\t\t\tkeysByPage[i1] = append(keysByPage[i1], inp.Name)\n\n\t\t\t\t// special treatment for radio inputs - who occur several times:\n\t\t\t\t// collect their labels\n\t\t\t\tif inp.Type == \"radio\" {\n\t\t\t\t\tif lbl != \"\" {\n\t\t\t\t\t\tif labelsPerRadio[inp.Name] == nil 
{\n\t\t\t\t\t\t\tlabelsPerRadio[inp.Name] = []string{}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlabelsPerRadio[inp.Name] = append(labelsPerRadio[inp.Name], lbl)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// cleanse repeating radios\n\tfor pageIdx := 0; pageIdx < len(keysByPage); pageIdx++ {\n\t\tfor inpIdx, inpName := range keysByPage[pageIdx] {\n\t\t\tif lbls, ok := labelsPerRadio[inpName]; ok {\n\t\t\t\tlbls = cleanseIdentical(lbls)\n\t\t\t\tlbls = cleansePrefixes(lbls)\n\t\t\t\tlblsByPage[pageIdx][inpIdx] = strings.Join(lbls, \" -- \")\n\t\t\t}\n\t\t}\n\t}\n\n\t// cleanse repeating prefixes\n\tfor pageIdx := 0; pageIdx < len(lblsByPage); pageIdx++ {\n\t\tlblsByPage[pageIdx] = cleansePrefixes(lblsByPage[pageIdx])\n\t}\n\n\tfor pageIdx := 0; pageIdx < len(keysByPage); pageIdx++ {\n\t\tfor inpIdx, inpName := range keysByPage[pageIdx] {\n\t\t\tkeys = append(keys, inpName)\n\t\t\tlbls = append(keys, lblsByPage[pageIdx][inpIdx])\n\t\t\tlblsByNames[inpName] = lblsByPage[pageIdx][inpIdx]\n\t\t}\n\t}\n\n\treturn\n}", "func (r *Repo) ListLabels() github.Labels {\n\treturn r.cli.ListLabels(r.path)\n}" ]
[ "0.6093918", "0.58824027", "0.58429694", "0.58260775", "0.5772613", "0.57721615", "0.5721638", "0.5631717", "0.5623358", "0.55912167", "0.55769736", "0.55578935", "0.55528104", "0.5469654", "0.54685485", "0.546843", "0.5462578", "0.544288", "0.5354089", "0.5348128", "0.5333321", "0.5331416", "0.5311477", "0.5287337", "0.52633667", "0.5232888", "0.51904076", "0.51791805", "0.517074", "0.5168611", "0.5164645", "0.5153224", "0.51419795", "0.5141282", "0.51282287", "0.50739074", "0.507005", "0.5068511", "0.5055775", "0.5053229", "0.5036352", "0.502786", "0.502522", "0.5000314", "0.49955958", "0.49886942", "0.49785134", "0.4978049", "0.4969158", "0.4965765", "0.49346006", "0.49335584", "0.49318364", "0.49274278", "0.49154764", "0.49080953", "0.4902288", "0.48967823", "0.4884943", "0.48710954", "0.4860053", "0.4844968", "0.48443928", "0.48429245", "0.4840934", "0.48342168", "0.48244995", "0.48193535", "0.48174307", "0.4808114", "0.48055068", "0.48014235", "0.4789089", "0.47869667", "0.47843927", "0.478213", "0.47808808", "0.47803298", "0.47801685", "0.47787115", "0.4753005", "0.47524846", "0.47501877", "0.47491434", "0.47389635", "0.47331554", "0.47282854", "0.47220868", "0.4719746", "0.47174162", "0.4715159", "0.47020167", "0.47011146", "0.46940246", "0.46934712", "0.46885544", "0.46880335", "0.4685777", "0.4684849", "0.46828863" ]
0.8235097
0
SetupWithManager sets up the controller with the Manager.
func (r *ConjurConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&conjurv1alpha1.ConjurConfig{}). Owns(&v1.ConfigMap{}). Owns(&rbacv1.RoleBinding{}). Complete(r) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1.Node{}).\n\t\tNamed(ControllerName).\n\t\tComplete(r)\n}", "func (c *Controller) SetupWithManager(mgr ctrl.Manager) error {\n\tr := resource.NewManagedReconciler(mgr,\n\t\tresource.ManagedKind(v1alpha3.VirtualNetworkGroupVersionKind),\n\t\tresource.WithManagedConnectionPublishers(),\n\t\tresource.WithExternalConnecter(&connecter{client: mgr.GetClient()}))\n\n\tname := strings.ToLower(fmt.Sprintf(\"%s.%s\", v1alpha3.VirtualNetworkKind, v1alpha3.Group))\n\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tNamed(name).\n\t\tFor(&v1alpha3.VirtualNetwork{}).\n\t\tComplete(r)\n}", "func (r *ServiceManager) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1.Service{}).\n\t\tNamed(r.Name).\n\t\tComplete(r)\n}", "func (r *SkydiveReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&skydivegroupv1.Skydive{}).\n\t\tComplete(r)\n}", "func (r *MachineTester) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&machinev1alpha1.Machine{}).\n\t\tFor(&machinev1alpha1.MachineDeployment{}).\n\t\tFor(&machinev1alpha1.MachineSet{}).\n\t\tComplete(r)\n}", "func SetupWithManager(mgr ctrl.Manager) error {\n\tr, err := newReconciler(mgr)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&tenantv1.Tenant{}).\n\t\tComplete(r)\n}", "func (r *TestReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&testv1alpha1.Test{}).\n\t\tComplete(r)\n}", "func (r *LimitadorReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&limitadorv1alpha1.Limitador{}).\n\t\tComplete(r)\n}", "func (r *FlowTestReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&loggingpipelineplumberv1beta1.FlowTest{}).\n\t\tWithEventFilter(eventFilter()).\n\t\tComplete(r)\n}", "func (r *FalconContainerReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&falconv1alpha1.FalconContainer{}).\n\t\tComplete(r)\n}", "func (r *RouterServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1.Service{}).\n\t\tWithEventFilter(eventPredicates()).\n\t\tComplete(r)\n}", "func (r *GuestbookReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&appv1alpha1.Guestbook{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tOwns(&corev1.Service{}).\n\t\tComplete(r)\n}", "func (r *StudentReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&esdevopscomv1.Student{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr manager.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&toolchainv1alpha1.ChangeTierRequest{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).\n\t\tComplete(r)\n}", "func (r *VisitorAppReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&javiromanv1alpha1.VisitorApp{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tComplete(r)\n}", "func (r *MachineDeletionReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.MachineDeletion{}).\n\t\tComplete(r)\n}", "func (r *PvcConReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1.Pod{}).\n\t\tComplete(r)\n}", "func (r *RedisClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\n\tif err := 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&redisv1beta1.RedisCluster{}).\n\t\tOwns(&appsv1.StatefulSet{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tComplete(r); err != nil {\n\t\treturn err\n\t}\n\n\tr.handler = newHandler(mgr)\n\n\treturn nil\n}", "func (r *OrganizationReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&pipelinev1alpha1.Organization{}).\n\t\tComplete(r)\n}", "func (r *CascadeReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\n\t// TODO: start prometheus and layout watcher here.\n\n\tr.NodeManagerMap = make(map[string]*derechov1alpha1.CascadeNodeManager)\n\tr.MachinesMetrics = make(map[string]*derechov1alpha1.MachineMetrics)\n\n\tgo r.observeAndSchedule()\n\tgo r.listenUpdateView()\n\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&derechov1alpha1.Cascade{}).\n\t\tOwns(&v1.Pod{}).\n\t\tOwns(&v1.Service{}).\n\t\tOwns(&derechov1alpha1.CascadeNodeManager{}).\n\t\tComplete(r)\n}", "func (r *HotelReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&webappv1.Hotel{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&eventloggerv1.EventLogger{}).\n\t\tOwns(&corev1.Pod{}).\n\t\tOwns(&corev1.ServiceAccount{}).\n\t\tOwns(&rbacv1.Role{}).\n\t\tOwns(&rbacv1.RoleBinding{}).\n\t\tComplete(r)\n}", "func (r *NetworkReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&ethereumv1alpha1.Network{}).\n\t\tOwns(&appsv1.StatefulSet{}).\n\t\tOwns(&corev1.Service{}).\n\t\tOwns(&corev1.Secret{}).\n\t\tOwns(&corev1.PersistentVolumeClaim{}).\n\t\tOwns(&corev1.ConfigMap{}).\n\t\tComplete(r)\n}", "func (r *CacheBackendReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cachev1alpha1.CacheBackend{}).\n\t\tComplete(r)\n}", "func (rc *RouteController) 
SetupWithManager(mgr ctrl.Manager) error {\n\tresourceToBeProccesedPredicate := predicate.Funcs{\n\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t// Finalizers are used to check if a resource is being deleted, and perform there the needed actions\n\t\t\t// we don't want to reconcile on the delete of a resource.\n\t\t\treturn false\n\t\t},\n\t}\n\treturn ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).\n\t\tFor(&netv1alpha1.TunnelEndpoint{}).\n\t\tComplete(rc)\n}", "func (ec *EtcdCustodian) SetupWithManager(ctx context.Context, mgr ctrl.Manager, workers int) error {\n\tbuilder := ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{\n\t\tMaxConcurrentReconciles: workers,\n\t})\n\n\treturn builder.\n\t\tFor(&druidv1alpha1.Etcd{}).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &appsv1.StatefulSet{}},\n\t\t\textensionshandler.EnqueueRequestsFromMapper(druidmapper.StatefulSetToEtcd(ctx, mgr.GetClient()), extensionshandler.UpdateWithNew),\n\t\t\tctrlbuilder.WithPredicates(druidpredicates.StatefulSetStatusChange()),\n\t\t).\n\t\tComplete(ec)\n}", "func (r *SDIObserverReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&sdiv1alpha1.SDIObserver{}).\n\t\tComplete(r)\n}", "func (r *VolumeReplicationReconciler) SetupWithManager(mgr ctrl.Manager, cfg *config.DriverConfig) error {\n\tr.DriverConfig = cfg\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&replicationv1alpha1.VolumeReplication{}).\n\t\tComplete(r)\n}", "func (c *CloudsqlController) SetupWithManager(mgr ctrl.Manager) error {\n\tr := &Reconciler{\n\t\tclient: mgr.GetClient(),\n\t\tfactory: &operationsFactory{mgr.GetClient()},\n\t}\n\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tNamed(controllerName).\n\t\tFor(&v1alpha1.CloudsqlInstance{}).\n\t\tOwns(&core.Secret{}).\n\t\tComplete(r)\n}", "func (r *BlueprintReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&app.Blueprint{}).\n\t\tComplete(r)\n}", "func (r *GlotpodReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&goglotdevv1alpha1.Glotpod{}).\n\t\tComplete(r)\n}", "func (r *KcrdReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&kapiv1.Kcrd{}).\n\t\tOwns(&v1.Deployment{}).\n\t\tOwns(&v12.Service{}).\n\t\tOwns(&netv1.Ingress{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\taroClusterPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {\n\t\treturn o.GetName() == arov1alpha1.SingletonClusterName\n\t})\n\n\tbuilder := ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&arov1alpha1.Cluster{}, builder.WithPredicates(aroClusterPredicate))\n\n\treturn builder.Named(ControllerName).Complete(r)\n}", "func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&ethereumv1alpha1.Node{}).\n\t\tOwns(&appsv1.StatefulSet{}).\n\t\tOwns(&corev1.Service{}).\n\t\tOwns(&corev1.Secret{}).\n\t\tOwns(&corev1.PersistentVolumeClaim{}).\n\t\tOwns(&corev1.ConfigMap{}).\n\t\tComplete(r)\n}", "func (r *DemoPodReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&examplev1.DemoPod{}).\n\t\tComplete(r)\n}", "func (r *CIBuildReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cicdv1alpha1.CIBuild{}).\n\t\tComplete(r)\n}", "func (r *NacosReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1beta1.Nacos{}).Complete(r)\n}", "func (r *EndpointsReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1.Endpoints{}).\n\t\tComplete(r)\n}", "func (r *ChannelReconciler) SetupWithManager(mgr ctrl.Manager) error 
{\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&slackv1alpha1.Channel{}).\n\t\tComplete(r)\n}", "func (c *ServiceImportController) SetupWithManager(mgr controllerruntime.Manager) error {\n\treturn controllerruntime.NewControllerManagedBy(mgr).For(&mcsv1alpha1.ServiceImport{}).Complete(c)\n}", "func (c *Controller) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&drivecrd.Drive{}).\n\t\tWithEventFilter(predicate.Funcs{\n\t\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\t\treturn c.filterCRs(e.Object)\n\t\t\t},\n\t\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t\treturn c.filterCRs(e.Object)\n\t\t\t},\n\t\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\t\treturn c.filterCRs(e.ObjectOld)\n\t\t\t},\n\t\t\tGenericFunc: func(e event.GenericEvent) bool {\n\t\t\t\treturn c.filterCRs(e.Object)\n\t\t\t},\n\t\t}).\n\t\tComplete(c)\n}", "func (r *APIKeyReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.APIKey{}).\n\t\tComplete(r)\n}", "func (r *FooReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&batchv1.Foo{}).\n\t\tComplete(r)\n}", "func (r *FruitsCatalogGReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&redhatcomv1alpha1.FruitsCatalogG{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tOwns(&corev1.Service{}).\n\t\tComplete(r)\n}", "func (r *TestRunReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&graphnodev1alpha1.TestRun{}).\n\t\tComplete(r)\n}", "func (r *KindClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&infrastructurev1alpha4.KindCluster{}).\n\t\tComplete(r)\n}", "func (r *KubernetesMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&infrav1.KubernetesMachine{}).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &clusterv1.Machine{}},\n\t\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\t\tToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind(\"KubernetesMachine\")),\n\t\t\t},\n\t\t).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &infrav1.KubernetesCluster{}},\n\t\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\t\tToRequests: handler.ToRequestsFunc(r.KubernetesClusterToKubernetesMachines),\n\t\t\t},\n\t\t).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &corev1.Pod{}},\n\t\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\t\tToRequests: handler.ToRequestsFunc(r.PodToKubernetesMachine),\n\t\t\t},\n\t\t).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &corev1.Secret{}},\n\t\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\t\tToRequests: handler.ToRequestsFunc(r.SecretToKubernetesMachine),\n\t\t\t},\n\t\t).\n\t\tComplete(r)\n}", "func (r *yandexContainerRegistryReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&connectorsv1.YandexContainerRegistry{}).\n\t\tComplete(r)\n}", "func (r *KubeFedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1beta1.KubeFedCluster{}).\n\t\tComplete(r)\n}", "func (r *ServiceBindingReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&mmv1.ServiceBinding{}).\n\t\tComplete(r)\n}", "func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&hjydevv1.Nginx{}).\n\t\tComplete(r)\n}", "func (r *TransferReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tr.AccountSystemNamespace = os.Getenv(ACCOUNTNAMESPACEENV)\n\tif r.AccountSystemNamespace == \"\" {\n\t\tr.AccountSystemNamespace = DEFAULTACCOUNTNAMESPACE\n\t}\n\tr.Logger = ctrl.Log.WithName(\"transfer-controller\")\n\tif m := 
os.Getenv(\"TRANSFERMINBALANCE\"); m != \"\" {\n\t\tminBalance, err := strconv.ParseInt(m, 10, 64)\n\t\tif err != nil {\n\t\t\tr.Logger.Error(err, \"parse min balance failed\")\n\t\t} else {\n\t\t\tMinBalance = minBalance\n\t\t}\n\t}\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&accountv1.Transfer{}, builder.WithPredicates(OnlyCreatePredicate{})).\n\t\tComplete(r)\n}", "func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&filecoinv1alpha1.Node{}).\n\t\tOwns(&appsv1.StatefulSet{}).\n\t\tOwns(&corev1.Service{}).\n\t\tOwns(&corev1.PersistentVolumeClaim{}).\n\t\tComplete(r)\n}", "func (r *EventTrackerPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&eventtrackerv1.EventTrackerPolicy{}).\n\t\tComplete(r)\n}", "func (r *LagoonTaskReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&lagoonv1alpha1.LagoonTask{}).\n\t\tWithEventFilter(TaskPredicates{\n\t\t\tControllerNamespace: r.ControllerNamespace,\n\t\t}).\n\t\tComplete(r)\n}", "func (r *QiskitPlaygroundReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tif r.IsOpenShift {\n\t\tr.Log.Info(\"Running on OpenShift cluster\")\n\t} else {\n\t\tr.Log.Info(\"Running on Vanila Kubernetes cluster\")\n\t}\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&qiskitv1alpha1.QiskitPlayground{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tComplete(r)\n}", "func (r *ReconcileClusterVersion) SetupWithManager(mgr ctrl.Manager, opts controller.Options) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tWithOptions(opts).\n\t\tFor(&tenancyv1alpha1.ClusterVersion{}).\n\t\tComplete(r)\n}", "func (r *CredentialReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tr.Client = mgr.GetClient()\n\tr.DiscoveryClient = discoveryclient.NewDiscoveryClientForConfigOrDie(mgr.GetConfig())\n\tr.scheme = mgr.GetScheme()\n\tr.log = 
ctrl.Log.WithName(\"controllers\").WithName(\"Credential\")\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&servicebrokerv1alpha1.Credential{}).\n\t\tComplete(r)\n}", "func (r *ConferenceReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&conferencev1.Conference{}).\n\t\tComplete(r)\n}", "func (tcr *TinkerbellClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&infrastructurev1alpha3.TinkerbellCluster{}).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &clusterv1.Cluster{}},\n\t\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\t\tToRequests: util.ClusterToInfrastructureMapFunc(infrastructurev1alpha3.GroupVersion.WithKind(\"TinkerbellCluster\")),\n\t\t\t},\n\t\t).\n\t\tComplete(tcr)\n}", "func (r *PassLessReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.PassLess{}).\n\t\tComplete(r)\n}", "func (r *NginxReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&nginxv1alpha1.Nginx{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tComplete(r)\n}", "func (r *ReconcileProvisioner) SetupWithManager(mgr ctrl.Manager) error {\n\tr.scheme = mgr.GetScheme()\n\n\terr := apiextensionsv1.SchemeBuilder.AddToScheme(r.scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif r.Log == nil {\n\t\tr.Log = ctrl.Log.WithName(\"mcd\").WithName(\"provisioner\")\n\t}\n\tif r.clusterRegistry == nil {\n\t\tclusterRegistry, err := registry.New(mgr.GetConfig(), mgr.GetScheme(), mgr.GetRESTMapper())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.clusterRegistry = clusterRegistry\n\t}\n\n\tcfgManager, err := config.New(mgr.GetConfig(), mgr.GetScheme(), mgr.GetRESTMapper())\n\tif err != nil {\n\t\treturn err\n\t}\n\tinteroperatorCfg := cfgManager.GetConfig()\n\tr.cfgManager = cfgManager\n\n\tbuilder := 
ctrl.NewControllerManagedBy(mgr).\n\t\tNamed(\"mcd_provisioner\").\n\t\tWithOptions(controller.Options{\n\t\t\tMaxConcurrentReconciles: interoperatorCfg.ProvisionerWorkerCount,\n\t\t}).\n\t\tFor(&resourcev1alpha1.SFCluster{}).\n\t\tWithEventFilter(watches.NamespaceFilter())\n\n\treturn builder.Complete(r)\n}", "func (r *ManagedResourceReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&paasv1beta1.ManagedResource{}).\n\t\tComplete(r)\n}", "func (r *APISchemeReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cloudingressv1alpha1.APIScheme{}).\n\t\tComplete(r)\n}", "func (r *PipelineReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&pipelinev1alpha1.Pipeline{}).\n\t\tOwns(&v1.Pod{}).\n\t\tComplete(r)\n}", "func (r *DemoResourceReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&toolsv1.DemoResource{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\taroClusterPredicate := predicate.NewPredicateFuncs(func(o client.Object) bool {\n\t\treturn o.GetName() == arov1alpha1.SingletonClusterName\n\t})\n\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&arov1alpha1.Cluster{}, builder.WithPredicates(aroClusterPredicate)).\n\t\tOwns(&configv1.ClusterOperator{}).\n\t\tNamed(ControllerName).\n\t\tComplete(r)\n}", "func (r *SpannerAutoscalerReconciler) SetupWithManager(mgr ctrlmanager.Manager) error {\n\topts := ctrlcontroller.Options{\n\t\tReconciler: r,\n\t}\n\n\treturn ctrlbuilder.ControllerManagedBy(mgr).\n\t\tFor(&spannerv1beta1.SpannerAutoscaler{}).\n\t\tWithOptions(opts).\n\t\tComplete(r)\n}", "func (r *CapabilityReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&runv1alpha1.Capability{}).\n\t\tComplete(r)\n}", "func (r *RepositoryReconciler) 
SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&ecrv1beta1.Repository{}).\n\t\tWithOptions(controller.Options{MaxConcurrentReconciles: 1}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, opts SetupOpts) error {\n\tcontrollerName := fmt.Sprintf(\"%v-controller\", strings.ToLower(r.gvk.Kind))\n\n\tr.addDefaults(mgr, controllerName)\n\tif !opts.DisableSetupScheme {\n\t\tr.setupScheme(mgr)\n\t}\n\n\tc, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: r.maxConcurrentReconciles})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.setupWatches(mgr, c); err != nil {\n\t\treturn err\n\t}\n\n\tr.log.Info(\"Watching resource\",\n\t\t\"group\", r.gvk.Group,\n\t\t\"version\", r.gvk.Version,\n\t\t\"kind\", r.gvk.Kind,\n\t)\n\n\treturn nil\n}", "func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.CosmosDB{}).\n\t\tComplete(r)\n}", "func (r *HumioAlertReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&humiov1alpha1.HumioAlert{}).\n\t\tComplete(r)\n}", "func (r *NotificationReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.Notification{}).\n\t\tComplete(r)\n}", "func (r *LabelerReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&nulllabelerv1.Labeler{}).\n\t\tWatches(\n\t\t\t&source.Kind{Type: &core.Pod{}},\n\t\t\thandler.EnqueueRequestsFromMapFunc(r.GetAll),\n\t\t).\n\t\t// Uncomment the following line adding a pointer to an instance of the controlled resource as an argument\n\t\t// For().\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tr.record = 
event.NewAPIRecorder(mgr.GetEventRecorderFor(\"AppRollout\")).\n\t\tWithAnnotations(\"controller\", \"AppRollout\")\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&oamv1alpha2.AppRollout{}).\n\t\tOwns(&oamv1alpha2.Application{}).\n\t\tComplete(r)\n}", "func (r *AtReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cnatv1alpha1.At{}).\n\t\tOwns(&corev1.Pod{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&toolchainv1alpha1.SocialEvent{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).\n\t\tWatches(\n\t\t\t// watches UserSignups when their labels are *changed* (in particular, to track the approved users)\n\t\t\t// and they have a `toolchain.dev.openshift.com/social-event` label\n\t\t\t&source.Kind{Type: &toolchainv1alpha1.UserSignup{}},\n\t\t\thandler.EnqueueRequestsFromMapFunc(commoncontrollers.MapToOwnerByLabel(r.Namespace, toolchainv1alpha1.SocialEventUserSignupLabelKey)),\n\t\t\tbuilder.WithPredicates(predicate.LabelChangedPredicate{}),\n\t\t).Complete(r)\n}", "func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&appsv1.Deployment{}).\n\t\tOwns(&corev1.Pod{}).\n\t\tWatches(&source.Kind{Type: &appsv1.Deployment{}},\n\t\t\t&handler.EnqueueRequestForObject{}).\n\t\tComplete(r)\n}", "func SetupWithManager(mgr manager.Manager) error {\n\treturn add(mgr, newReconciler(mgr))\n}", "func (r *GithubDeployKeyReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&credentialv1.GithubDeployKey{}).\n\t\tComplete(r)\n}", "func (r *AppInsightsReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&azurev1alpha1.AppInsights{}).\n\t\tComplete(r)\n}", "func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error 
{\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&corev1.Namespace{}).\n\t\tWithEventFilter(manageLabelPredicate()).\n\t\tComplete(r)\n}", "func (r *ProbesCheckerReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cachev1alpha1.ProbesChecker{}).\n\t\tOwns(&corev1.Pod{}).\n\t\tComplete(r)\n}", "func (r *MilvusClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tbuilder := ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&milvusiov1alpha1.MilvusCluster{}).\n\t\t//Owns(&appsv1.Deployment{}).\n\t\t//Owns(&corev1.ConfigMap{}).\n\t\t//Owns(&corev1.Service{}).\n\t\t//WithEventFilter(&MilvusClusterPredicate{}).\n\t\tWithOptions(controller.Options{MaxConcurrentReconciles: 1})\n\n\t/* if config.IsDebug() {\n\t\tbuilder.WithEventFilter(DebugPredicate())\n\t} */\n\n\treturn builder.Complete(r)\n}", "func (r *DaemonSetReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&appsv1.DaemonSet{}).\n\t\tOwns(&corev1.Pod{}).\n\t\tWatches(&source.Kind{Type: &appsv1.DaemonSet{}},\n\t\t\t&handler.EnqueueRequestForObject{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tcontrollerName := fmt.Sprintf(\"%v-controller\", strings.ToLower(r.gvk.Kind))\n\n\tr.addDefaults(mgr, controllerName)\n\tif !r.skipPrimaryGVKSchemeRegistration {\n\t\tr.setupScheme(mgr)\n\t}\n\n\tc, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: r.maxConcurrentReconciles})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.setupWatches(mgr, c); err != nil {\n\t\treturn err\n\t}\n\n\tr.log.Info(\"Watching resource\",\n\t\t\"group\", r.gvk.Group,\n\t\t\"version\", r.gvk.Version,\n\t\t\"kind\", r.gvk.Kind,\n\t)\n\n\treturn nil\n}", "func (r *MetadataRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&kubedrv1alpha1.MetadataRestore{}).\n\t\tComplete(r)\n}", "func (r *TerraformConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tmgrIndexer := mgr.GetFieldIndexer()\n\tindexer := indexerFunc(\"TerraformConfiguration\", terapi.GroupVersion.String())\n\n\tif err := mgrIndexer.IndexField(&terapi.TerraformPlan{}, indexOwnerKey, indexer); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mgrIndexer.IndexField(&terapi.TerraformState{}, indexOwnerKey, indexer); err != nil {\n\t\treturn err\n\t}\n\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&terapi.TerraformConfiguration{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\terr := ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&operatorv1alpha1.OperandRequest{}).\n\t\tComplete(reconcile.Func(r.ReconcileOperandRequest))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *ElastalertReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&esv1alpha1.Elastalert{}).\n\t\tWithOptions(controller.Options{MaxConcurrentReconciles: 5}).\n\t\tComplete(r)\n}", "func (r *ReconcileResourceDistribution) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\t// Uncomment the following line adding a pointer to an instance of the controlled resource as an argument\n\t\t// For().\n\t\tComplete(r)\n}", "func (r *MysqlUserReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&apiv1alpha1.MysqlUser{}).\n\t\tComplete(r)\n}", "func (r *KoupletBuildReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&apiv1alpha1.KoupletBuild{}).\n\t\tOwns(&batchv1.Job{}).\n\t\tComplete(r)\n}", "func (r *BCSNetIPClaimReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn 
ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&netservicev1.BCSNetIPClaim{}).\n\t\tComplete(r)\n}", "func (r *NifiParameterContextReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha1.NifiParameterContext{}).\n\t\tComplete(r)\n}", "func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {\n\tr.record = event.NewAPIRecorder(mgr.GetEventRecorderFor(\"AppRollout\")).\n\t\tWithAnnotations(\"controller\", \"AppRollout\")\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&v1alpha2.ApplicationContext{}).\n\t\tOwns(&v1alpha2.Application{}).\n\t\tComplete(r)\n}", "func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&anywherev1.Cluster{}).\n\t\tWatches(&source.Kind{Type: &anywherev1.VSphereDatacenterConfig{}}, &handler.EnqueueRequestForObject{}).\n\t\tWatches(&source.Kind{Type: &anywherev1.VSphereMachineConfig{}}, &handler.EnqueueRequestForObject{}).\n\t\tWatches(&source.Kind{Type: &anywherev1.DockerDatacenterConfig{}}, &handler.EnqueueRequestForObject{}).\n\t\tComplete(r)\n}", "func (r *LocalOperatorReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&cachev1.LocalOperator{}).\n\t\tOwns(&appsv1.Deployment{}).\n\t\tComplete(r)\n}" ]
[ "0.81449234", "0.8115102", "0.8088992", "0.8032909", "0.80321074", "0.80057675", "0.797915", "0.7958285", "0.78910285", "0.7864939", "0.7809449", "0.780032", "0.7782882", "0.7780697", "0.7753962", "0.7747532", "0.773852", "0.772797", "0.7717635", "0.7709445", "0.7708367", "0.7707273", "0.7707203", "0.7701198", "0.76981413", "0.76849806", "0.76772887", "0.76723945", "0.7667952", "0.76519835", "0.7647828", "0.7645579", "0.7642982", "0.7629969", "0.76292926", "0.76211655", "0.76184106", "0.7616164", "0.75966406", "0.7593025", "0.7589332", "0.7582862", "0.757373", "0.7555972", "0.75545204", "0.7553792", "0.7549016", "0.7543521", "0.75408447", "0.7524832", "0.7523872", "0.75223184", "0.7518728", "0.75090826", "0.750682", "0.7506191", "0.7502007", "0.7501996", "0.75012803", "0.75002706", "0.74985754", "0.7498032", "0.7487376", "0.7486561", "0.7479385", "0.7477881", "0.7474116", "0.7452218", "0.7451994", "0.7451083", "0.744344", "0.74412394", "0.74331087", "0.7431702", "0.7422671", "0.7407741", "0.73989046", "0.7397023", "0.73916876", "0.73906124", "0.7388284", "0.7376328", "0.7374034", "0.73730886", "0.73722094", "0.73696166", "0.7361254", "0.73576313", "0.7350601", "0.7345556", "0.73351675", "0.7335129", "0.7330395", "0.73248214", "0.7313421", "0.7308892", "0.7301914", "0.73009026", "0.7290811", "0.72848654" ]
0.74000865
76
/ Test with a predefined string
func TestLorem(t *testing.T) { src := `package main // Lorem ipsum dolor sit amet, consectetur adipiscing elit, // sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. // Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris // nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in // reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. // Excepteur sint occaecat cupidatat non proident, // sunt in culpa qui officia deserunt mollit anim id est laborum. func main(){}` file, err := parser.ParseFile(token.NewFileSet(), "", src, parser.ParseComments) if err != nil { t.Fatal(err) } nwords := uint(76) weight := 2. want := uint(float64(nwords) * weight) m := Metric{Config: Weights{Word: weight}} ast.Inspect(file, func(n ast.Node) bool { m.ParseNode(n) return true }) got := uint(m.Finish()) if want != got { t.Fatalf(`GetCommentComp("Lorem ipsum...") = %v, Wanted %v`, got, want) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isMatch(s string, p string) bool {\n\n}", "func verifyHasString(T *testing.T, s string, code string) {\n\tif strings.Index(code, s) == -1 {\n\t\tT.Errorf(\"expected to find %s in the generated code:\\n%s\\n\", s, code)\n\t}\n}", "func TestMatchString(t *testing.T) {\n\tpattern, upper, lower := \"^(B|b)rian$\", \"Brian\", \"brian\"\n\n\tif match, err := regexp.MatchString(pattern, upper); match != true {\n\t\tt.Errorf(\"MatchString did not match %q %v\", upper, err)\n\t}\n\n\tif match, err := regexp.MatchString(pattern, lower); match != true {\n\t\tt.Errorf(\"MatchString did not match %q %v\", lower, err)\n\t}\n}", "func isTest(name, prefix string) bool {\n\tif !strings.HasPrefix(name, prefix) {\n\t\treturn false\n\t}\n\tif len(name) == len(prefix) { // \"Test\" is ok\n\t\treturn true\n\t}\n\trune, _ := utf8.DecodeRuneInString(name[len(prefix):])\n\treturn !unicode.IsLower(rune)\n}", "func isTest(name, prefix string) bool {\n\tif !strings.HasPrefix(name, prefix) {\n\t\treturn false\n\t}\n\tif len(name) == len(prefix) { // \"Test\" is ok\n\t\treturn true\n\t}\n\tr, _ := utf8.DecodeRuneInString(name[len(prefix):])\n\treturn !unicode.IsLower(r)\n}", "func main() {\n\tswitch \"test\" {\n\tcase \"false\":\n\t\tfmt.Println(\"false\")\n\tcase \"true\":\n\t\tfmt.Println(\"true\")\n\tcase \"Howdy\", \"Hi\", \"test\":\n\t\tfmt.Println(\"Hi there\")\n\tdefault:\n\t\tfmt.Println(\"true\")\n\t}\n}", "func containsDemo(a string, b string) bool {\n\treturn strings.Contains(a, b)\n}", "func AssertString(t *testing.T, got string, want string) {\n\tt.Helper()\n\tif got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n}", "func sjekkString(a string){\n\t\n\tb := \"hei\"\n\n\tif (a == b) {\n\t\tfmt.Println(\"Hei på deg!\")\n\t}else {\n\t\tfmt.Println(\"Nei!\")\n\t}\n}", "func Contains(t *testing.T, s, substring string) {\n\tt.Helper()\n\n\tif !strings.Contains(s, substring) {\n\t\tt.Errorf(`%s: string \"%s\" does not contain \"%s\"`, t.Name(), s, 
substring)\n\t}\n}", "func (v *VerbalExpression) Test(s string) bool {\n\treturn v.Regex().Match([]byte(s))\n}", "func StringContains(t *testing.T, actual string, expected string) {\n\tt.Helper()\n\tif !strings.Contains(actual, expected) {\n\t\tt.Errorf(\"expected %s to contain %s\", actual, expected)\n\t\tt.FailNow()\n\t}\n}", "func (s *Scheduler) isHotString(filename string, str string) bool {\n\tstr = strings.ToLower(str)\n\tmatcher, ok := s.matcher[filename]\n\tif !ok {\n\t\tlog.Panicf(\"invalid filename detected %v\", filename)\n\t}\n\tisHot := len(matcher.MatchThreadSafe([]byte(str))) > 0\n\tif s.customFilter != nil {\n\t\treturn s.customFilter(str, isHot)\n\t}\n\treturn isHot\n}", "func main() {\n\t// variables\n\tvar userInput string\n\n\t// Get the input\n\tfmt.Printf(\"Enter the findian string to match : \")\n\t_, err := fmt.Scan(&userInput)\n\n\t// Validate the input\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tuserInputs := strings.ToLower(userInput)\n\tif strings.HasPrefix(userInputs, \"i\") &&\n\t\tstrings.HasSuffix(userInputs, \"n\") &&\n\t\tstrings.Index(userInputs, \"a\") != -1 {\n\n\t\tfmt.Println(\"Found!\")\n\n\t} else {\n\t\tfmt.Println(\"Not Found!\")\n\t}\n\n}", "func OkMatchesString(label, val, regex string, t *testing.T) {\n\tre := regexp.MustCompile(regex)\n\tif re.MatchString(val) {\n\t\tt.Logf(\"ok - %s: '%s' matches '%s'\\n\", label, val, regex)\n\t} else {\n\t\tt.Logf(\"not ok - %s: String '%s' doesn't match '%s'\", label, val, regex)\n\t\tt.Fail()\n\t}\n}", "func matchStr(rgxp string, compare string) bool {\n\tr, err := regexp.Compile(rgxp)\n\tif err != nil {\n\t\tlog.Fatalf(\"invalid regexp: %s\", rgxp)\n\t}\n\treturn r.MatchString(strings.ToLower(compare))\n}", "func SearchString() {\n\ts := \"this is a test\"\n\n\tfmt.Println(strings.Contains(s, \"this\"))\n\n\tfmt.Println(strings.ContainsAny(s, \"bca\"))\n\n\tfmt.Println(strings.HasPrefix(s, \"this\"))\n\n\tfmt.Println(strings.HasSuffix(s, \"test\"))\n}", "func 
TestGenString(t *testing.T) {\n\tif genStr := GenString(); genStr != \"hello world\" {\n\t\tt.Error(\"The generated string should be \\\"hello word\\\", got \", genStr)\n\t}\n}", "func m(t *testing.T, s, re string) {\n\tt.Helper()\n\tmatched, err := regexp.MatchString(re, s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !matched {\n\t\tt.Errorf(\"string does not match pattern %q:\\n%s\", re, s)\n\t}\n}", "func stringContains(first, second string) bool {\n\tif strings.Contains(strings.ToUpper(first), strings.ToUpper(second)) {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func TestAnyString(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ts []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"\\u0062\\u0061\\u0072\", \"baz\"}, true},\n\t\t{[]string{\"boo\", \"bar\", \"baz\"}, false},\n\t\t{[]string{\"foo\", \"far\", \"baz\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.AnyString(test.s, func(s string) bool {\n\t\t\treturn strings.HasPrefix(s, \"f\")\n\t\t})\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}", "func (String) Matches(pattern string) bool { return boolResult }", "func ExpectString(t *testing.T, field string, expected string, found string) {\n\tif expected != found {\n\t\tt.Errorf(\"%s [%s], found '%s'\", field, expected, found)\n\t}\n}", "func Test(str, pattern string) bool {\n\tvar pa string\n\tswitch pattern {\n\tcase \"idcard\":\n\t\tpa = `(^\\d{15}$)|(^\\d{17}(\\d|x|X)$)`\n\tcase \"english\":\n\t\tpa = \"^[A-Za-z]+$\"\n\tcase \"chinese\":\n\t\tpa = \"^[\\u4e00-\\u9fa5]+$\"\n\tcase \"username\":\n\t\tpa = `^[a-z][a-z0-9]{4,19}$`\n\tcase \"email\":\n\t\tpa = `^\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*$`\n\tcase \"zip\":\n\t\tpa = `^[1-9]\\d{5}$`\n\tcase \"qq\":\n\t\tpa = `^[1-9]\\d{4,9}$`\n\tcase \"phone\":\n\t\tpa = `^((\\(\\d{2,3}\\))|(\\d{3}\\-))?(\\(0\\d{2,3}\\)|0\\d{2,3}-)?[1-9]\\d{6,7}(\\-\\d{1,4})?$`\n\tcase 
\"mobile\":\n\t\tpa = `^(13[0-9]|14[5|7]|15[0-9]|18[0-9]|199)\\d{8}$`\n\tcase \"url\":\n\t\tpa = `^((ht|f)tps?):\\/\\/[\\w\\-]+(\\.[\\w\\-]+)+([\\w\\-.,@?^=%&:\\/~+#]*[\\w\\-@?^=%&\\/~+#])?$`\n\tcase \"ip\":\n\t\tpa = `^\\d+\\.\\d+\\.\\d+\\.\\d+$`\n\tcase \"password\":\n\t\treturn isStrongPassword(str)\n\tdefault:\n\t\tpa = pattern\n\t}\n\treg := regexp.MustCompile(pa)\n\n\treturn reg.MatchString(str)\n}", "func Contains(substr, operand string) bool { return strings.Contains(operand, substr) }", "func matchFunc(a, b string) bool {\n\tmatched, _ := regexp.MatchString(b, a)\n\treturn matched\n}", "func (Var) Contains(pattern string) bool { return boolResult }", "func main() {\n\ts := \"anagram\"\n\tt := \"nagaram\"\n\n\tfmt.Println(isAnagram(s, t))\n}", "func TestMatch(t *testing.T) {\n\tpattern := \"^(B|b)rian$\"\n\tif match, err := regexp.Match(pattern, []byte(\"Brian\")); match != true {\n\t\tt.Errorf(\"Brian did not match %q %v\", pattern, err)\n\t}\n\n\tif match, err := regexp.Match(pattern, []byte(\"brian\")); match != true {\n\t\tt.Errorf(\"brian did not match %q %v\", pattern, err)\n\t}\n}", "func TestString(t *testing.T) {\n\topcode := Opcode(byte(0)) // Init test opcode\n\n\tif opcode.String() != \"Nop\" { // Check invalid opcode\n\t\tt.Fatal(\"invalid opcode string\") // Panic\n\t}\n\n\topcode2 := Opcode(byte(1)) // Init 2nd test opcode\n\n\tif opcode2.String() != \"Unreachable\" { // Check invalid opcode\n\t\tt.Fatal(\"invalid opcode string\") // Panic\n\t}\n\n\topcode3 := Opcode(byte(161)) // Init 3rd test opcode\n\n\tif opcode3.String() != \"Unknown\" { // Check invalid opcode\n\t\tt.Fatal(\"invalid opcode string\") // Panic\n\t}\n\n\tt.Logf(\"opcode strings: %s, %s, %s\", opcode.String(), opcode2.String(), opcode3.String()) // Log success\n}", "func (f *Flow) MatchString(key string, predicate getter.StringPredicate) bool {\n\tif s, err := f.GetFieldString(key); err == nil {\n\t\treturn predicate(s)\n\t}\n\treturn false\n}", "func main() 
{\n\tfmt.Println(isMatch(\"mississippi\", \"mis*is*p*.\"))\n\tfmt.Println(isMatch(\"aab\", \"c*a*b\"))\n}", "func TestCheckBinaryExprStringEqlString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" == \"abc\"`, env, (\"abc\" == \"abc\"), ConstBool)\n}", "func checkRecord(s string) bool {\n\n}", "func find_static_string(path string, str string) (bool, error) {\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn bytes.Contains(buf, []byte(str)), nil\n}", "func String(tst *testing.T, str, correct string) {\n\tif str != correct {\n\t\tPrintFail(\"error %q != %q\\n\", str, correct)\n\t\ttst.Errorf(\"string failed with: %q != %q\", str, correct)\n\t\treturn\n\t}\n\tPrintOk(\"%s == %s\", str, correct)\n}", "func TestCheckBinaryExprStringGtrString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" > \"abc\"`, env, (\"abc\" > \"abc\"), ConstBool)\n}", "func TestCheckBinaryExprStringLssString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" < \"abc\"`, env, (\"abc\" < \"abc\"), ConstBool)\n}", "func TestIATBHString(t *testing.T) {\n\ttestIATBHString(t)\n}", "func TestFindString(t *testing.T) {\n\tregex := regexp.MustCompile(\"Brian\")\n\tsubject := \"Hello Brian\"\n\tmatch := regex.FindString(subject)\n\tAssert(\"Brian\", match, t)\n}", "func SampleAlpha(alpha string) {\n\n}", "func StringContains(s, sub string) bool {\n\treturn s != \"\" && strings.Contains(s, sub)\n}", "func match(a, b string) bool {\n\treturn strings.EqualFold(a, b)\n}", "func TestStrConfigDefualt(t *testing.T) {\n\tret := strConfigDefualt(\"Invalid\", \"X\")\n\tif ret != \"X\" {\n\t\tt.Error(\"Expected X got\", ret)\n\t}\n}", "func (this *MatchString) matchStr(str string, mSrc string) bool {\n\tres, err := regexp.MatchString(mSrc, str)\n\treturn res == true && err == nil\n}", "func StringContains(s, sub string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\n\treturn strings.Contains(s, 
sub)\n}", "func makeStringMatcher(arg interface{}) (stringMatcher, error) {\n\tif str, ok := arg.(string); ok {\n\t\treturn &stringLiteralMatcher{str: str}, nil\n\t} else if re, ok := arg.(*regexp.Regexp); ok {\n\t\treturn &stringRegexMatcher{pattern: re}, nil\n\t} else if sm, ok := arg.(func(string) bool); ok {\n\t\treturn &funcStringMatcher{fn: sm}, nil\n\t} else if c, ok := arg.(MatchConst); ok {\n\t\tif c == Any {\n\t\t\treturn &funcStringMatcher{\n\t\t\t\tfn: func(string) bool { return true },\n\t\t\t}, nil\n\t\t} else if c == None {\n\t\t\treturn &funcStringMatcher{\n\t\t\t\tfn: func(string) bool { return false },\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Cannot use value %v when matching against strings\", arg)\n}", "func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)", "func execValidString(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := utf8.ValidString(args[0].(string))\n\tp.Ret(1, ret)\n}", "func hasFunc(a, b string) bool {\n\treturn strings.Contains(a, b)\n}", "func contains(s string, substr string) bool {\n if len(substr) == 0 {\n return true\n }\n s = strings.ToLower(s)\n split := strings.Split(s, \"-\")\n s = strings.Join(split, \"\") + \" \" + strings.Join(split, \" \")\n\n substr = strings.ToLower(substr)\n substr = strings.Join(strings.Split(substr, \"-\"), \"\")\n\n index := strings.Index(s, substr)\n if index == -1 {\n return false\n }\n if index + len(substr) < len(s) {\n char := s[index + len(substr)]\n if char >= 'a' && char <= 'z' || char >= '0' && char <= '9' {\n return false\n }\n }\n if index > 0 {\n char := s[index - 1]\n if char >= 'a' && char <= 'z' || char >= '0' && char <= '9' {\n return false\n }\n }\n return true\n}", "func teststring(t *testing.T, s string) {\n\tbuf := toint8(s)\n\tr := kstat.CFieldString(buf[:])\n\tif r != s {\n\t\tt.Fatalf(\"full buf mismatch: %q vs %q\", s, r)\n\t}\n\tr = 
kstat.CFieldString(buf[:len(s)])\n\tif r != s {\n\t\tt.Fatalf(\"exact buf mismatch: %q vs %q\", s, r)\n\t}\n\tr = kstat.CFieldString(buf[:len(s)+1])\n\tif r != s {\n\t\tt.Fatalf(\"string + one null mismatch: %q vs %q\", s, r)\n\t}\n\tif len(s) > 1 {\n\t\tr = kstat.CFieldString(buf[:1])\n\t\tif r != s[:1] {\n\t\t\tt.Fatalf(\"first character mismatch: %q vs %q\", s[:1], r)\n\t\t}\n\t}\n}", "func AssertString(t *testing.T, message string, expected string, got string) {\n\texpected = trimString(expected)\n\tgot = trimString(got)\n\tif expected != got {\n\t\tt.Errorf(\"%s: expected '%s' but got '%s' instead\", message, expected, got)\n\t}\n}", "func IndexString(a, b string) int", "func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {}", "func assertStringRegexp(t *testing.T, pattern, s string) {\n\tmatches, _ := regexp.MatchString(pattern, s)\n\n\tif !matches {\n\t\tt.Errorf(\"%s is not in format %s\", s, pattern)\n\t}\n}", "func TestContainsString(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tstr string\n\t\twant bool\n\t}{\n\t\t{\n\t\t\t\"Returns true if []string contains str\",\n\t\t\t\"contains-me\",\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"Returns false if []string doesn't contain str\",\n\t\t\t\"contains-me-not\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"Returns false if []string doesn't contain substring of str\",\n\t\t\t\"con\",\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif got := ContainsString([]string{\"test\", \"contains-me\", \"test2\"}, tt.str); got != tt.want {\n\t\t\t\tt.Errorf(\"ContainsString() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func Hey(remark string) string {\n\nremark = strings.Trim(remark,\" \")\n\nif strings.HasSuffix(remark,\"?\"){\n\tif strings.ToUpper(remark) == remark && 
strings.ContainsAny(remark,(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")){\n\t\treturn \"Calm down, I know what I'm doing!\"\n\t} else {\n\t\treturn \"Sure.\"\n\t}\n\n} else if strings.ToUpper(remark) == remark && strings.ContainsAny(remark,(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")) {\n\treturn \"Whoa, chill out!\"\n\n\t} else if remark == \"\" || strings.ContainsAny(remark,(\"\\t\")){\n\treturn \"Fine. Be that way!\"\n\n\t} else {\n\treturn \"Whatever.\"\n}\n\n}", "func (t *Test) ExecContainsString(contains string) error {\n\terr := ExecContainsString(t.Command, contains)\n\tif err != nil {\n\t\tt.Result.Error(err)\n\t\treturn err\n\t}\n\tt.Result.Success()\n\treturn nil\n}", "func IncludeString(vs []string, t string) bool {\n return IndexString(vs, t) >= 0\n}", "func arrContains(str string) bool {\n\tfor _, compare := range []string{\"CREATE\", \"REMOVE\", \"RENAME\"} {\n\t\tif strings.Contains(strings.ToUpper(str), compare) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(s string, c byte) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] == c {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func main() {\n\n\tstring1 := \"Odit, lorem ipsum dolor sit amet, consectetur adipisicing elit.\"\n\n\tfmt.Printf(\"strings.Contains? 'elit': %v \\n\", strings.Contains(string1, \"elit\"))\n\tfmt.Printf(\"strings.Contains? 'elitZ': %v \\n\", strings.Contains(string1, \"elitZ\"))\n\n\tfmt.Printf(\"strings.Index? 'elit': %v \\n\", strings.Index(string1, \"elit\"))\n\n\tfmt.Printf(\"strings.Count? 'p': %v \\n\", strings.Count(string1, \"p\"))\n\n\tfmt.Printf(\"strings.HasPrefix? 'lit.': %v \\n\", strings.HasPrefix(string1, \"lit.\"))\n\tfmt.Printf(\"strings.HasSuffix? 
'lit.': %v \\n\", strings.HasSuffix(string1, \"lit.\"))\n\n\tfmt.Println(strings.Replace(string1, \"adipisicing\", \"REPLACED!\", 1))\n}", "func checkStr(s string) bool {\n\ts = strings.ToLower(s)\n\n\t// Ensure any embedded suffix / delimiter is removed as I'm using ReadString() function to capture.\n\t// In *nix, expect suffix '\\n', in Windows expect suffix '\\r\\n'\n\ts = strings.TrimSuffix(s, \"\\n\")\n\ts = strings.TrimSuffix(s, \"\\r\")\n\n\tif strings.HasPrefix(s, \"i\") && strings.HasSuffix(s, \"n\") && strings.Contains(s, \"a\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func stringContains(data []string, value string) bool {\n\tfor _, elem := range data {\n\t\tif elem == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestCheckBinaryExprStringAddString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" + \"abc\"`, env, (\"abc\" + \"abc\"), ConstString)\n}", "func ContainsString(items []string, target string) bool {\n\tfor _, item := range items {\n\t\tif item == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func containsString(s []string, e string) bool {\n\treturn sliceIndex(s, e) > -1\n}", "func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {}", "func AssertGoldenString(t TestingT, actual, filename string) {\n\tt.Helper()\n\n\tif err := compare([]byte(actual), path(filename)); err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n}", "func ValidStringArgs(possibilities []string, received string) bool {\n\tfor _, possible := range possibilities {\n\t\tif possible == received {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestSprintfContains(t *testing.T, logic func(format string, a interface{}) string) {\n\tfor _, printable := range printables {\n\t\tt.Run(fmt.Sprint(printable), func(t *testing.T) {\n\t\t\tassert.Contains(t, logic(\"Hello, %v!\", printable), fmt.Sprintf(\"Hello, %v!\", printable))\n\t\t})\n\t}\n}", "func verifyStringProperty(s 
*Step, ps *Steps) error {\n\tpStr := ps.data.GetStringObjectValue(s.parameter)\n\tstrValue, ok := s.value.(string)\n\tif pStr == nil || !ok || *pStr != strValue {\n\t\treturn ps.getError(fmt.Sprintf(\"bad value for %s\", s.parameter))\n\t}\n\treturn nil\n}", "func TestCheckBinaryExprStringLeqString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" <= \"abc\"`, env, (\"abc\" <= \"abc\"), ConstBool)\n}", "func ContainsString(stdOut *bytes.Buffer, contains string) error {\n\tso := stdOut.String()\n\tif !strings.Contains(so, contains) {\n\t\treturn fmt.Errorf(\"stdOut %q did not contain %q\", so, contains)\n\t}\n\treturn nil\n}", "func IsStringIn(val string, availables []string) bool {\n\tfor _, choice := range availables {\n\t\tif val == choice {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (g GiveCommand) Matches(str string) bool {\n\treturn giveReg.MatchString(str)\n}", "func instr(ch byte, st string) bool {\n for i := 0; i < len(st); i++ {\n if st[i] == ch { return true }\n }\n return false\n}", "func Match(regx string, arg string) bool {\n\tmatched, err := regexp.MatchString(\"^(\"+regx+\")$\", arg)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn matched\n}", "func TestCheckBinaryExprStringGeqString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" >= \"abc\"`, env, (\"abc\" >= \"abc\"), ConstBool)\n}", "func containsString(slice []string, element string) bool {\n\treturn posString(slice, element) != -1\n}", "func TestMondayIsNotFoundInTheEmptyString(t *testing.T) {\n\tif Contains(\"\", \"Monday\") {\n\t\tt.Fail()\n\t}\n}", "func TestCheckBinaryExprStringNeqString(t *testing.T) {\n\tenv := MakeSimpleEnv()\n\n\texpectConst(t, `\"abc\" != \"abc\"`, env, (\"abc\" != \"abc\"), ConstBool)\n}", "func (sc *Scavenger) StringExists(str string) (yes bool) {\n\t_, _, yes = sc.Finder().FindString(str)\n\treturn\n}", "func MatchString(infix, matchString string) bool {\n\tn := Compile(infix)\n\treturn 
n.Matches(matchString)\n}", "func TestFCString(t *testing.T) {\n\ttestFCString(t)\n}", "func IsIn(str string, params ...string) bool {\n\tfor _, param := range params {\n\t\tif str == param {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isOption(inString string) bool {\r\n\tvar rx *regexp.Regexp = regexp.MustCompile(`^--?`)\r\n\treturn rx.MatchString(inString)\r\n}", "func TestString(t *testing.T) {\n\ttests := []struct {\n\t\tin *journey.Stepper\n\t\texpect string\n\t}{\n\t\t{\n\t\t\tin: journey.NewStepper(),\n\t\t\texpect: \"0000\",\n\t\t},\n\t\t{\n\t\t\tin: &journey.Stepper{\n\t\t\t\tSteps: []uint32{20},\n\t\t\t\tI: 0,\n\t\t\t},\n\t\t\texpect: \"0020\",\n\t\t},\n\t\t{\n\t\t\tin: &journey.Stepper{\n\t\t\t\tSteps: []uint32{10, 100, 1000},\n\t\t\t\tI: 2,\n\t\t\t},\n\t\t\texpect: \"0010_0100_1000\",\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tgot := test.in.String()\n\t\tif got != test.expect {\n\t\t\tt.Errorf(\"%d - expect String to be equal %s, but got %s\", i, test.expect, got)\n\t\t}\n\t}\n\n}", "func isWantedTest(suiteName, testName string, filterRegexp *regexp.Regexp) bool {\n if !strings.HasPrefix(testName, \"Test\") {\n return false\n } else if filterRegexp == nil {\n return true\n }\n return (filterRegexp.MatchString(testName) ||\n filterRegexp.MatchString(suiteName) ||\n filterRegexp.MatchString(suiteName + \".\" + testName))\n}", "func (pattern targetPattern) MatchString(target string) bool {\n\tparts := strings.SplitN(target, \"/\", 2)\n\treturn len(parts) == 2 && parts[1] == pattern.name && pattern.namespace.MatchString(parts[0])\n}", "func match(pattern string, message string) bool {\n\treturn strings.Contains(message, pattern)\n}", "func hasString(slice []string, target string) bool {\n var retval bool = false\n for _, str := range slice {\n if str == target {\n retval = true\n break\n }\n }\n\n return retval\n}", "func TestIntEqStrEq(t *testing.T) {\n\tintEq := IntEq(1)\n\tstrEq := StrEq(\"1\")\n\tassert.True(t, 
intEq.Eq(strEq), \"1 != '1'\")\n\tassert.True(t, strEq.Eq(intEq), \"'1' != 1\")\n}", "func main() {\n\tfmt.Println(containCharsEqual(\"haha\", \"ahaq\"))\n}", "func Test_regex(t *testing.T) {\n\n\tfor _, val := range shouldMatch {\n\t\tif !secretKarmaReg.MatchString(val) {\n\t\t\tt.Errorf(\"String %s should have matched but didn't.\", val)\n\t\t}\n\t}\n\n\t// for _, val := range shouldNotMatch {\n\t// \tif secretKarmaReg.MatchString(val) {\n\t// \t\tt.Errorf(\"String %s should not have have matched but did.\", val)\n\t// \t}\n\t// }\n}", "func stringContains(t *testing.T, a, b []string) {\n\tas := assert.New(t)\n\n\tm := make(map[string]bool)\n\tfor _, v := range a {\n\t\tm[v] = true\n\t}\n\tfor _, v := range b {\n\t\tif !m[v] {\n\t\t\tas.Fail(fmt.Sprintf(\"%#v should contain %#v\", b, a))\n\t\t}\n\t}\n}", "func checkStringFlagReplaceWithUtilVersion(name string, arg string, compulsory bool) (exists bool) {\n\tvar hasArg bool\n\n\tif arg != \"\" {\n\t\texists = true\n\t}\n\n\t// Try to detect missing flag argument.\n\t// If an argument is another flag, argument has not been provided.\n\tif exists && !strings.HasPrefix(arg, \"-\") {\n\t\t// Option expecting an argument but has been followed by another flag.\n\t\thasArg = true\n\t}\n\t/*\n\t\twhere(fmt.Sprintf(\"-%s compulsory = %t\", name, compulsory))\n\t\twhere(fmt.Sprintf(\"-%s exists = %t\", name, exists))\n\t\twhere(fmt.Sprintf(\"-%s hasArg = %t\", name, hasArg))\n\t\twhere(fmt.Sprintf(\"-%s value = %s\", name, arg))\n\t*/\n\n\tif compulsory && !exists {\n\t\tfmt.Fprintf(os.Stderr, \"compulsory flag: -%s\\n\", name)\n\t\tprintUsage()\n\t\tos.Exit(2)\n\t}\n\n\tif exists && !hasArg {\n\t\tfmt.Fprintf(os.Stderr, \"flag -%s needs a valid argument (not: %s)\\n\", name, arg)\n\t\tprintUsage()\n\t\tos.Exit(3)\n\t}\n\n\treturn\n}", "func getTestNameFromStatusLine(text string) string {\n\tm := regexStatus.FindStringSubmatch(text)\n\treturn m[2]\n}", "func main() {\n\tvar endWith = strings.HasSuffix(\"This is an example 
string\", \"ng\")\n\tfmt.Println(endWith)\n}", "func TestHelloName(t *testing.T) {\n\tname := \"Pesho\"\n\twant := regexp.MustCompile(`\\b` + name + `\\b`)\n\n\tmessage, err := Hello(name)\n\n\tif !want.MatchString(message) || err != nil {\n\t\tt.Fatalf(`Hello(\"Gladys\") = %q, %v, want match for %#q, nil`, message, err, want)\n\t}\n}" ]
[ "0.6646962", "0.64980584", "0.64220244", "0.6103411", "0.6068693", "0.6058485", "0.6053496", "0.5970497", "0.5969839", "0.5969263", "0.59662545", "0.59622586", "0.59489334", "0.59450847", "0.59323", "0.5917186", "0.5906766", "0.588611", "0.58697194", "0.5859399", "0.58549356", "0.5850857", "0.5798707", "0.57941103", "0.57877076", "0.576589", "0.5752309", "0.5750496", "0.57412946", "0.5740559", "0.5727032", "0.57008964", "0.5686728", "0.56739694", "0.56731", "0.5662386", "0.56382716", "0.5608876", "0.5599646", "0.5593583", "0.55852246", "0.55749345", "0.55704993", "0.5566359", "0.5555785", "0.55425894", "0.552333", "0.5514053", "0.5513729", "0.55120444", "0.5496367", "0.54946965", "0.5492129", "0.5485939", "0.5483223", "0.5482811", "0.5478397", "0.5463903", "0.5459863", "0.54526705", "0.5451297", "0.5448652", "0.5447588", "0.5447187", "0.54466045", "0.5446511", "0.5436403", "0.5427846", "0.5419569", "0.54159623", "0.54136026", "0.54128027", "0.5409331", "0.5406204", "0.5404836", "0.54019755", "0.5396804", "0.5394311", "0.5390406", "0.53765845", "0.5376347", "0.53678215", "0.5365917", "0.5357633", "0.5355092", "0.5352551", "0.53436095", "0.5343113", "0.53366864", "0.53320956", "0.53269607", "0.53221947", "0.5317799", "0.5312244", "0.5307053", "0.5306757", "0.5298811", "0.529862", "0.5298347", "0.5298321", "0.5296299" ]
0.0
-1
NewChannelPool create and returns an new Pool instance with specified configuration.
func NewChannelPool(poolConfig *PoolConfig) (Pool, error) { c := &ChannelPool{ conns: make(chan *IdleConn, poolConfig.MaxCap), factory: poolConfig.Factory, close: poolConfig.Close, idleTimeout: poolConfig.IdleTimeout, para: poolConfig.Para, } for i := 0; i < poolConfig.InitialCap; i++ { conn, err := c.factory(poolConfig.Para) if err != nil { c.Release() return nil, fmt.Errorf("factory is not able to fill the pool: %s", err) } c.conns <- &IdleConn{conn: conn, t: time.Now()} } return c, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewChannelPool(initialCap, maxCap int, factory Factory) (Pool, error) {\n\tif initialCap < 0 || maxCap <= 0 || initialCap > maxCap {\n\t\treturn nil, errors.New(\"invalid capacity settings\")\n\t}\n\n\tc := &channelPool{\n\t\trconns: make(chan RpcAble, maxCap),\n\t\tfactory: factory,\n\t}\n\n\t// create initial RPC-able connections, if something goes wrong,\n\t// just close the pool error out.\n\tfor i := 0; i < initialCap; i++ {\n\t\trconn, err := factory()\n\t\tif err != nil {\n\t\t\tc.Close()\n\t\t\treturn nil, fmt.Errorf(\"factory is not able to fill the pool: %s\", err)\n\t\t}\n\t\tc.rconns <- rconn\n\t}\n\n\treturn c, nil\n}", "func NewChannelPool(initialCap, maxCap int, factory Factory) (Pool, error) {\n\tif initialCap <= 0 || maxCap <= 0 || initialCap > maxCap {\n\t\treturn nil, errors.New(\"invalid capacity settings\")\n\t}\n\n\tc := &ChannelPool{\n\t\tconns: make(chan net.Conn, maxCap),\n\t\tfactory: factory,\n\t}\n\n\t// create initial connections, if something goes wrong,\n\t// just close the pool error out.\n\tfor i := 0; i < initialCap; i++ {\n\t\tconn, err := factory()\n\t\tif err != nil {\n\t\t\tc.Close()\n\t\t\treturn nil, fmt.Errorf(\"factory is not able to fill the pool: %s\", err)\n\t\t}\n\t\tc.conns <- conn\n\t}\n\n\treturn c, nil\n}", "func NewChannelPool(poolConfig *MindAlphaServingClientPoolConfig) (Pool, error) {\n\t_poolConfig = poolConfig\n\tclient_logger.GetMindAlphaServingClientLogger().Infof(\"NewChannelPool(): config: addr: %v, service: %v\", poolConfig.ConsulAddr, poolConfig.MindAlphaServingService)\n\tif 0 == poolConfig.MaxConnNumPerAddr {\n\t\treturn nil, errors.New(\"invalid capacity settings\")\n\t}\n\n\tc := &channelPool{\n\t\tservConnsMap: make(map[string]*serviceConns),\n\t\tcurAddrIdx: -1,\n\t\tmaxActive: poolConfig.MaxConnNumPerAddr,\n\t\tminWeight: 1,\n\t\tgcd: 1,\n\t}\n\t_channelPool = c\n\n\tvar total_conn_num = 0\n\tawInfo, err := getConsulServiceAddrs()\n\tif err != nil || nil == awInfo || 
len(awInfo.addrWeights) == 0 {\n\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"can not get any service address from consul: %v\", err)\n\t\treturn nil, errors.New(\"can not get any service address from consul: \" + err.Error())\n\t}\n\taddrWeights := awInfo.addrWeights\n\n\tc.servAddrList = addrWeights\n\tc.maxWeight = awInfo.maxWeight\n\tc.minWeight = awInfo.minWeight\n\tc.gcd = awInfo.gcd\n\n\tfor _, addrWeight := range addrWeights {\n\t\taddr := addrWeight.addr\n\t\tc.servConnsMap[addr] = &serviceConns{idleConns: make(chan *idleConn, poolConfig.MaxConnNumPerAddr), openingConnNum: 0}\n\n\t\tics, err := createConnsForService(addr, poolConfig.MaxConnNumPerAddr)\n\t\tif err != nil {\n\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"can not create connnection for %v. error: %v\", addr, err)\n\t\t}\n\t\tfor _, ic := range ics {\n\t\t\tc.servConnsMap[addr].idleConns <- ic\n\t\t\tc.servConnsMap[addr].openingConnNum += 1\n\t\t\ttotal_conn_num += 1\n\t\t}\n\t\tclient_logger.GetMindAlphaServingClientLogger().Infof(\"set addr %v openingConnNum = %v\", addr, c.servConnsMap[addr].openingConnNum)\n\t}\n\tif 0 == total_conn_num {\n\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"can not create any connection for any service address\")\n\t\treturn nil, errors.New(\"can not create any connection for any service address\")\n\t}\n\t// watch consul, check address change\n\tgo func(*channelPool) {\n\t\tfor {\n\t\t\ttime.Sleep(time.Second * 15)\n\n\t\t\tawInfo, err := getConsulServiceAddrs()\n\t\t\tif err != nil || nil == awInfo || len(awInfo.addrWeights) == 0 {\n\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"Watch(): can not get any service address from consul: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnewAddrWeights := awInfo.addrWeights\n\t\t\tvar addedAddrs []string\n\t\t\tvar delAddrs []string\n\t\t\tnewAddrsMap := make(map[string]int, len(newAddrWeights))\n\t\t\tfor _, newAddrWeight := range newAddrWeights 
{\n\t\t\t\tnewAddr := newAddrWeight.addr\n\t\t\t\tnewAddrsMap[newAddr] = 1\n\t\t\t\t_, ok := c.servConnsMap[newAddr]\n\t\t\t\tif !ok {\n\t\t\t\t\taddedAddrs = append(addedAddrs, newAddr)\n\t\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"Watch(): get new address %v from consul\", newAddr)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// check deleted address.\n\t\t\tfor k, _ := range c.servConnsMap {\n\t\t\t\tif _, ok := newAddrsMap[k]; !ok {\n\t\t\t\t\tdelAddrs = append(delAddrs, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(delAddrs) > 0 {\n\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"Watch(): deleted address: %v\", delAddrs)\n\t\t\t}\n\t\t\tif len(addedAddrs) > 0 {\n\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"Watch(): added address: %v\", addedAddrs)\n\t\t\t}\n\n\t\t\taddrIcsMap := make(map[string][]*idleConn)\n\t\t\tfor _, addAddr := range addedAddrs {\n\n\t\t\t\tics, err := createConnsForService(addAddr, poolConfig.MaxConnNumPerAddr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"can not create connnection for %v. 
error: %v\", addAddr, err)\n\t\t\t\t}\n\t\t\t\ttotal_conn_num := 0\n\t\t\t\ttotal_conn_num += len(ics)\n\t\t\t\taddrIcsMap[addAddr] = ics\n\n\t\t\t\tif 0 == total_conn_num {\n\t\t\t\t\tclient_logger.GetMindAlphaServingClientLogger().Errorf(\"Watch(): can not create any connection for new added service %v\", addAddr)\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.mu.Lock()\n\t\t\tfor addAddr, ics := range addrIcsMap {\n\t\t\t\tc.servConnsMap[addAddr] = &serviceConns{idleConns: make(chan *idleConn, poolConfig.MaxConnNumPerAddr), openingConnNum: 0}\n\t\t\t\tc.servConnsMap[addAddr].openingConnNum += len(ics)\n\t\t\t\tfor _, ic := range ics {\n\t\t\t\t\tc.servConnsMap[addAddr].idleConns <- ic\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//delete deleted address related connections\n\t\t\tfor _, delAddr := range delAddrs {\n\t\t\t\tclose(c.servConnsMap[delAddr].idleConns)\n\t\t\t\tfor ic := range c.servConnsMap[delAddr].idleConns {\n\t\t\t\t\tic.connWrap.CloseConnWrap()\n\t\t\t\t\tc.servConnsMap[delAddr].openingConnNum--\n\t\t\t\t}\n\t\t\t\tdelete(c.servConnsMap, delAddr)\n\t\t\t}\n\t\t\tc.servAddrList = newAddrWeights\n\t\t\tc.maxWeight = awInfo.maxWeight\n\t\t\tc.minWeight = awInfo.minWeight\n\t\t\tc.gcd = awInfo.gcd\n\t\t\tc.mu.Unlock()\n\t\t}\n\t}(c) // end of goroutine.\n\n\treturn c, nil\n}", "func NewPool(c *Config) (p *Pool) {\n\tif c.DialTimeout <= 0 || c.ReadTimeout <= 0 || c.WriteTimeout <= 0 {\n\t\tpanic(\"must config redis timeout\")\n\t}\n\n\tdialFunc := func() (redis.Conn, error) {\n\t\treturn redis.Dial(\n\t\t\t\"tcp\",\n\t\t\tc.Addr,\n\t\t\tredis.DialConnectTimeout(time.Duration(c.DialTimeout)),\n\t\t\tredis.DialReadTimeout(time.Duration(c.ReadTimeout)),\n\t\t\tredis.DialWriteTimeout(time.Duration(c.WriteTimeout)))\n\t}\n\n\treturn &Pool{redis.Pool{\n\t\tMaxIdle: c.MaxIdle,\n\t\tMaxActive: c.MaxActive,\n\t\tIdleTimeout: time.Duration(c.IdleTimeout),\n\t\tDial: dialFunc}}\n}", "func New(c *Config) *pool {\n\tbackends := c.Backends\n\tconnsPerBackend := c.NumConns\n\tcacheEnabled := 
c.EnableCache\n\tmaxRetries := c.MaxRetries\n\n\tbackendCount := int(math.Max(float64(len(backends)), float64(1)))\n\tmaxRequests := connsPerBackend * backendCount * 2\n\n\ttr := &http.Transport{\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 10 * time.Second,\n\t\t\tKeepAlive: 10 * time.Second,\n\t\t}).DialContext,\n\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\tExpectContinueTimeout: 10 * time.Second,\n\t\tResponseHeaderTimeout: 10 * time.Second,\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: 30 * time.Second,\n\t\tTransport: tr,\n\t}\n\n\tcache, err := buildCache(cacheEnabled)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating cache: %v\", err)\n\t}\n\n\tconnectionPool := &pool{\n\t\tconnections: make(chan *connection.Connection, maxRequests),\n\t\thealthChecks: make(map[string]*healthcheck.HealthChecker),\n\t\tclient: client,\n\t\tconnsPerBackend: connsPerBackend,\n\t\tcache: cache,\n\t\tmaxRetries: maxRetries,\n\t}\n\n\tpoolConnections := []*connection.Connection{}\n\n\tstartup := &sync.WaitGroup{}\n\tfor _, backend := range backends {\n\t\tstartup.Add(1)\n\t\tpoolConnections = connectionPool.addBackend(poolConnections, backend, startup)\n\t}\n\n\tshuffle(poolConnections, connectionPool.connections)\n\n\tgo connectionPool.ListenForBackendChanges(startup)\n\n\treturn connectionPool\n}", "func CreatePool(addr address, connCap, channelCapOfPreCoon int) (*pool, error) {\n\tif connCap == 0 {\n\t\tconnCap = 1\n\t}\n\tif channelCapOfPreCoon <= 0 {\n\t\tchannelCapOfPreCoon = 1\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"ConnCap\": connCap,\n\t\t\"channelCapOfPreCoon\": channelCapOfPreCoon,\n\t}).Info(\"创建RabbitMQ连接池\")\n\treturn (&pool{\n\t\taddr: addr,\n\t\tconnCap: connCap,\n\t\tchannelCapOfPreCoon: channelCapOfPreCoon,\n\t\treadyChannel: make(chan *channel, connCap * channelCapOfPreCoon),\n\t}).create()\n}", "func NewPool(min, max uint, factory func() (net.Conn, error), heartbeat func(conn net.Conn) error) (*Pool, error) {\n\tif err := validateConfig(min, max, 
factory); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpool := &Pool{\n\t\tmin: min,\n\t\tmax: max,\n\t\tfactory: factory,\n\t\tconns: make(chan net.Conn, max),\n\t\theartbeat: heartbeat,\n\t}\n\n\tif err := pool.initConn(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pool, nil\n}", "func NewPool(cfg *config) (*Pool, error) {\n\tp := new(Pool)\n\tp.cfg = cfg\n\n\terr := p.initDB()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.limiter = network.NewRateLimiter()\n\tdcrdRPCCfg := &rpcclient.ConnConfig{\n\t\tHost: cfg.DcrdRPCHost,\n\t\tEndpoint: \"ws\",\n\t\tUser: cfg.RPCUser,\n\t\tPass: cfg.RPCPass,\n\t\tCertificates: cfg.dcrdRPCCerts,\n\t}\n\n\tminPmt, err := dcrutil.NewAmount(cfg.MinPayment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmaxTxFeeReserve, err := dcrutil.NewAmount(cfg.MaxTxFeeReserve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.ctx, p.cancel = context.WithCancel(context.Background())\n\thcfg := &network.HubConfig{\n\t\tActiveNet: cfg.net,\n\t\tWalletRPCCertFile: cfg.WalletRPCCert,\n\t\tWalletGRPCHost: cfg.WalletGRPCHost,\n\t\tDcrdRPCCfg: dcrdRPCCfg,\n\t\tPoolFee: cfg.PoolFee,\n\t\tMaxTxFeeReserve: maxTxFeeReserve,\n\t\tMaxGenTime: new(big.Int).SetUint64(cfg.MaxGenTime),\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tLastNPeriod: cfg.LastNPeriod,\n\t\tWalletPass: cfg.WalletPass,\n\t\tMinPayment: minPmt,\n\t\tPoolFeeAddrs: cfg.poolFeeAddrs,\n\t\tSoloPool: cfg.SoloPool,\n\t}\n\n\tp.hub, err = network.NewHub(p.ctx, p.cancel, p.db, p.httpc, hcfg, p.limiter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar blockExplorerURL string\n\tswitch cfg.ActiveNet {\n\tcase chaincfg.TestNet3Params.Name:\n\t\tblockExplorerURL = \"https://testnet.dcrdata.org\"\n\tdefault:\n\t\tblockExplorerURL = \"https://explorer.dcrdata.org\"\n\t}\n\n\tgcfg := &gui.Config{\n\t\tCtx: p.ctx,\n\t\tSoloPool: cfg.SoloPool,\n\t\tGUIDir: cfg.GUIDir,\n\t\tBackupPass: cfg.BackupPass,\n\t\tGUIPort: cfg.GUIPort,\n\t\tTLSCertFile: defaultTLSCertFile,\n\t\tTLSKeyFile: 
defaultTLSKeyFile,\n\t\tActiveNet: cfg.net,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tBlockExplorerURL: blockExplorerURL,\n\t}\n\n\tp.gui, err = gui.NewGUI(gcfg, p.hub, p.db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}", "func NewPool(conf *config.Config) *ConnPool {\n\tpool := &ConnPool{\n\t\tconf: conf,\n\t\tconns: make([]*Connection, 0, conf.MaxConnections),\n\n\t\tconnDelayCloseCh: make(chan *Connection, 10000),\n\t\tconnDelayClosed: make(chan struct{}),\n\n\t\tidleChas: make([]*Channel, 0, conf.MaxIdleChannels),\n\n\t\treqChaList: &ReqChaList{},\n\t}\n\n\tgo func() {\n\t\tfor conn := range pool.connDelayCloseCh {\n\t\t\terr := conn.close(true)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"conn.close: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t\tclose(pool.connDelayClosed)\n\t}()\n\n\treturn pool\n}", "func New(ctx context.Context, wg *sync.WaitGroup, poolSize int) *pool {\n\tp := &pool{\n\t\tctx: ctx,\n\t\twg: wg,\n\t\tsize: poolSize,\n\t\tworkers: make(chan Worker),\n\t\ttickets: make(chan bool, poolSize),\n\t}\n\n\tgo p.process()\n\n\treturn p\n}", "func New(serverid string, quit chan struct{}) *Pool {\n\tchanges := make(chan *Change)\n\tbroadcast := make(chan *baps3.Message)\n\n\tp := poolInner{\n\t\tcontents: make(map[*Client]struct{}),\n\t\tchanges: changes,\n\t\tquit: quit,\n\t\tbroadcast: broadcast,\n\t\tquitting: false,\n\t\tserverid: serverid,\n\t}\n\n\treturn &Pool{\n\t\tinner: &p,\n\t\tChanges: changes,\n\t\tBroadcast: broadcast,\n\t}\n}", "func NewPool(maxConcurrency int) *GoPool {\n\tbuffer := make(chan bool, maxConcurrency)\n\treturn &GoPool{\n\t\tbuffer: buffer,\n\t\tbufferSize: maxConcurrency,\n\t}\n}", "func NewPool(config etc.RedisPool) (pool *redis.Pool, err error) {\n\tconfigURL, err := url.Parse(config.URL)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"invalid redis URL: %s\", err)\n\t\treturn\n\t}\n\n\tswitch configURL.Scheme {\n\tcase \"redis\":\n\t\tpool = newInstancePool(config)\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid 
redis URL scheme: %s\", configURL.Scheme)\n\t}\n\treturn\n}", "func NewPool() Pool {\n\treturn &pool{\n\t\tclients: make(map[string]client),\n\t}\n}", "func NewPool(dialfunc DialFunc, ops ...PoolOption) *Pool {\n\tp := &Pool{dial: dialfunc}\n\tp.maxIdle = 5\n\tfor _, op := range ops {\n\t\top(p)\n\t}\n\tp.ch = make(chan *PoolConn, p.maxIdle)\n\tp.nowfunc = time.Now\n\treturn p\n}", "func NewPool() *Pool {\n\treturn &Pool{\n\t\tRegister: make(chan *Client),\n\t\tUnregister: make(chan *Client),\n\t\tClients: make(map[*Client]bool),\n\t\tBroadcast: make(chan Message),\n\t}\n}", "func CreatePool(config *Config) (*redis.Pool, error) {\n\treturn &redis.Pool{\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn redis.Dial(\n\t\t\t\tconfig.Network,\n\t\t\t\tconfig.Address,\n\t\t\t\tredis.DialDatabase(config.Database),\n\t\t\t\tredis.DialUsername(config.Username),\n\t\t\t\tredis.DialPassword(config.Password),\n\t\t\t)\n\t\t},\n\t\tMaxIdle: config.MaxIdle,\n\t\tIdleTimeout: config.IdleTimeout,\n\t}, nil\n}", "func NewPool(size int) Pool {\n\treturn Pool {\n\t\tSize: size,\n\t}\n}", "func NewPool(conn *Connector) *Pool {\n\treturn &Pool{\n\t\tconn: conn,\n\t}\n}", "func NewPool(c *Conf) *Pool {\n\tp := &Pool{\n\t\tConf: c,\n\t\tc: make(chan *tree, c.capacity),\n\t}\n\tfor i := 0; i < c.capacity; i++ {\n\t\tp.c <- newTree(p.segmentSize, p.maxSize, p.depth, p.hasher)\n\t}\n\treturn p\n}", "func NewPool(numCluster int, numDeployment int) *Pool {\n\ts := &Pool{\n\t\tbackend: make(chan *lambdastore.Deployment, numDeployment+1), // Allocate extra 1 buffer to avoid blocking\n\t\tactives: hashmap.NewMap(numCluster),\n\t}\n\tfor i := 0; i < numDeployment; i++ {\n\t\ts.backend <- lambdastore.NewDeployment(global.Options.GetLambdaPrefix(), uint64(i))\n\t}\n\treturn s\n}", "func NewPool() *Pool {\n\treturn &Pool{\n\t\tresourceQueue: make(chan Resource),\n\t\tresourceCount: 0,\n\t}\n}", "func NewPool(poolSize int, poolCmd []string, portRange []int) *Pool {\n\tlog.Println(\"Initializing 
pool\")\n\n\trand.Seed(time.Now().Unix())\n\n\tpool := &Pool{Cmd: poolCmd}\n\tpool.Workers = make([]*Worker, poolSize)\n\tfor i := 0; i < poolSize; i++ {\n\t\tpool.Workers[i] = &Worker{pool: pool}\n\t}\n\treturn pool\n}", "func New(factory Factory, minIdleConns, poolSize, usedPreConn int, idleTimeout, maxLifetime time.Duration) (*Pool, error) {\n\tif poolSize <= 0 {\n\t\tpoolSize = 1\n\t}\n\n\tif minIdleConns > poolSize {\n\t\tminIdleConns = poolSize\n\t}\n\n\topt := Options{\n\t\tFactory: factory,\n\t\tPoolSize: poolSize,\n\t\tMinIdleConns: minIdleConns,\n\t\tUsedPreConn: usedPreConn,\n\t\tMaxConnLifeTime: maxLifetime,\n\t\tIdleTimeout: idleTimeout,\n\t}\n\n\tp := &Pool{\n\t\topt: &opt,\n\t\tclients: make([]*ClientConn, 0, poolSize),\n\t\tidleConnQueue: initQueue(poolSize),\n\t\trequestQueue: initQueue(0),\n\t}\n\n\t// init client in pool\n\tfor i := 0; i < opt.MinIdleConns; i++ {\n\t\tgo p.checkMinIdleConns()\n\t}\n\n\treturn p, nil\n}", "func CreatePool() *Pool {\n\tp := &Pool{\n\t\tRegister: make(chan *Client),\n\t\tUnregister: make(chan *Client),\n\t\tClients: make(map[*Client]bool),\n\t\tBroadcast: make(chan Message),\n\t}\n\treturn p\n}", "func NewPool(id string, size int, capacity int) *Pool {\n\tvar wg sync.WaitGroup\n\n\t// min for int...\n\ts := size\n\tif s > capacity {\n\t\ts = capacity\n\t}\n\n\treturn &Pool{\n\t\tid,\n\t\t&wg,\n\t\tmake(chan Job, capacity),\n\t\tmake(chan JobResult, capacity),\n\t\ts,\n\t\tcapacity,\n\t}\n}", "func newPool(redisURL string, maxIdle, maxActive int) *redis.Pool {\n\n\treturn &redis.Pool{\n\t\tMaxIdle: maxIdle,\n\t\tMaxActive: maxActive, // max number of connections\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.DialURL(redisURL)\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func (p *LightningPool) new(ctx context.Context) (*amqp.Channel, error) {\n\treturn p.conn.Channel(ctx)\n}", "func NewPool(connectFunc func() (*Conn, func(), error)) *Pool {\n\treturn &Pool{\n\t\tconnectFunc: connectFunc,\n\t\tconnMu: 
&sync.Mutex{},\n\t}\n}", "func NewPool() Pool {\n\treturn Pool{\n\t\tBalanceRune: cosmos.ZeroUint(),\n\t\tBalanceAsset: cosmos.ZeroUint(),\n\t\tPoolUnits: cosmos.ZeroUint(),\n\t\tStatus: Enabled,\n\t}\n}", "func New() *pool {\n\treturn &pool{\n\t\tmetrics: newMetrics(),\n\t}\n}", "func NewPool(poolSize int) *Pool {\n\treturn &Pool{\n\t\tpool: NewAtomicQueue(poolSize),\n\t\tpoolSize: poolSize,\n\t}\n}", "func NewPool(concurrency int, fns <-chan func() Promise, opts ...PoolOptions) *Pool {\n\tif concurrency <= 0 {\n\t\tpanic(\"promise.NewPool: concurrency must be greater than 0\")\n\t}\n\n\tvar options PoolOptions\n\tif len(opts) > 0 {\n\t\toptions = opts[0]\n\t}\n\n\treturn &Pool{\n\t\tfns: fns,\n\t\tsem: make(chan struct{}, concurrency),\n\t\tdone: make(chan struct{}),\n\t\tresult: make(chan Result),\n\t\toptions: options,\n\t}\n}", "func NewPool(size int, requireTls bool) *Pool {\n\tnextClient := make(chan *Client, size)\n\n\tfor i := 0; i < size; i++ {\n\t\tnextClient <- NewClient(requireTls)\n\t}\n\n\treturn &Pool{nextClient}\n}", "func New(o Options) *Pool {\n\tif o.New == nil {\n\t\tpanic(\"pool: new func must not be nil\")\n\t}\n\n\tif o.Size <= 0 {\n\t\to.Size = 1\n\t}\n\n\tif o.Timeout <= 0 {\n\t\to.Timeout = 30 * time.Second\n\t}\n\n\treturn &Pool{\n\t\titems: make(chan interface{}, o.Size),\n\t\tmaxSize: o.Size,\n\t\ttimeout: o.Timeout,\n\t\tnewFn: o.New,\n\t\tmu: new(sync.Mutex),\n\t}\n}", "func New(opt *Options, factory Factory) (*Pool, error) {\n\tif opt == nil {\n\t\topt = new(Options)\n\t}\n\n\tp := &Pool{\n\t\tconns: make([]member, 0, opt.MaxCap),\n\t\tfactory: factory,\n\t\topt: opt.norm(),\n\t\tdying: make(chan none),\n\t\tdead: make(chan none),\n\t}\n\n\tfor i := 0; i < opt.InitialSize; i++ {\n\t\tcn, err := factory()\n\t\tif err != nil {\n\t\t\t_ = p.close()\n\t\t\treturn nil, err\n\t\t}\n\t\tp.Put(cn)\n\t}\n\n\tgo p.loop()\n\treturn p, nil\n}", "func NewPoolWithConfig(size int, conf *PoolConfig) (*GoroutinePool, error) {\n\tp := 
&GoroutinePool{\n\t\tcapacity: int32(size),\n\t\tlock: NewSpinLock(),\n\t\tconfig: conf,\n\t\tworkers: NewLoopQueue(size),\n\t}\n\tp.cond = sync.NewCond(p.lock)\n\tp.objCache.New = func() interface{} {\n\t\treturn &Worker{\n\t\t\tpool: p,\n\t\t\ttask: make(chan f, 1),\n\t\t\targs: make(chan interface{}, 1),\n\t\t}\n\t}\n\treturn p, nil\n}", "func NewPool(num, size int, poolID ...int) (p *Pool) {\n\tp = new(Pool)\n\tp.init(num, size, poolID...)\n\treturn\n}", "func NewConnPool(lg log.Logger, cfg config.P2PConfig) (*ConnPool, error) {\n\treturn &ConnPool{\n\t\tlog: lg,\n\t\tconfig: cfg,\n\t\tconns: make(map[string]*Conn),\n\t}, nil\n}", "func NewPool(eventBus *eventbus.EventBus, brokersNum, clientsPerBroker uint) *BrokerPool {\n\tbp := new(BrokerPool)\n\tbp.workers = make([]*Broker, 0)\n\tbp.ConnectionsChan = make(chan wsConn, 100)\n\n\t// Instantiate all brokers\n\tfor i := uint(0); i < brokersNum; i++ {\n\t\tbr := NewBroker(i, eventBus, clientsPerBroker, bp.ConnectionsChan)\n\t\tbp.workers = append(bp.workers, br)\n\t}\n\n\t// Run all brokers workers\n\tfor _, br := range bp.workers {\n\t\tgo br.Run()\n\t}\n\n\treturn bp\n}", "func newPool(opts EngineOpts) *connPool {\n\tpool := tunny.NewFunc(opts.PoolSize, func(payload interface{}) interface{} {\n\t\tfn, ok := payload.(executable)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"payload is not an executable: %T\", payload)\n\t\t}\n\t\treturn fn()\n\t})\n\treturn &connPool{\n\t\tp: pool,\n\t\tto: opts.TransactionTimeout,\n\t}\n}", "func NewPool(network, addr string, size int, clientTimeout time.Duration, password string, dbNum int) (*Pool, error) {\n\tdf := func(network, addr string) (*redis.Client, error) {\n\t\tclient, err := redis.Dial(network, addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif password != \"\" {\n\t\t\terr = filterNoPasswordSetErr(client.Cmd(\"AUTH\", password).Err)\n\t\t\tif err != nil {\n\t\t\t\tclient.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tif dbNum > 0 {\n\t\t\terr = 
client.Cmd(\"SELECT\", dbNum).Err\n\t\t}\n\n\t\treturn client, err\n\t}\n\treturn NewCustomPool(network, addr, size, clientTimeout, df)\n}", "func NewPool(log LoggerInterface, concurrency int) *Pool {\n\tpool := &Pool{\n\t\tconcurrency: concurrency,\n\t\tlog: log,\n\t}\n\tpool.Init()\n\treturn pool\n}", "func NewPool(option *Option) (*Pool, error) {\n\tif option == nil {\n\t\treturn nil, errors.New(ErrorOption)\n\t}\n\tif option.Factor == nil {\n\t\treturn nil, errors.New(ErrorPoolInit)\n\t}\n\n\tif option.Init < 1 || option.Cap < 1 || option.IdleDur < 1 || option.MaxLifeDur < 1 {\n\t\treturn nil, errors.New(ErrorPoolInit)\n\t}\n\n\tif option.Init > option.Cap {\n\t\toption.Init = option.Cap\n\t}\n\n\tpool := &Pool{\n\t\tclients: make(chan *Client, option.Cap),\n\t\tcap: option.Cap,\n\t\tidleDur: option.IdleDur,\n\t\tmaxLifeDur: option.MaxLifeDur,\n\t\ttimeout: option.Timeout,\n\t\tfactor: option.Factor,\n\t\tmode: option.Mode,\n\t}\n\n\tfor i := int32(0); i < option.Init; i++ {\n\t\tclient, err := pool.createClient()\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(ErrorPoolInit)\n\t\t}\n\t\tpool.clients <- client\n\t}\n\n\treturn pool, nil\n}", "func NewPool(delegate *redis.Client) redsyncredis.Pool {\n\treturn &pool{delegate}\n}", "func NewComposerPool(max int) (result *ComposerPool) {\n\tlogger.Debug(\"Entering NewComposerPool\", max)\n\tdefer func() { logger.Debug(\"Exiting NewComposerPool\", result) }()\n\n result = &ComposerPool{\n Pool: make(chan *Composer, max),\n\t\tPoolCount: 0,\n\t\tPoolMax: max,\n\t\tPoolMutex: &sync.Mutex{},\n }\n return result\n}", "func New(network, addr string, size int) (*Pool, error) {\n\treturn NewCustom(network, size, SingleAddrFunc(addr), redis.Dial)\n}", "func NewPool(size int) *Pool {\n\tpool := &Pool{\n\t\ttasks: make(chan Task, DefaultPoolTaskChannelSize),\n\t\tkill: make(chan struct{}),\n\t}\n\tpool.Resize(size)\n\treturn pool\n}", "func NewPool(tasks []*Task, concurrency int) *Pool {\n\treturn 
&Pool{\n\t\tTasks:\ttasks,\n\t\tConcurrency: concurrency,\n\t\tCollector: make(chan *Task, taskSize),\n\t}\n}", "func New(n int, ctor func() Worker) *Pool {\n\tp := &Pool{\n\t\tctor: ctor,\n\t\treqChan: make(chan workRequest),\n\t}\n\tp.SetSize(n)\n\n\treturn p\n}", "func newPool(url string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 80,\n\t\tMaxActive: 12000, // max number of connections\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.DialURL(url)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func New(o *Options) (*redis.Pool, error) {\n\tpool := &redis.Pool{\n\t\tMaxIdle: o.MaxIdle,\n\t\tMaxActive: o.MaxActive,\n\t\tIdleTimeout: time.Duration(o.IdleTimeout) * time.Second,\n\t\tWait: true,\n\t\t// Other pool configuration not shown in this example.\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tcon, err := redis.Dial(\"tcp\", o.URL,\n\t\t\t\tredis.DialPassword(o.Password),\n\t\t\t\tredis.DialConnectTimeout(time.Duration(o.Timeout)*time.Second),\n\t\t\t\tredis.DialReadTimeout(time.Duration(o.Timeout)*time.Second),\n\t\t\t\tredis.DialWriteTimeout(time.Duration(o.Timeout)*time.Second))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn con, nil\n\t\t},\n\t}\n\tClient.RedisCon = pool\n\treturn pool, nil\n}", "func New(\n\tctx context.Context,\n\tdsn string,\n\twaitFor time.Duration,\n\ttracer opentracing.Tracer,\n) (PgxWrapper, error) {\n\tcfg, err := pgxpool.ParseConfig(dsn)\n\tif err != nil {\n\t\treturn PgxWrapper{}, fmt.Errorf(\"could not create new connection configuration: %w\", err)\n\t}\n\n\tpool, err := newPgxPool(ctx, cfg, waitFor)\n\tif err != nil {\n\t\treturn PgxWrapper{}, fmt.Errorf(\"could not create new connection pool: %w\", err)\n\t}\n\n\treturn PgxWrapper{\n\t\tpool: pool,\n\t\ttracer: tracer,\n\t}, nil\n}", "func New() *Pool {\n\tpool := &Pool{\n\t\tNewQueue(),\n\t}\n\treturn pool\n}", "func newResourcePool(config resourcePoolConfig) (*resourcePool, error) 
{\n\terr := (&config).setup()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trp := &resourcePool{\n\t\tminSize: config.MinSize,\n\t\tmaxSize: config.MaxSize,\n\t\texpiredFn: config.ExpiredFn,\n\t\tcloseFn: config.CloseFn,\n\t\tinitFn: config.InitFn,\n\t\tmaintainInterval: config.MaintainInterval,\n\t}\n\n\treturn rp, nil\n}", "func NewPool(opts ...Option) *ComplexPool {\n\treturn NewComplexPool(opts...)\n}", "func NewPool(log Logger, ctx interface{}, maxWorkers int, handlers map[string]Handler) Worker {\n\treturn NewPoolWithGC(log, ctx, maxWorkers, handlers,\n\t\tdefaultPeriodicGCSeconds, defaultSubmitGCSeconds)\n}", "func (h *ConnectionPoolsHandler) Create(\n\tctx context.Context,\n\tproject string,\n\tserviceName string,\n\treq CreateConnectionPoolRequest,\n) (*ConnectionPool, error) {\n\tpath := buildPath(\"project\", project, \"service\", serviceName, \"connection_pool\")\n\t_, err := h.client.doPostRequest(ctx, path, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Server doesn't return the connection pool we created, need to fetch it separately.\n\treturn h.Get(ctx, project, serviceName, req.PoolName)\n}", "func (r RedisWriter) NewPool() *redis.Pool {\n\tconfig := r\n\treturn &redis.Pool{\n\t\tMaxIdle: config.RedisPoolMaxIdle,\n\t\tMaxActive: config.RedisPoolMaxActive,\n\t\tIdleTimeout: time.Duration(config.RedisPoolIdleTimeout) * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\t//return redis.Dial(\"tcp\", config.RedisAddress) },\n\t\t\tc, err := redis.Dial(\"tcp\", config.RedisAddress)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.Do(\"AUTH\", config.RedisPassword)\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func NewPool(parentCtx context.Context) *Pool {\n\tbaseCtx, baseCancel := context.WithCancel(parentCtx)\n\tctx, cancel := context.WithCancel(baseCtx)\n\treturn &Pool{\n\t\tbaseCtx: baseCtx,\n\t\tbaseCancel: baseCancel,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}\n}", "func NewPool(store *storage.Storage, nbWorkers int) 
*Pool {\n\tworkerPool := &Pool{\n\t\tqueue: make(chan model.Job),\n\t}\n\n\tfor i := 0; i < nbWorkers; i++ {\n\t\tworker := &Worker{id: i, store: store}\n\t\tgo worker.Run(workerPool.queue)\n\t}\n\n\treturn workerPool\n}", "func New(maxWorkers int, maxJobQueue int) *Pool {\n\t// Create the job queue\n\tjobQueue := make(chan Job, maxJobQueue)\n\n\t// Create the worker pool\n\tworkerPool := make(chan chan Job, maxWorkers)\n\n\treturn &Pool{\n\t\tworkerPool: workerPool,\n\t\tjobQueue: jobQueue,\n\t\tmaxJobQueue: maxJobQueue,\n\t\tmaxWorkers: maxWorkers,\n\t\tstatus: NotStarted,\n\t}\n}", "func New(max int) *WorkPool { // 注册工作池,并设置最大并发数\n\tif max < 1 {\n\t\tmax = 1\n\t}\n\n\tp := &WorkPool{\n\t\ttask: make(chan TaskHandler, 2*max),\n\t\terrChan: make(chan error, 1),\n\t\twaitingQueue: myqueue.New(),\n\t}\n\n\tgo p.loop(max)\n\treturn p\n}", "func NewPool(env tabletenv.Env, name string, cfg tabletenv.ConnPoolConfig) *Pool {\n\tidleTimeout := cfg.IdleTimeoutSeconds.Get()\n\tmaxLifetime := cfg.MaxLifetimeSeconds.Get()\n\tcp := &Pool{\n\t\tenv: env,\n\t\tname: name,\n\t\tcapacity: cfg.Size,\n\t\tprefillParallelism: cfg.PrefillParallelism,\n\t\ttimeout: cfg.TimeoutSeconds.Get(),\n\t\tidleTimeout: idleTimeout,\n\t\tmaxLifetime: maxLifetime,\n\t\twaiterCap: int64(cfg.MaxWaiters),\n\t\tdbaPool: dbconnpool.NewConnectionPool(\"\", 1, idleTimeout, maxLifetime, 0),\n\t}\n\tif name == \"\" {\n\t\treturn cp\n\t}\n\tenv.Exporter().NewGaugeFunc(name+\"Capacity\", \"Tablet server conn pool capacity\", cp.Capacity)\n\tenv.Exporter().NewGaugeFunc(name+\"Available\", \"Tablet server conn pool available\", cp.Available)\n\tenv.Exporter().NewGaugeFunc(name+\"Active\", \"Tablet server conn pool active\", cp.Active)\n\tenv.Exporter().NewGaugeFunc(name+\"InUse\", \"Tablet server conn pool in use\", cp.InUse)\n\tenv.Exporter().NewGaugeFunc(name+\"MaxCap\", \"Tablet server conn pool max cap\", cp.MaxCap)\n\tenv.Exporter().NewCounterFunc(name+\"WaitCount\", \"Tablet server conn pool wait count\", 
cp.WaitCount)\n\tenv.Exporter().NewCounterDurationFunc(name+\"WaitTime\", \"Tablet server wait time\", cp.WaitTime)\n\tenv.Exporter().NewGaugeDurationFunc(name+\"IdleTimeout\", \"Tablet server idle timeout\", cp.IdleTimeout)\n\tenv.Exporter().NewCounterFunc(name+\"IdleClosed\", \"Tablet server conn pool idle closed\", cp.IdleClosed)\n\tenv.Exporter().NewCounterFunc(name+\"MaxLifetimeClosed\", \"Tablet server conn pool refresh closed\", cp.MaxLifetimeClosed)\n\tenv.Exporter().NewCounterFunc(name+\"Exhausted\", \"Number of times pool had zero available slots\", cp.Exhausted)\n\tenv.Exporter().NewCounterFunc(name+\"WaiterQueueFull\", \"Number of times the waiter queue was full\", cp.waiterQueueFull.Load)\n\tenv.Exporter().NewCounterFunc(name+\"Get\", \"Tablet server conn pool get count\", cp.GetCount)\n\tenv.Exporter().NewCounterFunc(name+\"GetSetting\", \"Tablet server conn pool get with setting count\", cp.GetSettingCount)\n\tenv.Exporter().NewCounterFunc(name+\"DiffSetting\", \"Number of times pool applied different setting\", cp.DiffSettingCount)\n\tenv.Exporter().NewCounterFunc(name+\"ResetSetting\", \"Number of times pool reset the setting\", cp.ResetSettingCount)\n\tcp.getConnTime = env.Exporter().NewTimings(name+\"GetConnTime\", \"Tracks the amount of time it takes to get a connection\", \"Settings\")\n\n\treturn cp\n}", "func NewPool(addr string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn redis.Dial(\"tcp\", addr, redis.DialConnectTimeout(time.Millisecond*100))\n\t\t},\n\t}\n}", "func NewPool(opts ...PoolOpt) *Pool {\n\tp := &Pool{\n\t\tnum: defaultNumOfRoutines,\n\t\tmax: defaultMaxRequestBufferSize,\n\t\tctx: context.TODO(),\n\t\topen: false,\n\t\tclose: make(chan bool),\n\t}\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\tp.reqs = make(chan *request, p.max)\n\treturn p\n}", "func NewPool() *Pool {\n\tpool := new(Pool)\n\tpool.init()\n\treturn pool\n}", "func 
NewPool(minSize uint) *Pool {\n\tif minSize == 0 {\n\t\tminSize = defaultMinSize\n\t}\n\treturn &Pool{minSize: minSize, freelist: []*buf{}}\n}", "func New(queueDepth int, singleThreaded bool) *GoroutinePool {\n\treturn &GoroutinePool{\n\t\tqueue: make(chan work, queueDepth),\n\t\tsingleThreaded: singleThreaded,\n\t}\n}", "func NewPool(n int) (ret *Pool) {\n\tret = &Pool{}\n\tret.taskChannel = make(chan Runnable)\n\tfor t:=0; t < n; t++ {\n\t\tgo ret.executor()\n\t}\n\treturn\n}", "func New(qsize, wsize int) *Pool {\n\tif wsize == 0 {\n\t\twsize = runtime.NumCPU()\n\t}\n\n\tif qsize < wsize {\n\t\tqsize = wsize\n\t}\n\n\tpool := &Pool{\n\t\tqueue: make(chan Task, qsize),\n\t\tworkers: make(chan chan Task, wsize),\n\t\tshutdown: make(chan struct{}),\n\t}\n\n\tgo pool.start()\n\n\tfor i := 0; i < wsize; i++ {\n\t\tStartWorker(pool)\n\t}\n\n\treturn pool\n}", "func NewPool(fn ResolverFactory) *Pool {\n\treturn &Pool{\n\t\tfactory: fn,\n\t}\n}", "func New(pool pool.Pool, collect collector.Collector, url, redisChannel string, buffer int, replication float64) *Config {\n\toptions, err := redis.ParseURL(url)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to parse redis url (%v): %v\", url, err)\n\t}\n\n\tclient := redis.NewClient(options)\n\tif err := client.Ping(context.TODO()).Err(); err != nil {\n\t\tlog.Fatalf(\"failed to ping redis: %v\", err)\n\t}\n\n\tpubsub := client.Subscribe(context.TODO(), redisChannel)\n\tif _, err := pubsub.Receive(context.TODO()); err != nil {\n\t\tlog.Fatalf(\"failed to ping redis channel: %v\", err)\n\t}\n\n\treturn &Config{\n\t\tpool: pool,\n\t\tcollect: collect,\n\t\tchannel: pubsub.ChannelSize(buffer),\n\t\tclient: client,\n\t\tpubsub: pubsub,\n\t\treplication: replication,\n\t}\n}", "func NewPool(workers int) *Pool {\n\treturn &Pool{\n\t\tworkers: workers,\n\t\tqueueTasks: make(chan Task),\n\t}\n}", "func NewPool(numWorkers int, jobQueueLen int) *Pool {\n\tjobQueue := make(chan Job, jobQueueLen)\n\tworkerPool := make(chan *worker, 
numWorkers)\n\n\tpool := &Pool{\n\t\tJobQueue: jobQueue,\n\t\tdispatcher: newDispatcher(workerPool, jobQueue),\n\t}\n\n\treturn pool\n}", "func NewPool(server, password string, maxIdle, maxActive int, idleTimeout time.Duration) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxIdle: maxIdle, // default: 3\n\t\tMaxActive: maxActive, // default: 1000\n\t\tIdleTimeout: idleTimeout, // default 3 * 60 seconds\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", server)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif _, err := c.Do(\"AUTH\", password); err != nil {\n\t\t\t\tc.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n}", "func newPool(addr string) (*pool, error) {\n\tp := pool{redis.Pool{\n\t\tMaxActive: 100,\n\t\tWait: true,\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.Dial(\"tcp\", addr) },\n\t}}\n\n\t// Test connection\n\tconn := p.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"PING\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &p, nil\n}", "func NewPool(parentCtx context.Context, opts ...func(*option)) *pool {\n\tctx, cancel := context.WithCancel(parentCtx)\n\tp := &pool{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\toption: option{recoverFunc: defaultRecoverGoroutine},\n\t}\n\tfor _, opt := range opts {\n\t\topt(&p.option)\n\t}\n\treturn p\n}", "func newPool(server string) *redis.Pool {\n\treturn &redis.Pool{\n\t\tMaxActive: 80,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", server)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func NewPool(size int, allocator Allocator) *Pool {\n\treturn &Pool{\n\t\tallocator: allocator,\n\t\trecycleCh: make(chan []byte, size),\n\t}\n}", "func 
NewPool(size int) *Pool {\n\tpool := &Pool{\n\t\ttasks: make(chan Task, 128),\n\t\tkill: make(chan struct{}),\n\t}\n\tpool.Resize(size)\n\treturn pool\n}", "func (c Config) Pool() *redis.Pool {\n\treturn newPool(c)\n}", "func New(size int64) *GoPool {\n\tif size <= 0 {\n\t\tpanic(fmt.Sprintf(\"invalid GoPool size: %d\", size))\n\t}\n\treturn &GoPool{\n\t\tmaxRoutines: size,\n\t\troutines: semaphore.NewWeighted(size),\n\t}\n}", "func New(idleTimeout time.Duration) *Pool {\n\tpool := &Pool{\n\t\tidleTimeout: idleTimeout,\n\t\tstack: make([]*goroutine, 0, 64),\n\t}\n\treturn pool\n}", "func NewSessionPool(conf SessionPoolConf, log Logger) (*SessionPool, error) {\n\t// check the config\n\tconf.checkBasicFields(log)\n\n\tnewSessionPool := &SessionPool{\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\t// init the pool\n\tif err := newSessionPool.init(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create a new session pool, %s\", err.Error())\n\t}\n\tnewSessionPool.startCleaner()\n\treturn newSessionPool, nil\n}", "func NewPool(filePath string, opt PoolOptions) (*Pool, error) {\n\tp := Pool{\n\t\tpath: filePath,\n\t\topt: opt,\n\t}\n\tif p.opt.Size > 0 {\n\t\tp.read = make([][]byte, 0, p.opt.Size)\n\t}\n\n\tif err := p.loadFromFile(); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(p.read) != p.opt.Size && p.opt.Size != -1 {\n\t\treturn nil, errors.New(\"put first\")\n\t}\n\treturn &p, nil\n}", "func New(maxBlocks int, opts ...PoolOpt) (*Pool, error) {\n\to := defaultPoolOpts\n\tfor _, opt := range opts {\n\t\topt(&o)\n\t}\n\tp := &Pool{\n\t\tinitOpts: o,\n\t\tfree: make([][]byte, 0, maxBlocks),\n\t\tallocated: make(map[*byte]struct{}, maxBlocks),\n\t\tmaxBlocks: maxBlocks,\n\t}\n\tif o.preAlloc > 0 {\n\t\tif err := p.prealloc(o.preAlloc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\truntime.SetFinalizer(p, func(pool *Pool) {\n\t\tpool.Close()\n\t})\n\treturn p, nil\n}", "func NewPool(rootDir string, globalCacheDir string) *Pool {\n\tresult := &Pool{\n\t\thosts: 
make(map[string]*pluginsForHost),\n\t\trootDir: rootDir,\n\t\tglobalCacheDir: globalCacheDir,\n\t}\n\tgo result.supervisor()\n\treturn result\n}", "func NewCustom(network string, size int, af AddrFunc, df DialFunc) (*Pool, error) {\n\tp := Pool{\n\t\tpool: make(chan *redis.Client, size),\n\t\tspare: make(chan string, size),\n\t\tdf: df,\n\t\tstopCh: make(chan bool),\n\t\tnetwork: network,\n\t}\n\n\tclient, err := df(network, af(0))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.pool <- client\n\n\tfor i := 1; i < size; i++ {\n\t\tp.spare <- af(i)\n\t}\n\n\t// set up a go-routine which will periodically ping connections in the pool.\n\t// if the pool is idle every connection will be hit once every 10 seconds.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Second / time.Duration(size - len(p.spare))):\n\t\t\t\tp.ping()\n\t\t\tcase <-p.stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn &p, err\n}", "func New(size int, options ...Option) *Pool {\n\tp := &Pool{\n\t\tsize: int32(size),\n\t\twait: make(chan struct{}, 1),\n\t\tctx: context.Background(),\n\t}\n\n\t// apply options\n\tfor _, option := range options {\n\t\toption(p)\n\t}\n\n\tif p.workers == nil {\n\t\tp.workers = make(chan Worker)\n\t}\n\n\tgo func() {\n\t\tatomic.AddInt32(&p.counter, p.size)\n\n\t\tfor i := int32(0); i < p.size; i++ {\n\t\t\tgo p.worker()\n\t\t}\n\t}()\n\n\treturn p\n}", "func NewClientPool(config *PoolConfig) ClientPool {\n\tbcp := &basicClientPool{\n\t\tpool: &sync.Map{},\n\t\tconfig: config,\n\t}\n\n\t// Set config\n\tif bcp.config == nil {\n\t\tbcp.config = &PoolConfig{}\n\t}\n\n\tif bcp.config.DeadCheckInterval == 0 {\n\t\tbcp.config.DeadCheckInterval = defaultDeadCheckInterval\n\t}\n\n\tif bcp.config.ExpireTime == 0 {\n\t\tbcp.config.ExpireTime = defaultExpireTime\n\t}\n\n\treturn bcp\n}", "func newPool(addr string) *redis.Pool {\n\tp := &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) 
{\n\t\t\t/*c, err := redis.Dial(\"tcp\", addr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error in connecting to Redis: %v\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlog.Printf(\"Connected!\")\n\t\t\treturn c, nil\n\t\t\t*/\n\t\t\treturn redis.Dial(\"tcp\", addr)\n\t\t},\n\t}\n\treturn p\n}", "func NewPool(size int) (*GoroutinePool, error) {\n\tp := &GoroutinePool{\n\t\tcapacity: int32(size),\n\t\tlock: NewSpinLock(),\n\t\tconfig: defaultPoolConfig(),\n\t\tworkers: NewLoopQueue(size),\n\t}\n\tp.cond = sync.NewCond(p.lock)\n\tp.objCache.New = func() interface{} {\n\t\treturn &Worker{\n\t\t\tpool: p,\n\t\t\ttask: make(chan f, 1),\n\t\t\targs: make(chan interface{}, 1),\n\t\t}\n\t}\n\treturn p, nil\n}", "func New(bc blockchainer.Blockchainer, capacity int) *Pool {\n\tif capacity <= 0 {\n\t\tpanic(\"invalid capacity\")\n\t}\n\n\treturn &Pool{\n\t\tverified: make(map[util.Uint256]*list.Element),\n\t\tsenders: make(map[util.Uint160]*list.List),\n\t\tsingleCap: capacity,\n\t\tchain: bc,\n\t}\n}", "func newPool(count, size int) *Pool {\n\tp := &Pool{\n\t\tbuffers: make(chan *Buffer, count),\n\t}\n\tfor i := 0; i < count; i++ {\n\t\tp.buffers <- &Buffer{\n\t\t\tpool: p,\n\t\t\tbytes: make([]byte, size),\n\t\t}\n\t}\n\treturn p\n}", "func CreatePool(\n\thost, password, network string,\n\tmaxConn int,\n\tidleTimeout, connTimeout time.Duration,\n) *rd.Pool {\n\treturn &rd.Pool{\n\t\tMaxIdle: maxConn,\n\t\tIdleTimeout: idleTimeout,\n\t\tDial: func() (rd.Conn, error) {\n\t\t\tc, err := rd.Dial(network, host,\n\t\t\t\trd.DialConnectTimeout(connTimeout),\n\t\t\t\trd.DialReadTimeout(connTimeout),\n\t\t\t\trd.DialWriteTimeout(connTimeout))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif password != \"\" {\n\t\t\t\tif _, err := c.Do(\"AUTH\", password); err != nil {\n\t\t\t\t\tc.Close()\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func newPool(server string) *redis.Pool {\n\n\treturn &redis.Pool{\n\t\tMaxActive: 
500,\n\t\tMaxIdle: 500,\n\t\tIdleTimeout: 5 * time.Second,\n\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", server)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n}", "func New(databaseConfig *Config, logger pgx.Logger) (*Repository, error) {\n\tpostgresDataSource := fmt.Sprintf(\"postgres://%s:%s@%s/%s?sslmode=%s\",\n\t\tdatabaseConfig.Username,\n\t\tdatabaseConfig.Password,\n\t\tdatabaseConfig.Hostname,\n\t\tdatabaseConfig.Name,\n\t\tdatabaseConfig.SSLMode)\n\tpoolConfig, err := pgxpool.ParseConfig(postgresDataSource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpoolConfig.ConnConfig.Logger = logger\n\tlogLevelMapping := map[string]pgx.LogLevel{\n\t\t\"trace\": pgx.LogLevelTrace,\n\t\t\"debug\": pgx.LogLevelDebug,\n\t\t\"info\": pgx.LogLevelInfo,\n\t\t\"warn\": pgx.LogLevelWarn,\n\t\t\"error\": pgx.LogLevelError,\n\t}\n\tpoolConfig.ConnConfig.LogLevel = logLevelMapping[databaseConfig.LogLevel]\n\tpoolConfig.MaxConns = databaseConfig.MaxConnections\n\tpoolConfig.MinConns = databaseConfig.MinConnections\n\n\tpool, err := pgxpool.ConnectConfig(context.Background(), poolConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Repository{pool: pool}, nil\n}", "func ParamSpecPoolNew(typePrefixing bool) *ParamSpecPool {\n\tc_type_prefixing :=\n\t\tboolToGboolean(typePrefixing)\n\n\tretC := C.g_param_spec_pool_new(c_type_prefixing)\n\tretGo := ParamSpecPoolNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func NewPool() Pool {\n\treturn Pool{p: &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn logf.NewBufferWithCapacity(1024)\n\t\t},\n\t}}\n}" ]
[ "0.77236617", "0.7638384", "0.7456807", "0.7367632", "0.7274907", "0.7243026", "0.7232222", "0.71909064", "0.7151999", "0.7128678", "0.69553524", "0.69491535", "0.69203454", "0.69009054", "0.68833876", "0.687325", "0.67893636", "0.67762536", "0.6750324", "0.6733915", "0.6719909", "0.6713006", "0.67071664", "0.66995025", "0.66987973", "0.6690724", "0.6668591", "0.66620463", "0.663903", "0.663068", "0.6630557", "0.6622321", "0.66049284", "0.66012007", "0.6600279", "0.6574163", "0.6559435", "0.65500194", "0.6546179", "0.654323", "0.6535846", "0.65280545", "0.6520308", "0.65162355", "0.65111244", "0.65106285", "0.6493671", "0.64853716", "0.64852816", "0.64768326", "0.64436805", "0.64418256", "0.6441521", "0.6436681", "0.6423897", "0.6423445", "0.64227074", "0.64216155", "0.6402949", "0.6401849", "0.63961333", "0.63827455", "0.6377078", "0.6358281", "0.63508624", "0.6347139", "0.6341368", "0.6337665", "0.6336527", "0.6334384", "0.63168275", "0.631238", "0.6307576", "0.6302542", "0.6298451", "0.6295783", "0.62821615", "0.6280277", "0.6279792", "0.6277735", "0.62717056", "0.6265656", "0.62401783", "0.62390816", "0.62383676", "0.6230972", "0.6229783", "0.61936843", "0.61909103", "0.6183053", "0.6181917", "0.6178418", "0.6176084", "0.61754483", "0.616659", "0.6150581", "0.6117509", "0.60901165", "0.60862416", "0.6084364" ]
0.81151676
0
Get a resource from resource pool.
func (c *ChannelPool) Get() (interface{}, error) { conns := c.getConns() if conns == nil { return nil, ErrClosed } for { select { case wrapConn := <-conns: if wrapConn == nil { return c.factory(c.para) } if timeout := c.idleTimeout; timeout > 0 { if wrapConn.t.Add(timeout).Before(time.Now()) { c.Close(wrapConn.conn) continue } } return wrapConn.conn, nil default: conn, err := c.factory(c.para) if err != nil { return nil, err } return conn, nil } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *ResourcePool) Get() (resource ResourceWrapper, err error) {\n\treturn p.getWait()\n}", "func (handler GetLiquidityPoolByIDHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) {\n\tctx := r.Context()\n\tqp := LiquidityPoolQuery{}\n\terr := getParams(&qp, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thistoryQ, err := horizonContext.HistoryQFromRequest(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcb, err := historyQ.FindLiquidityPoolByID(ctx, qp.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tledger := &history.Ledger{}\n\terr = historyQ.LedgerBySequence(ctx, ledger, int32(cb.LastModifiedLedger))\n\tif historyQ.NoRows(err) {\n\t\tledger = nil\n\t} else if err != nil {\n\t\treturn nil, errors.Wrap(err, \"LedgerBySequence error\")\n\t}\n\n\tvar resource protocol.LiquidityPool\n\terr = resourceadapter.PopulateLiquidityPool(ctx, &resource, cb, ledger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resource, nil\n}", "func (c *Client) GetResource(state, module, name string) (st storage.Resource, err error) {\n\terr = c.get(&st, \"resources/\"+state+\"/\"+module+\"/\"+name, nil)\n\tif err != nil {\n\t\treturn st, fmt.Errorf(\"failed to retrieve resource: %v\", err)\n\t}\n\n\treturn\n}", "func (p *Pool) GetResource(ctx context.Context) (res Resource, err error) {\n\t//If there is a monitorFunc, send the notification\n\tif p.MonitorFunc != nil {\n\t\tgo p.MonitorFunc(newActionMsg(ResourceRequested))\n\t}\nresLoop:\n\tfor res == nil {\n\t\tselect {\n\t\tcase res = <-p.resourceQueue:\n\t\t\tp.mutex.Lock()\n\t\t\tp.resourceCount--\n\t\t\tp.mutex.Unlock()\n\n\t\t\tif res.IsHealthy() {\n\t\t\t\tbreak resLoop\n\t\t\t}\n\t\t\t//Terminate unhealthy resources & set res back to nil to get the next one.\n\t\t\tif p.MonitorFunc != nil {\n\t\t\t\tgo p.MonitorFunc(newActionMsg(UnhealthyResourceTerminated))\n\t\t\t}\n\t\t\tres.Terminate()\n\t\t\tres = nil\n\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\tbreak 
resLoop\n\t\t}\n\t}\n\treturn res, err\n}", "func GetResource(dao *Dao, state data.Map, URL string) *url.Resource {\n\tfor _, candidate := range getURLs(URL) {\n\t\tresource := url.NewResource(candidate)\n\t\tstorageService, err := storage.NewServiceForURL(resource.URL, \"\")\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\texists, _ := storageService.Exists(resource.URL)\n\t\tif exists {\n\t\t\treturn resource\n\t\t}\n\t}\n\tif strings.Contains(URL, \":/\") || strings.HasPrefix(URL, \"/\") {\n\t\treturn nil\n\t}\n\t//Lookup shared workflow\n\tfor _, candidate := range getURLs(URL) {\n\t\tresource, err := dao.NewRepoResource(state, fmt.Sprintf(\"workflow/%v\", candidate))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tstorageService, err := storage.NewServiceForURL(resource.URL, \"\")\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif exists, _ := storageService.Exists(resource.URL); exists {\n\t\t\treturn resource\n\t\t}\n\t}\n\treturn nil\n}", "func (rp *resourcePool) Get() interface{} {\n\trp.Lock()\n\tdefer rp.Unlock()\n\n\tfor rp.start != nil {\n\t\tcurr := rp.start\n\t\trp.remove(curr)\n\t\tif !rp.expiredFn(curr.value) {\n\t\t\treturn curr.value\n\t\t}\n\t\trp.closeFn(curr.value)\n\t\trp.totalSize--\n\t}\n\treturn nil\n}", "func (r *PoolNAPTRResource) Get(id string) (*Pool, error) {\n\tvar item Pool\n\tif err := r.c.ReadQuery(BasePath+PoolNAPTREndpoint, &item); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &item, nil\n}", "func (p *Pool) Get() interface{}", "func (cf *ConfigGroup) GetResource(gvk, name, namespace string) pulumi.Resource {\n\tid := name\n\tif len(namespace) > 0 && namespace != \"default\" {\n\t\tid = fmt.Sprintf(\"%s/%s\", namespace, name)\n\t}\n\tkey := fmt.Sprintf(\"%s::%s\", gvk, id)\n\treturn cf.Resources[key]\n}", "func (c *Client) GetPool(name string) (p Pool, err error) {\n\tvar poolURI string\n\n\tfor _, p := range c.Info.Pools {\n\t\tif p.Name == name {\n\t\t\tpoolURI = p.URI\n\t\t\tbreak\n\t\t}\n\t}\n\tif poolURI == \"\" 
{\n\t\treturn p, errors.New(\"No pool named \" + name)\n\t}\n\n\terr = c.parseURLResponse(poolURI, &p)\n\n\tp.client = c\n\n\terr = p.refresh()\n\treturn\n}", "func (p *ResourcePool) getAvailable(timeout <-chan time.Time) (ResourceWrapper, error) {\n\n\t//Wait for an object, or a timeout\n\tselect {\n\tcase <-timeout:\n\t\treturn ResourceWrapper{p: p, e: ResourceTimeoutError}, ResourceTimeoutError\n\n\tcase wrapper, ok := <-p.resources:\n\n\t\t//pool is closed\n\t\tif !ok {\n\t\t\treturn ResourceWrapper{p: p, e: PoolClosedError}, PoolClosedError\n\t\t}\n\n\t\t//decriment the number of available resources\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\n\t\t//if the resource fails the test, close it and wait to get another resource\n\t\tif p.resTest(wrapper.Resource) != nil {\n\t\t\tp.resClose(wrapper.Resource)\n\t\t\twrapper.Close()\n\t\t\treturn ResourceWrapper{p: p, e: ResourceTestError}, ResourceTestError\n\t\t}\n\n\t\t//we got a valid resource to return\n\t\t//signal the filler that we need to fill\n\t\treturn wrapper, wrapper.e\n\n\t//we don't have a resource available\n\t//lets create one if we can\n\tdefault:\n\n\t\t//try to obtain a lock for a new resource\n\t\tif n_open := atomic.AddUint32(&p.open, 1); n_open > p.Cap() {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceExhaustedError}, ResourceExhaustedError\n\t\t}\n\n\t\tresource, err := p.resOpen()\n\t\tif err != nil {\n\t\t\t//decriment\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\t\treturn ResourceWrapper{p: p, e: ResourceCreationError}, ResourceCreationError\n\t\t}\n\n\t\treturn ResourceWrapper{p: p, Resource: resource}, nil\n\t}\n}", "func getPool(\n\thandle string, context ServerContext, access Access,\n) (*pool, error) {\n\tbuffer, err := getRawPool(handle, context, access)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tloaded := &pool{handle: handle}\n\terr = loaded.UnmarshalBinary(buffer)\n\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\treturn loaded, nil\n}", "func (m *ProgramControl) GetResource()(ProgramResourceable) {\n val, err := m.GetBackingStore().Get(\"resource\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(ProgramResourceable)\n }\n return nil\n}", "func (c *connPool) GetFromPool() interface{} {\n\tdefer c.nextMutex.Unlock()\n\tc.nextMutex.Lock()\n\tif c.total == 0 {\n\t\treturn nil\n\t}\n\tc.next = (c.next + 1) % c.total\n\treturn c.pool[c.next]\n}", "func GetResource(nsId string, resourceType string, resourceId string) (interface{}, error) {\n\n\terr := common.CheckString(nsId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn nil, err\n\t}\n\n\terr = common.CheckString(resourceId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn nil, err\n\t}\n\tcheck, err := CheckResource(nsId, resourceType, resourceId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tif !check {\n\t\terrString := \"The \" + resourceType + \" \" + resourceId + \" does not exist.\"\n\t\t//mapA := map[string]string{\"message\": errString}\n\t\t//mapB, _ := json.Marshal(mapA)\n\t\terr := fmt.Errorf(errString)\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"[Get resource] \" + resourceType + \", \" + resourceId)\n\n\tkey := common.GenResourceKey(nsId, resourceType, resourceId)\n\t//fmt.Println(key)\n\n\tkeyValue, err := common.CBStore.Get(key)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn nil, err\n\t}\n\tif keyValue != nil {\n\t\tswitch resourceType {\n\t\tcase common.StrImage:\n\t\t\tres := TbImageInfo{}\n\t\t\terr = json.Unmarshal([]byte(keyValue.Value), &res)\n\t\t\tif err != nil {\n\t\t\t\tcommon.CBLog.Error(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn res, nil\n\t\tcase common.StrSecurityGroup:\n\t\t\tres := TbSecurityGroupInfo{}\n\t\t\terr = json.Unmarshal([]byte(keyValue.Value), &res)\n\t\t\tif err != nil {\n\t\t\t\tcommon.CBLog.Error(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn res, 
nil\n\t\tcase common.StrSpec:\n\t\t\tres := TbSpecInfo{}\n\t\t\terr = json.Unmarshal([]byte(keyValue.Value), &res)\n\t\t\tif err != nil {\n\t\t\t\tcommon.CBLog.Error(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn res, nil\n\t\tcase common.StrSSHKey:\n\t\t\tres := TbSshKeyInfo{}\n\t\t\terr = json.Unmarshal([]byte(keyValue.Value), &res)\n\t\t\tif err != nil {\n\t\t\t\tcommon.CBLog.Error(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn res, nil\n\t\tcase common.StrVNet:\n\t\t\tres := TbVNetInfo{}\n\t\t\terr = json.Unmarshal([]byte(keyValue.Value), &res)\n\t\t\tif err != nil {\n\t\t\t\tcommon.CBLog.Error(err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn res, nil\n\t\t}\n\n\t\t//return true, nil\n\t}\n\terrString := \"Cannot get \" + resourceType + \" \" + resourceId + \".\"\n\terr = fmt.Errorf(errString)\n\treturn nil, err\n}", "func (m *EducationSubmissionResource) GetResource()(EducationResourceable) {\n val, err := m.GetBackingStore().Get(\"resource\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(EducationResourceable)\n }\n return nil\n}", "func (bp *Pool) Get(params ...interface{}) interface{} {\n\tres := bp.pool.Poll()\n\tif (res == nil || (bp.IsUsable != nil && !bp.IsUsable(res, params...))) && bp.New != nil {\n\t\tres = bp.New(params...)\n\t}\n\n\treturn res\n}", "func (f *Facade) GetResourcePool(ctx datastore.Context, id string) (*pool.ResourcePool, error) {\n\tglog.V(2).Infof(\"Facade.GetResourcePool: id=%s\", id)\n\tvar entity pool.ResourcePool\n\terr := f.poolStore.Get(ctx, pool.Key(id), &entity)\n\tif datastore.IsErrNoSuchEntity(err) {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.calcPoolCapacity(ctx, &entity)\n\n\treturn &entity, nil\n}", "func getResource(mapping *meta.RESTMapping, config *rest.Config, group string,\n\tversion string, namespace string, name string) error {\n\trestClient, err := getRESTClient(config, group, version)\n\tif err != nil {\n\t\treturn 
&kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"getResource error: %v\", err),\n\t\t}\n\t}\n\n\tif _, err = restClient.\n\t\tGet().\n\t\tResource(mapping.Resource.Resource).\n\t\tNamespaceIfScoped(namespace, mapping.Scope.Name() == \"namespace\").\n\t\tName(name).\n\t\tDo().\n\t\tGet(); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn &kfapis.KfError{\n\t\t\tCode: int(kfapis.INVALID_ARGUMENT),\n\t\t\tMessage: fmt.Sprintf(\"getResource error: %v\", err),\n\t\t}\n\t}\n}", "func (p *ObjectPool) Get() PoolObject {\n\tv := p.pool.Get()\n\tif v == nil {\n\t\treturn p.New()\n\t}\n\n\treturn v.(PoolObject)\n}", "func (yad *yandexDisk) GetResource(path string, fields []string, limit int, offset int, previewCrop bool, previewSize string, sort string) (r *Resource, e error) {\n\treq, e := yad.getResource(\"\", path, fields, limit, offset, previewCrop, previewSize, sort)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tr = new(Resource)\n\t_, e = yad.client.getResponse(req, &r)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn\n}", "func getResource(name string, taskInfo *mesos.TaskInfo) *mesos.Resource {\n\tfor _, resource := range taskInfo.Resources {\n\t\tif *resource.Name == name {\n\t\t\treturn resource\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c ComponentDescriptor) GetResource(rtype, name, version string) (Resource, error) {\n\tfor _, res := range c.Resources {\n\t\tif res.GetType() == rtype && res.GetName() == name && res.GetVersion() == version {\n\t\t\treturn res, nil\n\t\t}\n\t}\n\treturn Resource{}, NotFound\n}", "func (b *Bucket) GetPool() *Pool {\n\tb.RLock()\n\tdefer b.RUnlock()\n\tret := b.pool\n\treturn ret\n}", "func GetPool() Pool {\n\treturn p\n}", "func (bcp *basicClientPool) Get(url, authType, accessCredential string, skipCertVerify bool) (Client, error) {\n\tk := fmt.Sprintf(\"%s:%s:%s:%v\", url, authType, accessCredential, skipCertVerify)\n\n\titem, ok := bcp.pool.Load(k)\n\tif !ok {\n\t\tnc, err := 
NewClient(url, authType, accessCredential, skipCertVerify)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"client pool: get\")\n\t\t}\n\n\t\t// Cache it\n\t\tnpi := &poolItem{\n\t\t\tc: nc,\n\t\t\ttimestamp: time.Now().UTC(),\n\t\t}\n\n\t\tbcp.pool.Store(k, npi)\n\t\titem = npi\n\n\t\t// dead check\n\t\tbcp.deadCheck(k, npi)\n\t}\n\n\treturn item.(*poolItem).c, nil\n}", "func GetResource(fn string) fyne.Resource {\n\tbytes := assetRead(fn)\n\tif len(bytes) == 0 {\n\t\treturn nil\n\t}\n\tname := filepath.Base(fn)\n\treturn fyne.NewStaticResource(name, bytes)\n}", "func (s *Pool) Get() (net.Conn, error) {\n\tif cn := s.pop(); cn != nil {\n\t\treturn cn, nil\n\t}\n\n\treturn s.factory()\n}", "func (s *Session) GetResourcePoolByMoID(ctx context.Context, moID string) (*object.ResourcePool, error) {\n\tref := types.ManagedObjectReference{Type: \"ResourcePool\", Value: moID}\n\to, err := s.Finder.ObjectReference(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn o.(*object.ResourcePool), nil\n}", "func (p *pool) Get(connectionString string) (*RestClient, error) {\n\tkey, err := makeKey(connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c, ok := p.clients[key]; ok {\n\t\treturn c, nil\n\t}\n\n\tc, err := NewRestClient(connectionString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := p.defaults.apply(c); err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.clients[key] = c\n\treturn c, nil\n}", "func (p *WorkerPool) Get(ctx context.Context) (w *Worker, err error) {\n\tr, err := p.resourcePool.Get(ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\tw = r.(*Worker)\n\treturn\n}", "func GetResource(uri string) (io.ReadCloser, error) {\n\tvar file io.ReadCloser\n\tif strings.HasPrefix(uri, \"http://\") || strings.HasPrefix(uri, \"https://\") {\n\t\tresp, err := http.Get(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, errors.Errorf(\"http GET returned status %d for 
resource %s\", resp.StatusCode, uri)\n\t\t}\n\n\t\tfile = resp.Body\n\t} else {\n\t\tpath, err := filepath.Abs(uri)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"getting absolute path for %v\", uri)\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"opening file %v\", path)\n\t\t}\n\t\tfile = f\n\t}\n\n\t// Write the body to file\n\treturn file, nil\n}", "func (p *Pool) Get() (*redis.Client, error) {\n\tselect {\n\tcase conn := <- p.pool:\n\t\treturn conn, nil\n\n\tdefault:\n\t\tselect {\n\t\tcase conn := <- p.pool:\n\t\t\treturn conn, nil\n\n\t\tcase addr := <- p.spare:\n\t\t\tvar conn *redis.Client\n\t\t\tvar err error\n\n\t\t\tdefer func() {\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.replenish(p.network, addr)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tconn, err = p.df(p.network, addr)\n\t\t\treturn conn, err\n\n\t\tcase <-time.After(time.Second * 5):\n\t\t\treturn nil, errors.New(\"pool exhausted\")\n\t\t}\n\t}\n}", "func GetPool() *radix.Pool {\n\tpool, err := radix.NewPool(\"tcp\", os.Getenv(\"ERU_SE_REDIS_IP\"), 10)\n\tif err != nil {\n\t\tprintln(err)\n\t\tpanic(err)\n\t} else {\n\t\treturn pool\n\t}\n}", "func (objectSet *PoolObjectSet) GetObject(id string) (*nimbleos.Pool, error) {\n\tresp, err := objectSet.Client.Get(poolPath, id, &nimbleos.Pool{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// null check\n\tif resp == nil {\n\t\treturn nil, nil\n\t}\n\treturn resp.(*nimbleos.Pool), err\n}", "func (p *Pool) Get(ctx context.Context) (*ClientConn, error) {\n\tif p == nil {\n\t\treturn nil, ErrClosed\n\t}\n\n\tclient := make(chan *ClientConn, 1)\n\tp.clientQueue(client)\n\n\tselect {\n\tcase conn := <-client:\n\t\tconn.use()\n\t\treturn conn, nil\n\tcase <-ctx.Done():\n\t\treturn nil, ErrTimeout\n\t}\n}", "func (r *RedisPool) GetPool() *redis.Pool {\n\tif r.pool == nil {\n\t\tlog.Fatalln(errors.New(\"error get redis pool\"))\n\t}\n\treturn r.pool\n}", "func (p *NoteStoreClient) GetResource(ctx 
context.Context, authenticationToken string, guid Types.Guid, withData bool, withRecognition bool, withAttributes bool, withAlternateData bool) (r *Types.Resource, userException *Errors.EDAMUserException, systemException *Errors.EDAMSystemException, notFoundException *Errors.EDAMNotFoundException, err error) {\n\tif err = p.sendGetResource(ctx, authenticationToken, guid, withData, withRecognition, withAttributes, withAlternateData); err != nil {\n\t\treturn\n\t}\n\treturn p.recvGetResource(ctx)\n}", "func (p *Pool) Get() (x interface{}) {\n\tp.init()\n\treturn p.p.Get()\n}", "func (d *DryccCmd) ResourceGet(appID, name string) error {\n\ts, appID, err := load(d.ConfigFile, appID)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//d.Printf(\" %s from %s... \", name, appID)\n\n\tresource, err := resources.Get(s.Client, appID, name)\n\tif d.checkAPICompatibility(s.Client, err) != nil {\n\t\treturn err\n\t}\n\t// todo format data json to yaml\n\tprintResourceDetail(d, appID, resource)\n\t//d.Println(resource)\n\treturn nil\n}", "func (rcont *ResourceContainer) Get() Resource {\n\treturn rcont.resc\n}", "func Get(reader io.Reader) (r *bufio.Reader) {\n\treturn defaultPool.Get(reader)\n}", "func (k Keeper) GetPool(ctx sdk.Context, id uint64) types.Pool {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolKey))\n\tvar pool types.Pool\n\tk.cdc.MustUnmarshalBinaryBare(store.Get(GetPoolIDBytes(id)), &pool)\n\treturn pool\n}", "func GetResource(name string, namespace string, kubeclient *kubernetes.Clientset) (string, error) {\n\tif namespace == \"\" {\n\t\tnamespace = \"default\"\n\t}\n\n\topts := metaV1.ListOptions{\n\t\tLimit: 10,\n\t}\n\topts.APIVersion = \"apps/v1\"\n\topts.Kind = \"Deployment\"\n\n\tlist, err := kubeclient.AppsV1().Deployments(namespace).List(opts)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Get Deployment error\")\n\t}\n\n\tfor _, deployment := range list.Items {\n\t\tif deployment.Name == name {\n\t\t\treturn 
name, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func GetPool() *redis.Pool {\n\treturn pool\n}", "func (pool *Pool) Get() (*Conn, error) {\n\n\tfor {\n\t\tif n := atomic.LoadInt32(&pool.numIdle); n > 0 {\n\t\t\tif atomic.CompareAndSwapInt32(&pool.numIdle, n, n-1) {\n\t\t\t\tdeadline := pool.Deadline()\n\t\t\t\treturn pool.get(deadline.UnixNano())\n\t\t\t}\n\t\t} else if n < 0 {\n\t\t\treturn nil, errPoolClosed\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tmax := pool.maxConnections()\n\tfor {\n\t\tif n := atomic.LoadInt32(&pool.numOpen); 0 <= n && n < max {\n\t\t\tif atomic.CompareAndSwapInt32(&pool.numOpen, n, n+1) {\n\t\t\t\tconn, err := pool.dial()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t} else if n < 0 {\n\t\t\treturn nil, errPoolClosed\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tdeadline := pool.Deadline()\n\treturn pool.get(deadline.UnixNano())\n}", "func (s *ResourcesService) Get(ctx context.Context, realm, clientID, resourceID string) (*Resource, *http.Response, error) {\n\tu := fmt.Sprintf(\"admin/realms/%s/clients/%s/authz/resource-server/resource/%s\", realm, clientID, resourceID)\n\treq, err := s.keycloak.NewRequest(http.MethodGet, u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar resource Resource\n\tres, err := s.keycloak.Do(ctx, req, &resource)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &resource, res, nil\n}", "func GetPool(conf *Conf, db DB, pk PoolPK) (pool *Pool, err error) {\n\tipk, err := dynamodbattribute.MarshalMap(pk)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to marshal keys map\")\n\t}\n\n\tvar out *dynamodb.GetItemOutput\n\tif out, err = db.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(conf.PoolsTableName),\n\t\tKey: ipk,\n\t}); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get item\")\n\t}\n\n\tif out.Item == nil {\n\t\treturn nil, ErrPoolNotExists\n\t}\n\n\tpool = &Pool{}\n\terr = 
dynamodbattribute.UnmarshalMap(out.Item, pool)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal item\")\n\t}\n\n\treturn pool, nil\n}", "func (a *ResourcepoolApiService) GetResourcepoolPoolByMoid(ctx context.Context, moid string) ApiGetResourcepoolPoolByMoidRequest {\n\treturn ApiGetResourcepoolPoolByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (h *ConnectionPoolsHandler) Get(ctx context.Context, project, serviceName, poolName string) (*ConnectionPool, error) {\n\t// There's no API for getting individual connection pool entry. List instead and filter from there\n\tpools, err := h.List(ctx, project, serviceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, pool := range pools {\n\t\tif pool.PoolName == poolName {\n\t\t\treturn pool, nil\n\t\t}\n\t}\n\n\terr = Error{Message: fmt.Sprintf(\"Connection pool with name %v not found\", poolName), Status: 404}\n\treturn nil, err\n}", "func (c *channelPool) Get() (RpcAble, error) {\n\trconns := c.getRconns()\n\tif rconns == nil {\n\t\treturn nil, ErrClosed\n\t}\n\n\t// wrap our rconns with out custom RpcAble implementation (wrapRconn\n\t// method) that puts the RPC-able connection back to the pool if it's closed.\n\tselect {\n\tcase rconn := <-rconns:\n\t\tif rconn == nil {\n\t\t\treturn nil, ErrClosed\n\t\t}\n\n\t\treturn c.wrapRconn(rconn), nil\n\tdefault:\n\t\trconn, err := c.factory()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn c.wrapRconn(rconn), nil\n\t}\n}", "func (rc *ResourceCollection) Get(ref ResourceReference) *Resource {\n\tif _, exists := rc.collection[ref.Domain]; !exists {\n\t\treturn nil\n\t}\n\n\tif _, exists := rc.collection[ref.Domain][ref.Kind]; !exists {\n\t\treturn nil\n\t}\n\n\tif resource, exists := rc.collection[ref.Domain][ref.Kind][ref.ID]; exists {\n\t\treturn &resource\n\t}\n\treturn nil\n}", "func (c *Client) GetResource(ctx context.Context, key string, resourceType models.ResourceType) 
([]*models.Object, error) {\n\tvar out models.WhoisResource\n\tif err := c.transport.Get(ctx, \"/\"+string(resourceType)+\"/\"+key, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out.Objects.Object, nil\n}", "func (c *IDClient) GetResource(ctx context.Context, id gomanifold.ID) (*Resource, error) {\n\tidBytes, err := id.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := fmt.Sprintf(\"/id/resource/%s\", string(idBytes))\n\n\treq, err := c.backend.NewRequest(http.MethodGet, p, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Resource\n\t_, err = c.backend.Do(ctx, req, &resp, func(code int) error {\n\t\tswitch code {\n\t\tcase 400, 404, 500:\n\t\t\treturn &Error{}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resp, nil\n}", "func (cp *Pool) Get(ctx context.Context, setting *pools.Setting) (*DBConn, error) {\n\tspan, ctx := trace.NewSpan(ctx, \"Pool.Get\")\n\tdefer span.Finish()\n\n\tif cp.waiterCap > 0 {\n\t\twaiterCount := cp.waiterCount.Add(1)\n\t\tdefer cp.waiterCount.Add(-1)\n\t\tif waiterCount > cp.waiterCap {\n\t\t\tcp.waiterQueueFull.Add(1)\n\t\t\treturn nil, vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, \"pool %s waiter count exceeded\", cp.name)\n\t\t}\n\t}\n\n\tif cp.isCallerIDAppDebug(ctx) {\n\t\treturn NewDBConnNoPool(ctx, cp.appDebugParams, cp.dbaPool, setting)\n\t}\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn nil, ErrConnPoolClosed\n\t}\n\tspan.Annotate(\"capacity\", p.Capacity())\n\tspan.Annotate(\"in_use\", p.InUse())\n\tspan.Annotate(\"available\", p.Available())\n\tspan.Annotate(\"active\", p.Active())\n\n\tif cp.timeout != 0 {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, cp.timeout)\n\t\tdefer cancel()\n\t}\n\n\tstart := time.Now()\n\tr, err := p.Get(ctx, setting)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cp.getConnTime != nil {\n\t\tif setting == nil {\n\t\t\tcp.getConnTime.Record(getWithoutS, 
start)\n\t\t} else {\n\t\t\tcp.getConnTime.Record(getWithS, start)\n\t\t}\n\t}\n\treturn r.(*DBConn), nil\n}", "func GetPool(ctx context.Context) *redis.Pool {\n\tp, _ := ctx.Value(&contextKey).(*redis.Pool)\n\treturn p\n}", "func (p *Pool) Get(ctx context.Context) (*PoolConn, error) {\n\tselect {\n\tcase conn := <-p.ch:\n\t\tnow := p.nowfunc()\n\t\tif (p.maxIdleTime > 0 && now.Sub(conn.freedAt) > p.maxIdleTime) ||\n\t\t\t(p.maxConnTime > 0 && now.Sub(conn.CreatedAt()) > p.maxConnTime) {\n\t\t\tp.closeconn(conn)\n\t\t\treturn p.Get(ctx)\n\t\t}\n\t\tconn.p = p\n\t\treturn conn, nil\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tdefault:\n\t}\n\tactive := atomic.AddInt64(&p.active, 1)\n\tif p.maxActive > 0 && active > p.maxActive {\n\t\tatomic.AddInt64(&p.active, -1)\n\t\treturn nil, ErrMaxActive\n\t}\n\tc, err := p.dial(ctx)\n\tif err != nil {\n\t\tatomic.AddInt64(&p.active, -1)\n\t\treturn nil, err\n\t}\n\treturn &PoolConn{Conn: c, p: p, createdAt: p.nowfunc()}, nil\n}", "func (m *MultiConnPool) Get() *pgx.ConnPool {\n\tif len(m.Pools) == 1 {\n\t\treturn m.Pools[0]\n\t}\n\ti := atomic.AddUint32(&m.counter, 1) - 1\n\treturn m.Pools[i%uint32(len(m.Pools))]\n}", "func (o *ResourcepoolPoolMember) GetResource() MoBaseMoRelationship {\n\tif o == nil || o.Resource == nil {\n\t\tvar ret MoBaseMoRelationship\n\t\treturn ret\n\t}\n\treturn *o.Resource\n}", "func Get(r *Resource) {\n\tchanJobs <- r\n}", "func (r *RootResource) GetResource(path string) *Resource {\n\tresources := r.resources\n\tvar resource *Resource\n\tfor _, comp := range strings.Split(path, \".\") {\n\t\tif subResource, found := resources[comp]; found {\n\t\t\tresource = subResource.resource\n\t\t\tresources = resource.resources\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn resource\n}", "func (c *ResourcesClient) Get(ctx context.Context, teamLabel string, resourceLabel string, opts *ResourcesGetOpts) (*Resource, error) {\n\tp := fmt.Sprintf(\"/resources/%s/%s/\", teamLabel, 
resourceLabel)\n\n\tvar q url.Values\n\tif opts != nil {\n\t\tq = make(url.Values)\n\t\tif opts.ProductLabel != nil {\n\t\t\tq.Set(\"product_label\", *opts.ProductLabel)\n\t\t}\n\n\t\tif opts.ProjectLabel != nil {\n\t\t\tq.Set(\"project_label\", *opts.ProjectLabel)\n\t\t}\n\t}\n\n\treq, err := c.backend.NewRequest(http.MethodGet, p, q, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Resource\n\t_, err = c.backend.Do(ctx, req, &resp, func(code int) error {\n\t\tswitch code {\n\t\tcase 400, 404, 500:\n\t\t\treturn &Error{}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resp, nil\n}", "func (p *ResourcePool) getWait() (resource ResourceWrapper, err error) {\n\n\tstart := time.Now()\n\ttimeout := time.After(p.TimeoutTime)\n\n\tfor {\n\t\tr, e := p.getAvailable(timeout)\n\n\t\t//if the test failed try again\n\t\tif e == ResourceTestError {\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tcontinue\n\t\t}\n\n\t\t//if we are at our max open try again after a short sleep\n\t\tif e == ResourceExhaustedError {\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tcontinue\n\t\t}\n\n\t\t//if we failed to create a new resource, try agaig after a short sleep\n\t\tif e == ResourceCreationError {\n\t\t\ttime.Sleep(time.Microsecond)\n\t\t\tcontinue\n\t\t}\n\n\t\tp.Report()\n\t\tp.ReportWait(time.Now().Sub(start))\n\t\treturn r, e\n\t}\n\n}", "func GetActiveResource(ctx context.Context) Resource {\n\trcont := GetActiveResourceContainer(ctx)\n\tif rcont == nil {\n\t\treturn nil\n\t}\n\n\tif !rcont.Exists() {\n\t\treturn nil\n\t}\n\n\treturn rcont.Get()\n}", "func (c *boundedPool) Get() (net.Conn, error) {\n\tconns := c.getConns()\n\tif conns == nil {\n\t\treturn nil, pool.ErrClosed\n\t}\n\n\t// Try and grab a connection from the pool\n\tselect {\n\tcase conn := <-conns:\n\t\tif conn == nil {\n\t\t\treturn nil, pool.ErrClosed\n\t\t}\n\t\treturn c.wrapConn(conn), nil\n\tdefault:\n\t\t// Could not get connection, can we create a new 
one?\n\t\tif atomic.LoadInt32(&c.total) < int32(cap(conns)) {\n\t\t\tconn, err := c.factory()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tatomic.AddInt32(&c.total, 1)\n\n\t\t\treturn c.wrapConn(conn), nil\n\t\t}\n\t}\n\n\t// The pool was empty and we couldn't create a new one to\n\t// retry until one is free or we timeout\n\tselect {\n\tcase conn := <-conns:\n\t\tif conn == nil {\n\t\t\treturn nil, pool.ErrClosed\n\t\t}\n\t\treturn c.wrapConn(conn), nil\n\tcase <-time.After(c.timeout):\n\t\treturn nil, fmt.Errorf(\"timed out waiting for free connection\")\n\t}\n\n}", "func (p *NoteStoreClient) GetResource(ctx context.Context, authenticationToken string, guid GUID, withData bool, withRecognition bool, withAttributes bool, withAlternateData bool) (r *Resource, err error) {\n var _args135 NoteStoreGetResourceArgs\n _args135.AuthenticationToken = authenticationToken\n _args135.GUID = guid\n _args135.WithData = withData\n _args135.WithRecognition = withRecognition\n _args135.WithAttributes = withAttributes\n _args135.WithAlternateData = withAlternateData\n var _result136 NoteStoreGetResourceResult\n if err = p.Client_().Call(ctx, \"getResource\", &_args135, &_result136); err != nil {\n return\n }\n switch {\n case _result136.UserException!= nil:\n return r, _result136.UserException\n case _result136.SystemException!= nil:\n return r, _result136.SystemException\n case _result136.NotFoundException!= nil:\n return r, _result136.NotFoundException\n }\n\n return _result136.GetSuccess(), nil\n}", "func (rp *ResourcePool) Acquire(ctx context.Context) (resource Resource, err error) {\n\tif rp.IsClosed() {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tdefault:\n\t}\n\n\tacq := acquireMessage{\n\t\tctx: ctx,\n\t\trch: make(chan Resource),\n\t\tech: make(chan error),\n\t}\n\n\tselect {\n\tcase rp.acqchan <- acq:\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n\n\tselect {\n\tcase resource := 
<-acq.rch:\n\t\treturn resource, nil\n\tcase err := <-acq.ech:\n\t\treturn nil, err\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}", "func (bp *BufReaderPool) Get(r io.Reader) *BufReader {\n\tvar item *BufReader\n\tselect {\n\tcase item = <-bp.pool:\n\t\titem.Reset(r)\n\tdefault:\n\t\titem = &BufReader{\n\t\t\tReader: bufio.NewReaderSize(r, bp.rdSize),\n\t\t\tpool: bp,\n\t\t}\n\t}\n\treturn item\n}", "func (rc *RedisClient) GetObj(key string) (interface{}, error) {\n\tconn := rc.pool.Get()\n\tdefer conn.Close()\n\treply, errDo := conn.Do(\"GET\", key)\n\treturn reply, errDo\n}", "func (c *ChannelPool) Get() (net.Conn, error) {\n\tconns := c.getConns()\n\tif conns == nil {\n\t\treturn nil, ErrClosed\n\t}\n\n\tselect {\n\tcase conn := <-conns:\n\t\tif conn == nil {\n\t\t\treturn nil, ErrClosed\n\t\t}\n\t\treturn conn, nil\n\tdefault:\n\t\treturn c.factory()\n\t}\n}", "func (mio *Mio) GetPool() string {\n if mio.obj == nil {\n return \"\"\n }\n p := mio.objPool\n\n return fmt.Sprintf(\"0x%x:0x%x\", p.f_container, p.f_key)\n}", "func GetResource(name, domainName string) (*Resource, error) {\n\trsc := NewResource(name)\n\treq, err := client.NewRequest(\n\t\tConfig,\n\t\t\"GET\",\n\t\tfmt.Sprintf(\"/config-gtm/v1/domains/%s/resources/%s\", domainName, name),\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsetVersionHeader(req, schemaVersion)\n\n\tprintHttpRequest(req, true)\n\n\tres, err := client.Do(Config, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprintHttpResponse(res, true)\n\n\tif client.IsError(res) && res.StatusCode != 404 {\n\t\treturn nil, client.NewAPIError(res)\n\t} else if res.StatusCode == 404 {\n\t\treturn nil, CommonError{entityName: \"Resource\", name: name}\n\t} else {\n\t\terr = client.BodyJSON(res, rsc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn rsc, nil\n\t}\n}", "func (module *ResourceModule) GetResourceByName(name string) (*Resource, error) {\n\tv, ok := (*module.ResourceMap)[name]\n\tif 
ok {\n\t\treturn &v, nil\n\t}\n\n\treturn nil, errors.New(\"Resource does not exist \")\n}", "func Get() *pgxpool.Pool {\n\tif db == nil {\n\t\tInit()\n\t}\n\treturn db\n}", "func (c *Client) Resource(path string) *Resource {\n\tr := &Resource{u: c.URL()}\n\tif !isAPI(path) {\n\t\tpath = Path + path\n\t}\n\tr.u.Path = path\n\treturn r\n}", "func (n *resPool) GetDemand() *scalar.Resources {\n\tn.RLock()\n\tdefer n.RUnlock()\n\treturn n.demand\n}", "func (bp *BufferPool) Get() *bytes.Buffer {\n\tvar buff *bytes.Buffer\n\tselect {\n\tcase buff = <-bp.pool:\n\tdefault:\n\t\tbuff = &bytes.Buffer{}\n\t}\n\treturn buff\n}", "func (c *ImageRegistryCollection) Get(name common.ID) (*ImageRegistryResource, error) {\n\tr := c.New()\n\tif err := c.core.db.get(c, name, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}", "func GetResource(t *testing.T, u string) (*http.Response, []byte) {\n\treturn GetResourceWithBasicAuth(t, u, \"\", \"\")\n}", "func (verber *resourceVerber) Get(kind string, namespaceSet bool, namespace string, name string) (runtime.Object, error) {\n\tclient, resourceSpec, err := verber.getResourceSpecFromKind(kind, namespaceSet)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &runtime.Unknown{}\n\treq := client.Get().Resource(resourceSpec.Resource).Name(name).SetHeader(\"Accept\", \"application/json\")\n\n\tif resourceSpec.Namespaced {\n\t\treq.Namespace(namespace)\n\t}\n\n\terr = req.Do(context.TODO()).Into(result)\n\treturn result, err\n}", "func getPool() *redis.Pool {\n\tif(pool == nil){\n pool = &redis.Pool{\n\t\t\tMaxIdle: 3,\n\t\t\tIdleTimeout: 240 * time.Second,\n\t\t\tDial: func () (redis.Conn, error) { return redis.Dial(\"tcp\", redisUrlServer) },\n\t\t }\n\t}\n\n\treturn pool\t\n}", "func (p *Pool) Get() (net.Conn, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.conns == nil {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tvar err error\n\tselect {\n\tcase conn := <-p.conns:\n\t\tif err = p.heartbeat(conn); err != nil 
{\n\t\t\tfmt.Println(\"detected connection closed\")\n\t\t\tconn, err = p.factory()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tif conn == nil {\n\t\t\tconn, err = p.factory()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t}\n\n\t\treturn conn, nil\n\tdefault:\n\t\treturn p.factory()\n\t}\n}", "func (m *ClientPool) Get(addr string) rpcClient {\n\tfun := \"ClientPool.Get -->\"\n\n\tpo := m.getPool(addr)\n\tvar c rpcClient\n\tselect {\n\tcase c = <-po:\n\t\tslog.Tracef(\"%s get: %s len:%d\", fun, addr, len(po))\n\tdefault:\n\t\tc = m.Factory(addr)\n\t}\n\treturn c\n}", "func (r *ResourceHandler) GetResource(scope ResourceScope, options ...URIOption) (*models.Resource, error) {\n\tr.ensureHandlerIsSet()\n\treturn r.resourceHandler.GetResource(context.TODO(), toV2ResourceScope(scope), v2.ResourcesGetResourceOptions{URIOptions: toV2URIOptions(options)})\n}", "func GetUserPool(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *UserPoolState, opts ...pulumi.ResourceOption) (*UserPool, error) {\n\tvar resource UserPool\n\terr := ctx.ReadResource(\"aws-native:cognito:UserPool\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func Get(ctx context.Context) (redis.Conn, error) {\n\tif p := GetPool(ctx); p != nil {\n\t\treturn p.GetContext(ctx)\n\t}\n\treturn nil, ErrNotConfigured\n}", "func (a *ResourcepoolApiService) GetResourcepoolLeaseResourceByMoid(ctx context.Context, moid string) ApiGetResourcepoolLeaseResourceByMoidRequest {\n\treturn ApiGetResourcepoolLeaseResourceByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (p *connPool) Get() (cn *conn, isNew bool, err error) {\n\tif p.closed() {\n\t\terr = errClosed\n\t\treturn\n\t}\n\n\t// Fetch first non-idle connection, if available.\n\tif cn = p.First(); cn != nil {\n\t\treturn\n\t}\n\n\t// Try to create a new one.\n\tif p.conns.Reserve() {\n\t\tcn, err = p.new()\n\t\tif 
err != nil {\n\t\t\tp.conns.Remove(nil)\n\t\t\treturn\n\t\t}\n\t\tp.conns.Add(cn)\n\t\tisNew = true\n\t\treturn\n\t}\n\n\t// Otherwise, wait for the available connection.\n\tif cn = p.wait(); cn != nil {\n\t\treturn\n\t}\n\n\terr = errPoolTimeout\n\treturn\n}", "func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource {\n\tkey := addr.String()\n\tswitch addr.Mode {\n\tcase addrs.ManagedResourceMode:\n\t\treturn m.ManagedResources[key]\n\tcase addrs.DataResourceMode:\n\t\treturn m.DataResources[key]\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (p *pool) get() (*channel, error) {\n\tif p.closed {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\tactiveChannel, ok := <-p.readyChannel\n\tif !ok {\n\t\treturn nil, ErrPoolClosed\n\t}\n\n\treturn activeChannel, nil\n}", "func (rep *Repository) Find(ctx context.Context, request Request) (*Resource, error) {\n\tvar (\n\t\terr error\n\t\tres *Resource\n\t)\n\n\tv, ok := rep.handles.Load(request.ID)\n\tif ok {\n\t\thd := v.(*mutex.Mutex)\n\t\tif !hd.Wait(ctx) {\n\t\t\terr = fmt.Errorf(\"wait context err\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err = rep.findFromCache(ctx, request)\n\tif err != nil { // reset resource to nil if is err from cache\n\t\tif rep.errHandler == nil || !rep.errHandler.ThroughOnCacheErr(ctx, request, err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = nil\n\t}\n\n\tif res == nil {\n\t\tres, err = rep.throughToStore(ctx, request)\n\t\tif err != nil {\n\t\t\tres, err = rep.downgrade(ctx, request, err) // try downgrade data\n\t\t} else if res == nil {\n\t\t\tres, err = rep.downgrade(ctx, request, nil) // try downgrade data if data not exists\n\t\t}\n\t}\n\treturn res, err\n}", "func getResource(ctx context.Context, client resource.Interface, obj runtime.Object) (runtime.Object, error) {\n\tobjMeta := resource.MustToMeta(obj)\n\n\tif objMeta.GetName() != \"\" || objMeta.GetGenerateName() == \"\" {\n\t\treturn client.Get(ctx, objMeta.GetName(), metav1.GetOptions{})\n\t}\n\n\tlist, err := client.List(ctx, 
metav1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(objMeta.GetLabels()).String(),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcount := len(list)\n\tif count == 0 {\n\t\treturn nil, apierrors.NewNotFound(schema.GroupResource{}, \"\")\n\t}\n\n\tif count != 1 {\n\t\treturn nil, fmt.Errorf(\"found %d resources with labels %#v, expected 1\", count, objMeta.GetLabels())\n\t}\n\n\treturn list[0], nil\n}", "func (s *Service) GetHalResource(ctx context.Context, resourceName, url string) ([]byte, *status.Status) {\n\treturn s.getHalResource(ctx, resourceName, url, s.client.Get)\n}", "func (p *Pool) Get() (*PooledConnection, error) {\n\t// Lock the pool to keep the kids out.\n\tp.mu.Lock()\n\n\t// Clean this place up.\n\tp.purge()\n\n\t// Wait loop\n\tfor {\n\t\t// Try to grab first available idle connection\n\t\tif conn := p.first(); conn != nil {\n\n\t\t\t// Remove the connection from the idle slice\n\t\t\tp.idle = append(p.idle[:0], p.idle[1:]...)\n\t\t\tp.active++\n\t\t\tp.mu.Unlock()\n\t\t\tpc := &PooledConnection{Pool: p, Client: conn.pc.Client}\n\t\t\treturn pc, nil\n\n\t\t}\n\n\t\t// No idle connections, try dialing a new one\n\t\tif p.MaxActive == 0 || p.active < p.MaxActive {\n\t\t\tp.active++\n\t\t\tdial := p.Dial\n\n\t\t\t// Unlock here so that any other connections that need to be\n\t\t\t// dialed do not have to wait.\n\t\t\tp.mu.Unlock()\n\n\t\t\tdc, err := dial()\n\t\t\tif err != nil {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tp.release()\n\t\t\t\tp.mu.Unlock()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpc := &PooledConnection{Pool: p, Client: dc}\n\t\t\treturn pc, nil\n\t\t}\n\n\t\t//No idle connections and max active connections, let's wait.\n\t\tif p.cond == nil {\n\t\t\tp.cond = sync.NewCond(&p.mu)\n\t\t}\n\n\t\tp.cond.Wait()\n\t}\n}", "func (s *Session) ChildResourcePool(ctx context.Context, resourcePoolName string) (*object.ResourcePool, error) {\n\tresourcePool, err := s.findChildEntity(ctx, s.resourcePool, resourcePoolName)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\trp, ok := resourcePool.(*object.ResourcePool)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"ResourcePool %q is not expected ResourcePool type but a %T\", resourcePoolName, resourcePool)\n\t}\n\treturn rp, nil\n}", "func (p *Pool) Get() (*redisClient, error) {\n\tselect {\n\tcase rc := <-p.pool:\n\t\tif p.clientTimeout > 0 && time.Now().Sub(rc.createdTime) > p.clientTimeout {\n\t\t\tif err := rc.Conn.Close(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn p.generate()\n\t\t}\n\t\treturn rc, nil\n\tdefault:\n\t\treturn p.generate()\n\t}\n}", "func (p *Generic) Get(sz int) interface{} {\r\n\tvar c interface{}\r\n\tselect {\r\n\tcase c = <-p.pool:\r\n\tdefault:\r\n\t\tc = p.fn(sz)\r\n\t}\r\n\r\n\treturn c\r\n}", "func (a *ResourcepoolApiService) GetResourcepoolLeaseResourceByMoidExecute(r ApiGetResourcepoolLeaseResourceByMoidRequest) (*ResourcepoolLeaseResource, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *ResourcepoolLeaseResource\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ResourcepoolApiService.GetResourcepoolLeaseResourceByMoid\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/resourcepool/LeaseResources/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// 
to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/csv\", \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (rp *ResourcePoolImpl) GetDevicePool() map[string]types.HostDevice {\n\treturn rp.devicePool\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetResourcePool() string {\n\tif o == nil || o.ResourcePool == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ResourcePool\n}" ]
[ "0.7809274", "0.7043481", "0.7004939", "0.6892022", "0.6866254", "0.6643891", "0.66169924", "0.658725", "0.65026814", "0.64851373", "0.64733285", "0.64366895", "0.6374926", "0.6359933", "0.63093024", "0.62816465", "0.62720895", "0.6255434", "0.62535197", "0.6252041", "0.62392825", "0.62340987", "0.62233895", "0.62197596", "0.6196336", "0.61825234", "0.61634225", "0.6162087", "0.61457515", "0.6128608", "0.61018264", "0.6100586", "0.60969365", "0.60877633", "0.6087369", "0.608183", "0.607069", "0.6049487", "0.60437053", "0.6025697", "0.601124", "0.60093355", "0.5996355", "0.59841126", "0.5976425", "0.59731615", "0.5952185", "0.5950778", "0.5928999", "0.5921483", "0.5919707", "0.5915129", "0.5912015", "0.5899786", "0.5888728", "0.5865359", "0.58502495", "0.5850001", "0.5848239", "0.5844187", "0.5842758", "0.58397", "0.5831986", "0.58286875", "0.5828408", "0.58259", "0.5803111", "0.5800367", "0.5794152", "0.5783091", "0.5779813", "0.5778581", "0.57700837", "0.57685", "0.57572395", "0.57542706", "0.5725018", "0.57207793", "0.5720055", "0.57175887", "0.57148784", "0.5701144", "0.569339", "0.56863046", "0.5676549", "0.5659022", "0.5653176", "0.56513584", "0.5650134", "0.56368786", "0.56298923", "0.5628704", "0.5627676", "0.5627054", "0.5614421", "0.561313", "0.56126934", "0.5610516", "0.5610325", "0.5597188" ]
0.57399076
76
Put a resource to resource pool.
func (c *ChannelPool) Put(conn interface{}) error { if conn == nil { return nil } c.mu.Lock() defer c.mu.Unlock() if c.conns == nil { return c.Close(conn) } select { case c.conns <- &IdleConn{conn: conn, t: time.Now()}: return nil default: return c.Close(conn) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *DryccCmd) ResourcePut(appID, plan string, name string, params []string) error {\n\ts, appID, err := load(d.ConfigFile, appID)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Printf(\"Updating %s to %s... \", name, appID)\n\n\tparamsMap, err := parseParams(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tquit := progress(d.WOut)\n\tresource := api.Resource{\n\t\tPlan: plan,\n\t\tOptions: paramsMap,\n\t}\n\t_, err = resources.Put(s.Client, appID, name, resource)\n\tquit <- true\n\t<-quit\n\tif d.checkAPICompatibility(s.Client, err) != nil {\n\t\treturn err\n\t}\n\n\td.Println(\"done\")\n\treturn nil\n}", "func (rp *resourcePool) Put(v interface{}) bool {\n\trp.Lock()\n\tdefer rp.Unlock()\n\tif rp.expiredFn(v) {\n\t\trp.closeFn(v)\n\t\trp.totalSize--\n\t\treturn false\n\t}\n\n\trp.add(&resourcePoolElement{value: v})\n\treturn true\n}", "func (c *channelPool) put(rconn RpcAble) error {\n\tif rconn == nil {\n\t\treturn errors.New(\"rconn is nil. rejecting\")\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.rconns == nil {\n\t\t// pool is closed, close passed rconn\n\t\treturn rconn.Close()\n\t}\n\n\t// put the resource back into the pool. 
If the pool is full, this will\n\t// block and the default case will be executed.\n\tselect {\n\tcase c.rconns <- rconn:\n\t\treturn nil\n\tdefault:\n\t\t// pool is full, close passed rconn\n\t\treturn rconn.Close()\n\t}\n}", "func (bp *Pool) Put(elem interface{}) {\n\tbp.pool.Offer(elem)\n}", "func (p *ObjectPool) Put(o PoolObject) {\n\to.Reset()\n\tp.pool.Put(o)\n}", "func (q *Queue) AddResource(name string) (string, error) {\n\t// Check that the address is already in use\n\tfor _, v := range q.pool {\n\t\tif v.Name == name && v.Status != common.STATUS_QUIT {\n\t\t\t// We have found a resource with the same address so error\n\t\t\tlog.WithField(\"name\", name).Debug(\"Resource already exists.\")\n\t\t\treturn \"\", errors.New(\"Resource already exists!\")\n\t\t}\n\t}\n\n\t// Create empty resource\n\tres := NewResource()\n\n\tres.Name = name\n\tres.Status = common.STATUS_PENDING\n\n\t//Generate a UUID for the resource\n\tresourceuuid := uuid.New()\n\n\t// Add resource to resource pool with generated UUID\n\tq.Lock()\n\tq.pool[resourceuuid] = res\n\tq.Unlock()\n\n\treturn resourceuuid, nil\n}", "func (rc *ResourceCollection) Put(ref ResourceReference, resource Resource) {\n\trc.ensureResourcePathExists(ref.Domain, ref.Kind)\n\n\tother := NewResourceCollection()\n\tother.ensureResourcePathExists(ref.Domain, ref.Kind)\n\tother.collection[ref.Domain][ref.Kind][ref.ID] = resource\n\n\trc.Merge(other)\n}", "func Put(r *bufio.Reader) {\n\tdefaultPool.Put(r)\n}", "func (p *Pool) AddResource(res Resource) error {\n\tif !res.IsHealthy() {\n\t\treturn errors.New(\"Cannot add unhealthy resources to the pool\")\n\t}\n\tp.mutex.Lock()\n\tp.resourceCount++\n\tp.mutex.Unlock()\n\t//If there is a notifyFunc, send the notification\n\tif p.MonitorFunc != nil {\n\t\tgo p.MonitorFunc(newActionMsg(ResourceAdded))\n\t}\n\tgo func() {\n\t\tp.resourceQueue <- res\n\t}()\n\treturn nil\n}", "func (p *Pool) Put(rc *redisClient) {\n\tselect {\n\tcase p.pool <- 
rc:\n\tdefault:\n\t\trc.Conn.Close()\n\t}\n}", "func Put(buf []byte) {\n\tbuiltinPool.Put(buf)\n}", "func (p *Pool) Put(x interface{}) {\n\tp.init()\n\tp.p.Put(x)\n}", "func (c *boundedPool) put(conn net.Conn) error {\n\tif conn == nil {\n\t\treturn errors.New(\"connection is nil. rejecting\")\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.conns == nil {\n\t\t// pool is closed, close passed connection\n\t\treturn conn.Close()\n\t}\n\n\t// put the resource back into the pool. If the pool is full, this will\n\t// block and the default case will be executed.\n\tselect {\n\tcase c.conns <- conn:\n\t\treturn nil\n\tdefault:\n\t\t// pool is full, close passed connection\n\t\treturn conn.Close()\n\t}\n}", "func (p *Pool) Put(x interface{})", "func (k *K8sStore) AddResource(obj interface{}) {\n\tkey, ns, labels := resourceKey(obj)\n\tnewObj := k.resourceCtor(obj, k.ctorConfig)\n\tglog.V(11).Infof(\"%s added: %s\", k.resourceName, key)\n\tk.dataMutex.Lock()\n\tk.data[key] = newObj\n\tk.dataMutex.Unlock()\n\tk.updateLabelMap(ns, labels, 1)\n\n\terr := k.AppendNewObject(newObj)\n\tif err != nil {\n\t\tglog.Warningf(\"Error when appending new object to current state: %v\", err)\n\t}\n}", "func Put(bw *bufio.Writer) {\n\tbwPool.Put(bw)\n}", "func (p *Generic) Put(c interface{}) {\r\n\tselect {\r\n\tcase p.pool <- c:\r\n\tdefault:\r\n\t}\r\n}", "func (p *Pool[T]) Put(item T) {\n\tp.p.Put(item)\n}", "func (rp *ResourcePool) Release(resource Resource) {\n\tif rp.IsClosed() {\n\t\tif !resource.IsClosed() {\n\t\t\tresource.Close()\n\t\t}\n\n\t\tatomic.AddInt64(&rp.numResources, -1)\n\n\t\treturn\n\t}\n\n\trel := releaseMessage{\n\t\tr: resource,\n\t}\n\n\trp.rchan <- rel\n}", "func Put(buf *bytes.Buffer) {\n\tassert.NotNil(&buf)\n\tbuf.Reset()\n\tbuf.Grow(256)\n\tgPool.Put(buf)\n}", "func (m *EducationSubmissionResource) SetResource(value EducationResourceable)() {\n err := m.GetBackingStore().Set(\"resource\", value)\n if err != nil {\n panic(err)\n }\n}", "func 
(resourceSet ResourceSet) Add(resource string) {\n\tresourceSet[resource] = struct{}{}\n}", "func (p SimplePool) Put(conn net.Conn) {\r\n\t//fmt.Println(\"<< Put\")\r\n\tconn.Close()\r\n}", "func (p *BufferBucketPool) Put(bucket *BufferBucket) {\n\tp.pool.Put(bucket)\n}", "func (verber *resourceVerber) Put(kind string, namespaceSet bool, namespace string, name string,\n\tobject *runtime.Unknown) error {\n\n\tclient, resourceSpec, err := verber.getResourceSpecFromKind(kind, namespaceSet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := client.Put().\n\t\tResource(resourceSpec.Resource).\n\t\tName(name).\n\t\tSetHeader(\"Content-Type\", \"application/json\").\n\t\tBody([]byte(object.Raw))\n\n\tif resourceSpec.Namespaced {\n\t\treq.Namespace(namespace)\n\t}\n\n\treturn req.Do(context.TODO()).Error()\n}", "func (cp *Pool) Put(conn *DBConn) {\n\tp := cp.pool()\n\tif p == nil {\n\t\tpanic(ErrConnPoolClosed)\n\t}\n\tif conn == nil {\n\t\tp.Put(nil)\n\t} else {\n\t\tp.Put(conn)\n\t}\n}", "func (ucp *UDPClientPool) Put(cl interface{}) {\n\tif cl != nil {\n\t\tucp.pool.Put(cl)\n\t}\n}", "func (p *Pool) Put(conn *redis.Client) {\n\tselect {\n\tcase <-p.stopCh:\n\t\tconn.Close()\n\tdefault:\n\t\tif conn.LastCritical == nil {\n\t\t\tselect {\n\t\t\tcase p.pool <- conn:\n\t\t\tdefault:\n\t\t\t\tconn.Close()\n\t\t\t}\n\t\t} else {\n\t\t\tp.replenish(conn.Network, conn.Addr)\n\t\t}\n\t}\n}", "func registerResource(name string, resource corev2.Resource) {\n\ttypeMapMu.Lock()\n\tdefer typeMapMu.Unlock()\n\ttypeMap[name] = resource\n}", "func PutResponseIntoPool(r *Response) { r.Reset(nil); responsePool.Put(r) }", "func (p *Pool) Put(conn net.Conn) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\tif p.conns == nil {\n\t\treturn ErrPoolClosed\n\t}\n\n\tif conn == nil {\n\t\treturn ErrConnClosed\n\t}\n\n\tselect {\n\tcase p.conns <- conn:\n\t\treturn nil\n\tdefault:\n\t\t// pool is full, closing connection\n\t\tconn.Close()\n\t}\n\treturn nil\n}", "func (m *ProgramControl) 
SetResource(value ProgramResourceable)() {\n err := m.GetBackingStore().Set(\"resource\", value)\n if err != nil {\n panic(err)\n }\n}", "func (pool AllocatingPool) ClaimResource(userInput map[string]interface{}, description *string) (*ent.Resource, error) {\n\n\tstrat, err := pool.AllocationStrategy()\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable to retrieve allocation-strategy for pool %d\", pool.ID)\n\t\treturn nil, errors.Wrapf(err,\n\t\t\t\"Unable to claim resource from pool #%d, allocation strategy loading error \", pool.ID)\n\t}\n\n\tps, err := pool.PoolProperties()\n\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable to retrieve pool-properties for pool %d\", pool.ID)\n\t\treturn nil, errors.Wrapf(err,\n\t\t\t\"Unable to claim resource from pool #%d, resource type loading error \", pool.ID)\n\t}\n\n\tpropMap, propErr := convertProperties(ps)\n\n\tif propErr != nil {\n\t\tlog.Error(pool.ctx, propErr, \"Unable to convert value from property\")\n\t\treturn nil, errors.Wrapf(propErr, \"Unable to convert value from property\")\n\t}\n\n\tresourceType, err := pool.ResourceType()\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable retrieve resource type for pool with ID: %d\", pool.ID)\n\t\treturn nil, errors.Wrapf(err,\n\t\t\t\"Unable to claim resource from pool #%d, resource type loading error \", pool.ID)\n\t}\n\n\tvar resourcePool model.ResourcePoolInput\n\tresourcePool.ResourcePoolName = pool.Name\n\n\tcurrentResources, err := pool.loadClaimedResources()\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable retrieve already claimed resources for pool with ID: %d\", pool.ID)\n\t\treturn nil, errors.Wrapf(err,\n\t\t\t\"Unable to claim resource from pool #%d, resource loading error \", pool.ID)\n\t}\n\n\tvar functionName string\n\n\tif strat.Lang == allocationstrategy.LangPy {\n\t\tfunctionName = \"script_fun()\"\n\t} else {\n\t\tfunctionName = \"invoke()\"\n\t}\n\n\tresourceProperties, _ /*TODO do something with logs */, err := 
InvokeAllocationStrategy(\n\t\tpool.invoker, strat, userInput, resourcePool, currentResources, propMap, functionName)\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable to claim resource with pool with ID: %d, invoking strategy failed\", pool.ID)\n\t\treturn nil, errors.Wrapf(err,\n\t\t\t\"Unable to claim resource from pool #%d, allocation strategy \\\"%s\\\" failed\", pool.ID, strat.Name)\n\t}\n\n\t// Query to check whether this resource already exists.\n\t// 1. construct query\n\tquery, err := pool.findResource(RawResourceProps(resourceProperties))\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Cannot query for resource based on pool with ID %d\", pool.ID)\n\t\treturn nil, errors.Wrapf(err, \"Cannot query for resource based on pool #%d and properties \\\"%s\\\"\", pool.ID, resourceProperties)\n\t}\n\n\t// 2. Try to find the resource in DB\n\tfoundResources, err := query.WithProperties().All(pool.ctx)\n\n\t//TODO - what if foundResources is nil ?? do we continue?\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable to retrieve allocated resources for pool %d\", pool.ID)\n\t}\n\n\tif len(foundResources) == 0 {\n\t\t// 3a. Nothing found - create new resource\n\t\tcreated, err := PreCreateResources(pool.ctx, pool.client, []RawResourceProps{resourceProperties},\n\t\t\tpool.ResourcePool, resourceType, resource.StatusClaimed, description)\n\t\tif err != nil {\n\t\t\tlog.Error(pool.ctx, err, \"Unable to create resource in pool %d\", pool.ID)\n\t\t\treturn nil, errors.Wrapf(err, \"Unable to create resource in pool #%d\", pool.ID)\n\t\t}\n\t\tif len(created) > 1 {\n\t\t\t// TODO this seems serious, shouldn't we delete those resources or something more than log it?\n\t\t\tlog.Error(pool.ctx, err, \"Unexpected error creating resource in pool %d\" +\n\t\t\t\t\" multiple resources created (count: %d)\", pool.ID, len(created))\n\t\t\treturn nil, errors.Errorf(\n\t\t\t\t\"Unexpected error creating resource in pool #%d, properties \\\"%s\\\" . 
\"+\n\t\t\t\t\t\"Created %d resources instead of one.\", pool.ID, resourceProperties, len(created))\n\t\t}\n\t\treturn created[0], nil\n\t} else if len(foundResources) > 1 {\n\t\tlog.Error(pool.ctx, err, \"Unable to claim resource for pool ID %d, database contains more than one result\", pool.ID)\n\t\treturn nil, errors.Errorf(\n\t\t\t\"Unable to claim resource with properties \\\"%s\\\" from pool #%d, database contains more than one result\", resourceProperties, pool.ID)\n\t}\n\tres := foundResources[0]\n\t// 3b. Claim found resource if possible\n\tif res.Status == resource.StatusClaimed || res.Status == resource.StatusRetired {\n\t\tlog.Error(pool.ctx, err, \"Resource with ID %d is in an incorrect state %+v\", res.ID, res.Status)\n\t\treturn nil, errors.Errorf(\"Resource #%d is in incorrect state \\\"%s\\\"\", res.ID, res.Status)\n\t} else if res.Status == resource.StatusBench {\n\t\tcutoff := res.UpdatedAt.Add(time.Duration(pool.DealocationSafetyPeriod) * time.Second)\n\t\tif time.Now().Before(cutoff) {\n\t\t\tlog.Error(pool.ctx, err, \"Unable to claim resource %d from pool %d, resource cannot be claimed before %s\", res.ID, pool.ID, cutoff)\n\t\t\treturn nil, errors.Errorf(\n\t\t\t\t\"Unable to claim resource #%d from pool #%d, resource cannot be claimed before %s\", res.ID, pool.ID, cutoff)\n\t\t}\n\t}\n\tres.Status = resource.StatusClaimed\n\terr = pool.client.Resource.\n\t\tUpdateOne(res).\n\t\tSetStatus(res.Status).\n\t\tSetNillableDescription(description).\n\t\tExec(pool.ctx)\n\n\t//TODO what does this mean? 
should we somehow rollback everything that transpired until this point??\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Cannot update resource %d\", res.ID)\n\t\treturn nil, errors.Wrapf(err, \"Cannot update resource #%d\", res.ID)\n\t}\n\treturn res, nil\n}", "func (p *Pool) Put(b *Buffer) {\n\tp.lock.Lock()\n\tb.next = p.free\n\tp.free = b\n\tp.freeNum++\n\tp.lock.Unlock()\n\treturn\n}", "func (p Pool) Put(buf *logf.Buffer) {\n\tp.p.Put(buf)\n}", "func (p *Pool) Put(task *Task) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif p.status == STOPED {\n\t\treturn ErrPoolAlreadyClosed\n\t}\n\n\t// run worker\n\tif p.GetRunningWorkers() < p.GetCap() {\n\t\tp.run()\n\t}\n\n\t// send task\n\tif p.status == RUNNING {\n\t\tp.chTask <- task\n\t}\n\n\treturn nil\n}", "func (p *keyPool) Put(key *aero.Key) {\n\tp.Lock()\n\tp.buffer = append(p.buffer, key)\n\tp.Unlock()\n}", "func (f *Facade) AddResourcePool(ctx datastore.Context, entity *pool.ResourcePool) error {\n\tglog.V(2).Infof(\"Facade.AddResourcePool: %+v\", entity)\n\tif exists, err := f.GetResourcePool(ctx, entity.ID); err != nil {\n\t\treturn err\n\t} else if exists != nil {\n\t\treturn fmt.Errorf(\"pool already exists: %s\", entity.ID)\n\t}\n\n\tvar err error\n\tec := newEventCtx()\n\tdefer f.afterEvent(afterPoolAdd, ec, entity, err)\n\n\tif err = f.beforeEvent(beforePoolAdd, ec, entity); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Get rid of me when we have front-end functionality of pool realms\n\tif entity.Realm == \"\" {\n\t\tentity.Realm = defaultRealm\n\t}\n\n\tnow := time.Now()\n\tentity.CreatedAt = now\n\tentity.UpdatedAt = now\n\tif err = f.poolStore.Put(ctx, pool.Key(entity.ID), entity); err != nil {\n\t\treturn err\n\t}\n\terr = zkAPI(f).AddResourcePool(entity)\n\treturn err\n}", "func setResource(alloc types.ResourceList, res map[string]int64, grpres map[string]int64) {\n\t// set resource\n\tfor key, val := range res {\n\t\tsetRes(alloc, key, val)\n\t}\n\t// set group resource\n\tfor key, val := 
range grpres {\n\t\tsetGrpRes(alloc, key, val)\n\t}\n}", "func (bp *BufReaderPool) put(r *BufReader) {\n\tif r != nil {\n\t\tselect {\n\t\tcase bp.pool <- r:\n\t\tdefault:\n\t\t\t// do nothing, just discard\n\t\t}\n\t}\n}", "func Put(msg *Message) {\n\tPool.Put(msg)\n}", "func Register(name string, r Closer) error {\n\tif _, ok := resources[name]; ok {\n\t\tlog4go.Warn(\"resource[%v] update\", name)\n\t}\n\tresources[name] = r\n\treturn nil\n}", "func (q *Queue) ConnectResource(resUUID, addr string, tlsconfig *tls.Config) error {\n\tq.RLock()\n\tlocalRes := q.pool[resUUID]\n\tq.RUnlock()\n\n\t// First, setup the address we're going to connect to\n\tlocalRes.Address = addr\n\t// Then store a local version in the event we need to add the default port\n\ttarget := localRes.Address\n\n\t//Check to see if we have a port, otherwise use the default 9443\n\tif !strings.Contains(target, \":\") {\n\t\ttarget += \":9443\"\n\t}\n\tlog.WithField(\"addr\", target).Info(\"Connecting to resource\")\n\n\t// Dial the target and see if we get a connection in 15 seconds\n\t/*\n\t\tconn, err := net.DialTimeout(\"tcp\", target, time.Second*15)\n\t\tif err != nil {\n\t\t\tlog.WithField(\"addr\", target).Debug(\"Unable to dial the resource.\")\n\t\t\treturn err\n\t\t}\n\n\t\t// Now we need to set the ServerName in the tls config. 
We'll make a copy\n\t\t// to make sure we don't mess with anything\n\t\tlocalConfig := *tlsconfig\n\t\tlocalConfig.ServerName = localRes.Address\n\n\t\t// Now let's build a TLS connection object and force a handshake to make\n\t\t// sure it's working\n\t\ttlsConn := tls.Client(conn, &localConfig)\n\t\terr = tlsConn.Handshake()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"addr\": target,\n\t\t\t\t\"servername\": localRes.Address,\n\t\t\t}).Debug(\"An error occured while building the TLS connection\")\n\t\t\treturn err\n\t\t}\n\t*/\n\n\tdialer := &net.Dialer{\n\t\tTimeout: 15 * time.Second,\n\t}\n\n\tconn, err := tls.DialWithDialer(dialer, \"tcp\", target, tlsconfig)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"addr\": target,\n\t\t\t\"servername\": localRes.Address,\n\t\t}).Debug(\"An error occured while building the TLS connection\")\n\t\treturn err\n\t}\n\n\t// Build the RPC client for the resource\n\tlocalRes.Client = rpc.NewClient(conn)\n\tif err != nil {\n\t\tlog.WithField(\"addr\", target).Debug(\"An error occured while creating new client\")\n\t\treturn err\n\t}\n\n\t// Let the user know we connected\n\tlog.WithField(\"target\", localRes.Address).Info(\"Successfully connected to resource\")\n\tlocalRes.Status = common.STATUS_RUNNING\n\n\tq.Lock()\n\tq.pool[resUUID] = localRes\n\tq.Unlock()\n\n\t// Now let's make sure the tools and hardware are loaded\n\tq.LoadRemoteResourceHardware(resUUID)\n\tq.LoadRemoteResourceTools(resUUID)\n\n\t// Call out to the registered hooks about resource creation\n\tgo HookOnResourceConnect(Hooks.ResourceConnect, resUUID, localRes)\n\n\treturn nil\n}", "func (r *ReconcileGrafana) deployResource(cr *i8ly.Grafana, resource runtime.Object, resourceName string) error {\n\t// Try to find the resource, it may already exist\n\tselector := types.NamespacedName{\n\t\tNamespace: cr.Namespace,\n\t\tName: resourceName,\n\t}\n\terr := r.client.Get(context.TODO(), selector, resource)\n\n\t// The resource 
exists, do nothing\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t// Resource does not exist or something went wrong\n\tif errors.IsNotFound(err) {\n\t\tlog.Info(fmt.Sprintf(\"Resource %s does not exist, creating now\", resourceName))\n\t} else {\n\t\treturn err\n\t}\n\n\t// Set the CR as the owner of this resource so that when\n\t// the CR is deleted this resource also gets removed\n\terr = controllerutil.SetControllerReference(cr, resource.(v1.Object), r.scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.client.Create(context.TODO(), resource)\n}", "func (p *PacketPool) Put(packet *Packet) {\n\tif packet.Origin != NoOrigin {\n\t\tpacket.Origin = NoOrigin\n\t}\n\tif p.tlmEnabled {\n\t\ttlmPacketPoolPut.Inc()\n\t\ttlmPacketPool.Dec()\n\t}\n\tp.pool.Put(packet)\n}", "func (sp *StackPackage) AddResource(filepath string, sr StackResource) {\n\tsp.Resources[filepath] = sr\n}", "func (c *cConn) Put() {\n\tif c.fd < 0 {\n\t\treturn\n\t}\n\tC.fd_pool_put(c.conn, C.int(c.fd))\n\tc.fd = -1\n\tC.fd_pool_free_conn(c.conn)\n\tc.conn = nil\n}", "func Put(b *[]byte) {\n\tif b == nil {\n\t\treturn\n\t}\n\tcapacity := cap(*b)\n\tid, poolCapacity := getPoolIDAndCapacity(capacity)\n\tif capacity <= poolCapacity {\n\t\tbb := (*b)[:0]\n\t\tpools[id].Put(&bb)\n\t}\n}", "func (p *IdlePool) Put(c io.Closer) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tnow := time.Now()\n\tn := len(p.elems)\n\ti0 := rand.Intn(n)\n\tfor i := 0; i < n; i++ {\n\t\tj := (i0 + i) % n\n\t\tif p.elems[j] == nil {\n\t\t\tp.elems[j] = c\n\t\t\tp.times[j] = now\n\t\t\treturn\n\t\t}\n\t}\n\tp.elems[i0].Close()\n\tp.elems[i0] = c\n\tp.times[i0] = now\n}", "func (s *Server) Add(ctx context.Context, resource *cap.Resource) (*cap.Resource, error) {\n\t// Start a new span.\n\tspan, sctx := opentracing.StartSpanFromContext(ctx, \"Server::Add\")\n\tdefer span.Finish()\n\n\tlog.WithField(\"uri\", resource.Uri).Info(\"Add resource\")\n\n\tlog.WithFields(log.Fields{\n\t\t\"uri\": resource.Uri,\n\t\t\"mime\": 
resource.MimeType,\n\t\t\"checksum\": resource.Checksum(),\n\t}).Debug(\"Processing resource\")\n\n\t// If we have a URL resource, just return it the way it is.\n\tif resource.MimeType == \"application/x-url\" {\n\t\treturn resource, nil\n\t}\n\n\t// Identify the destination filename.\n\turi, err := url.Parse(resource.Uri)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to parse URI\")\n\t\traven.CaptureError(err, nil)\n\t\text.Error.Set(span, true)\n\t\tspan.LogFields(otlog.Error(err))\n\t\treturn nil, err\n\t}\n\n\tvar filename string\n\n\tif resource.Checksum() == \"\" {\n\t\tfilename = strings.ToLower(filepath.Base(uri.Path))\n\t} else {\n\t\tfilename = strings.ToLower(fmt.Sprintf(\"%s%s\", resource.Checksum(), filepath.Ext(uri.Path)))\n\t}\n\n\tlog.WithField(\"filename\", filename).Info(\"Generated filename\")\n\n\tresponse := &cap.Resource{\n\t\tUri: filename,\n\t\tMimeType: resource.MimeType,\n\t\tDigest: resource.Checksum(),\n\t\tSize: resource.Size,\n\t\tDescription: resource.Description,\n\t}\n\n\t// Determine if we have to fetch the file.\n\thas, err := s.Storage.Has(sctx, filename)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to determine if file already exists\")\n\t\traven.CaptureError(err, nil)\n\t\text.Error.Set(span, true)\n\t\tspan.LogFields(otlog.Error(err))\n\t\treturn nil, err\n\t}\n\n\tif !has {\n\t\t// If we have a DerefUri, decode + write that to file.\n\t\tif len(resource.DerefUri) > 0 {\n\t\t\tdata := bytes.NewReader(resource.DerefUri)\n\t\t\terr = s.Storage.Add(sctx, filename, resource.MimeType, data)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Error(\"Failed to write DerefUri resource\")\n\t\t\t\traven.CaptureError(err, nil)\n\t\t\t\text.Error.Set(span, true)\n\t\t\t\tspan.LogFields(otlog.Error(err))\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else if uri.Hostname() != \"\" {\n\t\t\t// If we have a URL, fetch it.\n\t\t\tdata, err := fetch(sctx, uri)\n\n\t\t\t// In the rare instance the resource doesn't 
exist,\n\t\t\t// we're just gonna ignore that failure and pretend it did.\n\t\t\t// TODO: Actually handle this in a smarter way.\n\t\t\tif err == ErrNotFound {\n\t\t\t\tlog.WithError(err).Error(\"The resource did not exist\")\n\t\t\t\traven.CaptureError(err, nil)\n\t\t\t\text.Error.Set(span, true)\n\t\t\t\tspan.LogFields(otlog.Error(err))\n\n\t\t\t\treturn response, nil\n\t\t\t} else if err != nil {\n\t\t\t\tlog.WithError(err).Error(\"Failed to fetch resource\")\n\t\t\t\traven.CaptureError(err, nil)\n\t\t\t\text.Error.Set(span, true)\n\t\t\t\tspan.LogFields(otlog.Error(err))\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdefer data.Close()\n\n\t\t\terr = s.Storage.Add(sctx, filename, resource.MimeType, data)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Error(\"Failed to write resource\")\n\t\t\t\traven.CaptureError(err, nil)\n\t\t\t\text.Error.Set(span, true)\n\t\t\t\tspan.LogFields(otlog.Error(err))\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// We don't have enough information to generate the resource.\n\t\t\tlog.Warn(\"Unable to save resource... 
Not enough information provided to fetch.\")\n\t\t\treturn nil, errors.New(\"Not enough information to save resource\")\n\t\t}\n\t} else {\n\t\tlog.Debug(\"Resource already exists\")\n\t}\n\n\t// Return the updated resource object\n\treturn response, nil\n}", "func (p *pool) Lock(user, reason, resourceName string) (*ResourceLock, error) {\n\tspecificResource := len(resourceName) > 0\n\n\tfor k, v := range p.locks {\n\t\tif v != nil {\n\t\t\t// it's already in used\n\t\t\tcontinue\n\t\t}\n\n\t\tif !specificResource && k.ExplicitLock {\n\t\t\t// resource can be locked only specifically\n\t\t\tcontinue\n\t\t}\n\n\t\tif specificResource && k.Name != resourceName {\n\t\t\t// specific resource should be locked but it's not this one.\n\t\t\tcontinue\n\t\t}\n\n\t\tresourceLock := &ResourceLock{\n\t\t\tResource: *k,\n\t\t\tUser: user,\n\t\t\tReason: reason,\n\t\t\tLockUntil: time.Now().Add(p.lockDuration),\n\t\t}\n\n\t\tp.mu.Lock()\n\t\tdefer p.mu.Unlock()\n\n\t\tp.locks[k] = resourceLock\n\n\t\tif err := storage.Write(storageKey, k.Name, resourceLock); err != nil {\n\t\t\tlog.Error(errors.Wrap(err, \"error while storing pool lock entry\"))\n\t\t}\n\t\treturn resourceLock, nil\n\t}\n\n\treturn nil, ErrNoResourceAvailable\n}", "func (pool *Pool) Put(c *Conn) {\n\tif c == nil {\n\t\treturn\n\t}\n\n\tif c.err != nil {\n\t\tpool.closeConn(c)\n\t\treturn\n\t}\n\tnow := time.Now()\n\tif pool.MaxConnectionAge > 0 && now.Sub(c.createdAt) > pool.MaxConnectionAge {\n\t\tpool.closeConn(c)\n\t\treturn\n\t}\n\tc.lastUsedAt = now\n\tpool.put(c)\n}", "func (cp *ContextPool) Put(c *Context) {\n\tc.Reset()\n\tselect {\n\tcase cp.c <- c:\n\tdefault: // Discard the buffer if the pool is full.\n\t}\n}", "func (bp bufferPool) Put(b intermediateBuffer) {\n\t// Non-blocking write in case pool has filled up (too many buffers\n\t// returned, none being used).\n\tselect {\n\tcase bp.c <- b:\n\tdefault:\n\t}\n}", "func (s *fakeTracerProviderStore) RegisterResource(res *resource.Resource) {\n\ts.res = 
res\n}", "func (p *ResourcePool) releaseAtomic(wrapper *ResourceWrapper) {\n\n\tp.fMutex.RLock()\n\tdefer p.fMutex.RUnlock()\n\n\t//if this pool is closed when trying to release this resource\n\t//just close the resource\n\tif p.closed == true {\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\twrapper.p = nil\n\t\treturn\n\t}\n\n\t//obtain a lock to return the resource to the pool\n\t//if we end up not needing to, lets undo our lock\n\t//and close the resource\n\tif nAvailable := atomic.AddUint32(&p.nAvailable, 1); nAvailable > p.min {\n\t\t//decriment\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\treturn\n\t}\n\n\tp.resources <- *wrapper\n}", "func PutBuffer(buf *bytes.Buffer) {\n\tbufPool.Put(buf)\n}", "func (c *Client) Put(path string, data, resource interface{}) error {\n\treturn c.CreateAndDo(\"PUT\", path, data, nil, nil, resource)\n}", "func (p *Pool) Put(job interface{}) {\n\tp.jobs <- job\n}", "func (t *Transaction) SetResource(r rsrc.Resource) {\n\tt.res = r.Clone()\n}", "func put(resource string, data []byte) ([]byte, error) {\n\thttpParams := &HTTPParams{\"PUT\", resource, \"application/json\", data}\n\treturn processRequest(httpParams)\n}", "func (n *resPool) AddToDemand(res *scalar.Resources) error {\n\tn.Lock()\n\tdefer n.Unlock()\n\n\tn.demand = n.demand.Add(res)\n\n\tlog.WithFields(log.Fields{\n\t\t\"respool_id\": n.id,\n\t\t\"demand\": n.demand,\n\t}).Debug(\"Current Demand after Adding resources\")\n\n\treturn nil\n}", "func (bp *BufferPool) Put(b *bytes.Buffer) {\n\tb.Reset()\n\tbp.Pool.Put(b)\n}", "func (p *BufferPool) Put(b []byte) {\n\tp.p.Put(b)\n}", "func (rc *ResourceCacheMap) Put(value *unstructured.Unstructured) error {\n\tkey, err := object.UnstructuredToObjMeta(value)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create resource cache key: %w\", err)\n\t}\n\trc.Set(key, value)\n\treturn nil\n}", "func (p 
*DownloadPool) release(d *Downloader) {\n\tp.resource <- d // can never fail ...\n}", "func (p *BufferBucketVersionsPool) Put(buckets *BufferBucketVersions) {\n\tp.pool.Put(buckets)\n}", "func (p *Pool) Put(h *Hasher) {\n\tp.c <- h.bmt\n}", "func (c *Client) Put(endpoint string, resource ...RequestResource) error {\n\twrapper := newWrapper(\"put\", endpoint, resource...)\n\treturn c.do(&wrapper)\n}", "func putBuf(buf []byte) {\n\tbufPool.Put(buf)\n}", "func (c *ChannelPool) Put(conn net.Conn) error {\n\tif conn == nil {\n\t\treturn errors.New(\"connection is nil. rejecting\")\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.conns == nil {\n\t\tconn.Close()\n\t\treturn ErrClosed\n\t}\n\n\tselect {\n\tcase c.conns <- conn:\n\t\treturn nil\n\tdefault:\n\t\tconn.Close()\n\t\treturn ErrFull\n\t}\n}", "func (p *Pool) put(pc *PooledConnection) {\n\tif p.closed {\n\t\tpc.Client.Close()\n\t\treturn\n\t}\n\tidle := &idleConnection{pc: pc, t: time.Now()}\n\t// Prepend the connection to the front of the slice\n\tp.idle = append([]*idleConnection{idle}, p.idle...)\n\n}", "func (s *SyncStorage) LockResource(ns string, resource string, expiration time.Duration, opt *Options) (*SyncStorageLock, error) {\n\tvalue, err := s.randomToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar retryTimer *time.Timer\n\tfor i, attempts := 0, opt.getRetryCount()+1; i < attempts; i++ {\n\t\tok, err := s.getDbBackend(ns).SetNX(getNsPrefix(ns)+resource, value, expiration)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if ok {\n\t\t\treturn &SyncStorageLock{s: s, key: resource, value: value}, nil\n\t\t}\n\t\tif retryTimer == nil {\n\t\t\tretryTimer = time.NewTimer(opt.getRetryWait())\n\t\t\tdefer retryTimer.Stop()\n\t\t} else {\n\t\t\tretryTimer.Reset(opt.getRetryWait())\n\t\t}\n\n\t\tselect {\n\t\tcase <-retryTimer.C:\n\t\t}\n\t}\n\treturn nil, errors.New(\"Lock not obtained\")\n}", "func (c *Client) LockResource(resource interface{}) error {\n\trpath, err := 
resourcePath(resource)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = c.MakeApiRequest(\"PUT\", fmt.Sprintf(\"/1.0/%s/lock_resource\", rpath), nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (l *SyncStorageLock) ReleaseResource(ns string) error {\n\tok, err := l.s.getDbBackend(ns).DelIE(getNsPrefix(ns)+l.key, l.value)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn errors.New(\"Lock not held\")\n\t}\n\treturn nil\n}", "func (p *ProcessCacheEntryPool) Put(pce *model.ProcessCacheEntry) {\n\tpce.Reset()\n\tp.pool.Put(pce)\n}", "func (adm Admin) AddResource(cluster string, resource string, partitions int, stateModel string) error {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Disconnect()\n\n\tif ok, err := conn.IsClusterSetup(cluster); !ok || err != nil {\n\t\treturn ErrClusterNotSetup\n\t}\n\n\tkeys := KeyBuilder{cluster}\n\n\t// make sure the state model def exists\n\tif exists, err := conn.Exists(keys.stateModel(stateModel)); !exists || err != nil {\n\t\treturn ErrStateModelDefNotExist\n\t}\n\n\t// make sure the path for the ideal state does not exit\n\tisPath := keys.idealStates() + \"/\" + resource\n\tif exists, err := conn.Exists(isPath); exists || err != nil {\n\t\tif exists {\n\t\t\treturn ErrResourceExists\n\t\t}\n\t\treturn err\n\t}\n\n\t// create the idealstate for the resource\n\t// is := NewIdealState(resource)\n\t// is.SetNumPartitions(partitions)\n\t// is.SetReplicas(0)\n\t// is.SetRebalanceMode(\"SEMI_AUTO\")\n\t// is.SetStateModelDefRef(stateModel)\n\t// // save the ideal state in zookeeper\n\t// is.Save(conn, cluster)\n\n\tis := NewRecord(resource)\n\tis.SetSimpleField(\"NUM_PARTITIONS\", strconv.Itoa(partitions))\n\tis.SetSimpleField(\"REPLICAS\", strconv.Itoa(0))\n\tis.SetSimpleField(\"REBALANCE_MODE\", strings.ToUpper(\"SEMI_AUTO\"))\n\tis.SetSimpleField(\"STATE_MODEL_DEF_REF\", stateModel)\n\tconn.CreateRecordWithPath(isPath, 
is)\n\n\treturn nil\n}", "func (s *opentelemetryTracerProviderStore) RegisterResource(res *resource.Resource) {\n\ts.res = res\n}", "func TestBytesPool_Put(t *testing.T) {\n\tpool := NewBytesPool(1, size)\n\tbts1 := make([]byte, size)\n\tpool.Put(bts1)\n\tbts2 := make([]byte, size)\n\tpool.Put(bts2)\n\n\tpool.Get()\n}", "func (s *ConcoursePipeline) AddResource(name string, typename string, source map[string]interface{}) {\n\ts.Resources = append(s.Resources, atc.ResourceConfig{\n\t\tName: name,\n\t\tType: typename,\n\t\tSource: source,\n\t})\n}", "func (bufferP *BufferPool) Put(data []byte) {\n\tif data == nil {\n\t\treturn\n\t}\n\tsize := len(data)\n\tif size == util.PacketHeaderSize {\n\t\tatomic.AddInt64(&headBuffersCount, -1)\n\t\tid := atomic.AddUint64(&headBufFreeId, 1)\n\t\tbufferP.putHead(int(id%slotCnt), data)\n\t} else if size == util.BlockSize {\n\t\tatomic.AddInt64(&normalBuffersCount, -1)\n\t\tid := atomic.AddUint64(&normalBufFreecId, 1)\n\t\tbufferP.putNormal(int(id%slotCnt), data)\n\t} else if size == util.DefaultTinySizeLimit {\n\t\tbufferP.tinyPool.Put(data)\n\t\tatomic.AddInt64(&tinyBuffersCount, -1)\n\t}\n}", "func cacheResource(key string, resource interface{}, cache Cache) {\n\tc, ok := resource.(Cacheable)\n\n\tif ok && c.IsCacheable() {\n\t\tcache.Set(key, resource)\n\t}\n}", "func (r *RoleV2) SetResource(kind string, actions []string) {\n\tif r.Spec.Resources == nil {\n\t\tr.Spec.Resources = make(map[string][]string)\n\t}\n\tr.Spec.Resources[kind] = actions\n}", "func (h *egressIPClusterControllerEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error {\n\tswitch h.objType {\n\tcase factory.EgressNodeType:\n\t\tnode := obj.(*v1.Node)\n\t\t// Initialize the allocator on every update,\n\t\t// ovnkube-node/cloud-network-config-controller will make sure to\n\t\t// annotate the node with the egressIPConfig, but that might have\n\t\t// happened after we processed the ADD for that object, hence keep\n\t\t// retrying for all 
UPDATEs.\n\t\tif err := h.eIPC.initEgressIPAllocator(node); err != nil {\n\t\t\tklog.Warningf(\"Egress node initialization error: %v\", err)\n\t\t}\n\t\tnodeEgressLabel := util.GetNodeEgressLabel()\n\t\tnodeLabels := node.GetLabels()\n\t\t_, hasEgressLabel := nodeLabels[nodeEgressLabel]\n\t\tif hasEgressLabel {\n\t\t\th.eIPC.setNodeEgressAssignable(node.Name, true)\n\t\t}\n\t\tisReady := h.eIPC.isEgressNodeReady(node)\n\t\tif isReady {\n\t\t\th.eIPC.setNodeEgressReady(node.Name, true)\n\t\t}\n\t\tisReachable := h.eIPC.isEgressNodeReachable(node)\n\t\tif hasEgressLabel && isReachable && isReady {\n\t\t\th.eIPC.setNodeEgressReachable(node.Name, true)\n\t\t\tif err := h.eIPC.addEgressNode(node.Name); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase factory.EgressIPType:\n\t\teIP := obj.(*egressipv1.EgressIP)\n\t\treturn h.eIPC.reconcileEgressIP(nil, eIP)\n\tcase factory.CloudPrivateIPConfigType:\n\t\tcloudPrivateIPConfig := obj.(*ocpcloudnetworkapi.CloudPrivateIPConfig)\n\t\treturn h.eIPC.reconcileCloudPrivateIPConfig(nil, cloudPrivateIPConfig)\n\tdefault:\n\t\treturn fmt.Errorf(\"no add function for object type %s\", h.objType)\n\t}\n\treturn nil\n}", "func (p *idElementPool) put(e *idElement) {\n\tp.pool.Put(e)\n}", "func (p *ResourcePool) destroy(wrapper *ResourceWrapper) {\n\n\t//you can destroy a resource if the pool is closed, no harm no foul\n\tp.resClose(wrapper.Resource)\n\tatomic.AddUint32(&p.open, ^uint32(0))\n\twrapper.p = nil\n}", "func PutBuf(buf *Buffer) {\n\tbufferPool.Put(buf)\n}", "func (rs *ResourceCollection) addResource(r *Resource) {\n\t// prepend middleware from parent\n\tr.middleware = append(rs.middleware, r.middleware...)\n\t// pass the coreSecurityMiddleware if the new resource doesn't have one\n\tif r.overWriteCoreSecurityMiddleware == nil {\n\t\tr.overWriteCoreSecurityMiddleware = rs.overWriteCoreSecurityMiddleware\n\t}\n\trs.checkMap()\n\trs.resources[r.path] = *r\n}", "func (listener *Listener) AddResource(src io.Closer, cleaners 
...func(error)) {\n\tlistener.resources = append(listener.resources, resource{src, cleaners})\n}", "func (p *loopBufferPool) Put(v *loopBuffer) {\n\tif v.Cap() != ReuseBufferSize {\n\t\treturn\n\t}\n\tp.pool.Put(v)\n}", "func (p *ConnProvider) Put(conn net.Conn) error {\n\tclosed := atomic.LoadInt32(&p.closed)\n\tif closed == 1 {\n\t\treturn errors.New(\"pool is closed\")\n\t}\n\n\taddr := conn.RemoteAddr().String()\n\n\tp.mu.Lock()\n\tif _, ok := p.idleConnMap[addr]; !ok {\n\t\tp.idleConnMap[addr] = make(chan net.Conn, p.options.IdleMaxCap)\n\t}\n\tp.mu.Unlock()\n\n\t// set conn timeout\n\t// The timeout will be verified at the next `Get()`\n\terr := conn.SetDeadline(time.Now().Add(p.options.Timeout))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tselect {\n\tcase p.idleConnMap[addr] <- conn:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"beyond max capacity\")\n\t}\n}", "func (p *Pool) Put(buf []byte) {\n\tsize := cap(buf)\n\tif size == 0 || size > math.MaxInt32 {\n\t\treturn\n\t}\n\tidx := index(uint32(size))\n\tif size != 1<<idx { // this byte slice is not from Pool.Get(), put it into the previous interval of idx\n\t\tidx--\n\t}\n\t// array pointer\n\tp.pools[idx].Put(unsafe.Pointer(&buf[:1][0]))\n}", "func (adm Admin) AddResource(\n\tcluster string, resource string, partitions int, stateModel string) error {\n\tif ok, err := adm.isClusterSetup(cluster); !ok || err != nil {\n\t\treturn ErrClusterNotSetup\n\t}\n\n\tbuilder := &KeyBuilder{cluster}\n\n\t// make sure the state model def exists\n\texists, _, err := adm.zkClient.Exists(builder.stateModelDef(stateModel))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"state model doesnt't exist \"+stateModel)\n\t}\n\tif !exists {\n\t\treturn ErrStateModelDefNotExist\n\t}\n\n\t// make sure the path for the ideal state does not exit\n\tisPath := builder.idealStates() + \"/\" + resource\n\tif exists, _, err := adm.zkClient.Exists(isPath); exists || err != nil {\n\t\tif exists {\n\t\t\treturn 
ErrResourceExists\n\t\t}\n\t\treturn err\n\t}\n\n\t// create the idealstate for the resource\n\t// is := NewIdealState(resource)\n\t// is.SetNumPartitions(partitions)\n\t// is.SetReplicas(0)\n\t// is.SetRebalanceMode(\"SEMI_AUTO\")\n\t// is.SetStateModelDefRef(stateModel)\n\t// // save the ideal state in zookeeper\n\t// is.Save(conn, cluster)\n\n\tis := model.NewMsg(resource)\n\tis.SetSimpleField(\"NUM_PARTITIONS\", strconv.Itoa(partitions))\n\tis.SetSimpleField(\"REPLICAS\", strconv.Itoa(0))\n\tis.SetSimpleField(\"REBALANCE_MODE\", strings.ToUpper(\"SEMI_AUTO\"))\n\tis.SetStateModelDef(stateModel)\n\n\taccessor := newDataAccessor(adm.zkClient, builder)\n\taccessor.createMsg(isPath, is)\n\n\treturn nil\n}", "func (r ApiCreateResourcepoolPoolRequest) ResourcepoolPool(resourcepoolPool ResourcepoolPool) ApiCreateResourcepoolPoolRequest {\n\tr.resourcepoolPool = &resourcepoolPool\n\treturn r\n}", "func (gores *Gores) push(queue string, item interface{}) error {\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\titemString, err := gores.Encode(item)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\t_, err = conn.Do(\"RPUSH\", fmt.Sprintf(queuePrefix, queue), itemString)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\terr = gores.watchQueue(queue)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\treturn nil\n}", "func (rp *resourcePool) add(e *resourcePoolElement) {\n\tif e == nil {\n\t\te = &resourcePoolElement{\n\t\t\tvalue: rp.initFn(),\n\t\t}\n\t}\n\n\te.next = rp.start\n\tif rp.start != nil {\n\t\trp.start.prev = e\n\t}\n\trp.start = e\n\tif rp.end == nil {\n\t\trp.end = e\n\t}\n\tatomic.AddUint64(&rp.size, 1)\n}", "func (_ BufferPtrPool32K) Put(b *[]byte) {\n\tPutBytesSlicePtr32K(b)\n}", "func (p *DecoderPool) Put(d *zstd.Decoder) {\n\td.Reset(nil) // Free up reference to the underlying io.Reader.\n\n\tp.mu.Lock()\n\tdefer 
p.mu.Unlock()\n\n\tp.available++\n\n\tif p.head == nil {\n\t\tp.head = &decoder{d: d}\n\t\treturn\n\t}\n\n\tdec := &decoder{d: d, next: p.head}\n\tp.head = dec\n\treturn\n}", "func (p *ResourcePool) Get() (resource ResourceWrapper, err error) {\n\treturn p.getWait()\n}", "func (r ApiUpdateResourcepoolPoolRequest) ResourcepoolPool(resourcepoolPool ResourcepoolPool) ApiUpdateResourcepoolPoolRequest {\n\tr.resourcepoolPool = &resourcepoolPool\n\treturn r\n}" ]
[ "0.6845959", "0.6749187", "0.63114053", "0.62950456", "0.62640625", "0.6237187", "0.6188193", "0.61351657", "0.6122181", "0.6121751", "0.6114365", "0.61053646", "0.60983473", "0.6081295", "0.6021222", "0.5969665", "0.59173834", "0.59026545", "0.5823892", "0.5807041", "0.57915187", "0.578507", "0.5775478", "0.5767221", "0.57422185", "0.5732897", "0.570706", "0.5700171", "0.56842756", "0.5674574", "0.5666153", "0.5590333", "0.5582398", "0.5581935", "0.55782914", "0.5575539", "0.55617964", "0.5558008", "0.55290794", "0.5526695", "0.5522726", "0.54991937", "0.54920584", "0.54816735", "0.5477455", "0.5475862", "0.54747504", "0.54695886", "0.54688567", "0.5458828", "0.5457718", "0.5452866", "0.544836", "0.5445893", "0.543476", "0.5423649", "0.5414015", "0.5404412", "0.53993267", "0.5378104", "0.5367872", "0.5346481", "0.5345541", "0.53363925", "0.53252864", "0.532528", "0.5322292", "0.5318997", "0.53057826", "0.529622", "0.5288295", "0.5284195", "0.52833307", "0.52571374", "0.524988", "0.52483726", "0.5239825", "0.5236658", "0.5224268", "0.5223069", "0.5216446", "0.5214239", "0.51930755", "0.5183942", "0.5179674", "0.5177704", "0.5171744", "0.51618755", "0.5161549", "0.513789", "0.51342267", "0.5131624", "0.5129798", "0.51204926", "0.5120187", "0.5118588", "0.5112336", "0.51097655", "0.5101871", "0.50981134" ]
0.5641969
31
Close tries close specified resource.
func (c *ChannelPool) Close(conn interface{}) error { if conn == nil { return nil } return c.close(conn) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CloseResource(c io.Closer) {\n\tif err := c.Close(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (r *Resource) Close() {\n\tr.Close()\n}", "func (ctx *ResourceContext) SafeClose() {\n}", "func (r ResourceConn) Close() {\n\tr.Conn.Close()\n}", "func (r *Resource) CloseAsync() {\n}", "func (r *Resource) CloseAsync() {\n}", "func (r *ResourceConn) Close() {\n\tr.ClientConn.Close()\n}", "func (nopCloser) Close() error { return nil }", "func FileClose(f *os.File,) error", "func closeSafely(toClose io.Closer) {\n\terr := toClose.Close()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func (rp *ResourcePool) Close() {\n\trp.closedMutex.Lock()\n\tif rp.closed {\n\t\trp.closedMutex.Unlock()\n\t\treturn\n\t}\n\n\trp.cchan <- closeMessage{}\n}", "func SafeClose(c io.Closer) {\n\tif c != nil {\n\t\terr := c.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}", "func Close(err error, c Closer) error {\n\tif e := c.Close(); e != nil && err == nil {\n\t\terr = e\n\t}\n\treturn err\n}", "func QuietlyClose(c io.Closer) {\n\t_ = c.Close()\n}", "func (this *reader) Close() (err error) {\n\tif ioCloser, ok := this.ioReader.(io.Closer); ok {\n\t\terr = ioCloser.Close()\n\t}\n\treturn\n}", "func (f *File) Close() error {\n\tf.mu.Lock()\n\tif f.rc != nil {\n\t\tclose(f.rc)\n\t\tf.rc = nil\n\t}\n\tf.mu.Unlock()\n\tif v, ok := f.r.(io.ReadCloser); ok {\n\t\treturn v.Close()\n\t}\n\treturn ErrNotSupported\n}", "func (b *bufCloser) Close() error { return nil }", "func Close(o io.Closer) {\n\t_ = o.Close()\n}", "func (ch *Channel) Close() {}", "func (r *Reader) Close() error {\n\treturn nil\n}", "func (r *Reader) Close() error {\n\treturn nil\n}", "func (r *Resource) WaitForClose(timeout time.Duration) error {\n\treturn nil\n}", "func (r *Resource) WaitForClose(timeout time.Duration) error {\n\treturn nil\n}", "func checkClose(c io.Closer, err *error) {\n\tcerr := c.Close()\n\tif *err == nil {\n\t\t*err = cerr\n\t}\n}", "func (s *stream) Close() error 
{\n\treturn nil\n}", "func ReadCloserClose(rc *zip.ReadCloser,) error", "func (p *ResourcePool) Close() {\n\n\tp.fMutex.Lock()\n\tdefer p.fMutex.Unlock()\n\n\tp.closed = true\n\n\tfor {\n\t\tselect {\n\t\tcase resource := <-p.resources:\n\t\t\tp.resClose(resource.Resource)\n\t\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\tdefault:\n\t\t\tclose(p.resources)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s IOStreams) Close() error {\n\t// TODO\n\treturn nil\n}", "func (fn Closer) Close() error {\n\treturn fn()\n}", "func (s *SeekerWrapper) Close() error { return s.s.Close() }", "func (*writeCloser) Close() error {\n\treturn nil\n}", "func (c *Conn) Close() error { return nil }", "func (s *Stream) Close() error {\n\tlog.Print(\"[INFO] Closing \", s.URL)\n\treturn s.rc.Close()\n}", "func (r *ReaderCloser) Close() error {\n\treturn unix.Close(r.fd)\n}", "func (r *Resource) Close() {\n\tlogrus.Warning(\"Closing all db connections\")\n}", "func (f *File) Close() error {\n\tvar err error\n\tif f.closer != nil {\n\t\terr = f.closer.Close()\n\t\tf.closer = nil\n\t}\n\treturn err\n}", "func mustClose(c io.Closer) {\n\tif err := c.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (stream *Stream) Close() error {\n\tif r, ok := stream.r.(io.Closer); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}", "func (ff failingFile) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\treturn errUnsupported\n}", "func (cfp *FsPool) Close() error {\n\tif cfp.reader != nil {\n\t\terr := cfp.reader.Close()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tcfp.reader = nil\n\t\tcfp.fileIndex = -1\n\t}\n\n\treturn nil\n}", "func Close(closer io.Closer, log log.Logger) {\n\tif err := closer.Close(); err != nil {\n\t\tlog.Crit(\"Failed to Close Object: %#v\\n Error: %s \", err.Error())\n\t}\n}", "func (s *Fs) Close() error {\n\treturn nil\n}", "func (r *Reader) Close() error {\n\t//Recycle the buffer if it has 
been created\n\tif r.buf != nil {\n\t\tr.Session().BufioSource().RecycleReader(r.buf)\n\t\tr.buf = nil\n\t}\n\tvar err error\n\tif r.pipedBody != nil {\n\t\terr = r.pipedBody.Close()\n\t\tr.pipedBody = nil\n\t\tr.rawBody = nil\n\t} else if r.rawBody != nil {\n\t\terr = r.rawBody.Close()\n\t\tr.rawBody = nil\n\t}\n\treturn err\n}", "func (rc *ReadCloser) Close() error {\n\treturn rc.f.Close()\n}", "func (f *realFile) Close() error { return f.file.Close() }", "func Close(obj interface{}) {\n\tif obj == nil {\n\t\treturn\n\t}\n\tif c, ok := obj.(Closer); ok {\n\t\tc.Close()\n\t}\n}", "func (r *RLockedFile) Close() (err error) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tif r.refs == 0 {\n\t\treturn os.ErrInvalid\n\t}\n\n\tr.refs--\n\tif r.refs == 0 {\n\t\terr = r.File.Close()\n\t}\n\n\treturn err\n}", "func (*mockFile) Close() error {\n\treturn nil\n}", "func (pipe *pipe) Close() error {\n\t// This should explicitly be not doing anything further, since the core\n\t// mechanism here is that the context is cancelled / timed out, so relying\n\t// on this method being called is not a safe assumption. 
This is merely\n\t// to adapt the context into a Read/Write Closer to maintain interop\n\t// with people's mental models and in cases where a context is not passed\n\t// into the Pipe.\n\tpipe.cancel()\n\treturn nil\n}", "func (mio *Mio) Close() error {\n if mio.obj == nil {\n return errors.New(\"object is not opened\")\n }\n C.m0_obj_fini(mio.obj)\n C.free(unsafe.Pointer(mio.obj))\n mio.obj = nil\n\n return nil\n}", "func (c *Mock) Close() { c.Closed = true }", "func (fsi *fsIOPool) Close(path string) error {\n\tfsi.Lock()\n\tdefer fsi.Unlock()\n\n\tif err := checkPathLength(path); err != nil {\n\t\treturn err\n\t}\n\n\t// Pop readers from path.\n\trlkFile, ok := fsi.readersMap[path]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\t// Close the reader.\n\trlkFile.Close()\n\n\t// If the file is closed, remove it from the reader pool map.\n\tif rlkFile.IsClosed() {\n\n\t\t// Purge the cached lock path from map.\n\t\tdelete(fsi.readersMap, path)\n\t}\n\n\t// Success.\n\treturn nil\n}", "func (l *Manager) ReleaseResource(client string, resource string) string {\n\tlog.Infof(\"Release request from client [%s] to resource [%s]\", client, resource)\n\tholder, found := l.getLock(resource)\n\n\tif found == false {\n\t\tlog.Errorf(\"Resource [%v] is not found\", resource)\n\t\treturn \"ERROR: Resource is not found\"\n\t}\n\n\tif found && holder == client {\n\t\tl.deleteLock(client, resource)\n\t\tlog.Infof(\"Resource [%v] released\", resource)\n\t\tReleasedSignal <- resource\n\t\treturn \"SUCCESS: Resource is released\"\n\t}\n\n\tlog.Errorf(\"Resource [%v] is locked by another resource [%v]\", resource, holder)\n\treturn \"ERROR: Released is hold by another resource\"\n}", "func (x *Reader) Close() error {\n\tx.Reader = nil\n\tif x.File != nil {\n\t\tif err := x.File.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tx.File = nil\n\t}\n\treturn nil\n}", "func (ff *File) Close() error {\n\treturn nil\n}", "func (r *Reader) Close() error {\n\tif closer, ok := r.Reader.(io.Closer); ok 
{\n\t\treturn closer.Close()\n\t}\n\treturn nil\n}", "func (r *Replayer) Close() error {\n\treturn errors.New(\"implement me\")\n}", "func Close(URL *url.URL) error { return request(URL, http.MethodDelete, nil, nil) }", "func (f *FakeWriteCloser) Close() error {\n\treturn nil\n}", "func (f *file) Close() error {\n\treturn nil\n}", "func (rr *Reader) Close() {\n\tif rr.Err == nil && len(rr.Bytes()) != 0 {\n\t\trr.Err = errors.New(\"excess bytes in buffer\")\n\t}\n}", "func (cr *callResult) Close() error { return nil }", "func (c *fileClient) Close() error {\n\treturn errNotImplemented.New(Kind)\n}", "func (d *downloader) Close() error {\n\tif d.Reader != nil {\n\t\treturn d.Reader.Close()\n\t}\n\treturn nil\n}", "func (bb *BytesBuffer) Close() error {\n\treturn nil\n}", "func (reader *Reader) Close() (e error) {\n\tif nil != reader.bufReject {\n\t\te = reader.bufReject.Flush()\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif nil != reader.fReject {\n\t\te = reader.fReject.Close()\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treader.deleteEmptyRejected()\n\n\tif nil != reader.fRead {\n\t\te = reader.fRead.Close()\n\t}\n\treturn\n}", "func (r *Resampler) Close() (err error) {\n\tif r.resampler == nil {\n\t\treturn errors.New(\"soxr resampler is nil\")\n\t}\n\tC.soxr_delete(r.resampler)\n\tr.resampler = nil\n\treturn\n}", "func Close(s Stream) error {\n\tif closer, ok := s.(io.Closer); ok {\n\t\treturn closer.Close()\n\t}\n\treturn nil\n}", "func (c *APIClient) Close() error {\n\tif conn, ok := c.conn.(io.Closer); ok {\n\t\treturn conn.Close()\n\t}\n\treturn nil\n}", "func (c *client) Close() error { return c.c.Close() }", "func (c *Connection) Close() error {\n\trerr := c.ReadCloser.Close()\n\twerr := c.WriteCloser.Close()\n\tif rerr != nil {\n\t\treturn rerr\n\t}\n\treturn werr\n}", "func (self *IPCSocket) Close() error {\n\tself.open = false\n\treturn self.socket.Close()\n}", "func (c *Client) Close() {}", "func (c *Client) Close() {}", "func (c *restClient) 
Close() error {\n\t// Replace httpClient with nil to force cleanup.\n\tc.httpClient = nil\n\treturn nil\n}", "func (c *restClient) Close() error {\n\t// Replace httpClient with nil to force cleanup.\n\tc.httpClient = nil\n\treturn nil\n}", "func (c *restClient) Close() error {\n\t// Replace httpClient with nil to force cleanup.\n\tc.httpClient = nil\n\treturn nil\n}", "func (c *restClient) Close() error {\n\t// Replace httpClient with nil to force cleanup.\n\tc.httpClient = nil\n\treturn nil\n}", "func (c *restClient) Close() error {\n\t// Replace httpClient with nil to force cleanup.\n\tc.httpClient = nil\n\treturn nil\n}", "func (res *respondent) Close() error {\n\tif res.fileChannel == nil {\n\t\treturn errors.New(utils.ErrorListenDial)\n\t}\n\tres.fileChannel.Destroy()\n\treturn nil\n}", "func (rp *ResourcePool) Release(resource Resource) {\n\tif rp.IsClosed() {\n\t\tif !resource.IsClosed() {\n\t\t\tresource.Close()\n\t\t}\n\n\t\tatomic.AddInt64(&rp.numResources, -1)\n\n\t\treturn\n\t}\n\n\trel := releaseMessage{\n\t\tr: resource,\n\t}\n\n\trp.rchan <- rel\n}", "func SafeClose(f *os.File, buf *bufio.Writer) {\n\tif buf != nil {\n\t\tif err := buf.Flush(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tif err := f.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (r *Result) Close() error {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.close(true)\n}", "func (r *Reader) Close() error {\n\tif err := r.poller.Close(); err != nil {\n\t\tif errors.Is(err, os.ErrClosed) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\t// Acquire the lock. 
This ensures that Read isn't running.\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.ring != nil {\n\t\tr.ring.Close()\n\t\tr.ring = nil\n\t}\n\n\treturn nil\n}", "func (f stdioFileHandle) Close() error {\n\treturn ErrUnsupported\n}", "func (f *FakeReadCloser) Close() error {\n\tf.CloseCalled = true\n\treturn f.CloseError\n}", "func (mrc *MockReadCloser) Close() error {\n\tmrc.closed = true\n\treturn nil\n}", "func (r *DiscoveryResolver) Close() {\n}", "func CloseQuietly(v io.Closer) {\n\t_ = v.Close()\n}", "func (h *HTTP) Close() error {\n\treturn nil\n}", "func (c *Context) Close() (err int) {\n\treturn int(C.rtlsdr_close((*C.rtlsdr_dev_t)(c.dev)))\n}", "func (p *TBufferedReadTransport) Close() error {\n\treturn nil\n}", "func (pool AllocatingPool) FreeResource(raw RawResourceProps) error {\n\treturn pool.freeResourceInner(raw, pool.retireResource, pool.freeResourceImmediately, pool.benchResource)\n}", "func (r *Radio) Close() {}", "func (s *Sniffer) close() error {\n\tif err := unix.Close(s.fd); err != nil {\n\t\treturn fmt.Errorf(\"can't close sniffer socket: %w\", err)\n\t}\n\ts.fd = -1\n\treturn nil\n}", "func (it *RandomBeaconDkgMaliciousResultSlashingFailedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (f *File) Close() error {\n\treturn nil\n}", "func (f *File) Close() error {\n\treturn nil\n}", "func (o *ODirectReader) Close() error {\n\tif o.bufp != nil {\n\t\tif o.SmallFile {\n\t\t\tODirectPoolSmall.Put(o.bufp)\n\t\t} else {\n\t\t\tODirectPoolLarge.Put(o.bufp)\n\t\t}\n\t\to.bufp = nil\n\t\to.buf = nil\n\t}\n\to.err = errors.New(\"internal error: ODirectReader Read after Close\")\n\treturn o.File.Close()\n}", "func (p *ResourcePool) destroy(wrapper *ResourceWrapper) {\n\n\t//you can destroy a resource if the pool is closed, no harm no foul\n\tp.resClose(wrapper.Resource)\n\tatomic.AddUint32(&p.open, ^uint32(0))\n\twrapper.p = nil\n}" ]
[ "0.7360149", "0.6990365", "0.6755163", "0.6430757", "0.6347393", "0.6347393", "0.6268224", "0.6240705", "0.5953999", "0.5901298", "0.5882901", "0.5868772", "0.58585006", "0.5833645", "0.5805449", "0.5803445", "0.57738656", "0.57553387", "0.57311213", "0.5721894", "0.5721894", "0.5716733", "0.5716733", "0.57132953", "0.57113963", "0.56831473", "0.56758046", "0.56692994", "0.5666015", "0.5662187", "0.5655418", "0.5649707", "0.564886", "0.56472725", "0.5645277", "0.5634458", "0.5627744", "0.5615547", "0.5605295", "0.55878186", "0.5578721", "0.55739063", "0.5573696", "0.5557717", "0.554997", "0.55467516", "0.55446553", "0.5527301", "0.55265653", "0.5511182", "0.5508016", "0.5507179", "0.55053246", "0.55033547", "0.5497358", "0.5490782", "0.5483981", "0.54802966", "0.5479451", "0.54774934", "0.5474354", "0.5471315", "0.5463265", "0.54484695", "0.5444358", "0.5443034", "0.5426226", "0.5422984", "0.54137224", "0.5411168", "0.540973", "0.5406142", "0.5399209", "0.53981614", "0.53981614", "0.53930086", "0.53930086", "0.53930086", "0.53930086", "0.53930086", "0.53897446", "0.5386041", "0.53854716", "0.53851134", "0.5383739", "0.5382798", "0.5375122", "0.537156", "0.5368904", "0.53686744", "0.5366212", "0.5365703", "0.5362173", "0.5360904", "0.5358072", "0.5353928", "0.53527904", "0.5350044", "0.5350044", "0.53491545", "0.5348196" ]
0.0
-1
Release all resource entity stored in pool.
func (c *ChannelPool) Release() { c.mu.Lock() conns := c.conns c.conns = nil c.factory = nil closeFun := c.close c.close = nil c.mu.Unlock() if conns == nil { return } close(conns) for wrapConn := range conns { closeFun(wrapConn.conn) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Pool) Release(){\n if(p.availablePool != nil){\n for _,dbCon := range p.availablePool{\n dbCon.Close()\n }\n }else{\n p.availablePool=nil\n }\n}", "func (p *Pool) Close(){\n p.availablePool[con]=p.usedPool[con]\n\tdelete(p.usedPool,con)\n}", "func (m *Manager) Release() {\n\tm.con.Close()\n\tm.stdCimV2Con.Close()\n}", "func (p *ResourcePool) destroy(wrapper *ResourceWrapper) {\n\n\t//you can destroy a resource if the pool is closed, no harm no foul\n\tp.resClose(wrapper.Resource)\n\tatomic.AddUint32(&p.open, ^uint32(0))\n\twrapper.p = nil\n}", "func (c *channelPool) Release() {\n\tc.mu.Lock()\n\tfor _, servConn := range c.servConnsMap {\n\t\tfor ic := range servConn.idleConns {\n\t\t\tic.connWrap.CloseConnWrap()\n\t\t}\n\t\tclose(servConn.idleConns)\n\t\tservConn.openingConnNum = 0\n\t}\n\n\tc.servConnsMap = nil\n\tc.servAddrList = nil\n\n\tc.mu.Unlock()\n}", "func (i *Instance) Close() error {\n\treturn i.pool.Purge(i.resource)\n}", "func (p *ResourcePool) Close() {\n\n\tp.fMutex.Lock()\n\tdefer p.fMutex.Unlock()\n\n\tp.closed = true\n\n\tfor {\n\t\tselect {\n\t\tcase resource := <-p.resources:\n\t\t\tp.resClose(resource.Resource)\n\t\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\tdefault:\n\t\t\tclose(p.resources)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *request) Release() {\n\tp.ctx = nil\n\tp.Entry = nil\n\tp.read = false\n\trequestPool.Put(p)\n}", "func (rp *resourcePool) Close() {\n\trp.Lock()\n\tdefer rp.Unlock()\n\n\t// Clear the resources in the pool.\n\tfor ; rp.start != nil; rp.start = rp.start.next {\n\t\trp.closeFn(rp.start.value)\n\t\trp.totalSize--\n\t}\n\tatomic.StoreUint64(&rp.size, 0)\n\trp.end = nil\n\n\t// Stop the maintenance timer. 
If it's already fired, a call to Maintain might be waiting for the lock to be\n\t// released, so we set closed to make that call a no-op.\n\trp.closed = true\n\t_ = rp.maintainTimer.Stop()\n}", "func (q *HTTP) Release() {\n\tq.HumanLabel = q.HumanLabel[:0]\n\tq.HumanDescription = q.HumanDescription[:0]\n\tq.id = 0\n\tq.Method = q.Method[:0]\n\tq.Path = q.Path[:0]\n\tq.Body = q.Body[:0]\n\tq.StartTimestamp = 0\n\tq.EndTimestamp = 0\n\n\tHTTPPool.Put(q)\n}", "func (p *Pool) Destroy() {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tp.factory = nil\n\tif p.conns == nil {\n\t\treturn\n\t}\n\n\tfor v := range p.conns {\n\t\tif v != nil {\n\t\t\tp.Close(v)\n\t\t}\n\t}\n\tp.conns = nil\n\n}", "func (p *Pool) Release() {\n\tp.once.Do(func() {\n\t\tatomic.StoreInt32(&p.release, 1)\n\t\tp.lock.Lock()\n\t\tp.workers.reset()\n\t\tp.lock.Unlock()\n\t\tdelete(PoolRecords, p.name)\n\t})\n}", "func Release() {\n\tdefaultRoutinePool.Release()\n}", "func (p *ResourcePool) releaseAtomic(wrapper *ResourceWrapper) {\n\n\tp.fMutex.RLock()\n\tdefer p.fMutex.RUnlock()\n\n\t//if this pool is closed when trying to release this resource\n\t//just close the resource\n\tif p.closed == true {\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\twrapper.p = nil\n\t\treturn\n\t}\n\n\t//obtain a lock to return the resource to the pool\n\t//if we end up not needing to, lets undo our lock\n\t//and close the resource\n\tif nAvailable := atomic.AddUint32(&p.nAvailable, 1); nAvailable > p.min {\n\t\t//decriment\n\t\tatomic.AddUint32(&p.nAvailable, ^uint32(0))\n\t\tp.resClose(wrapper.Resource)\n\t\tatomic.AddUint32(&p.open, ^uint32(0))\n\t\treturn\n\t}\n\n\tp.resources <- *wrapper\n}", "func (l *Manager) ReleaseResources(client string) {\n\t// Looping over the set\n\tfor r := range l.ClientHolder[client] {\n\t\tmsg := l.ReleaseResource(client, r.(string))\n\t\tlog.Println(msg)\n\t}\n}", "func (p *Pool) Release() {\n\tp.dispatcher.stop <- true\n\t<-p.dispatcher.stop\n}", "func (r 
*ResponsePool) Release(resp *Response) {\n\tresp.Reset()\n\tr.pool.Put(resp)\n}", "func (rp *ResourcePool) Release(resource Resource) {\n\tif rp.IsClosed() {\n\t\tif !resource.IsClosed() {\n\t\t\tresource.Close()\n\t\t}\n\n\t\tatomic.AddInt64(&rp.numResources, -1)\n\n\t\treturn\n\t}\n\n\trel := releaseMessage{\n\t\tr: resource,\n\t}\n\n\trp.rchan <- rel\n}", "func (p *connPool) Purge() {\n\tdpiPool := p.dpiPool\n\tp.dpiPool = nil\n\tif dpiPool != nil {\n\t\tC.dpiPool_close(dpiPool, C.DPI_MODE_POOL_CLOSE_FORCE)\n\t}\n}", "func (t *Tik) Release() {\n\tt.client.Close()\n}", "func (handler *AllowAllHandler) Release() {\n\treturn\n}", "func (pool *Pool) Close() {\n\tpool.mutex.Lock()\n\tpool.freelist = nil\n\tpool.mutex.Unlock()\n}", "func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {\n\t// For CiliumIdentity-based allocation, the reference counting is\n\t// handled via CiliumEndpoint. Any CiliumEndpoint referring to a\n\t// CiliumIdentity will keep the CiliumIdentity alive. 
No action is\n\t// needed to release the reference here.\n\treturn nil\n}", "func (c *Client) ReleaseAll(dest string) error {\n\tc.lock.Lock()\n\n\tif len(c.resources) == 0 {\n\t\tc.lock.Unlock()\n\t\treturn fmt.Errorf(\"No holding resource\")\n\t}\n\tc.lock.Unlock()\n\n\tfor {\n\t\tr, ok := c.popResource()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tif err := c.release(r, dest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (am *AccountManager) Release() {\n\tam.bsem <- struct{}{}\n}", "func (p *WorkerPool[T, R]) Release() {\n\tif p.resChan != nil {\n\t\tclose(p.resChan)\n\t}\n}", "func Release(q *Query) {\n\tqueryPool.Put(q)\n}", "func Release(o *Object) {\n\to.hardReset()\n\topool.Put(o)\n}", "func (i *Instance) dispose() {\n\ti.pool.Close()\n}", "func ClosePool() error {\n\tif enable {\n\t\treturn pool.Close()\n\t}\n\treturn nil\n}", "func (conductor *conductor) Release() {\n\tconductor.peersMutex.Lock()\n\tdefer conductor.peersMutex.Unlock()\n\tfor _, pc := range conductor.peers {\n\t\tglog.Infoln(\"Delete pc\")\n\t\tpc.Delete()\n\t\tdelete(conductor.peers, pc.(*peerConn).Pointer)\n\t}\n\tC.release_shared(conductor.shared)\n\tconductor.shared = nil\n}", "func (c PersistentIdentity) Release() {\n\tcapnp.Client(c).Release()\n}", "func (pool AllocatingPool) Destroy() error {\n\t// Check if there are no more claims\n\tclaims, err := pool.QueryResources()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(claims) > 0 {\n\t\tlog.Warn(pool.ctx, \"Unable to delete pool with ID %d because it has allocated resources\", pool.ID)\n\t\treturn errors.Errorf(\"Unable to destroy pool #%d, there are claimed resources\",\n\t\t\tpool.ID)\n\t}\n\n\terr = DeletePoolProperties(pool.ctx, pool.client, pool.ID)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Delete pool itself\n\terr = pool.client.ResourcePool.DeleteOne(pool.ResourcePool).Exec(pool.ctx)\n\tif err != nil {\n\t\tlog.Error(pool.ctx, err, \"Unable delete pool with ID %d\", pool.ID)\n\t\treturn 
errors.Wrapf(err, \"Cannot destroy pool #%d\", pool.ID)\n\t}\n\n\treturn nil\n}", "func (l *SyncStorageLock) ReleaseResource(ns string) error {\n\tok, err := l.s.getDbBackend(ns).DelIE(getNsPrefix(ns)+l.key, l.value)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !ok {\n\t\treturn errors.New(\"Lock not held\")\n\t}\n\treturn nil\n}", "func (c *RPCClient) Close() {\n\tfor _, conn := range c.pool {\n\t\tconn.Close()\n\t}\n}", "func (p *Pool) Close() error {\n\treturn p.cleanup()\n}", "func (s *ControllerPool) Release(controllerName string, controller interface{}) {\n\ts.mu.RLock()\n\tpool, ok := s.poolMap[controllerName]\n\ts.mu.RUnlock()\n\tif !ok {\n\t\tpanic(\"unknown controller name\")\n\t}\n\tDiFree(controller)\n\tpool.Put(controller)\n\n}", "func (p *Pools) Close() {\n\tfor _, pool := range p.pools {\n\t\tpool.close()\n\t}\n\tp.Flush(true)\n}", "func Close() {\n\tpool.Close()\n}", "func (rp *Pool) Close() {\n\trp.lock.Lock()\n\tdefer rp.lock.Unlock()\n\n\trp.closed = true\n\n\tfor requestInfo, element := range rp.existMap {\n\t\trp.deleteRequest(element, requestInfo)\n\t}\n\n\trp.cancel()\n}", "func (m *MultiConnPool) Close() {\n\tfor _, p := range m.Pools {\n\t\tp.Close()\n\t}\n}", "func (s *UDPSender) Release() error {\n\tif s.conn == nil {\n\t\treturn nil\n\t}\n\terr := s.conn.Close()\n\ts.conn = nil\n\treturn err\n}", "func (t *tOps) close() {\n\tt.bpool.Close()\n\tt.cache.Close()\n\tif t.bcache != nil {\n\t\tt.bcache.CloseWeak()\n\t}\n}", "func (p *pool) Unlock(user, resourceName string) error {\n\tfor k, v := range p.locks {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif k.Name != resourceName {\n\t\t\tcontinue\n\t\t}\n\n\t\tif v.User != user {\n\t\t\treturn ErrResourceLockedByDifferentUser\n\t\t}\n\n\t\tp.locks[k] = nil\n\n\t\tif err := storage.Delete(storageKey, k.Name); err != nil {\n\t\t\tlog.Error(errors.Wrap(err, \"error while storing pool lock entry\"))\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestPoolRelease(t *testing.T) {\n\ttestPool(t, 
false)\n}", "func ClosePool() error {\n\tif cfg.Disable {\n\t\treturn nil\n\t}\n\n\treturn pool.Close()\n}", "func (d *boltDriverPool) Close() error {\n\t// Lock the connection ref so no new connections can be added\n\td.refLock.Lock()\n\tdefer d.refLock.Unlock()\n\tfor _, conn := range d.connRefs {\n\t\t// Remove the reference to the pool, to allow a clean up of the connection\n\t\tconn.poolDriver = nil\n\t\terr := conn.Close()\n\t\tif err != nil {\n\t\t\td.closed = true\n\t\t\treturn err\n\t\t}\n\t}\n\t// Mark the pool as closed to stop any new connections\n\td.closed = true\n\treturn nil\n}", "func (conn *Connection) Free() {\n\tif conn.release {\n\t\t// Py_BEGIN_ALLOW_THREADS\n\t\tconn.Rollback()\n\t\tconn.srvMtx.Lock()\n\t\tC.OCISessionRelease(conn.handle, conn.environment.errorHandle, nil,\n\t\t\t0, C.OCI_DEFAULT)\n\t\t// Py_END_ALLOW_THREADS\n\t\tconn.srvMtx.Unlock()\n\t} else if !conn.attached {\n\t\tif conn.sessionHandle != nil {\n\t\t\t// Py_BEGIN_ALLOW_THREADS\n\t\t\tconn.Rollback()\n\t\t\tconn.srvMtx.Lock()\n\t\t\tC.OCISessionEnd(conn.handle, conn.environment.errorHandle,\n\t\t\t\tconn.sessionHandle, C.OCI_DEFAULT)\n\t\t\t// Py_END_ALLOW_THREADS\n\t\t\tconn.srvMtx.Unlock()\n\t\t}\n\t\tif conn.serverHandle != nil {\n\t\t\tC.OCIServerDetach(conn.serverHandle,\n\t\t\t\tconn.environment.errorHandle, C.OCI_DEFAULT)\n\t\t}\n\t}\n}", "func Release(b *Buffer) {\n\tb.B = b.B[:0]\n\tpool.Put(b)\n}", "func (rp *ResourcePool) Close() {\n\trp.closedMutex.Lock()\n\tif rp.closed {\n\t\trp.closedMutex.Unlock()\n\t\treturn\n\t}\n\n\trp.cchan <- closeMessage{}\n}", "func (p *DBPool) Close() {\n\tfor _, db := range p.dbs {\n\t\tdb.Close()\n\t}\n}", "func (p *Pool) release() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.active--\n\tif p.cond != nil {\n\t\tp.cond.Signal()\n\t}\n\n}", "func (pool *Pool) Close() {\n\tif pool.list != nil {\n\t\tC.zpool_list_close(pool.list)\n\t\tpool.list = nil\n\t}\n}", "func (p *connPool) Release(client *mcc.Client) {\n\t//reset connection 
deadlines\n\tconn := client.Hijack()\n\n\tconn.(net.Conn).SetReadDeadline(time.Date(1, time.January, 0, 0, 0, 0, 0, time.UTC))\n\tconn.(net.Conn).SetWriteDeadline(time.Date(1, time.January, 0, 0, 0, 0, 0, time.UTC))\n\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\tif p.clients != nil {\n\t\tselect {\n\t\tcase p.clients <- client:\n\t\t\treturn\n\t\tdefault:\n\t\t\t//the pool reaches its capacity, drop the client on the floor\n\t\t\tclient.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (p *Pool) Close() {\n\tclose(p.tasks)\n}", "func (self *userRestAPI) release() error {\n\tself.engine.Release()\n\treturn nil\n}", "func (p *Pool) Release(client *Client) {\n\tp.nextClient <- client\n}", "func (p *Pool) Close() {\n\tp.mu.Lock()\n\tclients := p.clients\n\trequestQueue := p.requestQueue\n\tp.clients = nil //release clients\n\tp.idleConnQueue = nil\n\tp.requestQueue = nil // release requestQueue\n\tp.mu.Unlock()\n\n\tfor !requestQueue.isEmpty() { // close all the wait channle in the queue\n\t\tclose(requestQueue.dequeue().(chan *ClientConn))\n\t}\n\n\tfor _, conn := range clients { // close all the conn in the pool\n\t\t_ = conn.ClientConn.Close()\n\t}\n}", "func (p *unlimitedPool) Close() {\n\n\terr := &ErrPoolClosed{s: errClosed}\n\tp.closeWithError(err)\n}", "func (pool AllocatingPool) FreeResource(raw RawResourceProps) error {\n\treturn pool.freeResourceInner(raw, pool.retireResource, pool.freeResourceImmediately, pool.benchResource)\n}", "func (s WorkerSnapshot) Release() {}", "func (p *DownloadPool) release(d *Downloader) {\n\tp.resource <- d // can never fail ...\n}", "func (p *Pool) Close() {\n\t// fine to loop through the buckets unlocked\n\t// locking happens at the bucket level\n\tfor b, _ := range p.BucketMap {\n\n\t\t// MB-33208 defer closing connection pools until the bucket is no longer used\n\t\tbucket := p.BucketMap[b]\n\t\tbucket.Lock()\n\t\tbucket.closed = true\n\t\tbucket.Unlock()\n\t}\n}", "func Close() {\n\tlog4go.Debug(\"resources destroy, pid:%v\", 
os.Getpid())\n\tfor name, r := range resources {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\tlog4go.Error(\"resources[%s] destroy failed:%s\", name, err.Error())\n\t\t} else {\n\t\t\tlog4go.Info(\"resources[%s] destroy finish\", name)\n\t\t}\n\t}\n}", "func ReleaseHPACK(hpack *HPACK) {\n\thpack.Reset()\n\thpackPool.Put(hpack)\n}", "func (conn *Conn) Close() {\n\tif conn.belongsToPool == nil {\n\t\tconn.close()\n\t} else {\n\t\tconn.belongsToPool.Release(conn)\n\t}\n}", "func (cluster *mongoCluster) Release() {\n\tcluster.Lock()\n\tif cluster.references == 0 {\n\t\tpanic(\"cluster.Release() with references == 0\")\n\t}\n\tcluster.references--\n\tif cluster.references == 0 {\n\t\tfor _, server := range cluster.servers.Slice() {\n\t\t\tserver.Close()\n\t\t}\n\t}\n\tcluster.Unlock()\n}", "func (pool Pool) Close() error {\n\treturn pool.Pool.Close()\n}", "func (p *pollerAutoScaler) Release(resource autoscaler.ResourceUnit) {\n\tp.sem.Release(int(resource))\n}", "func (r *RequestPool) Release(req *Request) {\n\treq.Reset()\n\tr.pool.Put(req)\n}", "func (p *Pool) Release(b *Buffer) {\n\tb.Reset()\n\tp.p.Put(b)\n}", "func (sm *SourceMgr) Release() {\n\tsm.lf.Close()\n\tos.Remove(filepath.Join(sm.cachedir, \"sm.lock\"))\n}", "func (it *iterator) Release() {\n\tit.keys, it.values = nil, nil\n}", "func (sm *SourceMgr) Release() {\n\tos.Remove(path.Join(sm.cachedir, \"sm.lock\"))\n}", "func (c *ChannelPool) Close() {\n\tc.mu.Lock()\n\tconns := c.conns\n\tc.conns = nil\n\tc.factory = nil\n\tc.mu.Unlock()\n\n\tif conns == nil {\n\t\treturn\n\t}\n\n\tclose(conns)\n\tfor conn := range conns {\n\t\tconn.Close()\n\t}\n}", "func Release(h *Headers) {\n\tvar ma nullInt64\n\tvar sma nullInt64\n\th.b = h.b[:0]\n\th.public = false\n\th.private = false\n\th.maxAge = ma\n\th.sharedMaxAge = sma\n\th.noCache = false\n\th.noStore = false\n\th.noTransform = false\n\th.mustRevalidate = false\n\th.proxyRevalidate = false\n\n\tpool.Put(h)\n}", "func (n *BufferedNetpool) DestroyAll() error 
{\n\tfor i := 0; i < len(n.pool.free); i++ {\n\t\tcon := <-n.pool.free\n\t\tcon.Flush()\n\t}\n\tn.pool.DestroyAll()\n\treturn nil\n}", "func (c *client) release(conn Conn, err error) {\n\tif err != nil {\n\t\tif err := conn.Close(); err != nil {\n\t\t\tc.logger.Printf(\"Could not close connection (%s)\", err.Error())\n\t\t}\n\n\t\tconn = nil\n\t}\n\n\tc.pool.Release(conn)\n}", "func (ctx *Context) Release() {\n\treleaseContext(ctx)\n}", "func (p *Pool) Close() {\n\n\tif !p.setStatus(STOPED) { // stop put task\n\t\treturn\n\t}\n\n\tfor len(p.chTask) > 0 { // wait all task be consumed\n\t\ttime.Sleep(1e6) // reduce CPU load\n\t}\n\n\tclose(p.chTask)\n}", "func (p *Pool) Close() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor _, c := range p.idle {\n\t\tc.pc.Client.Close()\n\t}\n\tp.closed = true\n}", "func (pool *TxPool) Stop() {\n\t// Unsubscribe all subscriptions registered from txpool\n\tpool.scope.Close()\n\n\t// Unsubscribe subscriptions registered from blockchain\n\tpool.chainHeadSub.Unsubscribe()\n\tpool.wg.Wait()\n\n\tif pool.journal != nil {\n\t\tpool.journal.close()\n\t}\n\tlog.Info(\"Transaction pool stopped\")\n}", "func (n *NoOpAllocator) ReleaseMany(poolID types.PoolID, ips []net.IP) error {\n\treturn nil\n}", "func (p *provider) release() error {\n\tp.m.Lock()\n\tdefer p.m.Unlock()\n\n\tp.refs--\n\n\tif p.refs > 0 {\n\t\treturn nil\n\t}\n\n\tdb := p.db\n\tp.db = nil\n\n\treturn p.close(db)\n}", "func PoolDestroy(name string) error {\n\tcmd := &Cmd{}\n\treturn NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_POOL_DESTROY, name, cmd, nil, nil, nil)\n}", "func (p *Pipeline) Close() {\n\tfor _, pool := range p.pools {\n\t\tclose(pool.terminate)\n\t\tpool.done.Wait()\n\t\tpool.factory.Destroy()\n\t}\n}", "func (cp *Pool) Close() {\n\tlog.Infof(\"connpool - started execution of Close\")\n\tp := cp.pool()\n\tlog.Infof(\"connpool - found the pool\")\n\tif p == nil {\n\t\tlog.Infof(\"connpool - pool is empty\")\n\t\treturn\n\t}\n\t// We should not hold the lock while 
calling Close\n\t// because it waits for connections to be returned.\n\tlog.Infof(\"connpool - calling close on the pool\")\n\tp.Close()\n\tlog.Infof(\"connpool - acquiring lock\")\n\tcp.mu.Lock()\n\tlog.Infof(\"connpool - acquired lock\")\n\tcp.connections.Close()\n\tcp.connections = nil\n\tcp.mu.Unlock()\n\tlog.Infof(\"connpool - closing dbaPool\")\n\tcp.dbaPool.Close()\n\tlog.Infof(\"connpool - finished execution of Close\")\n}", "func (c *client) Release() {\n\tc.release(nil)\n}", "func (p *pool) close() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tp.closed = true\n\tclose(p.readyChannel)\n\n\tfor connIndex := range p.connList {\n\t\tp.connList[connIndex].close()\n\t}\n\tp.connList = nil\n}", "func (p *LightningPool) Release(k *amqp.Channel) {\n\tp.mx.Lock()\n\tp.set = append(p.set, k)\n\tp.mx.Unlock()\n}", "func (p *Pool) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tclose(p.items)\n\tfor v := range p.items {\n\t\tif c, ok := v.(closer); ok {\n\t\t\tif err := c.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *Pool) Release(k ChannelKeeper) {\n\tp.mx.Lock()\n\tp.set = append(p.set, k)\n\tp.mx.Unlock()\n}", "func (iter *ldbCacheIter) Release() {\n}", "func ReleaseResponse(resp *Response) {\n\tresp.Reset()\n\tresponsePool.Put(resp)\n}", "func PoolCloseAll(pools []Pool) {\n\tfor _, p := range pools {\n\t\tp.Close()\n\t}\n}", "func (p *EOSClient) Release() error {\n\teosError := C.EdsTerminateSDK()\n\tif eosError != C.EDS_ERR_OK {\n\t\treturn errors.New(\"Error when terminating Canon SDK\")\n\t}\n\treturn nil\n}", "func (p *Pool) Close() {\n\tp.SetSize(0)\n\tclose(p.reqChan)\n}", "func (s *defaultModelSetter) Release() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.m = nil\n}", "func (p *Pool) Close() error {\n\treturn <-p.close()\n}", "func (p *Pool) Shutdown() {\n\tif p.MonitorFunc != nil {\n\t\tgo p.MonitorFunc(newActionMsg(Shutdown))\n\t}\n\tfor {\n\t\tselect 
{\n\t\tcase res := <-p.resourceQueue:\n\t\t\t//Don't terminate in goroutine or caller may close the program before we\n\t\t\t//have had chance to call Terminate on all resources.\n\t\t\tres.Terminate()\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.7412405", "0.6753915", "0.66650647", "0.6635696", "0.65399456", "0.6525649", "0.65159595", "0.6457692", "0.63981885", "0.6378158", "0.63610876", "0.63351583", "0.63126653", "0.6291022", "0.6288358", "0.62726957", "0.62382925", "0.621413", "0.621253", "0.6162023", "0.6160961", "0.6151448", "0.6126749", "0.6119796", "0.6104863", "0.61036557", "0.61029387", "0.6091995", "0.60819864", "0.60749096", "0.6071483", "0.605174", "0.6014331", "0.6009205", "0.59776545", "0.59641683", "0.5943225", "0.5941559", "0.5888138", "0.5884343", "0.5883263", "0.5880255", "0.5875139", "0.5859587", "0.5857047", "0.58547235", "0.5842871", "0.58368045", "0.5834097", "0.58336866", "0.5818046", "0.58151203", "0.58033997", "0.57957834", "0.577993", "0.57780427", "0.5775089", "0.5775071", "0.5753855", "0.5752352", "0.5750041", "0.57484204", "0.57380044", "0.57181275", "0.5716698", "0.5713239", "0.57084537", "0.5705401", "0.5703513", "0.5695526", "0.5692733", "0.56922305", "0.56746686", "0.5671855", "0.5665976", "0.5664181", "0.56512547", "0.56454426", "0.5631483", "0.5620779", "0.561682", "0.5616792", "0.56118166", "0.56053406", "0.5601457", "0.558095", "0.5579084", "0.55642956", "0.5561518", "0.5561063", "0.55596584", "0.5559195", "0.55572575", "0.55558187", "0.55532616", "0.55512625", "0.5545732", "0.5530803", "0.5528696", "0.5528638" ]
0.6400546
8
Len returns number of resource stored in pool.
func (c *ChannelPool) Len() int { return len(c.getConns()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Pool) Len() int { return int(atomic.LoadUint32(&s.avail)) }", "func (p Pool) Len() int { return len(p) }", "func (p Pool) Len() int {\n\treturn len(p)\n}", "func (pool GenePool) Len() int {\n return len(pool)\n}", "func (p NodePools) Len() int { return len(p) }", "func (mq *MQ) PoolLen() (chNum int, connNum int) {\n\treturn mq.pool.len()\n}", "func (p *Pool) Size() int {\n\t//Not bothering to use the mutex for reading as the value changes all the time\n\t//anyway - Size() only returns a snapshot of the size at a point in time.\n\t//\n\treturn p.resourceCount\n}", "func (p *Pool) Len() int {\n\treturn len(p.conns)\n}", "func (p *cPool) Len() int {\n\tp.mx.Lock()\n\tdefer p.mx.Unlock()\n\treturn len(p.c)\n}", "func (p *RedisConnectionPool) Len() int {\n\tif p.IsOpen() {\n\t\treturn p.myPool.Len()\n\t}\n\treturn -1\n}", "func (_SingleAuto *SingleAutoCallerSession) PoolLength() (*big.Int, error) {\n\treturn _SingleAuto.Contract.PoolLength(&_SingleAuto.CallOpts)\n}", "func (_SingleAuto *SingleAutoSession) PoolLength() (*big.Int, error) {\n\treturn _SingleAuto.Contract.PoolLength(&_SingleAuto.CallOpts)\n}", "func (c *ChannelPool) Len() int { return len(c.getConns()) }", "func (rp *Pool) Size() int {\n\trp.lock.Lock()\n\tdefer rp.lock.Unlock()\n\n\treturn len(rp.existMap)\n}", "func (p *connPool) Len() int {\n\treturn p.conns.Len()\n}", "func (pool *Pool) Size() int {\n\tpool.lock.RLock()\n\tdefer pool.lock.RUnlock()\n\treturn len(pool.clients)\n}", "func (p *Pool) Size() int {\n\treturn len(p.set)\n}", "func (p *LightningPool) Size() int {\n\treturn len(p.set)\n}", "func (tp *TrxPool) Size() int {\n\treturn tp.trxs.Len()\n}", "func (_SingleAuto *SingleAutoCaller) PoolLength(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _SingleAuto.contract.Call(opts, &out, \"poolLength\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (p 
*Pool) Size() int {\n\treturn p.maxSize\n}", "func (p *connPool) FreeLen() int {\n\treturn len(p.freeConns)\n}", "func (rp *ResourcePool) NumResources() int {\n\treturn int(atomic.LoadInt64(&rp.numResources))\n}", "func poolFor(size uint) int {\n\tif size == 0 {\n\t\treturn 0\n\t}\n\treturn bits.Len(size - 1)\n}", "func (wp *WorkerPool) Size() int {\n\treturn len(wp.workers)\n}", "func (v ResourceNodes) Len() int {\n\treturn len(v)\n}", "func (r pciResource) size() uint64 {\n\treturn r.end - r.start + 1\n}", "func (cp *Pool) GetCount() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.GetCount()\n}", "func (cp *Pool) Capacity() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.Capacity()\n}", "func (h ReqHeap) Len() int { return len(h) }", "func (pp *Pingreq) Len() int {\n\treturn nakedLen()\n}", "func (s *Store) Len(ctx context.Context) (int64, error) {\n\tvar nb int64\n\tif err := s.List(ctx, \"\", func(string) error {\n\t\tnb++\n\t\treturn nil\n\t}); err != nil {\n\t\treturn 0, err\n\t}\n\treturn nb, nil\n}", "func (pc *PrioCache) Len() int {\n\tpc.lock.Lock()\n\tdefer pc.lock.Unlock()\n\treturn len(pc.s)\n}", "func Length(q Interface) (int, error) {\n\tconn := RedisPool.Get()\n\tdefer conn.Close()\n\treturn redis.Int(conn.Do(\"LLEN\", redisQueueKey(q)))\n}", "func (p *offerPool) GetOffersLength() int {\n\tp.RLock()\n\tlength := p.offerList.Len()\n\tp.RUnlock()\n\n\treturn length\n}", "func (p *Pool) GetSize() int {\n\tp.workerMut.Lock()\n\tdefer p.workerMut.Unlock()\n\n\treturn len(p.workers)\n}", "func (lc *LruCache) Len() uint {\n\treturn lc.LruStore.Len()\n}", "func (r Repository) Len(ctx context.Context, uniqueID string, action authentity.Action) (int, error) {\n\totpKey := generateOtpKey(uniqueID, action)\n\tresult, err := r.redis.LLen(ctx, otpKey)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn result, err\n}", "func (vri *VectorResourceIDReader) Len() (size int) {\n size = len(vri._vector)\n 
return\n}", "func (pp *Pingresp) Len() int {\n\treturn nakedLen()\n}", "func (hm HashMap) Len(ctx context.Context) (int64, error) {\n\treq := newRequest(\"*2\\r\\n$4\\r\\nHLEN\\r\\n$\")\n\treq.addString(hm.name)\n\treturn hm.c.cmdInt(ctx, req)\n}", "func getPoolInitialSize(poolName string) int {\n\t// get initial node count\n\targs := []string{\"container\", \"node-pools\", \"describe\", poolName, \"--quiet\",\n\t\t\"--cluster=\" + framework.TestContext.CloudConfig.Cluster,\n\t\t\"--format=value(initialNodeCount)\"}\n\toutput, err := execCmd(getGcloudCommand(args)...).CombinedOutput()\n\tklog.Infof(\"Node-pool initial size: %s\", output)\n\tframework.ExpectNoError(err, string(output))\n\tfields := strings.Fields(string(output))\n\tframework.ExpectEqual(len(fields), 1)\n\tsize, err := strconv.ParseInt(fields[0], 10, 64)\n\tframework.ExpectNoError(err)\n\n\t// get number of node pools\n\targs = []string{\"container\", \"node-pools\", \"describe\", poolName, \"--quiet\",\n\t\t\"--cluster=\" + framework.TestContext.CloudConfig.Cluster,\n\t\t\"--format=value(instanceGroupUrls)\"}\n\toutput, err = execCmd(getGcloudCommand(args)...).CombinedOutput()\n\tframework.ExpectNoError(err, string(output))\n\tnodeGroupCount := len(strings.Split(string(output), \";\"))\n\treturn int(size) * nodeGroupCount\n}", "func (k Keeper) GetPoolCount(ctx sdk.Context) uint64 {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolCountKey))\n\tbyteKey := types.KeyPrefix(types.PoolCountKey)\n\tbz := store.Get(byteKey)\n\n\t// Count doesn't exist: no element\n\tif bz == nil {\n\t\treturn 0\n\t}\n\n\t// Parse bytes\n\tcount, err := strconv.ParseUint(string(bz), 10, 64)\n\tif err != nil {\n\t\t// Panic because the count should be always formattable to iint64\n\t\tpanic(\"cannot decode count\")\n\t}\n\n\treturn count\n}", "func (h CRConfigHistoryThreadsafe) Len() uint64 {\n\tif h.length == nil {\n\t\treturn 0\n\t}\n\treturn *h.length\n}", "func (l *Libvirt) 
StoragePoolNumOfVolumes(Pool StoragePool) (rNum int32, err error) {\n\tvar buf []byte\n\n\targs := StoragePoolNumOfVolumesArgs {\n\t\tPool: Pool,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(91, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Num: int32\n\t_, err = dec.Decode(&rNum)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *shard) len() uint64 {\n\ts.rwMutex.RLock()\n\tlength := uint64(len(s.entryIndexes))\n\ts.rwMutex.RUnlock()\n\n\treturn length\n}", "func (s *ConcurrentSet) Len() uint32 {\n\treturn atomic.LoadUint32(&s.size)\n}", "func (hc *HybridCache) Len() int64 {\n\treturn hc.mc.Len() + hc.dc.Len()\n}", "func (l *semaphoreList) length() int {\n\tl.RLock()\n\tdefer l.RUnlock()\n\tlength := len(l.list)\n\treturn length\n}", "func (blt Bolt) Length() int {\n\tvar len int\n\tblt.db.View(func(tx *b.Tx) error { //nolint:errcheck\n\t\tlen = tx.Bucket(blt.Bucket).Stats().KeyN\n\t\treturn nil\n\t})\n\treturn len\n}", "func (w workers) Len() int { return len(w) }", "func (q *wantConnQueue) len() int {\n\treturn len(q.head) - q.headPos + len(q.tail)\n}", "func (c *NoReplKeyCache) Len() int {\n\tc.lock.RLock()\n\tlength := len(c.cache)\n\tc.lock.RUnlock()\n\treturn length\n}", "func (subr *SRCountersData) Len() (l uint16) {\n\tencoded, _ := subr.Encode()\n\tl = uint16(len(encoded))\n\treturn l\n}", "func (multi_queue *MultiQueue) Length() (int, error) {\n\tcount := 0\n\tfor _, q := range multi_queue.HealthyQueues() {\n\t\tconn := q.pooledConnection.Get()\n\t\tdefer conn.Close()\n\n\t\trep, err := redis.Int(conn.Do(\"LLEN\", multi_queue.key))\n\t\tif err == nil {\n\t\t\tcount = count + rep\n\t\t} else {\n\t\t\treturn count, 
err\n\t\t}\n\t}\n\treturn count, nil\n}", "func (q *PriorityQueue) Length() int {\n\treturn q.count\n}", "func (r *Redis) Size() (int64, error) {\n\tlenght, err := r.Len()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif lenght == 0 {\n\t\treturn 0, nil\n\t}\n\n\tvar itemIDs []string\n\tif err := r.store.ScoredSetRange(context.Background(), redisLruKeyCacheKey, 0, -1, &itemIDs); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// DB Request\n\treturn item.ComputeSizeByIDs(r.db, itemIDs)\n}", "func (s *Stack) Len() int {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.count\n}", "func (a *RequestedNSSAI) GetLen() (len uint8) {}", "func (c *Cache) Len() int {\n\tvar len int\n\tfor _, shard := range c.shards {\n\t\tlen += shard.policy.Len()\n\t}\n\n\treturn len\n}", "func (sr *Stackers) Length() int {\n\tvar l int\n\tsr.ro.Lock()\n\t{\n\t\tl = len(sr.stacks)\n\t\tsr.ro.Unlock()\n\t}\n\treturn l\n}", "func (d *Dam) Size() int {\n\td.mutex.RLock()\n\tl := len(d.storage)\n\td.mutex.RUnlock()\n\treturn l\n}", "func (bc *Catalog) Len() int {\n\tbc.lock.RLock()\n\tdefer bc.lock.RUnlock()\n\treturn len(bc.bundles)\n}", "func (m *Manager) Len() int {\n\treturn m.hub.len()\n}", "func (this *List) Len() int {\n this.lock.RLock()\n this.lock.RUnlock()\n\n return len(this.counters)\n}", "func (intList *ConcurrentIntList) Len() int {\n\treturn int(atomic.LoadInt64(&intList.size))\n}", "func (rb *RingBuffer[T]) Len() int {\n\tif rb == nil {\n\t\treturn 0\n\t}\n\trb.mu.Lock()\n\tdefer rb.mu.Unlock()\n\treturn len(rb.buf)\n}", "func (mp *TxPool) Count() int {\n\tmp.mtx.RLock()\n\tcount := len(mp.pool)\n\tmp.mtx.RUnlock()\n\n\treturn count\n}", "func (r *RingT[T]) Len() int {\n\treturn int((r.head - r.tail) & r.mask)\n}", "func (s *PendingConnections) Len() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn len(s.value)\n}", "func (servers byPriorityWeight) Len() int { return len(servers) }", "func (t CreatableReplicaAssignment19) Size(version int16) int32 {\n\tvar sz int32\n\tsz += 
sizeof.Int32 // PartitionIndex\n\tsz += sizeof.Int32Array(t.BrokerIds) // BrokerIds\n\treturn sz\n}", "func (p *Pool) AllocCount() int {\n\tp.RLock()\n\tdefer p.RUnlock()\n\treturn len(p.allocated)\n}", "func (h *Queue) Len() int { return len(h.slice) }", "func (db *MemoryCache) Len() int {\n\tdb.lock.RLock()\n\tdefer db.lock.RUnlock()\n\n\treturn len(db.db)\n}", "func (r *KeyRing) Len() int {\n\treturn len(r.entities)\n}", "func (gores *Gores) Size(queue string) (int64, error) {\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tsize, err := conn.Do(\"LLEN\", fmt.Sprintf(queuePrefix, queue))\n\tif size == nil || err != nil {\n\t\treturn 0, fmt.Errorf(\"Gores find queue size failed: %s\", err)\n\t}\n\n\treturn size.(int64), nil\n}", "func (r *MachinePoolsListResponse) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func (s AppServers) Len() int { return len(s) }", "func (registry *Registry) Len() int {\n\tregistry.lock.RLock()\n\tdefer registry.lock.RUnlock()\n\treturn len(registry.db)\n}", "func (hub *WSHub) Len() int {\n\thub.mu.Lock()\n\tdefer hub.mu.Unlock()\n\treturn len(hub.connections)\n}", "func (rb *RingBuffer) Length() int {\n\treturn rb.count\n}", "func (r *Rope) Size() int { return r.size }", "func (h *data) Len() int { return len(h.queue) }", "func (c *Cache) Len() int32 {\n\tl := 0\n\tfor i := 0; i < shardCount; i++ {\n\t\tshard := c.data[i]\n\t\tshard.Lock()\n\t\tl += len(shard.items)\n\t\tshard.Unlock()\n\t}\n\treturn int32(l)\n}", "func (q *Stack) Len() int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\treturn q.count\n}", "func (sm safeMap) Len() int {\n\treply := make(chan interface{})\n\tsm <- commandData{action: COUNT, result: reply}\n\treturn (<-reply).(int)\n}", "func (s *Store) Len() int {\n\ts.access.RLock()\n\tdefer s.access.RUnlock()\n\n\treturn len(s.data)\n}", "func (p *Prque) Size() int {\n\treturn p.cont.Len()\n}", "func (p *Prque) Size() int {\n\treturn p.cont.Len()\n}", "func (h 
*hashRing) Size() int {\n\treturn len(h.nodes)\n}", "func (h *Heap) Len() int { return len(h.slice) }", "func (b *Ring) Size() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\treturn b.size\n}", "func (p *MemDB) Len() int {\n\treturn p.n\n}", "func (s *Set) Length() int {\n\ts.m.Lock()\n\tsize := len(s.proxies)\n\ts.m.Unlock()\n\n\treturn size\n}", "func (s *Int64Map) Len() int {\n\treturn int(atomic.LoadInt64(&s.length))\n}", "func (storage *Storage) Len() (n int) {\n\tstorage.mutex.Lock()\n\tn = storage.lruList.Len()\n\tstorage.mutex.Unlock()\n\treturn\n}", "func (g *Gpks) Len() int {\n\treturn len(g.sid) + len(g.nid)\n}", "func (q *Queue) Len() int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tvar len int\n\tlen = q.tasks.Len()\n\t\n\treturn len\n}", "func (e *Engine) ResourceCount() int {\n\treturn e.config.ResourceCount()\n}" ]
[ "0.8347779", "0.7919871", "0.77839893", "0.7465639", "0.74263847", "0.74159336", "0.7398953", "0.7252698", "0.7229176", "0.7174852", "0.7131198", "0.71197116", "0.7114624", "0.7087929", "0.705007", "0.7048135", "0.6971592", "0.6933583", "0.68244475", "0.67951185", "0.67513406", "0.67215437", "0.66979223", "0.66531396", "0.65640473", "0.6497093", "0.6423569", "0.6411346", "0.6378816", "0.6359975", "0.63352305", "0.63296705", "0.6283458", "0.6269482", "0.62344366", "0.62127244", "0.61984736", "0.61883086", "0.618807", "0.6150662", "0.6150573", "0.614036", "0.61402756", "0.6138044", "0.61187667", "0.6115896", "0.6114794", "0.6104598", "0.6089049", "0.6077233", "0.60740286", "0.60718215", "0.60684216", "0.6059026", "0.605696", "0.60531116", "0.6050302", "0.6048233", "0.6046943", "0.60425013", "0.6030309", "0.6026256", "0.6025341", "0.6020817", "0.60147434", "0.60113645", "0.6004648", "0.6002679", "0.6002125", "0.6001313", "0.5996504", "0.59934783", "0.5992336", "0.59850806", "0.5982158", "0.5974436", "0.59741175", "0.5971236", "0.59703654", "0.5968582", "0.5962895", "0.5959576", "0.5956202", "0.59560186", "0.59549385", "0.5952155", "0.59508634", "0.5947997", "0.5943362", "0.5943362", "0.5937027", "0.5935437", "0.5933172", "0.59322655", "0.59300876", "0.59300435", "0.5929003", "0.5927386", "0.5926663", "0.59251887" ]
0.6910287
18
Find a concept by string commands
func getConceptByString(searchTerm string, resultLimit string) string { url := baseUrl + edition + "/" + version + "/concepts?term=" + searchTerm + "&activeFilter=true&offset=0&limit=" + resultLimit return lookup(url) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getConceptByStringInProceduresSemanticTag(searchTerm string) string {\n\turl := baseUrl + \"/browser/\" +edition + \"/\" + version + \"/descriptions?term=\" + searchTerm + \"&conceptActive=true&semanticTag=procedure&groupByConcept=false&searchMode=STANDARD&offset=0&limit=50\"\n\treturn lookup(url)\n\n}", "func (c *Conn) find(commandName string, args []interface{}) *Cmd {\n\tfor _, cmd := range c.commands {\n\t\tif match(commandName, args, cmd) {\n\t\t\treturn cmd\n\t\t}\n\t}\n\treturn nil\n}", "func find(ops string, cmdInfo CommandInfo) bool {\n\tfor _, cmd := range cmdInfo.CmdList {\n\t\tif ops == cmd {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (app *App) findSimilarCmd(input string) []string {\n\tvar ss []string\n\t// ins := strings.Split(input, \"\")\n\t// fmt.Print(input, ins)\n\tln := len(input)\n\n\tnames := app.CmdNameMap()\n\tnames[\"help\"] = 4 // add 'help' command\n\n\t// find from command names\n\tfor name := range names {\n\t\tcln := len(name)\n\t\tif cln > ln && strings.Contains(name, input) {\n\t\t\tss = append(ss, name)\n\t\t} else if ln > cln && strings.Contains(input, name) {\n\t\t\t// sns := strings.Split(str, \"\")\n\t\t\tss = append(ss, name)\n\t\t}\n\n\t\t// max find 5 items\n\t\tif len(ss) == 5 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// find from aliases\n\tfor alias := range app.cmdAliases.Mapping() {\n\t\t// max find 5 items\n\t\tif len(ss) >= 5 {\n\t\t\tbreak\n\t\t}\n\n\t\tcln := len(alias)\n\t\tif cln > ln && strings.Contains(alias, input) {\n\t\t\tss = append(ss, alias)\n\t\t} else if ln > cln && strings.Contains(input, alias) {\n\t\t\tss = append(ss, alias)\n\t\t}\n\t}\n\n\treturn ss\n}", "func GetCommand(text string) Executable {\n\tfor _, cmd := range commandsDoc {\n\t\tregx := regexp.MustCompile(cmd.regexValidation)\n\t\tmatch := regx.FindStringSubmatch(text)\n\n\t\tif len(match) > 0 {\n\t\t\targs := make(map[string]string)\n\n\t\t\tfor i, label := range regx.SubexpNames() {\n\t\t\t\tif i > 0 && i <= len(match) 
{\n\t\t\t\t\targs[label] = match[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Feel free to improve this solution for abstraction\n\t\t\treturn cmd.instance.buildCommand(args)\n\t\t}\n\t}\n\n\treturn nil\n}", "func Search(name string) {\n\t/*\n\t\t\tif v, exist := cmdMap[name]; exist {\n\t\t\t\treturn v\n\t\t\t}\n\t\treturn nil\n\t*/\n}", "func Find(text string) (string, bool) {\n\top, found := operators[text]\n\treturn op, found\n}", "func getCommand(name string, cmds []*command) *command {\n\tfor _, cmd := range cmds {\n\t\tif cmd.name == name {\n\t\t\treturn cmd\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Command) Find(name string) *Command {\n\tfor _, cc := range c.commands {\n\t\tif cc.match(name) {\n\t\t\treturn cc\n\t\t}\n\t}\n\n\treturn nil\n}", "func getConceptBySCTID(stcId string) string {\n\turl := baseUrl + \"/browser/\" + edition + \"/\" + version + \"/concepts/\" + stcId\n\treturn lookup(url)\n\n}", "func (c *Commands) FindCommand(args []string) (cmd *Command, rest []string, err error) {\n\tvar cmds []*Command\n\tcmds, _, rest, err = c.parse(args, nil, true)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(cmds) > 0 {\n\t\tcmd = cmds[len(cmds)-1]\n\t}\n\n\treturn\n}", "func getConceptByDescription(descriptionId string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/descriptions/\" + descriptionId\n\treturn lookup(url)\n\n}", "func FindCommand(cmdline string) (*Command, *[]Command, []*Command, []string) {\n\tvar chain []*Command\n\tvar matches []Command\n\tvar next *Command\n\tcmds := &commands\n\n\t// Tokenise string.\n\tcmdstr := strings.Fields(cmdline)\n\tif len(cmdstr) == 0 {\n\t\treturn nil, cmds, chain, nil\n\t}\n\n\tfor idx, subcmd := range cmdstr {\n\t\tmatches = make([]Command, 0)\n\t\tnext = nil\n\t\tfor i := range *cmds {\n\t\t\tcmd := (*cmds)[i]\n\t\t\tif strings.HasPrefix(cmd.Command, subcmd) {\n\t\t\t\tmatches = append(matches, cmd)\n\t\t\t\tnext = &cmd\n\t\t\t}\n\t\t}\n\t\tif len(matches) == 0 {\n\t\t\t// Sub command not found.\n\t\t\treturn 
nil, nil, chain, cmdstr[idx:]\n\t\t} else if len(matches) > 1 {\n\t\t\t// Ambiguious command.\n\t\t\treturn nil, &matches, chain, cmdstr[idx:]\n\t\t}\n\t\tchain = append(chain, next)\n\t\tif next.function != nil {\n\t\t\t// We've reached a function.\n\t\t\treturn next, nil, chain, cmdstr[idx+1:]\n\t\t} else {\n\t\t\tcmds = next.Subcommands\n\t\t}\n\t}\n\tif next != nil {\n\t\treturn nil, next.Subcommands, chain, nil\n\t}\n\n\treturn nil, nil, chain, nil\n}", "func commandType(str string) cmdType {\n\n\tif str == \"newanimal\" {\n\t\treturn newAnimal\n\t}\n\treturn query\n\n}", "func TestCommandsHaveSynopsis(t *testing.T) {\n\tfor i, c := range coreCommands() {\n\t\tt.Run(fmt.Sprintf(\"test short description of command %d\", i), func(t *testing.T) {\n\t\t\tassert.NotEmpty(t, c.Short)\n\t\t})\n\t}\n}", "func (c *Command) FindCommand(name string) (*Command, bool) {\n\tfor _, command := range c.Commands {\n\t\tif command.Name == name {\n\t\t\treturn command, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func main() {\n\tt := Constructor()\n\tt.AddWord(\"bad\")\n\tt.AddWord(\"dad\")\n\tt.AddWord(\"mad\")\n\ts := \"pad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"dad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".ad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"b..\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".adx\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".ax\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"d.\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n}", "func findSubCommand(cmd *cobra.Command, subCmdName string) *cobra.Command {\n\tfor _, subCmd := range cmd.Commands() {\n\t\tuse := subCmd.Use\n\t\tif use == subCmdName || strings.HasPrefix(use, subCmdName+\" \") {\n\t\t\treturn subCmd\n\t\t}\n\t}\n\treturn nil\n}", "func IsOnCommand(msgText string, cmdList []string) (string, 
bool) {\n\t// lets find delimiter position\n\tcmdSymbIdx := strings.IndexAny(msgText, \"/\"+cmdSymbol)\n\tif cmdSymbIdx == -1 {\n\t\treturn \"\", false\n\t}\n\n\tfound := false\n\tmsg := msgText[cmdSymbIdx+len(cmdSymbol) : len(msgText)] //remove cmd string from query\n\tvar strToFind string\n\tfor _, cmd := range cmdList {\n\t\tif strings.HasPrefix(msg, cmd) {\n\t\t\tstrToFind = strings.TrimSpace(strings.TrimPrefix(msg, cmd))\n\t\t\tfound = true\n\t\t}\n\t}\n\n\treturn strToFind, found\n}", "func (cs *commandSet) match(txt string) (final Runner, prefixlen int) {\n\tcs.RLock()\n\tdefer cs.RUnlock()\n\n\tfor prefix, r := range cs.set {\n\t\tif !strings.HasPrefix(txt, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif final == nil || len(prefix) > prefixlen {\n\t\t\tprefixlen = len(prefix)\n\t\t\tfinal = r\n\t\t}\n\t}\n\treturn\n}", "func main() {\n\t// variables\n\tvar userInput string\n\n\t// Get the input\n\tfmt.Printf(\"Enter the findian string to match : \")\n\t_, err := fmt.Scan(&userInput)\n\n\t// Validate the input\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tuserInputs := strings.ToLower(userInput)\n\tif strings.HasPrefix(userInputs, \"i\") &&\n\t\tstrings.HasSuffix(userInputs, \"n\") &&\n\t\tstrings.Index(userInputs, \"a\") != -1 {\n\n\t\tfmt.Println(\"Found!\")\n\n\t} else {\n\t\tfmt.Println(\"Not Found!\")\n\t}\n\n}", "func (c *Commands) Get(name string) *Command {\n\tfor _, cmd := range c.list {\n\t\tif cmd.Name == name {\n\t\t\treturn cmd\n\t\t}\n\t\tfor _, a := range cmd.Aliases {\n\t\t\tif a == name {\n\t\t\t\treturn cmd\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func getSuggestDescription(meta *meta, args []string, suggest string) string {\n\tcommand := getCommand(meta, args, suggest)\n\tif command == nil {\n\t\treturn \"command not found\"\n\t}\n\n\tif argIsOption(suggest) {\n\t\toption := command.ArgSpecs.GetByName(optionToArgSpecName(suggest))\n\t\tif option != nil {\n\t\t\treturn option.Short\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tif 
argIsPositional(command, suggest) {\n\t\toption := command.ArgSpecs.GetPositionalArg()\n\t\tif option != nil {\n\t\t\treturn option.Short\n\t\t}\n\t\treturn \"\"\n\t}\n\n\t// Should be a command, just use command short\n\treturn command.Short\n}", "func (self manual) index(definition string) (task *Task) {\n sections := strings.Split(definition, DELIMITER)\n entries := self\n\n for _, section := range sections {\n for i := 0; i < len(entries); i++ {\n if entries[i].Label == section {\n task = entries[i]\n entries = task.manual // adjust `entries` pointer for next iteration.\n break\n }\n }\n\n if nil == task {\n return\n } else if section != task.Label {\n return nil\n }\n }\n\n return\n}", "func ContainsCmd(aa []api.Command, c api.Command) bool {\n\tfor _, v := range aa {\n\t\tif c.Parent == v.Parent && c.Usage == v.Usage {\n\t\t\treturn true\n\t\t}\n\n\t\tcoreCmd := fmt.Sprintf(\"%s_%s\", v.Parent, v.Usage)\n\t\tif c.Parent == coreCmd { // Ensures that no core commands will be added\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func getCommand(meta *meta, args []string, suggest string) *Command {\n\trawCommand := removeOptions(args)\n\tsuggestIsOption := argIsOption(suggest)\n\n\tif !suggestIsOption {\n\t\trawCommand = append(rawCommand, suggest)\n\t}\n\n\trawCommand = meta.CliConfig.Alias.ResolveAliases(rawCommand)\n\n\t// Find the closest command in case there is multiple positional arguments\n\tfor ; len(rawCommand) > 1; rawCommand = rawCommand[:len(rawCommand)-1] {\n\t\tcommand, foundCommand := meta.Commands.find(rawCommand...)\n\t\tif foundCommand {\n\t\t\treturn command\n\t\t}\n\t}\n\treturn nil\n}", "func getCommand(args []string) (*command, []string, error) {\n\tif len(args) < 2 {\n\t\treturn nil, nil, fmt.Errorf(\"Too few arguments: %q\", args)\n\t}\n\n\tfor _, c := range commands {\n\t\tif c.flag == args[1] {\n\t\t\treturn &c, args[2:], nil\n\t\t}\n\t}\n\n\t// command not found\n\treturn nil, nil, fmt.Errorf(\"Command not found: %q\", args)\n}", 
"func (doc *Document) FindOperation(name string) *Operation {\n\tfor _, defn := range doc.Definitions {\n\t\tif defn.Operation == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif name == \"\" || (defn.Operation.Name != nil && name == defn.Operation.Name.Value) {\n\t\t\treturn defn.Operation\n\t\t}\n\t}\n\treturn nil\n}", "func main() {\n // perform the search for the specified term\n search.Run(\"president\")\n}", "func getIndexOfCmdWithString(node *parse.PipeNode) (int, *parse.StringNode) {\n\tfor i, cmd := range node.Cmds {\n\t\tif len(cmd.Args) == 1 { // not sure about that, worth to be restrictive.\n\t\t\tif s, ok := cmd.Args[0].(*parse.StringNode); ok {\n\t\t\t\treturn i, s\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, nil\n}", "func (dev *Device) GetCommandByNameAndCategory(cmdName, cmdCategory string) *Command {\n\tif cmdName == \"\" && cmdCategory == \"\" {\n\t\treturn nil\n\t}\n\n\tfor _, cmd := range dev.Commands {\n\t\tif (cmd.Name == cmdName || cmdName == \"\") &&\n\t\t\t(cmd.Category == cmdCategory || cmdCategory == \"\") {\n\t\t\treturn &cmd\n\t\t}\n\t}\n\treturn nil\n}", "func (c *completer) Complete(d prompt.Document) (s []*prompt.Suggest) {\n\tbc := d.TextBeforeCursor()\n\tif bc == \"\" {\n\t\treturn nil\n\t}\n\n\t// TODO: We should consider about spaces used as a part of test.\n\targs := strings.Split(spaces.ReplaceAllString(bc, \" \"), \" \")\n\tcmdName := args[0]\n\targs = args[1:] // Ignore command name.\n\n\tcmd, ok := c.cmds[cmdName]\n\tif !ok {\n\t\t// return all commands if current input is first command name\n\t\tif len(args) == 0 {\n\t\t\t// number of commands + help\n\t\t\tcmdNames := make([]*prompt.Suggest, 0, len(c.cmds))\n\t\t\tcmdNames = append(cmdNames, prompt.NewSuggestion(\"help\", \"show help message\"))\n\t\t\tfor name, cmd := range c.cmds {\n\t\t\t\tcmdNames = append(cmdNames, prompt.NewSuggestion(name, cmd.Synopsis()))\n\t\t\t}\n\n\t\t\ts = cmdNames\n\t\t}\n\t\treturn prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)\n\t}\n\n\tdefer func() 
{\n\t\tif len(s) != 0 {\n\t\t\ts = append(s, prompt.NewSuggestion(\"--help\", \"show command help message\"))\n\t\t}\n\t\ts = prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)\n\t}()\n\n\tfs, ok := cmd.FlagSet()\n\tif ok {\n\t\tif len(args) > 0 && strings.HasPrefix(args[len(args)-1], \"-\") {\n\t\t\tfs.VisitAll(func(f *pflag.Flag) {\n\t\t\t\ts = append(s, prompt.NewSuggestion(\"--\"+f.Name, f.Usage))\n\t\t\t})\n\t\t\treturn s\n\t\t}\n\n\t\t_ = fs.Parse(args)\n\t\targs = fs.Args()\n\t}\n\n\tcompFunc, ok := c.completions[cmdName]\n\tif !ok {\n\t\treturn s\n\t}\n\treturn compFunc(args)\n}", "func Search(file string, searchString string, log *zap.SugaredLogger) (matches []string) {\n\tpat := []byte(searchString)\n\tfp := common.GetFileAbsPath(file, log)\n\tf, err := os.Open(fp)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t// start a scanner to search the operations\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tif bytes.Contains(scanner.Bytes(), pat) {\n\t\t\t// if this matches we know the string is somewhere **within a line of text**\n\t\t\t// we should split that line of text (strings.Fields) and range over those to ensure that we\n\t\t\t// don't count the entire line as the actual hit\n\t\t\t// This should be enough for yaml (althoug I imagine it would also detect stuff in comments)\n\t\t\t// but it would be madness for a json operations for example..\n\t\t\tfor _, field := range strings.Fields(scanner.Text()) {\n\t\t\t\tif bytes.Contains([]byte(field), pat) {\n\t\t\t\t\t// val := strings.Fields(scanner.Text())[1]\n\t\t\t\t\tmatches = append(matches, field)\n\t\t\t\t\t//log.Debug(scanner.Text())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Error(err)\n\t}\n\t//if len(matches) > 0 {\n\t//\tlog.Debugw(\"Search found some matches\",\n\t//\t\t\"searchString\", searchString,\n\t//\t\t\"operations\", file,\n\t//\t\t\"matches\", matches,\n\t//\t)\n\t//} else {\n\t//\tlog.Debugw(\"Search 
found no matches\",\n\t//\t\t\"searchString\", searchString,\n\t//\t\t\"operations\", file,\n\t//\t\t\"matches\", matches,\n\t//\t)\n\t//}\n\treturn matches\n}", "func (commands Commands) Get(name string) *Command {\n\tfor _, cmd := range commands {\n\t\tif cmd.HasName(name) {\n\t\t\treturn cmd\n\t\t}\n\t}\n\n\treturn nil\n}", "func UsageCommands() string {\n\treturn `neat-thing (neat-thing-today|new-neat-thing)\n`\n}", "func names_match(\np int32,/* points to the proposed match */\nid[]rune,/* the identifier*/\nt int32)bool{\nif len(name_dir[p].name)!=len(id){\nreturn false\n}\nreturn compare_runes(id,name_dir[p].name)==0\n}", "func spec(stream []string, uFeat string) []*structs.State {\n if stream == nil || uFeat == \".\" {\n return nil\n }\n uCat, _, _ := defeat(uFeat)\n found := search(stream, \"u\" + uCat, nil)\n return found\n}", "func Find(name string) (string, bool) { q, ok := queries[name]; return q, ok }", "func (r Repository) GetIdeasByString(query string) Ideas {\n\tsession, err := mgo.Dial(SERVER)\n\n\tif err != nil {\n\t\tfmt.Println(\"Failed to establish connection to Mongo server:\", err)\n\t}\n\n\tdefer session.Close()\n\n\tc := session.DB(DBNAME).C(IdeaCollection)\n\tresult := Ideas{}\n\n\t// Logic to create filter\n\tqs := strings.Split(query, \" \")\n\tand := make([]bson.M, len(qs))\n\tfor i, q := range qs {\n\t\tand[i] = bson.M{\"title\": bson.M{\n\t\t\t\"$regex\": bson.RegEx{Pattern: \".*\" + q + \".*\", Options: \"i\"},\n\t\t}}\n\t}\n\tfilter := bson.M{\"$and\": and}\n\n\tif err := c.Find(&filter).Limit(10).All(&result); err != nil {\n\t\tfmt.Println(\"Failed to write result:\", err)\n\t}\n\n\treturn result\n}", "func LookupCommand(name string) (string, error) {\n\tfor _, cmdName := range commands[name] {\n\t\t_, err := exec.LookPath(cmdName)\n\t\tif err != nil {\n\t\t\tif errors.Unwrap(err) == exec.ErrNotFound {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn cmdName, err\n\t\t}\n\t\treturn cmdName, nil\n\t}\n\treturn \"\", errors.New(\"none of 
these commands were found in your $PATH: \" + strings.Join(commands[name], \" \"))\n}", "func AddCommands() {\n\tRootCmd.AddCommand(SearchByFile)\n\tRootCmd.AddCommand(SearchByLink)\n}", "func (lr *Rule) Find(find string) []*Rule {\n\tvar res []*Rule\n\tlr.FuncDownMeFirst(0, lr.This(), func(k ki.Ki, level int, d any) bool {\n\t\tlri := k.(*Rule)\n\t\tif strings.Contains(lri.String, find) || strings.Contains(lri.Nm, find) {\n\t\t\tres = append(res, lri)\n\t\t}\n\t\treturn true\n\t})\n\treturn res\n}", "func IsCommand(cmd string) bool {\n for val := range DaemonizedCommands() {\n if val == cmd {\n return true\n }\n }\n for val := range InfoCommands() {\n if val == cmd {\n return true\n }\n }\n\n return false\n}", "func (s *Specs) findSpec(metricName string) (SpecDef, error) {\n\tvar spec SpecDef\n\n\tres := strings.SplitN(metricName, \"_\", 2)\n\tif len(res) < 2 {\n\t\treturn spec, fmt.Errorf(\"metric: %s has no suffix to identify the entity\", metricName)\n\t}\n\tserviceName := res[0]\n\n\tvar ok bool\n\tif spec, ok = s.SpecsByName[serviceName]; !ok {\n\t\treturn spec, fmt.Errorf(\"no spec files for service: %s\", serviceName)\n\t}\n\n\treturn spec, nil\n}", "func commandNotFound(c *cli.Context, cmd string) {\n\tfmt.Printf(\"%v has no commmand name '%v'\\n\", c.App.Name, cmd)\n}", "func Lookup(ident string) Token {\n\tif tok, is_keyword := directives[ident]; is_keyword {\n\t\treturn tok\n\t}\n\treturn NotFound\n}", "func (t *Tap) FindLikely(s string) string {\n\ti := len(t.programs) / 2\n\tbeg := 0\n\tend := len(t.programs)\n\tfor {\n\t\tp := t.programs[i]\n\t\tif len(s) <= len(p.Name) {\n\t\t\tif p.Name[:len(s)] == s {\n\t\t\t\t//Check for better fit.\n\t\t\t\tfor j := i; j > 0; j-- {\n\t\t\t\t\tif t.programs[j].Name == s {\n\t\t\t\t\t\treturn t.programs[j].Name\n\t\t\t\t\t}\n\t\t\t\t\tif p.Name[:len(s)] != s {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn p.Name\n\t\t\t}\n\t\t}\n\t\tif s < p.Name {\n\t\t\tend = i\n\t\t\ti = (i + beg) / 2\n\t\t} else 
{\n\t\t\tbeg = i\n\t\t\ti = (i + end) / 2\n\t\t}\n\t}\n\treturn \"\"\n}", "func foundAction(optionsStr map[string]string, target, query, ptype string) {\n\ttempCmd := strings.Fields(optionsStr[\"foundAction\"])\n\tafterCmd := strings.Join(tempCmd[1:], \" \")\n\tafterCmd = strings.ReplaceAll(afterCmd, \"@@query@@\", query)\n\tafterCmd = strings.ReplaceAll(afterCmd, \"@@target@@\", target)\n\tafterCmd = strings.ReplaceAll(afterCmd, \"@@type@@\", ptype)\n\tcmd := exec.Command(tempCmd[0], afterCmd)\n\terr := cmd.Start()\n\tif err != nil {\n\t\tprinting.DalLog(\"ERROR\", \"execution error from found-action\", optionsStr)\n\t}\n}", "func TestSynonymsDocs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tsynonymCheckTemplate := `\n\t\tpachctl {{RESOURCE_SYNONYM}} -h > synonym.txt\n\t\tpachctl {{RESOURCE}} -h > singular.txt\n\t\tdiff synonym.txt singular.txt\n\t\trm synonym.txt singular.txt\n\t`\n\n\tsynonyms := synonymsMap()\n\n\tfor resource := range synonyms {\n\t\tif resource == \"secret\" {\n\t\t\t// no help doc defined for secret yet.\n\t\t\tcontinue\n\t\t}\n\n\t\twithResource := strings.ReplaceAll(synonymCheckTemplate, \"{{RESOURCE}}\", resource)\n\t\tsynonymCommand := strings.ReplaceAll(withResource, \"{{RESOURCE_SYNONYM}}\", synonyms[resource])\n\n\t\tt.Logf(\"Testing %s -h\\n\", resource)\n\t\trequire.NoError(t, tu.BashCmd(synonymCommand).Run())\n\t}\n}", "func (c *CurrentFact) Find(what string) []interface{} {\n\tif _, ok := CurrentFactMapper[what]; ok {\n\t\treturn CurrentFactMapper[what]()\n\t}\n\treturn []interface{}{}\n}", "func UsageCommands() string {\n\treturn `organization (list|show|add|remove|update)\nstep (list|add|remove|update)\nwalkthrough (list|show|add|remove|update|rename|publish)\n`\n}", "func findInputPair(code string, target int) string {\n\tbaseMemSlice := strToIntSlice(code, Sep)\n\tfor l := 0; l < 100; l++ {\n\t\tfor r := 0; r < 100; r++ {\n\t\t\tmemToUse := append([]int(nil), 
baseMemSlice...)\n\t\t\tmemToUse[1], memToUse[2] = l, r\n\t\t\trunProgram(memToUse)\n\t\t\tif memToUse[0] == target {\n\t\t\t\treturn fmt.Sprintf(\"noun: %d\\nverb: %d\", memToUse[1], memToUse[2])\n\t\t\t}\n\t\t}\n\t}\n\tpanic(\"There is no combination that matches the target!\")\n}", "func main() {\n\tword := \"apple\"\n\tprefix := \"app\"\n\tobj := Constructor()\n\tobj.Insert(word)\n\tparam_2 := obj.Search(word)\n\tparam_3 := obj.StartsWith(prefix)\n\tfmt.Println(param_2, param_3)\n}", "func IdentifyCommand(cmds map[string]bool, args []string) string {\n\tcommandIndex := 0\n\tcommandSeen := false\n\n\tfor _, arg := range args {\n\t\tif commandSeen {\n\t\t\tbreak\n\t\t}\n\n\t\tif strings.HasPrefix(arg, \"-\") == true {\n\t\t\tcommandIndex++\n\t\t\tcontinue\n\t\t}\n\n\t\tfor cmd := range cmds {\n\t\t\tif arg == cmd {\n\t\t\t\tcommandSeen = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !commandSeen {\n\t\t\tcommandIndex++\n\t\t}\n\t}\n\n\tif !commandSeen {\n\t\treturn \"\"\n\t}\n\n\treturn args[commandIndex]\n}", "func (fn *formulaFuncs) find(name string, argsList *list.List) formulaArg {\n\tif argsList.Len() < 2 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s requires at least 2 arguments\", name))\n\t}\n\tif argsList.Len() > 3 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s allows at most 3 arguments\", name))\n\t}\n\tfindText := argsList.Front().Value.(formulaArg).Value()\n\twithinText := argsList.Front().Next().Value.(formulaArg).Value()\n\tstartNum, result := 1, 1\n\tif argsList.Len() == 3 {\n\t\tnumArg := argsList.Back().Value.(formulaArg).ToNumber()\n\t\tif numArg.Type != ArgNumber {\n\t\t\treturn numArg\n\t\t}\n\t\tif numArg.Number < 0 {\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t}\n\t\tstartNum = int(numArg.Number)\n\t}\n\tif findText == \"\" {\n\t\treturn newNumberFormulaArg(float64(startNum))\n\t}\n\tfor idx := range withinText {\n\t\tif result < startNum 
{\n\t\t\tresult++\n\t\t}\n\t\tif strings.Index(withinText[idx:], findText) == 0 {\n\t\t\treturn newNumberFormulaArg(float64(result))\n\t\t}\n\t\tresult++\n\t}\n\treturn newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n}", "func (command *Command) GetSubCmd(name string) *Command {\n\t// Sort the sub commands slice using the length of the command name\n\tsort.Slice(command.SubCommands, func(i, j int) bool {\n\t\treturn len(command.SubCommands[i].Name) > len(command.SubCommands[j].Name)\n\t})\n\n\t// Loop through all commands to find the correct one\n\tfor _, subCommand := range command.SubCommands {\n\t\t// Define the slice to check\n\t\ttoCheck := make([]string, len(subCommand.Aliases)+1)\n\t\ttoCheck = append(toCheck, subCommand.Name)\n\t\ttoCheck = append(toCheck, subCommand.Aliases...)\n\t\tsort.Slice(toCheck, func(i, j int) bool {\n\t\t\treturn len(toCheck[i]) > len(toCheck[j])\n\t\t})\n\n\t\t// Check the prefix of the string\n\t\tif stringArrayContains(toCheck, name, subCommand.IgnoreCase) {\n\t\t\treturn subCommand\n\t\t}\n\t}\n\treturn nil\n}", "func UsageCommands() string {\n\treturn `want-go (get-simple-card-list|get-card-info|post-card-info|put-card-info|delete-card-info)\n`\n}", "func (i *Intent) Matches(_ context.Context, command string) bool {\n\t// TODO Add placeholder support and return the found pairings as well\n\tif command == i.Command {\n\t\treturn true\n\t}\n\n\tfor _, cmd := range i.Alternatives {\n\t\tif cmd == command {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (l *SLexicon) GetSemantic(word string) string {\n\tl.Lock() // one at a time\n\tdefer l.Unlock()\n\n\tif val, ok := l.Semantic[word]; ok { // non case sensitive first\n\t\treturn val\n\t}\n\tlwrStr := strings.ToLower(word)\n\tstemmedWord := l.GetStem(lwrStr)\n\tif val, ok := l.Semantic[stemmedWord]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}", "func (m Model) Find(word string) *Vector {\n\tfor _, vector := range m {\n\t\tif vector.word == word 
{\n\t\t\treturn vector\n\t\t}\n\t}\n\treturn nil\n}", "func containsCommand(components []string) bool {\n\tfor _, comp := range components {\n\t\tif isCommand(comp) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestFind(t *testing.T) {\n\tregex, err := regexp.Compile(\"Set|SetValue\")\n\tif err != nil {\n\t\tt.Error(\"Regexp could not compile\")\n\t}\n\tsubject := []byte{'S', 'e', 't', 'V', 'a', 'l', 'u', 'e'}\n\tmatch := regex.Find(subject)\n\tif match == nil {\n\t\tt.Error(\"Could not find match for regexp.Find\")\n\t\treturn\n\t}\n\tstr := string(match)\n\tif str != \"Set\" {\n\t\tt.Errorf(\"Expected 'Set', got %q\", str)\n\t}\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func hints(s string) *cli.Hint {\n\tif s == \"hello\" {\n\t\t// string, color, bold\n\t\treturn &cli.Hint{\" World\", 35, false}\n\t}\n\treturn nil\n}", "func (d *DmSpecial) Find(sha1Sum string) string {\n\tlocations := d.Split()\n\treg := regexp.MustCompile(fmt.Sprintf(_regFmt, sha1Sum))\n\tfor _, location := range locations {\n\t\tif reg.MatchString(location) {\n\t\t\treturn location\n\t\t}\n\t}\n\treturn \"\"\n}", "func main() {\n\tfmt.Println(getCommandsFromFile(\"resources/not_enough_lines\"))\n\tfmt.Println(getCommandsFromFile(\"resources/version_not_a_number\"))\n\tfmt.Println(getCommandsFromFile(\"resources/invalid_json\"))\n\tfmt.Println(getCommandsFromFile(\"resources/incorrect_version\"))\n\tfmt.Println(getCommandsFromFile(\"resources/incorrect_mode\"))\n\tfmt.Println(getCommandsFromFile(\"resources/valid\"))\n}", "func (o *ordering) find(str string) *entry {\n\te := o.entryMap[str]\n\tif e == nil {\n\t\tr := []rune(str)\n\t\tif len(r) == 1 {\n\t\t\tconst (\n\t\t\t\tfirstHangul = 0xAC00\n\t\t\t\tlastHangul = 0xD7A3\n\t\t\t)\n\t\t\tif r[0] >= 
firstHangul && r[0] <= lastHangul {\n\t\t\t\tce := []rawCE{}\n\t\t\t\tnfd := norm.NFD.String(str)\n\t\t\t\tfor _, r := range nfd {\n\t\t\t\t\tce = append(ce, o.find(string(r)).elems...)\n\t\t\t\t}\n\t\t\t\te = o.newEntry(nfd, ce)\n\t\t\t} else {\n\t\t\t\te = o.newEntry(string(r[0]), []rawCE{\n\t\t\t\t\t{w: []int{\n\t\t\t\t\t\timplicitPrimary(r[0]),\n\t\t\t\t\t\tdefaultSecondary,\n\t\t\t\t\t\tdefaultTertiary,\n\t\t\t\t\t\tint(r[0]),\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\te.modified = true\n\t\t\t}\n\t\t\te.exclude = true // do not index implicits\n\t\t}\n\t}\n\treturn e\n}", "func (g GiveCommand) Matches(str string) bool {\n\treturn giveReg.MatchString(str)\n}", "func Reply(input string) string {\n\n //Passes user input into the preprocess function \n input = preprocess(input)\n\n for pattern, responses := range phrases {\n re := regexp.MustCompile(pattern)\n matches := re.FindStringSubmatch(input)\n\n if len(matches) > 0 {\n var fragment string\n if len(matches) > 1 {\n fragment = reflect(matches[1])\n }\n\n //this selects a random reponse based on the keywords \n output := randChoice(responses)\n \n //Puts input and output together so it appears smart \n if strings.Contains(output, \"%s\") {\n output = fmt.Sprintf(output, fragment)\n }\n return output\n }\n }\n\n //if no response is found it selects randomly from convoRestart\n return randChoice(convoRestart)\n}", "func (r *Router) GetCommand(name string) (c *Command) {\n\tfor _, cmd := range r.Commands {\n\t\tif strings.ToLower(cmd.Name) == strings.ToLower(name) {\n\t\t\treturn cmd\n\t\t}\n\t\tfor _, a := range cmd.Aliases {\n\t\t\tif strings.ToLower(a) == strings.ToLower(name) {\n\t\t\t\treturn cmd\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (cli *CLI) findRegisterValueByCommand(c string) any {\n\tvar value any = nil\n\tcmds := cli.cmds\n\tif v, has := cmds[c]; has {\n\t\tvalue = v\n\t} else if cli.cmdMap != nil {\n\t\tfor aCmd, mV := range cli.cmdMap {\n\t\t\tisBreak := false\n\t\t\tswitch mV.(type) 
{\n\t\t\tcase string:\n\t\t\t\tif c == mV.(string) {\n\t\t\t\t\tif v, has := cmds[aCmd]; has {\n\t\t\t\t\t\tisBreak = true\n\t\t\t\t\t\tvalue = v\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase []string:\n\t\t\t\tfor _, vs := range mV.([]string) {\n\t\t\t\t\tif c == vs {\n\t\t\t\t\t\tif v, has := cmds[aCmd]; has {\n\t\t\t\t\t\t\tisBreak = true\n\t\t\t\t\t\t\tvalue = v\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isBreak {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn value\n}", "func describe(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"describe\", s.pos()))\n}", "func TestSynonyms(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tsynonymCheckTemplate := `\n\t\tpachctl {{VERB}} {{RESOURCE_SYNONYM}} -h > synonym.txt\n\t\tpachctl {{VERB}} {{RESOURCE}} -h > singular.txt\n\t\tdiff synonym.txt singular.txt\n\t\trm synonym.txt singular.txt\n\t`\n\n\tresources := resourcesMap()\n\tsynonyms := synonymsMap()\n\n\tfor resource, verbs := range resources {\n\t\twithResource := strings.ReplaceAll(synonymCheckTemplate, \"{{RESOURCE}}\", resource)\n\t\twithResources := strings.ReplaceAll(withResource, \"{{RESOURCE_SYNONYM}}\", synonyms[resource])\n\n\t\tfor _, verb := range verbs {\n\t\t\tsynonymCommand := strings.ReplaceAll(withResources, \"{{VERB}}\", verb)\n\t\t\tt.Logf(\"Testing %s %s -h\\n\", verb, resource)\n\t\t\trequire.NoError(t, tu.BashCmd(synonymCommand).Run())\n\t\t}\n\t}\n}", "func (c ConfigCompletion) Run(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t// TODO cache the results (e.g. 
in /tmp)\n\t// TODO we're using the term 'user' for both of \"[email protected]\" here...\n\n\t// we'll compile a list of unqualified and qualified devIDs for the main user as well as the list of other users (ending with a dot)\n\t// e.g.: 'a me.a you.'\n\t// if toComplete contains a dot, only the user in question will be queried\n\tvar matchingKeys []string\n\tvar rc = cobra.ShellCompDirectiveNoFileComp\n\n\tfor k := range config.AllKeys(c.WithReadOnly) {\n\t\tif strings.HasPrefix(k, toComplete) {\n\t\t\tmatchingKeys = append(matchingKeys, k+c.Suffix)\n\t\t}\n\t}\n\n\treturn matchingKeys, rc\n}", "func ExecContainsString(command, contains string) error {\n\tstdOut, _, err := Exec(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ContainsString(stdOut, contains)\n}", "func (s *SoracomCompleter) flagSuggestions(line string) []prompt.Suggest {\n\tcommands, flags := splitToCommandsAndFlags(line) // split again...\n\tmethods, found := s.searchMethods(commands)\n\tif !found || len(methods) != 1 {\n\t\treturn []prompt.Suggest{{\n\t\t\tText: \"Error\",\n\t\t\tDescription: \"cannot find matching command\",\n\t\t}}\n\t}\n\tmethod := methods[0]\n\n\tparams := make([]param, 0) // all parameters for the method\n\tfor _, p := range method.Parameters {\n\t\tparams = append(params, param{\n\t\t\tname: strings.ReplaceAll(p.Name, \"_\", \"-\"),\n\t\t\trequired: p.Required,\n\t\t\tdescription: p.Description,\n\t\t\tparamType: p.Type,\n\t\t\tenum: p.Enum,\n\t\t})\n\t}\n\n\t// soracom-cli will augment some commands with 'fetch-all' option, which is not defined in the swagger\n\tfor _, a := range commandsWithFetchAll {\n\t\tif strings.HasPrefix(commands, a) {\n\t\t\tparams = append(params, param{\n\t\t\t\tname: \"fetch-all\",\n\t\t\t\trequired: false,\n\t\t\t\tdescription: \"Do pagination automatically.\",\n\t\t\t})\n\t\t}\n\t}\n\n\tsort.Slice(params, func(i, j int) bool {\n\t\treturn params[i].name < params[j].name\n\t})\n\n\tflagsArray := strings.Split(flags, \" 
\")\n\tlastWord := flagsArray[len(flagsArray)-1]\n\tisEnteringFlag := true\n\n\tif len(flagsArray) > 1 {\n\t\tif strings.HasPrefix(flagsArray[len(flagsArray)-2], \"--\") &&\n\t\t\t(strings.HasSuffix(line, \" \") || !strings.HasPrefix(lastWord, \"--\")) {\n\t\t\tisEnteringFlag = false\n\t\t}\n\t}\n\tif strings.HasSuffix(line, \" \") {\n\t\tisEnteringFlag = false\n\t}\n\tif len(flagsArray)%2 == 0 && !strings.HasPrefix(lastWord, \"--\") && strings.HasSuffix(line, \" \") {\n\t\tisEnteringFlag = true\n\t}\n\n\tvar lastFlag string\n\tfor i := len(flagsArray) - 1; i >= 0; i-- {\n\t\tif strings.HasPrefix(flagsArray[i], \"--\") {\n\t\t\tlastFlag = strings.ReplaceAll(flagsArray[i], \"--\", \"\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// provide flag name suggestion if user is entering flag\n\tif isEnteringFlag {\n\t\tr := make([]prompt.Suggest, 0)\n\t\tfor _, p := range params {\n\t\t\tif !contains(parseFlags(flags), lib.OptionCase(p.name)) {\n\t\t\t\trequired := \"\"\n\t\t\t\tif p.required {\n\t\t\t\t\trequired = \"(required) \"\n\t\t\t\t}\n\n\t\t\t\tr = append(r, prompt.Suggest{\n\t\t\t\t\tText: \"--\" + lib.OptionCase(p.name),\n\t\t\t\t\tDescription: required + p.description,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\treturn filterFunc(r, lastWord, prompt.FilterFuzzy)\n\t}\n\n\tif strings.HasPrefix(lastWord, \"--\") {\n\t\tlastWord = \"\"\n\t}\n\n\t// value suggestion\n\t// if last flag's value type is enum, provide possible values\n\tvar suggests []prompt.Suggest\n\tfor _, p := range params {\n\t\tif p.name == lastFlag {\n\t\t\tif len(p.enum) > 0 {\n\t\t\t\tfor _, e := range p.enum {\n\t\t\t\t\tsuggests = append(suggests, prompt.Suggest{\n\t\t\t\t\t\tText: e,\n\t\t\t\t\t\tDescription: \"\",\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(suggests) > 0 {\n\t\t\t\treturn filterFunc(suggests, lastWord, prompt.FilterFuzzy)\n\t\t\t}\n\t\t}\n\t}\n\n\t// if specific name is found, do more intelligent completion\n\tswitch lastFlag {\n\tcase \"status-filter\":\n\t\treturn 
s.statusFilterSuggestions(lastWord)\n\tcase \"speed-class-filter\":\n\t\treturn s.speedClassFilterSuggestions(lastWord)\n\tcase \"device-id\":\n\t\tif strings.HasPrefix(commands, \"device\") {\n\t\t\treturn s.inventoryDeviceIDFilterSuggestions(lastWord)\n\t\t}\n\t\tif strings.HasPrefix(commands, \"sigfox\") {\n\t\t\treturn s.sigfoxDeviceIDFilterSuggestions(lastWord)\n\t\t}\n\tcase \"imsi\":\n\t\treturn s.imsiFilterSuggestions(lastWord)\n\tcase \"order-id\":\n\t\treturn s.orderFilterSuggestions(lastWord)\n\tcase \"resource-id\": // `logs get` or `audit-logs napter get` uses 'resource-id' for imsi\n\t\treturn s.imsiFilterSuggestions(lastWord)\n\tcase \"group-id\":\n\t\treturn s.groupFilterSuggestions(lastWord)\n\t}\n\n\treturn suggests\n}", "func main() {\n\tdataStr := \"the data is big\"\n\t//words from the string\n\twords := strings.Fields(dataStr)\n\tfor c := range words{\n\t\tprintln(words[c])\n\t}\n\t//comparition\n\tprintln(\"the string comparition\" , strings.Compare(\"Gowtham\" , \"gowtham\"))\n\tprintln(\"the string contains\" , strings.ContainsAny(dataStr,\"the\"))\n\tprintln(\"the rune contains \",strings.ContainsRune(dataStr,'s'))\n\t// more operation you can see in strings package\n}", "func exampleStartWith() {\n\t// Read text from file\n\ttext, err := streeng.StringFromFile(\"pp.txt\")\n\tif err != nil {\n\t\tfmt.Println(\"String from file error:\", err.Error())\n\t\treturn\n\t}\n\n\t// split text with whitespace seperator\n\twords := strings.Fields(text)\n\n\t// Make a new Streeng\n\ts := streeng.MakeStreeng(words)\n\n\t// Match string which start with given string in streeng\n\tlistOfIndex := s.StartWith(\"sta\")\n\n\tfmt.Println(listOfIndex)\n}", "func (analyzer *Analyzer) Concepts(flavor, payload string, options url.Values) (*ConceptsResponse, error) {\n\tif !entryPoints.hasFlavor(\"concepts\", flavor) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"concepts info for %s not available\", flavor))\n\t}\n\n\toptions.Add(flavor, payload)\n\turl := 
entryPoints.urlFor(analyzer.baseUrl, \"concepts\", flavor)\n\tdata, err := analyzer.analyze(url, options, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tresponse := new(ConceptsResponse)\n\t\terr := json.Unmarshal(data, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif response.Status != \"OK\" {\n\t\t\t\treturn nil, errors.New(response.StatusInfo)\n\t\t\t} else {\n\t\t\t\treturn response, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func CompleteCommand(a ...interface{}) []string {\n\trv := []string{}\n\n\tif len(a) == 0 || comp.Line() == \"\" {\n\t\treturn rv\n\t}\n\n\tx := a[0].(*Command)\n\tword := comp.Word()\n\n\tfor k, _ := range x.Commands {\n\t\tif word == \" \" || strings.HasPrefix(k, word) {\n\t\t\trv = append(rv, k)\n\t\t}\n\t}\n\n\tfor _, k := range x.Params {\n\t\tif word == \" \" || strings.HasPrefix(k, word) {\n\t\t\trv = append(rv, k)\n\t\t}\n\t}\n\n\tsort.Strings(rv)\n\treturn rv\n}", "func UsageCommands() string {\n\treturn `foo (foo1|foo2|foo3|foo-options)\n`\n}", "func contains(s []string, e string) *string {\n\tfor _, a := range s {\n\t\tif strings.Contains(e, a) {\n\t\t\treturn &a\n\t\t}\n\t}\n\treturn nil\n}", "func findChapter(novel *Novel) {\n\tfmt.Print(\"Input chaptername(contains):\")\n\tchaptername := getInput()\n\tif chaptername != \"\" {\n\t\tfor index, chapter := range novel.Chapters {\n\t\t\tif strings.Contains(chapter.Name, chaptername) {\n\t\t\t\tfmt.Println(\"Index:\", index, \"Chapter:\", chapter.Name)\n\t\t\t}\n\t\t}\n\t}\n}", "func FindAtom(lpString string) ATOM {\n\tlpStringStr := unicode16FromString(lpString)\n\tret1 := syscall3(findAtom, 1,\n\t\tuintptr(unsafe.Pointer(&lpStringStr[0])),\n\t\t0,\n\t\t0)\n\treturn ATOM(ret1)\n}", "func (idents idents) Find(lit string) *Ident {\n\tfor _, id := range idents {\n\t\tif id.Lit != lit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &id\n\t}\n\n\treturn nil\n}", "func (p *parser) command(tok rune) *command {\n\tswitch tok {\n\tcase '(':\n\t\tcmd := 
p.command(p.Scan())\n\t\tp.demands(')')\n\t\treturn cmd\n\tcase scanner.Ident:\n\t\tcmd := &command{\n\t\t\tname: p.TokenText(),\n\t\t\tparams: p.params(),\n\t\t}\n\t\treturn cmd\n\t}\n\tp.parseError(\"command\")\n\treturn &command{}\n}", "func (s strings) Find(in []string, what string) int {\n\tfor i, entry := range in {\n\t\tif entry == what {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}", "func execmStringSliceSearch(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(sort.StringSlice).Search(args[1].(string))\n\tp.Ret(2, ret)\n}", "func (ui *UI) DynamicCommandLookup(impl interface{}) func(string) (func(), bool) {\n\tt := reflect.TypeOf(impl)\n\treturn func(name string) (func(), bool) {\n\t\tcmd := ui.GetCommand(name)\n\t\tif cmd == nil {\n\t\t\treturn nil, false\n\t\t}\n\t\tfn, found := t.MethodByName(cmd.Original)\n\t\tif !found {\n\t\t\treturn nil, found\n\t\t}\n\t\treturn fn.Func.Interface().(func()), true\n\t}\n}", "func getIndexOfCmdWithIdentifier(node *parse.PipeNode) (int, *parse.CommandNode) {\n\tfor i, cmd := range node.Cmds {\n\t\tif len(cmd.Args) > 0 {\n\t\t\tif _, ok := cmd.Args[0].(*parse.IdentifierNode); ok {\n\t\t\t\treturn i, cmd\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, nil\n}", "func (s *Sensor) Command(comm string) *Sensor {\n\tif s.err != nil {\n\t\treturn s\n\t}\n\tavail, err := s.Commands()\n\tif err != nil {\n\t\ts.err = err\n\t\treturn s\n\t}\n\tok := false\n\tfor _, c := range avail {\n\t\tif c == comm {\n\t\t\tok = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !ok {\n\t\ts.err = fmt.Errorf(\"ev3dev: command %q not available for %s (available:%q)\", comm, s, avail)\n\t\treturn s\n\t}\n\ts.err = setAttributeOf(s, command, comm)\n\treturn s\n}", "func cmdParse(commandStr string, config *Config) (*Command, error) {\n\t// TODO: criteria\n\n\tlexer := lex(commandStr)\n\tdefer lexer.drain()\n\n\tcmdToken := lexer.nextItem()\n\tif cmdToken.typ != itemString {\n\t\treturn nil, fmt.Errorf(\"expected string, got token '%s'\", 
cmdToken.val)\n\t}\n\n\t// TODO: chained commands\n\tcommand := &Command{}\n\n\tfn, ok := cmdParseTable[cmdToken.val]\n\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"command '%s' not implemented\", cmdToken.val)\n\t}\n\n\tcmd, err := fn(lexer, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcommand.Executer = cmd\n\n\treturn command, nil\n}", "func (c Clipboard) Find(s string) ([]string, error) {\n\thistory, err := c.GetHistory()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfindResult := fuzzy.RankFindFold(s, history)\n\tsort.Sort(findResult)\n\tvar rankedResult []string\n\tfor _, i := range findResult {\n\t\trankedResult = append(rankedResult, i.Target)\n\t}\n\treturn rankedResult, nil\n}", "func readCommand(args []string) (*Command, error) {\n\tvar cmd, subcmd *Command\n\tvar ok bool\n\tif len(args) == 0 {\n\t\t// No command passed in: Print usage.\n\t\treturn &Command{\n\t\t\tCmd: func(cmd *Command) error { return Usage(nil) },\n\t\t}, nil\n\t}\n\tvar name = args[0]\n\tcmd, ok = Commands[name]\n\tif !ok {\n\t\t// Command not found: Print usage.\n\t\treturn &Command{\n\t\t\tCmd: func(cmd *Command) error { return Usage(nil) },\n\t\t}, nil\n\t}\n\t// command found. 
Remove it from the argument list.\n\targs = args[1:]\n\n\tif len(cmd.children) == 0 {\n\t\treturn cmdWithFlagsChecked(cmd, args)\n\t}\n\n\t// len (cmd.children > 0)\n\n\tif len(args) == 0 {\n\t\t// Subcommands exist but none was not found in args.\n\t\t// If no main cmd is defined, return an error.\n\t\tif cmd.Cmd == nil {\n\t\t\treturn wrongOrMissingSubcommand(cmd)\n\t\t}\n\t}\n\n\t// len (cmd.children > 0) && len(args) > 0\n\n\tvar subname = args[0]\n\tsubcmd, ok = cmd.children[subname]\n\tif ok {\n\t\t// subcommand found.\n\t\targs = args[1:]\n\t\tcmd = subcmd\n\t} else {\n\t\t// no subcommand passed in, so cmd should have a Cmd to execute\n\t\treturn wrongOrMissingSubcommand(cmd)\n\t}\n\n\treturn cmdWithFlagsChecked(cmd, args)\n}", "func TestCommandsHaveUsage(t *testing.T) {\n\tfor i, c := range allCommands() {\n\t\tt.Run(fmt.Sprintf(\"test usage term of command %d\", i), func(t *testing.T) {\n\t\t\tassert.NotEmpty(t, c.Use)\n\t\t})\n\t}\n}", "func (td taskDefn) findContainerDefn(name string) *ecs.ContainerDefinition {\n\tfor _, cd := range td.ContainerDefinitions {\n\t\tif name == ptr.StringValue(cd.Name) {\n\t\t\treturn cd\n\t\t}\n\t}\n\n\treturn nil\n}", "func matchAction(ivr string) (string, error) {\n\tinput_words := strings.Split(strings.ToLower(ivr), \" \")\n\tactions := map[string]string{\n\t\t\"on\": \"On\",\n\t\t\"off\": \"Off\",\n\t}\n\n\tfor key, value := range actions {\n\t\tif contains(input_words, key) {\n\t\t\treturn value, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no matching action\")\n}", "func TestNamespacedCommands(t *testing.T) {\n\tconst contextNS = \"from-context\"\n\tconst flagNS = \"from-flag\"\n\tconst allNS = \"\"\n\n\ttestcases := []struct {\n\t\tname string\n\t\tcmd string\n\t\twantNS string\n\t}{\n\t\t{name: \"get instances with flag namespace\", cmd: \"get instances --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get instances with context namespace\", cmd: \"get instances\", wantNS: contextNS},\n\t\t{name: \"get all 
instances\", cmd: \"get instances --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe instance with flag namespace\", cmd: \"describe instance NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe instance with context namespace\", cmd: \"describe instances NAME\", wantNS: contextNS},\n\n\t\t{name: \"provision with flag namespace\", cmd: \"provision --class CLASS --plan PLAN NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"provision with context namespace\", cmd: \"provision --class CLASS --plan PLAN NAME\", wantNS: contextNS},\n\n\t\t{name: \"deprovision with flag namespace\", cmd: \"deprovision NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"deprovision with context namespace\", cmd: \"deprovision NAME\", wantNS: contextNS},\n\n\t\t{name: \"bind with flag namespace\", cmd: \"bind NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"bind with context namespace\", cmd: \"bind NAME\", wantNS: contextNS},\n\n\t\t{name: \"unbind with flag namespace\", cmd: \"unbind NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"unbind with context namespace\", cmd: \"unbind NAME\", wantNS: contextNS},\n\n\t\t{name: \"get bindings with flag namespace\", cmd: \"get bindings --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"get bindings with context namespace\", cmd: \"get bindings\", wantNS: contextNS},\n\t\t{name: \"get all bindings\", cmd: \"get bindings --all-namespaces\", wantNS: allNS},\n\n\t\t{name: \"describe binding with flag namespace\", cmd: \"describe binding NAME --namespace \" + flagNS, wantNS: flagNS},\n\t\t{name: \"describe binding with context namespace\", cmd: \"describe binding NAME\", wantNS: contextNS},\n\t}\n\n\tfor _, tc := range testcases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfakeClient := fake.NewSimpleClientset()\n\n\t\t\tcxt := newContext()\n\t\t\tcxt.App = &svcat.App{\n\t\t\t\tCurrentNamespace: contextNS,\n\t\t\t\tSDK: &servicecatalog.SDK{ServiceCatalogClient: 
fakeClient},\n\t\t\t}\n\t\t\tcxt.Output = ioutil.Discard\n\n\t\t\texecuteFakeCommand(t, tc.cmd, cxt, true)\n\n\t\t\tgotNamespace := fakeClient.Actions()[0].GetNamespace()\n\t\t\tif tc.wantNS != gotNamespace {\n\t\t\t\tt.Fatalf(\"the wrong namespace was used. WANT: %q, GOT: %q\", tc.wantNS, gotNamespace)\n\t\t\t}\n\t\t})\n\t}\n}", "func main() {\n\t//perform the search for the specific term\n\tsearch.Run(\"president\")\n}" ]
[ "0.62465984", "0.56653553", "0.56165266", "0.5520251", "0.53954554", "0.5304584", "0.52405673", "0.5180302", "0.5180043", "0.51657534", "0.51444817", "0.507597", "0.5072325", "0.50065875", "0.499081", "0.48809624", "0.4875879", "0.48348758", "0.48312184", "0.48140812", "0.47959754", "0.4771265", "0.47673306", "0.47415945", "0.47269502", "0.47205958", "0.47085872", "0.47079322", "0.4707194", "0.469089", "0.46838537", "0.46779776", "0.4677215", "0.4669643", "0.46637675", "0.4656432", "0.46234378", "0.46213153", "0.46104696", "0.460737", "0.4580416", "0.45711395", "0.45585856", "0.455539", "0.4533339", "0.45286772", "0.4514621", "0.45069143", "0.45013306", "0.44978553", "0.4491024", "0.4490652", "0.44854972", "0.44834068", "0.44832417", "0.44713673", "0.44705", "0.44669223", "0.44637", "0.4448462", "0.4440598", "0.44364893", "0.44363928", "0.44363928", "0.44292074", "0.4423546", "0.44234648", "0.44162744", "0.44092113", "0.4408317", "0.44081497", "0.44046795", "0.4403342", "0.43994513", "0.43973267", "0.4392257", "0.43882707", "0.4384409", "0.43768358", "0.4375652", "0.437226", "0.43716523", "0.43676525", "0.43665767", "0.4361795", "0.43617773", "0.43609276", "0.43584117", "0.43581516", "0.43525836", "0.43486205", "0.43480787", "0.4347841", "0.43470415", "0.43406814", "0.43398052", "0.43321925", "0.43298584", "0.43224335", "0.43217745" ]
0.6050581
1
Find a concept by description SCTID
func getConceptByDescription(descriptionId string) string { url := baseUrl + edition + "/" + version + "/descriptions/" + descriptionId return lookup(url) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getConceptBySCTID(stcId string) string {\n\turl := baseUrl + \"/browser/\" + edition + \"/\" + version + \"/concepts/\" + stcId\n\treturn lookup(url)\n\n}", "func getConceptByStringInProceduresSemanticTag(searchTerm string) string {\n\turl := baseUrl + \"/browser/\" +edition + \"/\" + version + \"/descriptions?term=\" + searchTerm + \"&conceptActive=true&semanticTag=procedure&groupByConcept=false&searchMode=STANDARD&offset=0&limit=50\"\n\treturn lookup(url)\n\n}", "func findCRDDescription(\n\tns string,\n\tclient dynamic.Interface,\n\tbssGVK schema.GroupVersionKind,\n\tcrd *unstructured.Unstructured,\n) (*olmv1alpha1.CRDDescription, error) {\n\treturn NewOLM(client, ns).SelectCRDByGVK(bssGVK, crd)\n}", "func getConceptByString(searchTerm string, resultLimit string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/concepts?term=\" + searchTerm + \"&activeFilter=true&offset=0&limit=\" + resultLimit\n\treturn lookup(url)\n}", "func FindDescription(ctx context.Context, exec boil.ContextExecutor, iD uint, selectCols ...string) (*Description, error) {\n\tdescriptionObj := &Description{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from `descriptions` where `id`=?\", sel,\n\t)\n\n\tq := queries.Raw(query, iD)\n\n\terr := q.Bind(ctx, exec, descriptionObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"models: unable to select from descriptions\")\n\t}\n\n\treturn descriptionObj, nil\n}", "func (nc *NaiveCache) GetConcept(conceptID int) (*Concept, bool) {\n\tvalue, success := nc.Get(conceptID)\n\tif !success {\n\t\treturn nil, false\n\t}\n\tconcept, success := value.(*Concept)\n\treturn concept, success\n}", "func DescriptionExists(ctx context.Context, exec boil.ContextExecutor, iD uint) (bool, error) {\n\tvar exists 
bool\n\tsql := \"select exists(select 1 from `descriptions` where `id`=? limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, iD)\n\t}\n\n\trow := exec.QueryRowContext(ctx, sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if descriptions exists\")\n\t}\n\n\treturn exists, nil\n}", "func (cts *Server) findSCT(ctx context.Context, vlog *verifiable.Log, hash []byte) (*ct.AddChainResponse, error) {\n\t// See if we have an SCT for this\n\tns, err := cts.getNs(vlog)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttsKey := append([]byte(\"sct\"), hash...)\n\n\tvar sct govpb.AddResponse\n\terr = cts.Reader.ExecuteReadOnly(ctx, ns[:], func(ctx context.Context, kr verifiable.KeyReader) error {\n\t\treturn kr.Get(ctx, tsKey, &sct)\n\t})\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase verifiable.ErrNoSuchKey:\n\t\treturn nil, verifiable.ErrNotFound\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t// Get log ID and we're done\n\tsk, err := cts.getSigningKey(ctx, vlog, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// we're done!\n\treturn &ct.AddChainResponse{\n\t\tID: sk.LogID[:],\n\t\tSCTVersion: ct.V1,\n\t\tSignature: sct.Signature,\n\t\tTimestamp: uint64(sct.Timestamp),\n\t\tExtensions: \"\",\n\t}, nil\n}", "func (nc *NaiveCache) GetDescription(descriptionID int) (*Description, bool) {\n\tvalue, success := nc.Get(descriptionID)\n\tif !success {\n\t\treturn nil, false\n\t}\n\tdescription, success := value.(*Description)\n\treturn description, success\n}", "func DescriptionContainsFold(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldContainsFold(FieldDescription, v))\n}", "func DescriptionContains(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldContains(FieldDescription, v))\n}", "func DescriptionContainsFold(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) 
{\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func (term *Terminology) SNOMEDCTtoReadV2(ctx context.Context, id *apiv1.Identifier, f func(*apiv1.Identifier) error) error {\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not parse SNOMED identifier: %w\", err)\n\t}\n\tif sctID.IsConcept() == false {\n\t\treturn fmt.Errorf(\"can map only concepts: '%d' not a concept\", sctID)\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tstream, err := term.client.CrossMap(ctx, &snomed.CrossMapRequest{\n\t\tConceptId: sctID.Integer(),\n\t\tRefsetId: 900000000000497000,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"crossmap error: %w\", err)\n\t}\n\tfor {\n\t\titem, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"crossmap error: %w\", err)\n\t\t}\n\t\terr = f(&apiv1.Identifier{\n\t\t\tSystem: identifiers.ReadV2,\n\t\t\tValue: item.GetSimpleMap().GetMapTarget(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func DescriptionContainsFold(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContainsFold(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContainsFold(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContainsFold(v string) predicate.GameServer {\n\treturn predicate.GameServer(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func (term *Terminology) Resolve(ctx context.Context, id *apiv1.Identifier) (proto.Message, error) 
{\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT: %w\", err)\n\t}\n\theader := metadata.New(map[string]string{\"accept-language\": \"en-GB\"})\n\tctx = metadata.NewOutgoingContext(ctx, header)\n\tif sctID.IsConcept() {\n\t\tec, err := term.client.GetExtendedConcept(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT concept '%d': %w\", sctID, err)\n\t\t}\n\t\treturn ec, nil\n\t}\n\tif sctID.IsDescription() {\n\t\td, err := term.client.GetDescription(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT description '%d': %w\", sctID, err)\n\t\t}\n\t\treturn d, nil\n\t}\n\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT entity '%d': only concepts and descriptions supported\", sctID)\n}", "func FindTerm(c *gin.Context) {\n\tvar ok bool\n\ttermID := c.Param(\"term_id\")\n\tif termID != \"\" {\n\t\tid := uuid.FromStringOrNil(termID)\n\t\tif id != uuid.Nil {\n\t\t\tvar term models.Term\n\t\t\terr := models.DB.Preload(\"RelatedTerms\").Model(&models.Term{}).Where(\"id = ?\", id).Take(&term).Error\n\t\t\tif err == nil {\n\t\t\t\tc.JSON(http.StatusOK, gin.H{\"data\": term})\n\t\t\t\tok = true\n\t\t\t}\n\t\t}\n\t}\n\tif !ok {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"data\": \"id not found\"})\n\t}\n}", "func DescriptionContainsFold(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContainsFold(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func 
(ds *DatabaseService) GetDescriptions(concept *Concept) ([]*Description, error) {\n\tconceptID := int(concept.ConceptID)\n\tvalue, ok := ds.descriptionCache.Get(conceptID)\n\tif ok {\n\t\treturn value.([]*Description), nil\n\t}\n\trows, err := ds.db.Query(sqlDescriptions, concept.ConceptID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdescriptions, err := rowsToDescriptions(rows)\n\tif err == nil {\n\t\tds.descriptionCache.Put(conceptID, descriptions)\n\t}\n\treturn descriptions, err\n}", "func DescriptionContainsFold(v string) predicate.Product {\n\treturn predicate.Product(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContainsFold(v string) predicate.User {\n\treturn predicate.User(sql.FieldContainsFold(FieldDescription, v))\n}", "func (ds *DatabaseService) FetchConcept(conceptID int) (*Concept, error) {\n\treturn ds.cache.GetConceptOrElse(conceptID, func(conceptID int) (interface{}, error) {\n\t\tfetched, err := ds.performFetchConcepts(conceptID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconcept := fetched[conceptID]\n\t\tif concept == nil {\n\t\t\treturn nil, fmt.Errorf(\"No concept found with identifier %d\", conceptID)\n\t\t}\n\t\treturn concept, nil\n\t})\n}", "func OutcomeOverviewDoseDescriptionContainsFold(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldOutcomeOverviewDoseDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.GameServer {\n\treturn predicate.GameServer(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func (term *Terminology) ReadV2toSNOMEDCT(ctx context.Context, id *apiv1.Identifier, f func(*apiv1.Identifier) error) error 
{\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tresponse, err := term.client.FromCrossMap(ctx, &snomed.TranslateFromRequest{S: id.GetValue(), RefsetId: 900000000000497000})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(response.GetTranslations()) == 0 {\n\t\tlog.Printf(\"no translations found for map from '%s:%s' to '%s'\", id.GetSystem(), id.GetValue(), identifiers.SNOMEDCT)\n\t}\n\tfor _, t := range response.GetTranslations() {\n\t\tref := t.GetReferenceSetItem().GetReferencedComponentId()\n\t\tif err := f(&apiv1.Identifier{System: identifiers.SNOMEDCT, Value: strconv.FormatInt(ref, 10)}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func OutcomeOverviewDoseDescriptionContains(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldOutcomeOverviewDoseDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Product {\n\treturn predicate.Product(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldDescription), v))\n\t})\n}", "func DescriptionContains(v string) predicate.User {\n\treturn predicate.User(sql.FieldContains(FieldDescription, v))\n}", "func OutcomeOverviewDescriptionContainsFold(v string) 
predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldOutcomeOverviewDescription), v))\n\t})\n}", "func (td taskDefn) findContainerDefn(name string) *ecs.ContainerDefinition {\n\tfor _, cd := range td.ContainerDefinitions {\n\t\tif name == ptr.StringValue(cd.Name) {\n\t\t\treturn cd\n\t\t}\n\t}\n\n\treturn nil\n}", "func FindCvtermsynonym(exec boil.Executor, cvtermsynonymID int, selectCols ...string) (*Cvtermsynonym, error) {\n\tcvtermsynonymObj := &Cvtermsynonym{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from \\\"cvtermsynonym\\\" where \\\"cvtermsynonym_id\\\"=$1\", sel,\n\t)\n\n\tq := queries.Raw(exec, query, cvtermsynonymID)\n\n\terr := q.Bind(cvtermsynonymObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"chado: unable to select from cvtermsynonym\")\n\t}\n\n\treturn cvtermsynonymObj, nil\n}", "func (h *HubBub) LookupTactic(id string) (Tactic, error) {\n\tt, ok := h.tactics[id]\n\tif !ok {\n\t\treturn t, fmt.Errorf(\"tactic %q is undefined - typo?\", id)\n\t}\n\tt.ID = id\n\tif len(h.reposOverride) > 0 {\n\t\tt.Repos = h.reposOverride\n\t}\n\n\tif len(t.Repos) == 0 {\n\t\tt.Repos = h.settings.Repos\n\t}\n\treturn t, nil\n}", "func Description(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func Description(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldEQ(FieldDescription, v))\n}", "func (c *Category) FindExample(name string) *Example {\n\tfor _, example := range c.Example {\n\t\tif example.Name == name {\n\t\t\treturn example\n\t\t}\n\t}\n\treturn nil\n}", "func Description(v string) predicate.GameServer {\n\treturn predicate.GameServer(func(s 
*sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func (r *SecretsResource) FindById(id string) (interface{}, error) {\n\treturn NewSecretResource(r.secrets, id, r.logger), nil\n}", "func (ts *TechStoryService) findById (w http.ResponseWriter, r *http.Request) {\n\tvar techStory model.TechStory\n\ttechStory.Key = mux.Vars(r)[\"id\"]\n\tWithTechStoryDao(func(dao techStoryDao) {\n\t\terr := dao.FindById(&techStory)\n\t\tmodel.CheckErr(err)\n\t\tmodel.WriteResponse(true, nil, techStory, w)\n\t})\n}", "func (s StoryRepository) Find(id int) (story chronicle.Story, err error) {\n\tdefer func() {\n\t\tif err != nil && err != sql.ErrNoRows {\n\t\t\terr = errors.Wrap(err, function.GetFunctionName(s.Find))\n\t\t}\n\t}()\n\n\tstory = chronicle.Story{}\n\tquery := `SELECT\n\t\t\t\t\t\t\tid,\n\t\t\t\t\t\t\ttitle, \n\t\t\t\t\t\t\tslug, \n\t\t\t\t\t\t\texcerpt, \n\t\t\t\t\t\t\tcontent,\n\t\t\t\t\t\t\treporter,\n\t\t\t\t\t\t\teditor,\n\t\t\t\t\t\t\tauthor,\n\t\t\t\t\t\t\tstatus,\n\t\t\t\t\t\t\tmedia, \n\t\t\t\t\t\t\tlikes,\n\t\t\t\t\t\t\tshares,\n\t\t\t\t\t\t\tviews,\n\t\t\t\t\t\t\tcreatedAt, \n\t\t\t\t\t\t\tupdatedAt\n\t\t\t\t\t\tFROM stories \n\t\t\t\t\t\tWHERE id=$1`\n\n\terr = s.db.Get(&story, query, id)\n\tif err != nil {\n\t\treturn chronicle.Story{}, err\n\t}\n\n\t// fill Topics\n\n\tstory.Topics, err = s.getTopicsForStory(story.ID)\n\tif err != nil {\n\t\treturn chronicle.Story{}, err\n\t}\n\n\treturn story, nil\n}", "func (*CodeSystem_ConceptDefinition) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r4_core_resources_code_system_proto_rawDescGZIP(), []int{0, 5}\n}", "func Description(v string) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func FindContainerByShortID(cli client.ContainerAPIClient, id string) (Container, error) {\n\tcontainers, err := cliContainerList(cli)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tfor i, container := range containers {\n\t\tif strings.HasPrefix(container.ID, id) {\n\t\t\treturn makeContainer(&containers[i]), nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func OutcomeOverviewDescriptionContains(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldOutcomeOverviewDescription), v))\n\t})\n}", "func FindStockCvterm(exec boil.Executor, stockCvtermID int, selectCols ...string) (*StockCvterm, error) {\n\tstockCvtermObj := &StockCvterm{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from \\\"stock_cvterm\\\" where \\\"stock_cvterm_id\\\"=$1\", sel,\n\t)\n\n\tq := queries.Raw(exec, query, stockCvtermID)\n\n\terr := q.Bind(stockCvtermObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"chado: unable to select from stock_cvterm\")\n\t}\n\n\treturn stockCvtermObj, nil\n}", "func (*GetConceptRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{30}\n}", "func Description(value string) *SimpleElement { return newSEString(\"description\", value) }", "func (a *Client) GetLTENetworkIDDescription(params *GetLTENetworkIDDescriptionParams) (*GetLTENetworkIDDescriptionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetLTENetworkIDDescriptionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetLTENetworkIDDescription\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/lte/{network_id}/description\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: 
&GetLTENetworkIDDescriptionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetLTENetworkIDDescriptionOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*GetLTENetworkIDDescriptionDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (idents idents) Find(lit string) *Ident {\n\tfor _, id := range idents {\n\t\tif id.Lit != lit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &id\n\t}\n\n\treturn nil\n}", "func Description(v string) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func (vcd *TestVCD) Test_FindCatalog(check *C) {\n\t// Find Catalog\n\tcat, err := vcd.org.FindCatalog(vcd.config.VCD.Catalog.Name)\n\tcheck.Assert(err, IsNil)\n\tcheck.Assert(cat.Catalog.Name, Equals, vcd.config.VCD.Catalog.Name)\n\t// checks if user gave a catalog description in config file\n\tif vcd.config.VCD.Catalog.Description != \"\" {\n\t\tcheck.Assert(cat.Catalog.Description, Equals, vcd.config.VCD.Catalog.Description)\n\t}\n}", "func (handle *DBHandle) FindImageDescriptionByID(id uint) (*ImageDescription, error) {\n\timageDescription := ImageDescription{}\n\tretreived, err := handle.retreiveWithAttribute(\"id\", id, &imageDescription, \"Categories\")\n\treturn retreived.(*ImageDescription), err\n}", "func (api *distributedservicecardAPI) Find(meta *api.ObjectMeta) (*DistributedServiceCard, error) {\n\t// find the object\n\tobj, err := api.ct.FindObject(\"DistributedServiceCard\", meta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// asset type\n\tswitch obj.(type) {\n\tcase *DistributedServiceCard:\n\t\thobj := obj.(*DistributedServiceCard)\n\t\treturn hobj, nil\n\tdefault:\n\t\treturn nil, 
errors.New(\"incorrect object type\")\n\t}\n}", "func (t *TST) Find(s string) bool {\n\treturn t.Get(s) != nil\n}", "func getRouteFromDescription(trip *TripSchedule, routes []RouteOption) (RouteOption, error) {\n\tif len(routes) == 0 {\n\t\treturn RouteOption{}, errors.New(\"No routes\")\n\t}\n\tfiltered := []RouteOption{}\n\t// Find all routes with the same descriptions as the trip\n\tfor _, r := range routes {\n\t\tif r.Description == trip.Route.Description {\n\t\t\tfiltered = append(filtered, r)\n\t\t}\n\t}\n\t// If there aren't any then find one close to arrival time\n\tif len(filtered) == 0 {\n\t\treturn findRouteClosestToArrival(GetArrivalTime(trip), routes), nil\n\t}\n\t// If there are multiple with the same description then choose the\n\t// one closest to arrival of the filtered list\n\tif len(filtered) > 1 {\n\t\treturn findRouteClosestToArrival(GetArrivalTime(trip), filtered), nil\n\t}\n\treturn filtered[0], nil\n}", "func Description(v string) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func Find(id string) (*Label, error) {\n\tlabel, ok := labelsIdx.Get(id)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"label %q not found\", id)\n\t}\n\treturn label.(*Label), nil\n}", "func Description(v string) predicate.Product {\n\treturn predicate.Product(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func (analyzer *Analyzer) Concepts(flavor, payload string, options url.Values) (*ConceptsResponse, error) {\n\tif !entryPoints.hasFlavor(\"concepts\", flavor) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"concepts info for %s not available\", flavor))\n\t}\n\n\toptions.Add(flavor, payload)\n\turl := entryPoints.urlFor(analyzer.baseUrl, \"concepts\", flavor)\n\tdata, err := analyzer.analyze(url, options, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tresponse := new(ConceptsResponse)\n\t\terr := json.Unmarshal(data, &response)\n\t\tif err != 
nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif response.Status != \"OK\" {\n\t\t\t\treturn nil, errors.New(response.StatusInfo)\n\t\t\t} else {\n\t\t\t\treturn response, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *TeamworkTag) GetDescription()(*string) {\n val, err := m.GetBackingStore().Get(\"description\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (t *Todo) Find(id int) string {\n\treturn t.todos[id]\n}", "func (o InferenceClusterOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InferenceCluster) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput)\n}", "func (a *Client) PutLTENetworkIDDescription(params *PutLTENetworkIDDescriptionParams) (*PutLTENetworkIDDescriptionNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutLTENetworkIDDescriptionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PutLTENetworkIDDescription\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/lte/{network_id}/description\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PutLTENetworkIDDescriptionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*PutLTENetworkIDDescriptionNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*PutLTENetworkIDDescriptionDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func Description(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), 
v))\n\t})\n}", "func Description(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldDescription), v))\n\t})\n}", "func GetDescsByID(c echo.Context) error {\n\tvar desc db.Descs\n\tid, _ := strconv.Atoi(c.Param(\"id\"))\n\t//config.DB.Where(\"id = ?\", id).Delete(&admin)\n\n\tif err := config.DB.Where(\"id= ?\", id).Find(&desc).Error; err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, err.Error())\n\t}\n\treturn c.JSON(http.StatusOK, map[string]interface{}{\n\t\t\"message\": \"Berhasil Menampilkan Deskripsi Smartphone \",\n\t\t\"descs\": desc,\n\t})\n}", "func (ds *DatabaseService) GetPreferredDescription(concept *Concept) (*Description, error) {\n\treturn ds.GetPreferredDescriptionForLanguages(concept, []language.Tag{ds.language})\n}", "func (m *RoleDefinition) GetDescription()(*string) {\n return m.description\n}", "func (*ListConceptsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{31}\n}", "func (s *Specs) findSpec(metricName string) (SpecDef, error) {\n\tvar spec SpecDef\n\n\tres := strings.SplitN(metricName, \"_\", 2)\n\tif len(res) < 2 {\n\t\treturn spec, fmt.Errorf(\"metric: %s has no suffix to identify the entity\", metricName)\n\t}\n\tserviceName := res[0]\n\n\tvar ok bool\n\tif spec, ok = s.SpecsByName[serviceName]; !ok {\n\t\treturn spec, fmt.Errorf(\"no spec files for service: %s\", serviceName)\n\t}\n\n\treturn spec, nil\n}", "func (api *dscprofileAPI) Find(meta *api.ObjectMeta) (*DSCProfile, error) {\n\t// find the object\n\tobj, err := api.ct.FindObject(\"DSCProfile\", meta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// asset type\n\tswitch obj.(type) {\n\tcase *DSCProfile:\n\t\thobj := obj.(*DSCProfile)\n\t\treturn hobj, nil\n\tdefault:\n\t\treturn nil, errors.New(\"incorrect object type\")\n\t}\n}", "func (o LookupAnnotationSpecSetResultOutput) Description() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
LookupAnnotationSpecSetResult) string { return v.Description }).(pulumi.StringOutput)\n}", "func (i SNSSubscribeAttribute) Description() string {\n\tif val, ok := _SNSSubscribeAttributeValueToDescriptionMap[i]; ok {\n\t\t// found\n\t\treturn val\n\t} else {\n\t\t// not found\n\t\treturn \"\"\n\t}\n}", "func (c *SmartThingsClient) DeviceDescription(deviceID string) (*Device, error) {\n\n\treq, err := c.newRequest(http.MethodGet, \"/v1/devices/\"+deviceID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar device Device\n\t_, err = c.do(req, &device)\n\treturn &device, err\n}", "func GetRecipeDetail(ctx context.Context, db *gorm.DB, slug string) (*model.Recipe, error) {\n\tif slug == \"\" {\n\t\treturn nil, errInvalidRecipeSlug\n\t}\n\ttx := db.Begin()\n\tr := models.Recipe{}\n\ttx = tx.Find(&r, \"slug = ?\", slug)\n\tif err := tx.Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif ok, err := r.IsValid(false); !ok || err != nil {\n\t\treturn nil, errInvalidRecipeSlug\n\t}\n\trecipe, err := model.BuildRecipe(&r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn recipe, nil\n}", "func getOccurrence(occurrenceID, projectID string) (*grafeaspb.Occurrence, error) {\n\t// occurrenceID := path.Base(occurrence.Name)\n\tctx := context.Background()\n\tclient, err := containeranalysis.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\treq := &grafeaspb.GetOccurrenceRequest{\n\t\tName: fmt.Sprintf(\"projects/%s/occurrences/%s\", projectID, occurrenceID),\n\t}\n\tocc, err := client.GetGrafeasClient().GetOccurrence(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client.GetOccurrence: %v\", err)\n\t}\n\treturn occ, nil\n}", "func findID(root *Container, id string) (*Container, error) {\n\tif id == \"\" {\n\t\treturn nil, errors.New(\"the container ID must not be empty\")\n\t}\n\n\tvar (\n\t\terrStr string\n\t\tcont *Container\n\t)\n\tpreOrder(root, &errStr, visitFunc(func(c *Container) error 
{\n\t\tif c.opts.id == id {\n\t\t\tcont = c\n\t\t}\n\t\treturn nil\n\t}))\n\tif cont == nil {\n\t\treturn nil, fmt.Errorf(\"cannot find container with ID %q\", id)\n\t}\n\treturn cont, nil\n}", "func (t *SegmentService) Find(id string) (Segment, error) {\n\treturn t.Repository.find(id)\n}", "func describe(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"describe\", s.pos()))\n}", "func DescriptionEQ(v string) predicate.Task {\n\treturn predicate.Task(sql.FieldEQ(FieldDescription, v))\n}", "func (p Program) Find(id uint) Program {\n\t//Preload preloads structs - Creates a SQL query pr. Preload. Should be fixed in Gorm V2.\n\tif err := db.Model(p).\n\t\tPreload(\"Production\").\n\t\tPreload(\"Category\").\n\t\tPreload(\"Genres\").\n\t\tPreload(\"Serie\").\n\t\tPreload(\"Season\").\n\t\tPreload(\"Credits\").\n\t\tPreload(\"Credits.Persons\").\n\t\tPreload(\"Credits.CreditGroup\").\n\t\tWhere(\"id = ?\", id).\n\t\tFirst(&p).Error; err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn p\n}", "func (kc *k8sCluster) findDeployment(c context.Context, namespace, name string) (*kates.Deployment, error) {\n\tdep := &kates.Deployment{\n\t\tTypeMeta: kates.TypeMeta{Kind: \"Deployment\"},\n\t\tObjectMeta: kates.ObjectMeta{Name: name, Namespace: namespace},\n\t}\n\tif err := kc.client.Get(c, dep, dep); err != nil {\n\t\treturn nil, err\n\t}\n\treturn dep, nil\n}", "func OutcomeOverviewDoseDescriptionEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldOutcomeOverviewDoseDescription), v))\n\t})\n}", "func (cli *Client) GetSpec(id string) (*spec.Spec, error) {\n\treq, err := http.NewRequest(\"GET\", cli.endpoint+\"/spec/\"+id, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+cli.token)\n\tresp, err := cli.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK 
{\n\t\tmsg, _ := ioutil.ReadAll(resp.Body)\n\t\treturn nil, errors.New(\"http error: \" + strconv.Itoa(resp.StatusCode) + \" \" + string(msg))\n\t}\n\tresult := &spec.Spec{}\n\tdecoder := json.NewDecoder(resp.Body)\n\terr = decoder.Decode(result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func (c *Course) getDescription() error {\n\t// locks while requesting\n\thttpSemaphore <- 1\n\tresp, err := http.Get(c.BulletinURL)\n\tdefer resp.Body.Close()\n\t<-httpSemaphore\n\n\t// check for errors\n\tif err != nil {\n\t\tc.BulletinURL = \"\"\n\t\tlog.Printf(\"Error getting bulletin page, %s => %s\", c.BulletinURL, err.Error())\n\t\treturn fmt.Errorf(\"HTTP error querying bulletin for course, %s, %s\", c.Course, err.Error())\n\t} else if resp.StatusCode/100 != 2 {\n\t\tc.BulletinURL = \"\"\n\t\tlog.Printf(\"%d error getting bulletin page, %s\", resp.StatusCode, c.BulletinURL)\n\t\treturn fmt.Errorf(\"Error querying bulletin for course, %s\", c.Course)\n\t}\n\n\t// read in then sanitize description\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading in page (%s) body => %s\", c.BulletinURL, err.Error())\n\t}\n\n\t// parse the page for the description\n\tcourseDesc := parsePage(bodyBytes)\n\tif courseDesc == \"\" { // set to 'no description' if there is not one\n\t\tc.Description = \"no description\"\n\t\treturn nil\n\t}\n\tc.Description = courseDesc\n\treturn nil\n}", "func (g generator) buildCRDDescriptionFromType(gvk schema.GroupVersionKind, kindType *markers.TypeInfo) (v1alpha1.CRDDescription, error) {\n\n\t// Initialize the description.\n\tdescription := v1alpha1.CRDDescription{\n\t\tDescription: kindType.Doc,\n\t\tDisplayName: k8sutil.GetDisplayName(gvk.Kind),\n\t\tVersion: gvk.Version,\n\t\tKind: gvk.Kind,\n\t}\n\n\t// Parse resources and displayName from the kind type's markers.\n\tfor _, markers := range kindType.Markers {\n\t\tfor _, marker := range markers {\n\t\t\tswitch d := marker.(type) 
{\n\t\t\tcase Description:\n\t\t\t\tif d.DisplayName != \"\" {\n\t\t\t\t\tdescription.DisplayName = d.DisplayName\n\t\t\t\t}\n\t\t\t\tif len(d.Resources) != 0 {\n\t\t\t\t\trefs, err := d.Resources.toResourceReferences()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn v1alpha1.CRDDescription{}, err\n\t\t\t\t\t}\n\t\t\t\t\tdescription.Resources = append(description.Resources, refs...)\n\t\t\t\t}\n\t\t\tcase crdmarkers.Resource:\n\t\t\t\tif d.Path != \"\" {\n\t\t\t\t\tdescription.Name = fmt.Sprintf(\"%s.%s\", d.Path, gvk.Group)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// The default, if the resource marker's path value is not set, is to use a pluralized form of lowercase kind.\n\tif description.Name == \"\" {\n\t\tdescription.Name = fmt.Sprintf(\"%s.%s\", inflect.Pluralize(strings.ToLower(gvk.Kind)), gvk.Group)\n\t}\n\tsortDescription(description.Resources)\n\n\t// Find spec and status in the kind type.\n\tspec, err := findChildForDescType(kindType, specDescType)\n\tif err != nil {\n\t\treturn v1alpha1.CRDDescription{}, err\n\t}\n\tstatus, err := findChildForDescType(kindType, statusDescType)\n\tif err != nil {\n\t\treturn v1alpha1.CRDDescription{}, err\n\t}\n\n\t// Find annotated fields of spec and parse them into specDescriptors.\n\tmarkedFields, err := g.getMarkedChildrenOfField(spec)\n\tif err != nil {\n\t\treturn v1alpha1.CRDDescription{}, err\n\t}\n\tspecDescriptors := []v1alpha1.SpecDescriptor{}\n\tfor _, fields := range markedFields {\n\t\tfor _, field := range fields {\n\t\t\tif descriptor, include := field.toSpecDescriptor(); include {\n\t\t\t\tspecDescriptors = append(specDescriptors, descriptor)\n\t\t\t}\n\t\t}\n\t}\n\tsortDescriptors(specDescriptors)\n\tdescription.SpecDescriptors = specDescriptors\n\n\t// Find annotated fields of status and parse them into statusDescriptors.\n\tmarkedFields, err = g.getMarkedChildrenOfField(status)\n\tif err != nil {\n\t\treturn v1alpha1.CRDDescription{}, err\n\t}\n\tstatusDescriptors := []v1alpha1.StatusDescriptor{}\n\tfor _, 
fields := range markedFields {\n\t\tfor _, field := range fields {\n\t\t\tif descriptor, include := field.toStatusDescriptor(); include {\n\t\t\t\tstatusDescriptors = append(statusDescriptors, descriptor)\n\t\t\t}\n\t\t}\n\t}\n\tsortDescriptors(statusDescriptors)\n\tdescription.StatusDescriptors = statusDescriptors\n\n\treturn description, nil\n}", "func OutcomeOverviewDoseDescription(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldOutcomeOverviewDoseDescription), v))\n\t})\n}", "func Description(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldDescription, v))\n}", "func (o StudioOutput) Description() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Studio) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput)\n}", "func Close(id string) (*Dispute, error) {\n\treturn getC().Close(id)\n}", "func DescriptionIn(vs ...string) predicate.Task {\n\treturn predicate.Task(sql.FieldIn(FieldDescription, vs...))\n}", "func (c *SituationClient) Get(ctx context.Context, id int) (*Situation, error) {\n\treturn c.Query().Where(situation.ID(id)).Only(ctx)\n}" ]
[ "0.72120947", "0.6018156", "0.60081214", "0.59020984", "0.58825856", "0.53835267", "0.5287975", "0.5265803", "0.51992404", "0.51827973", "0.51138985", "0.51096994", "0.51034886", "0.50864065", "0.5048157", "0.50262374", "0.50160176", "0.50133085", "0.5013291", "0.49646744", "0.49646744", "0.49579015", "0.49521664", "0.4931864", "0.4908365", "0.4894063", "0.48828587", "0.4855128", "0.4843237", "0.48379672", "0.48116434", "0.48088253", "0.47570708", "0.47570708", "0.47507203", "0.47498566", "0.4732898", "0.47130066", "0.4708016", "0.4702723", "0.46745571", "0.46603784", "0.4658668", "0.46496356", "0.46304777", "0.4611008", "0.45995292", "0.4596234", "0.4577444", "0.4568706", "0.4568687", "0.454598", "0.45237982", "0.45188075", "0.4515242", "0.44962868", "0.44899768", "0.44881418", "0.44638255", "0.44595972", "0.44563743", "0.44480586", "0.44476584", "0.4440123", "0.44309765", "0.44296852", "0.44264632", "0.44261023", "0.44193706", "0.44170526", "0.4416273", "0.44129032", "0.44129032", "0.4409351", "0.44089043", "0.44030854", "0.43930635", "0.43885902", "0.4382072", "0.437151", "0.43618155", "0.43606478", "0.43577042", "0.43565568", "0.4356362", "0.43551624", "0.43509242", "0.43503404", "0.4344066", "0.43436673", "0.43414146", "0.4338637", "0.43336475", "0.43335256", "0.43304917", "0.43284628", "0.43269306", "0.43225428", "0.43222234", "0.43146977" ]
0.7609221
0
Find a concept by concept STCID
func getConceptBySCTID(stcId string) string { url := baseUrl + "/browser/" + edition + "/" + version + "/concepts/" + stcId return lookup(url) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getConceptByString(searchTerm string, resultLimit string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/concepts?term=\" + searchTerm + \"&activeFilter=true&offset=0&limit=\" + resultLimit\n\treturn lookup(url)\n}", "func getConceptFromService(svc *AggregateService, ctx context.Context, conceptUUID string, bookmark string) (transform.OldAggregatedConcept, string, error) {\n\tc, tid, err := svc.GetConcordedConcept(ctx, conceptUUID, bookmark)\n\tif err != nil {\n\t\treturn transform.OldAggregatedConcept{}, \"\", err\n\t}\n\told, err := transform.ToOldAggregateConcept(c)\n\tif err != nil {\n\t\treturn transform.OldAggregatedConcept{}, \"\", err\n\t}\n\tsort.Strings(old.Aliases)\n\tsort.Strings(old.FormerNames)\n\treturn old, tid, nil\n}", "func (nc *NaiveCache) GetConcept(conceptID int) (*Concept, bool) {\n\tvalue, success := nc.Get(conceptID)\n\tif !success {\n\t\treturn nil, false\n\t}\n\tconcept, success := value.(*Concept)\n\treturn concept, success\n}", "func getConceptByStringInProceduresSemanticTag(searchTerm string) string {\n\turl := baseUrl + \"/browser/\" +edition + \"/\" + version + \"/descriptions?term=\" + searchTerm + \"&conceptActive=true&semanticTag=procedure&groupByConcept=false&searchMode=STANDARD&offset=0&limit=50\"\n\treturn lookup(url)\n\n}", "func (cts *Server) findSCT(ctx context.Context, vlog *verifiable.Log, hash []byte) (*ct.AddChainResponse, error) {\n\t// See if we have an SCT for this\n\tns, err := cts.getNs(vlog)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttsKey := append([]byte(\"sct\"), hash...)\n\n\tvar sct govpb.AddResponse\n\terr = cts.Reader.ExecuteReadOnly(ctx, ns[:], func(ctx context.Context, kr verifiable.KeyReader) error {\n\t\treturn kr.Get(ctx, tsKey, &sct)\n\t})\n\tswitch err {\n\tcase nil:\n\t\t// continue\n\tcase verifiable.ErrNoSuchKey:\n\t\treturn nil, verifiable.ErrNotFound\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\t// Get log ID and we're done\n\tsk, err := cts.getSigningKey(ctx, vlog, 
false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// we're done!\n\treturn &ct.AddChainResponse{\n\t\tID: sk.LogID[:],\n\t\tSCTVersion: ct.V1,\n\t\tSignature: sct.Signature,\n\t\tTimestamp: uint64(sct.Timestamp),\n\t\tExtensions: \"\",\n\t}, nil\n}", "func getConceptByDescription(descriptionId string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/descriptions/\" + descriptionId\n\treturn lookup(url)\n\n}", "func (term *Terminology) SNOMEDCTtoReadV2(ctx context.Context, id *apiv1.Identifier, f func(*apiv1.Identifier) error) error {\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not parse SNOMED identifier: %w\", err)\n\t}\n\tif sctID.IsConcept() == false {\n\t\treturn fmt.Errorf(\"can map only concepts: '%d' not a concept\", sctID)\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tstream, err := term.client.CrossMap(ctx, &snomed.CrossMapRequest{\n\t\tConceptId: sctID.Integer(),\n\t\tRefsetId: 900000000000497000,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"crossmap error: %w\", err)\n\t}\n\tfor {\n\t\titem, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"crossmap error: %w\", err)\n\t\t}\n\t\terr = f(&apiv1.Identifier{\n\t\t\tSystem: identifiers.ReadV2,\n\t\t\tValue: item.GetSimpleMap().GetMapTarget(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func _getSubjectIndex(id uint64, subject string) (uint64, bool) {\n\tbook := _getBook(id)\n\n\t// search for the subject\n\tfor i, v := range(book.Subjects){\n\t\tif v == subject{\n\t\t\treturn uint64(i), true\n\t\t}\n\t}\n\t\n\t// subject not found\n\treturn 0, false\n}", "func (ds *DatabaseService) FetchConcept(conceptID int) (*Concept, error) {\n\treturn ds.cache.GetConceptOrElse(conceptID, func(conceptID int) (interface{}, error) {\n\t\tfetched, err := 
ds.performFetchConcepts(conceptID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconcept := fetched[conceptID]\n\t\tif concept == nil {\n\t\t\treturn nil, fmt.Errorf(\"No concept found with identifier %d\", conceptID)\n\t\t}\n\t\treturn concept, nil\n\t})\n}", "func (idents idents) Find(lit string) *Ident {\n\tfor _, id := range idents {\n\t\tif id.Lit != lit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &id\n\t}\n\n\treturn nil\n}", "func (s *BaseCGListener) EnterConceptid(ctx *ConceptidContext) {}", "func getOccurrence(occurrenceID, projectID string) (*grafeaspb.Occurrence, error) {\n\t// occurrenceID := path.Base(occurrence.Name)\n\tctx := context.Background()\n\tclient, err := containeranalysis.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewClient: %v\", err)\n\t}\n\tdefer client.Close()\n\n\treq := &grafeaspb.GetOccurrenceRequest{\n\t\tName: fmt.Sprintf(\"projects/%s/occurrences/%s\", projectID, occurrenceID),\n\t}\n\tocc, err := client.GetGrafeasClient().GetOccurrence(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client.GetOccurrence: %v\", err)\n\t}\n\treturn occ, nil\n}", "func (kc *k8sCluster) findSvc(namespace, name string) *kates.Service {\n\tvar svcCopy *kates.Service\n\tkc.accLock.Lock()\n\tif watcher, ok := kc.watchers[namespace]; ok {\n\t\tfor _, svc := range watcher.Services {\n\t\t\tif svc.Namespace == namespace && svc.Name == name {\n\t\t\t\tsvcCopy = svc.DeepCopy()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tkc.accLock.Unlock()\n\treturn svcCopy\n}", "func FindCvtermsynonym(exec boil.Executor, cvtermsynonymID int, selectCols ...string) (*Cvtermsynonym, error) {\n\tcvtermsynonymObj := &Cvtermsynonym{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from \\\"cvtermsynonym\\\" where \\\"cvtermsynonym_id\\\"=$1\", sel,\n\t)\n\n\tq := queries.Raw(exec, query, cvtermsynonymID)\n\n\terr 
:= q.Bind(cvtermsynonymObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"chado: unable to select from cvtermsynonym\")\n\t}\n\n\treturn cvtermsynonymObj, nil\n}", "func (term *Terminology) Resolve(ctx context.Context, id *apiv1.Identifier) (proto.Message, error) {\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT: %w\", err)\n\t}\n\theader := metadata.New(map[string]string{\"accept-language\": \"en-GB\"})\n\tctx = metadata.NewOutgoingContext(ctx, header)\n\tif sctID.IsConcept() {\n\t\tec, err := term.client.GetExtendedConcept(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT concept '%d': %w\", sctID, err)\n\t\t}\n\t\treturn ec, nil\n\t}\n\tif sctID.IsDescription() {\n\t\td, err := term.client.GetDescription(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT description '%d': %w\", sctID, err)\n\t\t}\n\t\treturn d, nil\n\t}\n\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT entity '%d': only concepts and descriptions supported\", sctID)\n}", "func GetSnatPolicyCRFromPod(c client.Client, pod *corev1.Pod) (aciv1.SnatPolicy, error) {\n\tfoundSnatPolicy := &aciv1.SnatPolicy{}\n\tlocalInfo, err := GetLocalInfoCR(c, pod.Spec.NodeName, os.Getenv(\"ACI_SNAT_NAMESPACE\"))\n\tif err == nil {\n\t\tif len(localInfo.Spec.LocalInfos) > 0 {\n\t\t\tif _, ok := localInfo.Spec.LocalInfos[string(pod.ObjectMeta.UID)]; ok {\n\t\t\t\tsnatPolicyName := localInfo.Spec.LocalInfos[string(pod.ObjectMeta.UID)].SnatPolicyName\n\t\t\t\t*foundSnatPolicy, err = GetSnatPolicyCR(c, snatPolicyName)\n\t\t\t\tif err != nil && errors.IsNotFound(err) {\n\t\t\t\t\tlog.Error(err, \"not matching snatpolicy\")\n\t\t\t\t\treturn *foundSnatPolicy, nil\n\t\t\t\t} else if err 
!= nil {\n\t\t\t\t\treturn *foundSnatPolicy, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn *foundSnatPolicy, nil\n}", "func (term *Terminology) ReadV2toSNOMEDCT(ctx context.Context, id *apiv1.Identifier, f func(*apiv1.Identifier) error) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tresponse, err := term.client.FromCrossMap(ctx, &snomed.TranslateFromRequest{S: id.GetValue(), RefsetId: 900000000000497000})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(response.GetTranslations()) == 0 {\n\t\tlog.Printf(\"no translations found for map from '%s:%s' to '%s'\", id.GetSystem(), id.GetValue(), identifiers.SNOMEDCT)\n\t}\n\tfor _, t := range response.GetTranslations() {\n\t\tref := t.GetReferenceSetItem().GetReferencedComponentId()\n\t\tif err := f(&apiv1.Identifier{System: identifiers.SNOMEDCT, Value: strconv.FormatInt(ref, 10)}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (analyzer *Analyzer) Concepts(flavor, payload string, options url.Values) (*ConceptsResponse, error) {\n\tif !entryPoints.hasFlavor(\"concepts\", flavor) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"concepts info for %s not available\", flavor))\n\t}\n\n\toptions.Add(flavor, payload)\n\turl := entryPoints.urlFor(analyzer.baseUrl, \"concepts\", flavor)\n\tdata, err := analyzer.analyze(url, options, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tresponse := new(ConceptsResponse)\n\t\terr := json.Unmarshal(data, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif response.Status != \"OK\" {\n\t\t\t\treturn nil, errors.New(response.StatusInfo)\n\t\t\t} else {\n\t\t\t\treturn response, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *moovWatchmanClient) Search(ctx context.Context, name string, requestID string) (*watchman.OfacSdn, error) {\n\tindividualSearch, err := c.ofacSearch(ctx, name, \"individual\", requestID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tentitySearch, err := 
c.ofacSearch(ctx, name, \"entity\", requestID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearch := highestOfacSearchMatch(individualSearch, entitySearch)\n\n\tif search == nil || (len(search.SDNs) == 0 && len(search.AltNames) == 0) {\n\t\treturn nil, nil // Nothing found\n\t}\n\n\t// We prefer to return the SDN, but if there's an AltName with a higher match return that instead.\n\tif len(search.SDNs) > 0 && len(search.AltNames) == 0 {\n\t\treturn &search.SDNs[0], nil // return SDN as it was all we got\n\t}\n\t// Take an Alt and find the SDN for it if that was the highest match\n\tif len(search.SDNs) == 0 && len(search.AltNames) > 0 {\n\t\talt := search.AltNames[0]\n\t\tc.logger.Log(fmt.Sprintf(\"Found AltName=%s,SDN=%s with no higher matched SDNs\", alt.AlternateID, alt.EntityID))\n\t\treturn c.altToSDN(ctx, search.AltNames[0], requestID)\n\t}\n\t// AltName matched higher than SDN names, so return the SDN of the matched AltName\n\tif len(search.SDNs) > 0 && len(search.AltNames) > 0 && (search.AltNames[0].Match > 0.1) && search.AltNames[0].Match > search.SDNs[0].Match {\n\t\talt := search.AltNames[0]\n\t\tc.logger.Log(fmt.Sprintf(\"AltName=%s,SDN=%s had higher match than SDN=%s\", alt.AlternateID, alt.EntityID, search.SDNs[0].EntityID))\n\t\treturn c.altToSDN(ctx, alt, requestID)\n\t}\n\t// Return the SDN as Alts matched lower\n\tif len(search.SDNs) > 0 {\n\t\treturn &search.SDNs[0], nil\n\t}\n\n\treturn nil, nil // Nothing found\n}", "func (r *templateRouter) FindServiceUnit(id string) (v ServiceUnit, ok bool) {\n\tv, ok = r.state[id]\n\treturn\n}", "func FindTerm(c *gin.Context) {\n\tvar ok bool\n\ttermID := c.Param(\"term_id\")\n\tif termID != \"\" {\n\t\tid := uuid.FromStringOrNil(termID)\n\t\tif id != uuid.Nil {\n\t\t\tvar term models.Term\n\t\t\terr := models.DB.Preload(\"RelatedTerms\").Model(&models.Term{}).Where(\"id = ?\", id).Take(&term).Error\n\t\t\tif err == nil {\n\t\t\t\tc.JSON(http.StatusOK, gin.H{\"data\": term})\n\t\t\t\tok = 
true\n\t\t\t}\n\t\t}\n\t}\n\tif !ok {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"data\": \"id not found\"})\n\t}\n}", "func GetCSDCOProj(identity string) *sparql.Results {\n\t// repo, err := sparql.NewRepo(\"http://data.oceandrilling.org/sparql\",\n\trepo, err := getJena()\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t}\n\n\tf := bytes.NewBufferString(projdetails)\n\tbank := sparql.LoadBank(f)\n\n\t// q, err := bank.Prepare(\"my-query\", struct{ Limit, Offset int }{10, 100})\n\tq, err := bank.Prepare(\"csdcoproj\", struct{ ID string }{identity})\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tlog.Println(q)\n\n\tres, err := repo.Query(q)\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\treturn res\n}", "func GetSingleClinicID(c *gin.Context) {\n\tlog.Infof(\"Get all clinics associated with admin\")\n\tctx := c.Request.Context()\n\taddressID := c.Param(\"addressId\")\n\tif addressID == \"\" {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusBadRequest,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: \"clinic address id not provided\",\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\t_, _, gproject, err := getUserDetails(ctx, c.Request)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tctx, span := trace.StartSpan(ctx, \"Get all clinics associated with admin\")\n\tdefer span.End()\n\tclinicMetaDB := datastoredb.NewClinicMetaHandler()\n\terr = clinicMetaDB.InitializeDataBase(ctx, gproject)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\n\tregisteredClinics, err := clinicMetaDB.GetSingleClinic(ctx, addressID)\n\tif err != nil 
{\n\t\tc.AbortWithStatusJSON(\n\t\t\thttp.StatusInternalServerError,\n\t\t\tgin.H{\n\t\t\t\tconstants.RESPONSE_JSON_DATA: nil,\n\t\t\t\tconstants.RESPONSDE_JSON_ERROR: err.Error(),\n\t\t\t},\n\t\t)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\tconstants.RESPONSE_JSON_DATA: registeredClinics,\n\t\tconstants.RESPONSDE_JSON_ERROR: nil,\n\t})\n\n\tclinicMetaDB.Close()\n}", "func getSpiffeIDFromCSR(csr []byte) (spiffeID string, err error) {\n\tvar parsedCSR *x509.CertificateRequest\n\tif parsedCSR, err = x509.ParseCertificateRequest(csr); err != nil {\n\t\treturn spiffeID, err\n\t}\n\n\tvar uris []string\n\turis, err = uri.GetURINamesFromExtensions(&parsedCSR.Extensions)\n\n\tif len(uris) != 1 {\n\t\treturn spiffeID, errors.New(\"The CSR must have exactly one URI SAN\")\n\t}\n\tspiffeID = uris[0]\n\n\treturn spiffeID, nil\n}", "func (f *File) CSect(name string) []byte {\n\tfor _, sym := range f.Symbols {\n\t\tif sym.Name == name && sym.AuxCSect.SymbolType == XTY_SD {\n\t\t\tif i := sym.SectionNumber - 1; 0 <= i && i < len(f.Sections) {\n\t\t\t\ts := f.Sections[i]\n\t\t\t\tif sym.Value+uint64(sym.AuxCSect.Length) <= s.Size {\n\t\t\t\t\tdat := make([]byte, sym.AuxCSect.Length)\n\t\t\t\t\t_, err := s.sr.ReadAt(dat, int64(sym.Value))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\treturn dat\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}", "func lookupCanonID(\n\tacct string,\n\tsess *session.Session) (string, error) {\n\n\tlog.Println(\"Looking up canonical id for \", acct)\n\tsvc := s3.New(sess)\n\tcount.Incr(\"aws-listbuckets-canon\")\n\tbObj, err := svc.ListBuckets(&s3.ListBucketsInput{})\n\tif err != nil {\n\t\tlogCountErr(err, \"listBuckets failed\"+acct)\n\t\treturn \"\", err\n\t}\n\treturn parseBucketList(bObj)\n\n}", "func Lookup(ident string) Token {\n\tif tok, is_keyword := directives[ident]; is_keyword {\n\t\treturn tok\n\t}\n\treturn NotFound\n}", "func resourceUcsServiceProfileRead(d *schema.ResourceData, meta 
interface{}) error {\n\tc := meta.(*ucsclient.UCSClient)\n\n\terr := withSession(c, func(client *ucsclient.UCSClient) error {\n\t\tclient.Logger.Debug(\"Entering resourceUcsServiceProfileRead(...)\\n\")\n\n\t\t//1. Query the UCS for the profile\n\t\tdn := d.Get(\"dn\").(string)\n\t\tsp, err := client.ConfigResolveDN(dn)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the service profile could not be found we assume that it does not exist anymore\n\t\t// We tell Terraform so by setting its id to a blank string.\n\t\tif sp == nil {\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\t// Fetch vNIC info from ResourceData\n\t\tvNicsFromResourceData := fetchVnicsFromResourceData(d)\n\n\t\t// Merge the UCS vNIC info with the ResourceData vNIC info\n\t\tvnics := mergeVnics(vNicsFromResourceData, sp.VNICs)\n\n\t\t// Update the information related to the service profile fetched from UCS in Terraform.\n\t\td.Set(\"name\", sp.Name)\n\t\td.Set(\"service_profile_template\", sp.Template)\n\t\td.Set(\"target_org\", sp.TargetOrg)\n\t\td.Set(\"vNIC\", vnics)\n\n\t\td.SetConnInfo(map[string]string{\n\t\t\t\"type\": \"ssh\",\n\t\t\t\"host\": d.Get(\"vNIC.0.ip\").(string),\n\t\t})\n\n\t\tclient.Logger.Debug(\"Exiting resourceUcsServiceProfileRead(...)\\n\")\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func sts(r AirflowResource, component string, suffix string, svc bool) *appsv1.StatefulSet {\n\tname, labels, matchlabels := nameAndLabels(r, component, suffix, true)\n\tsvcName := \"\"\n\tif svc {\n\t\tsvcName = name\n\t}\n\n\treturn &appsv1.StatefulSet{\n\t\tObjectMeta: r.getMeta(name, labels),\n\t\tSpec: appsv1.StatefulSetSpec{\n\t\t\tServiceName: svcName,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: matchlabels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: r.getAnnotations(),\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: 
corev1.PodSpec{\n\t\t\t\t\tAffinity: r.getAffinity(),\n\t\t\t\t\tNodeSelector: r.getNodeSelector(),\n\t\t\t\t\tSubdomain: name,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func GetConcepts(data string) (ConceptResult, error) {\n\tresult := ConceptResult{}\n\terr := Request(BuildRequest(\"Concepts\", data), &result)\n\n\treturn result, err\n}", "func GetSnatPolicyCR(c client.Client, policyName string) (aciv1.SnatPolicy, error) {\n\n\tfoundSnatPolicy := &aciv1.SnatPolicy{}\n\terr := c.Get(context.TODO(), types.NamespacedName{Name: policyName, Namespace: os.Getenv(\"WATCH_NAMESPACE\")}, foundSnatPolicy)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlog.V(1).Info(\"SnatPolicy not present for\", \"NodeName: \", policyName)\n\t\treturn aciv1.SnatPolicy{}, err\n\t} else if err != nil {\n\t\treturn aciv1.SnatPolicy{}, err\n\t}\n\n\treturn *foundSnatPolicy, nil\n}", "func (h *HubBub) LookupTactic(id string) (Tactic, error) {\n\tt, ok := h.tactics[id]\n\tif !ok {\n\t\treturn t, fmt.Errorf(\"tactic %q is undefined - typo?\", id)\n\t}\n\tt.ID = id\n\tif len(h.reposOverride) > 0 {\n\t\tt.Repos = h.reposOverride\n\t}\n\n\tif len(t.Repos) == 0 {\n\t\tt.Repos = h.settings.Repos\n\t}\n\treturn t, nil\n}", "func findTraefikTCPServiceByName(spec *traefik.RouteTCP, name string) *traefik.ServiceTCP {\n\tfor _, serviceCandidate := range spec.Services {\n\t\tif serviceCandidate.Name == name {\n\t\t\treturn &serviceCandidate\n\t\t}\n\t}\n\treturn nil\n}", "func findCRDDescription(\n\tns string,\n\tclient dynamic.Interface,\n\tbssGVK schema.GroupVersionKind,\n\tcrd *unstructured.Unstructured,\n) (*olmv1alpha1.CRDDescription, error) {\n\treturn NewOLM(client, ns).SelectCRDByGVK(bssGVK, crd)\n}", "func ReadSID(sid map[string]string) (sidLine map[string]string, err error) {\n sidMap := sid[\"sid\"]\n uuidMap := sid[\"uuid\"]\n path, err := ndb.GetRulesetPath(uuidMap)\n data, err := os.Open(path)\n if err != nil {\n logs.Error(\"File reading error: %s\", err.Error())\n return\n }\n\n var 
validID = regexp.MustCompile(`sid:\\s?` + sidMap + `;`)\n scanner := bufio.NewScanner(data)\n for scanner.Scan() {\n if validID.MatchString(scanner.Text()) {\n sidLine := make(map[string]string)\n sidLine[\"raw\"] = scanner.Text()\n return sidLine, err\n }\n }\n return nil, err\n}", "func (td taskDefn) findContainerDefn(name string) *ecs.ContainerDefinition {\n\tfor _, cd := range td.ContainerDefinitions {\n\t\tif name == ptr.StringValue(cd.Name) {\n\t\t\treturn cd\n\t\t}\n\t}\n\n\treturn nil\n}", "func (d *Definition) Search(pattern string) Resource {\n\tresource := make(chan Resource)\n\ttree := d.ResourceTree\n\n\tgo func() {\n\t\tdefer close(resource)\n\t\ttree.Traverse(func(r Resource) {\n\t\t\tresource <- r\n\t\t})\n\t}()\n\n\tfor resourceWanted := range resource {\n\t\tpattern := fmt.Sprint(d.Context, \"/\", pattern)\n\t\tif resourceWanted.ID() == pattern {\n\t\t\treturn resourceWanted\n\t\t}\n\t}\n\n\treturn nil\n}", "func FindStockCvterm(exec boil.Executor, stockCvtermID int, selectCols ...string) (*StockCvterm, error) {\n\tstockCvtermObj := &StockCvterm{}\n\n\tsel := \"*\"\n\tif len(selectCols) > 0 {\n\t\tsel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), \",\")\n\t}\n\tquery := fmt.Sprintf(\n\t\t\"select %s from \\\"stock_cvterm\\\" where \\\"stock_cvterm_id\\\"=$1\", sel,\n\t)\n\n\tq := queries.Raw(exec, query, stockCvtermID)\n\n\terr := q.Bind(stockCvtermObj)\n\tif err != nil {\n\t\tif errors.Cause(err) == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.Wrap(err, \"chado: unable to select from stock_cvterm\")\n\t}\n\n\treturn stockCvtermObj, nil\n}", "func (c eeCtx) streq(a, b string) bool {\r\n\tif c.streqfn != nil {\r\n\t\treturn c.streqfn(a, b)\r\n\t}\r\n\tif c.strcmpfn != nil {\r\n\t\treturn c.strcmpfn(a, b) == 0\r\n\t}\r\n\treturn a == b\r\n}", "func rcFindVar(p *TCompiler, code *TCode) (*value.Value, error) {\n\tname := p.Consts.Get(code.B).ToString()\n\tv, _ := 
p.sys.Scopes.Find(name)\n\tp.regSet(code.A, v)\n\tp.moveNext()\n\treturn v, nil\n}", "func SandboxID(spec specs.Spec) (string, error) {\n\tfor _, key := range CRISandboxNameKeyList {\n\t\tsandboxID, ok := spec.Annotations[key]\n\t\tif ok {\n\t\t\treturn sandboxID, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"Could not find sandbox ID\")\n}", "func (t *TST) Find(s string) bool {\n\treturn t.Get(s) != nil\n}", "func (s *Specs) findSpec(metricName string) (SpecDef, error) {\n\tvar spec SpecDef\n\n\tres := strings.SplitN(metricName, \"_\", 2)\n\tif len(res) < 2 {\n\t\treturn spec, fmt.Errorf(\"metric: %s has no suffix to identify the entity\", metricName)\n\t}\n\tserviceName := res[0]\n\n\tvar ok bool\n\tif spec, ok = s.SpecsByName[serviceName]; !ok {\n\t\treturn spec, fmt.Errorf(\"no spec files for service: %s\", serviceName)\n\t}\n\n\treturn spec, nil\n}", "func (ds *DatabaseService) Genericise(concept *Concept, generics map[Identifier]*Concept) (*Concept, bool) {\n\tpaths, err := ds.PathsToRoot(concept)\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\tvar bestPath []*Concept\n\tbestPos := -1\n\tfor _, path := range paths {\n\t\tfor i, concept := range path {\n\t\t\tif generics[concept.ConceptID] != nil {\n\t\t\t\tif i > 0 && (bestPos == -1 || bestPos > i) {\n\t\t\t\t\tbestPos = i\n\t\t\t\t\tbestPath = path\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif bestPos == -1 {\n\t\treturn nil, false\n\t}\n\treturn bestPath[bestPos], true\n}", "func lookup(s *types.Scope, name string) *types.Struct {\n\tif o := s.Lookup(name); o != nil {\n\t\tu := o.Type().Underlying()\n\t\tif s, ok := u.(*types.Struct); ok {\n\t\t\treturn s\n\t\t}\n\t}\n\tfor i := 0; i < s.NumChildren(); i++ {\n\t\ts := lookup(s.Child(i), name)\n\t\tif s != nil {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn nil\n}", "func (s CertStore) FindBySubjectId(thumb string) []Cert {\n\tbThumb, err := hex.DecodeString(thumb)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tvar hashBlob C.CRYPT_HASH_BLOB\n\thashBlob.cbData = 
C.DWORD(len(bThumb))\n\tbThumbPtr := C.CBytes(bThumb)\n\tdefer C.free(bThumbPtr)\n\thashBlob.pbData = (*C.BYTE)(bThumbPtr)\n\treturn s.findCerts(C.CERT_FIND_KEY_IDENTIFIER, unsafe.Pointer(&hashBlob))\n}", "func getSdcGuidLabel(plug *sioPlugin) (string, error) {\n\tnodeLabels, err := plug.host.GetNodeLabels()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlabel, ok := nodeLabels[sdcGuidLabelName]\n\tif !ok {\n\t\tglog.V(4).Info(log(\"node label %s not found\", sdcGuidLabelName))\n\t\treturn \"\", nil\n\t}\n\n\tglog.V(4).Info(log(\"found node label %s=%s\", sdcGuidLabelName, label))\n\treturn label, nil\n}", "func TestGetDeviceBySdsAttribute(t *testing.T) {\n\tsds := getAllSds(t)[0]\n\tassert.NotNil(t, sds)\n\tif sds == nil {\n\t\treturn\n\t}\n\n\tdevices := getAllSdsDevices(t)\n\tassert.NotNil(t, devices)\n\tassert.NotZero(t, len(devices))\n\tif devices == nil {\n\t\treturn\n\t}\n\n\tfound, err := sds.FindDevice(\"Name\", devices[0].Device.Name)\n\tassert.Nil(t, err)\n\tassert.NotNil(t, found)\n\tassert.Equal(t, devices[0].Device.Name, found.Name)\n\n\tfound, err = sds.FindDevice(\"ID\", devices[0].Device.ID)\n\tassert.Nil(t, err)\n\tassert.NotNil(t, found)\n\tassert.Equal(t, devices[0].Device.ID, found.ID)\n}", "func hasIDC(idc string) bool {\n\tm, _ := idcRegion.Load().(map[string]string)\n\t_, ok := m[idc]\n\treturn ok\n}", "func (nc *NaiveCache) PutConcept(conceptID int, concept *Concept) {\n\tnc.Put(conceptID, concept)\n}", "func TaxIDContainsFold(v string) predicate.Watchlist {\n\treturn predicate.Watchlist(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldTaxID), v))\n\t})\n}", "func (l *SLexicon) GetSemantic(word string) string {\n\tl.Lock() // one at a time\n\tdefer l.Unlock()\n\n\tif val, ok := l.Semantic[word]; ok { // non case sensitive first\n\t\treturn val\n\t}\n\tlwrStr := strings.ToLower(word)\n\tstemmedWord := l.GetStem(lwrStr)\n\tif val, ok := l.Semantic[stemmedWord]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}", "func 
getClaimFrom(claim string, src *simplejson.Json) interface{} {\n\t_, err := jp.ParseString(claim)\n\tif err != nil {\n\t\treturn src.Get(claim).Interface()\n\t}\n\tclaimParts := strings.Split(claim, \".\")\n\treturn src.GetPath(claimParts...).Interface()\n}", "func WatsonIDContainsFold(v string) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldWatsonID), v))\n\t})\n}", "func GetSts(t *testing.T, k8client client.Client, stsName string) (*appsv1.StatefulSet, error) {\n\tsts := &appsv1.StatefulSet{}\n\tns := \"default\"\n\terr := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: ns, Name: stsName}, sts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sts doesnt exist: %v\", err)\n\t}\n\n\treturn sts, nil\n}", "func (mgr *Mgr) FindSPByEntryID(selector *SPSelector, entryID uint32) (*SPValue, bool) {\n\tmgr.lock.Lock()\n\tdefer mgr.lock.Unlock()\n\n\tif selector == nil {\n\t\treturn nil, false\n\t}\n\n\tif vrf, ok := mgr.vrfs[selector.VRFIndex]; ok {\n\t\tif v, ok := vrf.spdByEntryID[entryID]; ok {\n\t\t\treturn v.Copy(), ok\n\t\t}\n\t}\n\n\treturn nil, false\n}", "func getCDS() types.Resource {\n\treturn types.Resource(\n\t\t&api.Cluster{\n\t\t\tName: someClusterName,\n\t\t\tLbPolicy: api.Cluster_ROUND_ROBIN, // as of grpc-go 1.32.x it's the only option\n\t\t\tClusterDiscoveryType: &api.Cluster_Type{Type: api.Cluster_EDS}, // points to EDS\n\t\t\tEdsClusterConfig: &api.Cluster_EdsClusterConfig{\n\t\t\t\tEdsConfig: &core.ConfigSource{\n\t\t\t\t\tConfigSourceSpecifier: &core.ConfigSource_Ads{}, // as of grpc-go 1.32.x it's the only option for DS config source\n\t\t\t\t\tInitialFetchTimeout: &durationpb.Duration{Seconds: 0, Nanos: 0},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n}", "func (api *dscprofileAPI) Find(meta *api.ObjectMeta) (*DSCProfile, error) {\n\t// find the object\n\tobj, err := api.ct.FindObject(\"DSCProfile\", meta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// asset type\n\tswitch 
obj.(type) {\n\tcase *DSCProfile:\n\t\thobj := obj.(*DSCProfile)\n\t\treturn hobj, nil\n\tdefault:\n\t\treturn nil, errors.New(\"incorrect object type\")\n\t}\n}", "func (r *AWSGatewayReconciler) SearchResourceID(input util.TerraVars) util.TerraVars {\n\toutput := input\n\n\tif input.VPCName != \"\" && input.VPCID == \"\" {\n\t\tvpc := &terraformv1alpha1.AWSVPC{}\n\t\tr.Get(context.TODO(), types.NamespacedName{Name: input.VPCName, Namespace: input.Namespace}, vpc)\n\t\toutput.VPCID = vpc.Spec.ID\n\t}\n\n\treturn output\n}", "func (nc *NaiveCache) GetConceptOrElse(conceptID int, f func(conceptID int) (interface{}, error)) (*Concept, error) {\n\tv, err := nc.GetOrElse(conceptID, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn v.(*Concept), nil\n}", "func (p *BaseServiceClient) FindPatientById(id string) (r *domain.Patient, err error) {\n if err = p.sendFindPatientById(id); err != nil { return }\n return p.recvFindPatientById()\n}", "func (s *Session) GetClusterByMoID(ctx context.Context, moID string) (*object.ClusterComputeResource, error) {\n\tref := types.ManagedObjectReference{Type: \"ClusterComputeResource\", Value: moID}\n\to, err := s.Finder.ObjectReference(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn o.(*object.ClusterComputeResource), nil\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func (r *templateRouter) findMatchingServiceUnit(id ServiceUnitKey) (ServiceUnit, bool) {\n\tv, ok := r.serviceUnits[id]\n\treturn v, ok\n}", "func findStored(cd CRDTData, db *badger.DB) (*vvmap.Map, bool) {\n\n\tcrdt := vvmap.New(vvmap.ID(cd.UserId), DefaultResolver)\n\n\tvar valCopy []byte\n\terr := db.View(func(txn *badger.Txn) error {\n\t\titem, err := txn.Get([]byte(cd.N3id))\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tvalCopy, err = item.ValueCopy(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn crdt, false\n\t}\n\n\tdecoded, err := DecodeCRDT(valCopy)\n\tif err != nil {\n\t\treturn crdt, false\n\t}\n\n\treturn decoded, true\n\n}", "func (t *SimpleChaincode) readStudent(APIstub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar name, jsonResp string\n\tvar err error\n\n\tif len(args) <= 0 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting name of the name to query\")\n\t}\n\n\tname = args[0]\n\tvalAsbytes, err := APIstub.GetState(name)\n\tif err != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + name + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t} else if valAsbytes == nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Student does not exist: \" + name + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t}\n\treturn shim.Success(valAsbytes)\n}", "func getSvcID(k8sAPI *k8s.API, clusterIP string, log *logging.Entry) (*watcher.ServiceID, error) {\n\tobjs, err := k8sAPI.Svc().Informer().GetIndexer().ByIndex(watcher.PodIPIndex, clusterIP)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Unknown, err.Error())\n\t}\n\tservices := make([]*corev1.Service, 0)\n\tfor _, obj := range objs {\n\t\tservice := obj.(*corev1.Service)\n\t\tservices = append(services, service)\n\t}\n\tif len(services) > 1 {\n\t\tconflictingServices := []string{}\n\t\tfor _, service := range services {\n\t\t\tconflictingServices = append(conflictingServices, fmt.Sprintf(\"%s:%s\", service.Namespace, service.Name))\n\t\t}\n\t\tlog.Warnf(\"found conflicting %s cluster IP: %s\", clusterIP, strings.Join(conflictingServices, \",\"))\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"found %d services with conflicting cluster IP %s\", len(services), clusterIP)\n\t}\n\tif len(services) == 0 {\n\t\treturn nil, nil\n\t}\n\tservice := &watcher.ServiceID{\n\t\tNamespace: 
services[0].Namespace,\n\t\tName: services[0].Name,\n\t}\n\treturn service, nil\n}", "func (t *SegmentService) Find(id string) (Segment, error) {\n\treturn t.Repository.find(id)\n}", "func ServiceInDcByTag(tag, name, dc string) (Address, error) {\n\tsrvs, err := srv(tag, name, dc)\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\tsrv := srvs[rand.Intn(len(srvs))]\n\treturn srv, nil\n}", "func RtpAttribIdFind(attrib string) RtpAttrib {\n\treturn 0\n}", "func doesCRExist(kind string, csvs *olm.ClusterServiceVersionList) (olm.ClusterServiceVersion, error) {\n\tfor _, csv := range csvs.Items {\n\t\tfor _, operatorCR := range csv.Spec.CustomResourceDefinitions.Owned {\n\t\t\tif kind == operatorCR.Kind {\n\t\t\t\treturn csv, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn olm.ClusterServiceVersion{}, errors.New(\"could not find the requested cluster resource\")\n\n}", "func (r *Registry) FindService(pdb *db.PostgresDB, req rentities.ServiceRequest) (*rentities.ServiceInfo, error) {\n\t/*sList, okName := r.ServicesMap[req.TName]\n\tif okName != true {\n\t\treturn nil, fmt.Errorf(\"FindService error: service not exist\")\n\t}\n\n\t//get min load instance\n\tminLoad := sList[0].Quality.Load\n\tminIndex := 0\n\tfor i, ri := range sList {\n\t\tif ri.Version == req.Version {\n\t\t\tif minLoad >= ri.Quality.Load {\n\t\t\t\tminIndex = i\n\t\t\t}\n\t\t}\n\t}\n\t*/\n\tsrv, err := pdb.FindMinLoadSrv(req.TName, req.Version)\n\tri, err := rentities.NewServiceInfo(srv.TName, srv.IID, srv.IP, srv.Version, config.DefaultTTL)\n\t/*srv, err := rentities.NewServiceInfo(sList[minIndex].TName, sList[minIndex].IID,\n\tsList[minIndex].IP, sList[minIndex].Version, config.DefaultTTL)*/\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ri, nil\n}", "func RentIDContainsFold(v string) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldRentID), v))\n\t})\n}", "func (api *distributedservicecardAPI) Find(meta *api.ObjectMeta) 
(*DistributedServiceCard, error) {\n\t// find the object\n\tobj, err := api.ct.FindObject(\"DistributedServiceCard\", meta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// asset type\n\tswitch obj.(type) {\n\tcase *DistributedServiceCard:\n\t\thobj := obj.(*DistributedServiceCard)\n\t\treturn hobj, nil\n\tdefault:\n\t\treturn nil, errors.New(\"incorrect object type\")\n\t}\n}", "func (r *SmscSessionRepository) FindById(ID string) (*openapi.SmscSession, error) {\n\tentity := openapi.NewSmscSessionWithDefaults()\n\terr := app.BuntDBInMemory.View(func(tx *buntdb.Tx) error {\n\t\tvalue, err := tx.Get(SMSC_SESSION_PREFIX + ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := json.Unmarshal([]byte(value), entity); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\treturn entity, err\n}", "func ConnIDContainsFold(v string) predicate.OfflineSession {\n\treturn predicate.OfflineSession(sql.FieldContainsFold(FieldConnID, v))\n}", "func TaxIDContainsFold(v string) predicate.Watchlisthistory {\n\treturn predicate.Watchlisthistory(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldTaxID), v))\n\t})\n}", "func (a *annotator) getName(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tuparams := r.URL.Query()\n\n\ttermParam := uparams.Get(\"term\")\n\tif termParam == \"\" {\n\t\thttp.Error(w, fmt.Sprintf(\"missing 'term' query parameter\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfromParam, err := uintValue(w, uparams, \"from\", 0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsizeParam, err := uintValue(w, uparams, \"size\", 10)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttype occ struct {\n\t\tId int `json:\"id\"`\n\t\tSource string `json:\"source\"`\n\t\tControlAccess bool `json:\"controlAccess\"`\n\t\tAnnotated bool `json:\"annotated\"`\n\t}\n\n\thits := a.byInput[termParam]\n\tif hits == nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tcontrolAccessTally := 0\n\toccurs := make([]occ, 
len(hits))\n\n\ta.mu.RLock()\n\tfor i, index := range hits {\n\t\titem := a.occs[index]\n\t\tif item.ControlAccess {\n\t\t\tcontrolAccessTally++\n\t\t}\n\t\toccurs[i] = occ{\n\t\t\tId: index,\n\t\t\tSource: item.Id,\n\t\t\tControlAccess: item.ControlAccess,\n\t\t\tAnnotated: !a.todo.Contains(index),\n\t\t}\n\t}\n\ta.mu.RUnlock()\n\n\tsort.Slice(occurs, func(i, j int) bool {\n\t\tif occurs[i].ControlAccess == occurs[j].ControlAccess {\n\t\t\treturn occurs[i].Id < occurs[j].Id\n\t\t}\n\t\tif occurs[i].ControlAccess {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\n\tfrom, upto := clampRange(fromParam, sizeParam, len(occurs))\n\n\twriteJSON(w, struct {\n\t\tTerm string `json:\"@term\"`\n\t\tFrom int `json:\"@from\"`\n\t\tSize int `json:\"@size\"`\n\t\tTotal int `json:\"total\"`\n\t\tTally int `json:\"inControlAccess\"`\n\t\tOccurs []occ `json:\"occurences\"`\n\t}{\n\t\ttermParam,\n\t\tfromParam,\n\t\tsizeParam,\n\t\tlen(occurs),\n\t\tcontrolAccessTally,\n\t\toccurs[from:upto],\n\t})\n}", "func (c *SituationClient) Get(ctx context.Context, id int) (*Situation, error) {\n\treturn c.Query().Where(situation.ID(id)).Only(ctx)\n}", "func (unit *Unit) FindObj(fullName string) IObject {\n\tif ind, ok := unit.NameSpace[fullName]; ok {\n\t\treturn unit.VM.Objects[ind&NSIndex]\n\t}\n\treturn nil\n}", "func (c *SymptomClient) Get(ctx context.Context, id int) (*Symptom, error) {\n\treturn c.Query().Where(symptom.ID(id)).Only(ctx)\n}", "func (c *Clusters) GetClusterSpec(cluster string) (*ClusterSpec, error) {\n\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"uninitialized clusters object\")\n\t}\n\n\tNameOrId := strings.ToLower(cluster)\n\n\tfor _, it := range c.Clusters {\n\t\tif it.ClusterName == NameOrId || it.Id == NameOrId {\n\t\t\tglog.Infof(\"Found cluster %v cluster id %v\", NameOrId, it.Id)\n\t\t\treturn &it, nil\n\t\t}\n\t}\n\n\treturn nil, &ClusterNotFound{ErrMsg: cluster}\n}", "func (atl Atlas) Get(rtid uintptr) (*AtlasEntry, bool) {\n\tent, ok := 
atl.mappings[rtid]\n\treturn ent, ok\n}", "func (kc *k8sCluster) findDeployment(c context.Context, namespace, name string) (*kates.Deployment, error) {\n\tdep := &kates.Deployment{\n\t\tTypeMeta: kates.TypeMeta{Kind: \"Deployment\"},\n\t\tObjectMeta: kates.ObjectMeta{Name: name, Namespace: namespace},\n\t}\n\tif err := kc.client.Get(c, dep, dep); err != nil {\n\t\treturn nil, err\n\t}\n\treturn dep, nil\n}", "func GetSource(theMap core.Element, trans *core.Transaction) core.Element {\n\tref := theMap.GetFirstOwnedReferenceRefinedFromURI(CrlMapSourceURI, trans)\n\tif ref == nil {\n\t\treturn nil\n\t}\n\treturn ref.GetReferencedConcept(trans)\n}", "func (bi *BinaryInfo) findCompileUnit(pc uint64) *compileUnit {\n\tfor _, image := range bi.Images {\n\t\tfor _, cu := range image.compileUnits {\n\t\t\tfor _, rng := range cu.ranges {\n\t\t\t\tif pc >= rng[0] && pc < rng[1] {\n\t\t\t\t\treturn cu\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (g *Graph) GetStationByID(id int) *Station {\n\tif st, ok := g.Nodes[id]; ok {\n\t\treturn st\n\t}\n\n\treturn nil\n}", "func GetStationbyId(r *http.Request, id string) (*Station, error) {\n\t// Does this station id exist?\n\tif station, ok := stationsCache[id]; ok {\n\t\treturn &station, nil\n\t} else {\n\t\treturn nil, errStationNotFound\n\t}\n}", "func (unit *Unit) FindConst(name string) IObject {\n\treturn unit.FindObj(npConst + name)\n}", "func (c *SentencesUnitCollection) Find(key string) (*SentencesUnit, bool) {\n\tfor _, v := range *c {\n\t\tif v.Key == key {\n\t\t\treturn &v, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func get_span_by_trace(traceId string) *structs.CassandraSpan {\n\tquery := fmt.Sprintf(\"SELECT JSON chapter_id, user_id, session_id, trigger_route FROM project.spans WHERE trace_id='%s' LIMIT 1;\", traceId)\n\n\tj := enumerate_query(query)\n\n\tvar cspan *structs.CassandraSpan\n\tif len(j) > 0 {\n\t\tjson.Unmarshal([]byte(j[0]), &cspan)\n\t}\n\n\treturn cspan\n}", "func (*GetConceptRequest) 
Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{30}\n}", "func (db *DB) GetCVSS(id string) *CVSS {\n\tds := CVSS{}\n\tif psql, args, err := selectCVSS.Where(squirrel.Eq{\"id\": id}).ToSql(); err != nil {\n\t\tdb.GetLogger().Println(err)\n\t\treturn nil\n\t} else if err := db.GetDB().SelectOne(&ds, psql, args...); err != nil {\n\t\tdb.GetLogger().Println(err)\n\t\treturn nil\n\t}\n\treturn &ds\n}", "func (d *dsCache) getByIdent(ident *cachedIdent) *cachedDs {\n\td.RLock()\n\tdefer d.RUnlock()\n\treturn d.byIdent[ident.String()]\n}", "func (o *ordering) find(str string) *entry {\n\te := o.entryMap[str]\n\tif e == nil {\n\t\tr := []rune(str)\n\t\tif len(r) == 1 {\n\t\t\tconst (\n\t\t\t\tfirstHangul = 0xAC00\n\t\t\t\tlastHangul = 0xD7A3\n\t\t\t)\n\t\t\tif r[0] >= firstHangul && r[0] <= lastHangul {\n\t\t\t\tce := []rawCE{}\n\t\t\t\tnfd := norm.NFD.String(str)\n\t\t\t\tfor _, r := range nfd {\n\t\t\t\t\tce = append(ce, o.find(string(r)).elems...)\n\t\t\t\t}\n\t\t\t\te = o.newEntry(nfd, ce)\n\t\t\t} else {\n\t\t\t\te = o.newEntry(string(r[0]), []rawCE{\n\t\t\t\t\t{w: []int{\n\t\t\t\t\t\timplicitPrimary(r[0]),\n\t\t\t\t\t\tdefaultSecondary,\n\t\t\t\t\t\tdefaultTertiary,\n\t\t\t\t\t\tint(r[0]),\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\te.modified = true\n\t\t\t}\n\t\t\te.exclude = true // do not index implicits\n\t\t}\n\t}\n\treturn e\n}", "func SearchTaskbyID(id int) Task {\n\n\tfound := Task{}\n\tfound.ID = -1\n\n\terr := globalDBcollection.Find(bson.M{\"id\": id}).One(&found)\n\tif err != nil {\n\n\t}\n\n\treturn found\n}", "func (c *Catalog) FindService(id string) (*Service, bool) {\n\tfor _, s := range c.Services {\n\t\tif s.ID == id {\n\t\t\treturn s, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func FindGistForID(ID interface{}) Gist {\n\tgist := Gist{}\n\tDb.Get(&gist, \"SELECT * FROM gists WHERE id = $1\", ID)\n\treturn gist\n}", "func (s *service) Get(ctx context.Context, id string) 
(*model.FormationConstraint, error) {\n\tformationConstraint, err := s.repo.Get(ctx, id)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"while getting Formation Constraint with id %s\", id)\n\t}\n\n\treturn formationConstraint, nil\n}" ]
[ "0.6033473", "0.5603409", "0.5589752", "0.5520785", "0.5373133", "0.5362807", "0.481132", "0.47816208", "0.4779277", "0.47505984", "0.47133955", "0.4710917", "0.46220994", "0.46212226", "0.45982644", "0.4579497", "0.45483467", "0.4547495", "0.4512528", "0.44844702", "0.44724935", "0.4465039", "0.44526064", "0.44018862", "0.44007102", "0.43952462", "0.43841586", "0.43811083", "0.43807557", "0.43793446", "0.43605718", "0.43463725", "0.43422696", "0.43418506", "0.43325534", "0.43212134", "0.43127033", "0.43102226", "0.4309607", "0.43073618", "0.42795107", "0.4279382", "0.42786777", "0.42679727", "0.42527124", "0.4247491", "0.424412", "0.42418712", "0.4229496", "0.42294642", "0.42274144", "0.42257613", "0.42253044", "0.42201078", "0.4216613", "0.42156512", "0.4212812", "0.42043582", "0.4200325", "0.4197138", "0.41940412", "0.41913447", "0.41910878", "0.41910878", "0.41826913", "0.4174776", "0.41672626", "0.41614956", "0.41576603", "0.41565338", "0.41538125", "0.41497064", "0.41491038", "0.41469416", "0.41469073", "0.41460803", "0.4141278", "0.41387925", "0.41369894", "0.41361323", "0.4132054", "0.4123209", "0.4117025", "0.4115172", "0.41115978", "0.41065928", "0.41013741", "0.4100728", "0.40919885", "0.40900746", "0.4088024", "0.40860623", "0.40830815", "0.40807936", "0.40757808", "0.40695295", "0.40687242", "0.40625316", "0.40589932", "0.40566579" ]
0.7717603
0
Find a concept by a string in semantic tags
func getConceptByStringInProceduresSemanticTag(searchTerm string) string { url := baseUrl + "/browser/" +edition + "/" + version + "/descriptions?term=" + searchTerm + "&conceptActive=true&semanticTag=procedure&groupByConcept=false&searchMode=STANDARD&offset=0&limit=50" return lookup(url) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getConceptByString(searchTerm string, resultLimit string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/concepts?term=\" + searchTerm + \"&activeFilter=true&offset=0&limit=\" + resultLimit\n\treturn lookup(url)\n}", "func (l *SLexicon) GetSemantic(word string) string {\n\tl.Lock() // one at a time\n\tdefer l.Unlock()\n\n\tif val, ok := l.Semantic[word]; ok { // non case sensitive first\n\t\treturn val\n\t}\n\tlwrStr := strings.ToLower(word)\n\tstemmedWord := l.GetStem(lwrStr)\n\tif val, ok := l.Semantic[stemmedWord]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}", "func Lookup(ident string) Token {\n\tif tok, is_keyword := directives[ident]; is_keyword {\n\t\treturn tok\n\t}\n\treturn NotFound\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func (idents idents) Find(lit string) *Ident {\n\tfor _, id := range idents {\n\t\tif id.Lit != lit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &id\n\t}\n\n\treturn nil\n}", "func getConceptBySCTID(stcId string) string {\n\turl := baseUrl + \"/browser/\" + edition + \"/\" + version + \"/concepts/\" + stcId\n\treturn lookup(url)\n\n}", "func (m Model) Find(word string) *Vector {\n\tfor _, vector := range m {\n\t\tif vector.word == word {\n\t\t\treturn vector\n\t\t}\n\t}\n\treturn nil\n}", "func Lookup(ident string) Token {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\treturn Identifier\n}", "func Lookup(ident string) Token {\n\tif tok, is_keyword := keywords[ident]; is_keyword {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func TagNameContains(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContains(FieldTagName, v))\n}", "func spec(stream []string, uFeat string) []*structs.State {\n if stream == nil || uFeat == \".\" {\n return nil\n }\n uCat, _, _ 
:= defeat(uFeat)\n found := search(stream, \"u\" + uCat, nil)\n return found\n}", "func (s *Specs) findSpec(metricName string) (SpecDef, error) {\n\tvar spec SpecDef\n\n\tres := strings.SplitN(metricName, \"_\", 2)\n\tif len(res) < 2 {\n\t\treturn spec, fmt.Errorf(\"metric: %s has no suffix to identify the entity\", metricName)\n\t}\n\tserviceName := res[0]\n\n\tvar ok bool\n\tif spec, ok = s.SpecsByName[serviceName]; !ok {\n\t\treturn spec, fmt.Errorf(\"no spec files for service: %s\", serviceName)\n\t}\n\n\treturn spec, nil\n}", "func getUpdatedRefFromTag(owner string, repo string, path string, tagName string, commitSHA string, allTags []string) (string, error) {\n\tif recommendationActionRefs == nil {\n\t\tif err := loadRecommendationsFromWeb(); err != nil {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to load recommendations from web\")\n\t\t}\n\t}\n\n\t// if the action is in the recommendations\n\tk := fmt.Sprintf(\"%s/%s\", owner, repo)\n\tif path != \"\" {\n\t\tk = fmt.Sprintf(\"%s/%s\", k, path)\n\t}\n\trecommend, ok := recommendationActionRefs[k]\n\tif ok {\n\t\t// the repo (owner/repo/path) was found in the recommends list\n\t\tif recommend.RecommendedRefType == \"tag\" {\n\t\t\t// the owner recommends tags\n\t\t\tfor _, recommendedTag := range recommend.RecommendedRefs {\n\t\t\t\tif recommendedTag == tagName {\n\t\t\t\t\t// exact match, return it\n\t\t\t\t\treturn ref.CreateRefString(owner, repo, path, tagName), nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// the exact tag was not found in the recommendations\n\t\t\t// so let's sort by semver and return the highest\n\t\t\tparsedVersions := []*semver.Version{}\n\t\t\tfor _, allTag := range allTags {\n\t\t\t\tv, err := semver.NewVersion(allTag)\n\t\t\t\tif err == nil {\n\t\t\t\t\tparsedVersions = append(parsedVersions, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(parsedVersions) > 0 {\n\t\t\t\t// there are some semvers so we can pick the highest\n\t\t\t\tsort.Sort(semver.Collection(parsedVersions))\n\t\t\t\thighestVersion := 
parsedVersions[len(parsedVersions)-1]\n\t\t\t\treturn ref.CreateRefString(owner, repo, path, highestVersion.Original()), nil\n\t\t\t}\n\n\t\t\t// ok so here, we recommend tags, tags aren't semver and we don't have a match\n\t\t\t// this is sorted, so we will recommend the top tag in the list\n\t\t\tif len(allTags) > 0 {\n\t\t\t\treturn ref.CreateRefString(owner, repo, path, allTags[0]), nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// if the version parses as semver\n\tv, err := semver.NewVersion(tagName)\n\tif err == nil {\n\t\tparsedVersions := []*semver.Version{}\n\t\tfor _, allTag := range allTags {\n\t\t\tv, err := semver.NewVersion(allTag)\n\t\t\tif err == nil {\n\t\t\t\tparsedVersions = append(parsedVersions, v)\n\t\t\t}\n\t\t}\n\t\tif len(parsedVersions) > 0 {\n\t\t\t// there are some semvers so we can pick the highest\n\t\t\tsort.Sort(semver.Collection(parsedVersions))\n\n\t\t\t// Find the most specific version of the tag\n\t\t\tmostSpecificVersion := v\n\t\t\tfor _, parsedVersion := range parsedVersions {\n\t\t\t\tif parsedVersion.GreaterThan(v) {\n\t\t\t\t\tif parsedVersion.Major() == v.Major() {\n\t\t\t\t\t\tif parsedVersion.GreaterThan(mostSpecificVersion) {\n\t\t\t\t\t\t\tmostSpecificVersion = parsedVersion\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ref.CreateRefString(owner, repo, path, mostSpecificVersion.Original()), nil\n\t\t}\n\n\t}\n\n\t// when all else fails, return the commit sha\n\treturn ref.CreateRefString(owner, repo, path, commitSHA), nil\n}", "func getConceptByDescription(descriptionId string) string {\n\turl := baseUrl + edition + \"/\" + version + \"/descriptions/\" + descriptionId\n\treturn lookup(url)\n\n}", "func (w KyTeaWord) Tag(i, j int, util StringUtil) (string, float64) {\n\ttag := C.kytea_word_tag(w.word, C.int(i), C.int(j), util.util)\n\tdefer C.kytea_std_string_destroy(tag.feature)\n\treturn C.GoString(C.kytea_std_string_cstring(tag.feature)), float64(tag.score)\n}", "func TestFindTag(t *testing.T) {\n\tdb := 
getDb(t)\n\tdefer db.Close()\n\ttags, err := createTags(db, \"a\", 3)\n\tif err != nil {\n\t\tt.Errorf(\"Could not create tags %s\", err)\n\t}\n\n\tfor _, tag := range tags {\n\t\tfoundTag, err := FindTag(db, tag.Text)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not lookup tag with name %s: %s\", tag.Text, err)\n\t\t}\n\t\tif foundTag.Id != tag.Id {\n\t\t\tt.Errorf(\"Lookup of tag %s found id %d but expected %d\", tag.Text, foundTag.Id, tag.Id)\n\t\t}\n\t}\n\n\t// make sure a lookup doesn't return error for not found\n\tfakeTag, err := FindTag(db, \"junk\")\n\tif err != nil {\n\t\tt.Errorf(\"Find should not return error for not found, but got %s\", err)\n\t}\n\tif fakeTag.Id != metadata.UnknownTag.Id {\n\t\tt.Errorf(\"Find on non-existant tag should return unknown id but got %d\", fakeTag.Id)\n\t}\n}", "func (d Dictionary) Search(word string) (string, error) {\n\tdefinition, ok := d[word]\n\n\tif !ok {\n\t\treturn \"\", ErrNotFound\n\t}\n\treturn definition, nil\n}", "func (r *Repository) FindSemverTag(c *semver.Constraints) (*plumbing.Reference, error) {\n\t// Check if Repository is nil to avoid a panic if this function is called\n\t// before repo has been cloned\n\tif r.Repository == nil {\n\t\treturn nil, errors.New(\"Repository is nil\")\n\t}\n\n\ttagsIter, err := r.Tags()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcoll := semverref.Collection{}\n\n\tif err := tagsIter.ForEach(func(t *plumbing.Reference) error {\n\t\tv, err := semver.NewVersion(t.Name().Short())\n\t\tif err != nil {\n\t\t\treturn nil // Ignore errors and thus tags that aren't parsable as a semver\n\t\t}\n\n\t\t// No way to a priori find the length of tagsIter so append to the collection.\n\t\tcoll = append(coll, semverref.SemverRef{Ver: v, Ref: t})\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn coll.HighestMatch(c)\n}", "func matchesTerm(term string, candidate interface{}) (relevance int) {\n\tvals := reflect.ValueOf(candidate)\n\tfor i := 0; i < 
vals.NumField(); i++ {\n\t\tstringVal := strings.ToLower(fmt.Sprint(vals.Field(i).Interface()))\n\t\t// A basic method of ranking search relevance\n\t\tif stringVal == term {\n\t\t\trelevance += 5\n\t\t} else if strings.HasPrefix(stringVal, term) {\n\t\t\trelevance += 3\n\t\t} else if strings.Contains(stringVal, term) {\n\t\t\trelevance++\n\t\t}\n\t}\n\treturn\n}", "func Given(tag string, trackers []LookupTracker) LookupTracker {\n\tfor _, tracker := range trackers {\n\t\tmatch := tracker.AllPatterns().FindAllStringSubmatch(tag, -1)\n\t\tif len(match) > 0 {\n\t\t\treturn tracker\n\t\t}\n\t}\n\treturn nil\n}", "func LookupIdent(ident string) Type {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func LookupIdent(ident string) Type {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func LookupIdent(ident string) Type {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\treturn Ident\n}", "func (search *Search) Tags(tag string) (*SearchResult, error) {\n\tinsta := search.inst\n\tbody, err := insta.sendRequest(\n\t\t&reqOptions{\n\t\t\tEndpoint: urlSearchTag,\n\t\t\tQuery: map[string]string{\n\t\t\t\t\"is_typeahead\": \"true\",\n\t\t\t\t\"rank_token\": insta.rankToken,\n\t\t\t\t\"q\": tag,\n\t\t\t},\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &SearchResult{}\n\terr = json.Unmarshal(body, res)\n\treturn res, err\n}", "func TagNameContainsFold(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContainsFold(FieldTagName, v))\n}", "func (f *Flow) MatchTag(tag string) []*node {\n\tres := []*node{}\n\tfor _, s := range f.Tasks {\n\t\tif s.matched(tag) {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\treturn res\n}", "func (ts TagSet) Find(tag string) int {\n\tfor i := range ts {\n\t\tif ts[i] == tag {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}", "func TagContains(t kappnavv1.Tag, substr string) bool {\n\treturn strings.Contains(string(t), 
substr)\n}", "func (m *Metadata) Get(tag string) (string, error) {\n\tif val, ok := m.tags[tag]; ok {\n\t\treturn val, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"tag not '%s' not found in tag map\", tag)\n}", "func term[V any](dict map[string]V, w string) (V, bool) {\n\tv, ok := dict[w]\n\treturn v, ok\n}", "func (cts *cachedTagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {\n\t// Ensure cache is primed.\n\tif err := cts.Prime(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar tags []string\n\tfor tag, digest := range cts.tags {\n\t\tif digest == desc.Digest {\n\t\t\ttags = append(tags, tag)\n\t\t}\n\t}\n\n\treturn tags, nil\n}", "func LookupIdent(ident string) TokenType {\n\tif tok, ok := keywords[ident]; ok { //does the input match a keyword in our hashmap (represented by the bool ok). If so, tok holds the appropriate keyword token type we wish to return\n\t\treturn tok //returns tok, which has the tokenized value of the input, and ok is true\n\t}\n\treturn IDENT //returns TokenType IDENT, since ok evaluated as false because passed in input does not match a keyword in our hashmap\n}", "func (s STags) Lookup(tag string, fields ...string) (value string, ok bool) {\n\tvalue, ok = s.get(tag, fields...)\n\treturn\n}", "func (o *ordering) find(str string) *entry {\n\te := o.entryMap[str]\n\tif e == nil {\n\t\tr := []rune(str)\n\t\tif len(r) == 1 {\n\t\t\tconst (\n\t\t\t\tfirstHangul = 0xAC00\n\t\t\t\tlastHangul = 0xD7A3\n\t\t\t)\n\t\t\tif r[0] >= firstHangul && r[0] <= lastHangul {\n\t\t\t\tce := []rawCE{}\n\t\t\t\tnfd := norm.NFD.String(str)\n\t\t\t\tfor _, r := range nfd {\n\t\t\t\t\tce = append(ce, o.find(string(r)).elems...)\n\t\t\t\t}\n\t\t\t\te = o.newEntry(nfd, ce)\n\t\t\t} else {\n\t\t\t\te = o.newEntry(string(r[0]), []rawCE{\n\t\t\t\t\t{w: []int{\n\t\t\t\t\t\timplicitPrimary(r[0]),\n\t\t\t\t\t\tdefaultSecondary,\n\t\t\t\t\t\tdefaultTertiary,\n\t\t\t\t\t\tint(r[0]),\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\te.modified 
= true\n\t\t\t}\n\t\t\te.exclude = true // do not index implicits\n\t\t}\n\t}\n\treturn e\n}", "func (c *FontTextureAtlas) Find(word string) (metrics *FontTextureMetrics, ok bool) {\n\tif v, ok := c.fontDictionary[word]; ok {\n\t\treturn v, true\n\t}\n\treturn nil, false\n}", "func (t TagSet) Contains(tag string) bool {\n\t_, ok := t[tag]\n\treturn ok\n}", "func LookupIdent(ident string) TokenType {\r\n\t//If keyword exists return matching token type\r\n\tif tok, ok := keywords[ident]; ok {\r\n\t\treturn tok\r\n\t}\r\n\r\n\t//Otherwise return the IDENT token type\r\n\treturn IDENT\r\n}", "func FindWordsByTagWithMinimumLenght(tag string, limit uint, minWordLenght int) ([]*Word, error) {\n\tvar words []*Word\n\texpectedTag := Tag{}\n\tdb := database.DB()\n\tdb.First(&expectedTag, \"name = ?\", tag)\n\tdb.Where(\"length(value) >= ?\", minWordLenght).Limit(limit).Order(\"RANDOM()\").Model(&expectedTag).Related(&words, \"Words\")\n\treturn words, db.Error\n}", "func (t *Tap) FindLikely(s string) string {\n\ti := len(t.programs) / 2\n\tbeg := 0\n\tend := len(t.programs)\n\tfor {\n\t\tp := t.programs[i]\n\t\tif len(s) <= len(p.Name) {\n\t\t\tif p.Name[:len(s)] == s {\n\t\t\t\t//Check for better fit.\n\t\t\t\tfor j := i; j > 0; j-- {\n\t\t\t\t\tif t.programs[j].Name == s {\n\t\t\t\t\t\treturn t.programs[j].Name\n\t\t\t\t\t}\n\t\t\t\t\tif p.Name[:len(s)] != s {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn p.Name\n\t\t\t}\n\t\t}\n\t\tif s < p.Name {\n\t\t\tend = i\n\t\t\ti = (i + beg) / 2\n\t\t} else {\n\t\t\tbeg = i\n\t\t\ti = (i + end) / 2\n\t\t}\n\t}\n\treturn \"\"\n}", "func (this *WordDictionary) Search(word string) bool {\n \n}", "func (m *store) Find(s string) *ukjent.Word {\n\tw, err := m.get(s)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &w\n}", "func Find(text string) (string, bool) {\n\top, found := operators[text]\n\treturn op, found\n}", "func FindTerm(c *gin.Context) {\n\tvar ok bool\n\ttermID := c.Param(\"term_id\")\n\tif termID != \"\" 
{\n\t\tid := uuid.FromStringOrNil(termID)\n\t\tif id != uuid.Nil {\n\t\t\tvar term models.Term\n\t\t\terr := models.DB.Preload(\"RelatedTerms\").Model(&models.Term{}).Where(\"id = ?\", id).Take(&term).Error\n\t\t\tif err == nil {\n\t\t\t\tc.JSON(http.StatusOK, gin.H{\"data\": term})\n\t\t\t\tok = true\n\t\t\t}\n\t\t}\n\t}\n\tif !ok {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"data\": \"id not found\"})\n\t}\n}", "func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) {\n\tbest := bestMatch{}\n\tfor i, ww := range want {\n\t\tw := ww.tag()\n\t\tvar max language.Tag\n\t\t// Check for exact match first.\n\t\th := m.index[w.LangID]\n\t\tif w.LangID != 0 {\n\t\t\tif h == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Base language is defined.\n\t\t\tmax, _ = canonicalize(Legacy|Deprecated|Macro, w)\n\t\t\t// A region that is added through canonicalization is stronger than\n\t\t\t// a maximized region: set it in the original (e.g. mo -> ro-MD).\n\t\t\tif w.RegionID != max.RegionID {\n\t\t\t\tw.RegionID = max.RegionID\n\t\t\t}\n\t\t\t// TODO: should we do the same for scripts?\n\t\t\t// See test case: en, sr, nl ; sh ; sr\n\t\t\tmax, _ = max.Maximize()\n\t\t} else {\n\t\t\t// Base language is not defined.\n\t\t\tif h != nil {\n\t\t\t\tfor i := range h.haveTags {\n\t\t\t\t\thave := h.haveTags[i]\n\t\t\t\t\tif equalsRest(have.tag, w) {\n\t\t\t\t\t\treturn have, w, Exact\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif w.ScriptID == 0 && w.RegionID == 0 {\n\t\t\t\t// We skip all tags matching und for approximate matching, including\n\t\t\t\t// private tags.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmax, _ = w.Maximize()\n\t\t\tif h = m.index[max.LangID]; h == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tpin := true\n\t\tfor _, t := range want[i+1:] {\n\t\t\tif w.LangID == t.lang() {\n\t\t\t\tpin = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Check for match based on maximized tag.\n\t\tfor i := range h.haveTags {\n\t\t\thave := 
h.haveTags[i]\n\t\t\tbest.update(have, w, max.ScriptID, max.RegionID, pin)\n\t\t\tif best.conf == Exact {\n\t\t\t\tfor have.nextMax != 0 {\n\t\t\t\t\thave = h.haveTags[have.nextMax]\n\t\t\t\t\tbest.update(have, w, max.ScriptID, max.RegionID, pin)\n\t\t\t\t}\n\t\t\t\treturn best.have, best.want, best.conf\n\t\t\t}\n\t\t}\n\t}\n\tif best.conf <= No {\n\t\tif len(want) != 0 {\n\t\t\treturn nil, want[0].tag(), No\n\t\t}\n\t\treturn nil, language.Tag{}, No\n\t}\n\treturn best.have, best.want, best.conf\n}", "func (s Space) HasTag(needle string) bool {\n\tisPrefix := strings.HasSuffix(needle, \"/\")\n\tfor i := range s.Tags {\n\t\tswitch isPrefix {\n\t\tcase true:\n\t\t\tif strings.HasPrefix(s.Tags[i], needle) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase false:\n\t\t\tif s.Tags[i] == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func TestSemantic_2527(t *testing.T) {\n\tif !typeparams.Enabled {\n\t\tt.Skip(\"type parameters are needed for this test\")\n\t}\n\t// these are the expected types of identifiers in text order\n\twant := []result{\n\t\t{\"package\", \"keyword\", \"\"},\n\t\t{\"foo\", \"namespace\", \"\"},\n\t\t{\"func\", \"keyword\", \"\"},\n\t\t{\"Add\", \"function\", \"definition deprecated\"},\n\t\t{\"T\", \"typeParameter\", \"definition\"},\n\t\t{\"int\", \"type\", \"defaultLibrary\"},\n\t\t{\"target\", \"parameter\", \"definition\"},\n\t\t{\"T\", \"typeParameter\", \"\"},\n\t\t{\"l\", \"parameter\", \"definition\"},\n\t\t{\"T\", \"typeParameter\", \"\"},\n\t\t{\"T\", \"typeParameter\", \"\"},\n\t\t{\"return\", \"keyword\", \"\"},\n\t\t{\"append\", \"function\", \"defaultLibrary\"},\n\t\t{\"l\", \"parameter\", \"\"},\n\t\t{\"target\", \"parameter\", \"\"},\n\t\t{\"for\", \"keyword\", \"\"},\n\t\t{\"range\", \"keyword\", \"\"},\n\t\t{\"l\", \"parameter\", \"\"},\n\t\t{\"return\", \"keyword\", \"\"},\n\t\t{\"nil\", \"variable\", \"readonly defaultLibrary\"},\n\t}\n\tsrc := `\n-- go.mod --\nmodule example.com\n\ngo 1.19\n-- main.go 
--\npackage foo\n// Deprecated (for testing)\nfunc Add[T int](target T, l []T) []T {\n\treturn append(l, target)\n\tfor range l {} // test coverage\n\treturn nil\n}\n`\n\tWithOptions(\n\t\tModes(Default),\n\t\tSettings{\"semanticTokens\": true},\n\t).Run(t, src, func(t *testing.T, env *Env) {\n\t\tenv.OpenFile(\"main.go\")\n\t\tenv.AfterChange(\n\t\t\tDiagnostics(env.AtRegexp(\"main.go\", \"for range\")),\n\t\t)\n\t\tp := &protocol.SemanticTokensParams{\n\t\t\tTextDocument: protocol.TextDocumentIdentifier{\n\t\t\t\tURI: env.Sandbox.Workdir.URI(\"main.go\"),\n\t\t\t},\n\t\t}\n\t\tv, err := env.Editor.Server.SemanticTokensFull(env.Ctx, p)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tseen := interpret(v.Data, env.BufferText(\"main.go\"))\n\t\tif x := cmp.Diff(want, seen); x != \"\" {\n\t\t\tt.Errorf(\"Semantic tokens do not match (-want +got):\\n%s\", x)\n\t\t}\n\t})\n\n}", "func TagNameIn(vs ...string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldIn(FieldTagName, vs...))\n}", "func FindAtom(lpString string) ATOM {\n\tlpStringStr := unicode16FromString(lpString)\n\tret1 := syscall3(findAtom, 1,\n\t\tuintptr(unsafe.Pointer(&lpStringStr[0])),\n\t\t0,\n\t\t0)\n\treturn ATOM(ret1)\n}", "func (p Predicate) containsTerm(v Variable) bool {\n\tfor _, variable := range p.parameters {\n\t\tif variable.name == v.name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (d Dictionary) Search(word string) (string, error) {\n\tvalue, exists := d[word]\n\tif exists {\n\t\treturn value, nil\n\t}\n\n\treturn \"\", errNotFound\n}", "func (grammar *Grammar) GetTag(tagname string) *TagDefinition {\n\t// Search tags in reverse order, because tags can be overriden\n\tfor i := len(grammar.Tags) - 1; i >= 0; i-- {\n\t\ttag := grammar.Tags[i]\n\t\tif tag.Name == tagname {\n\t\t\treturn tag\n\t\t}\n\t}\n\treturn nil\n}", "func (ds *DatabaseService) Genericise(concept *Concept, generics map[Identifier]*Concept) (*Concept, bool) {\n\tpaths, err := 
ds.PathsToRoot(concept)\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\tvar bestPath []*Concept\n\tbestPos := -1\n\tfor _, path := range paths {\n\t\tfor i, concept := range path {\n\t\t\tif generics[concept.ConceptID] != nil {\n\t\t\t\tif i > 0 && (bestPos == -1 || bestPos > i) {\n\t\t\t\t\tbestPos = i\n\t\t\t\t\tbestPath = path\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif bestPos == -1 {\n\t\treturn nil, false\n\t}\n\treturn bestPath[bestPos], true\n}", "func (c *Category) FindExample(name string) *Example {\n\tfor _, example := range c.Example {\n\t\tif example.Name == name {\n\t\t\treturn example\n\t\t}\n\t}\n\treturn nil\n}", "func matchTag(ctxt *build.Context, name string, allTags map[string]bool) bool {\n\tif allTags != nil {\n\t\tallTags[name] = true\n\t}\n\n\t// special tags\n\tif ctxt.CgoEnabled && name == \"cgo\" {\n\t\treturn true\n\t}\n\tif name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"android\" && name == \"linux\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"illumos\" && name == \"solaris\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"ios\" && name == \"darwin\" {\n\t\treturn true\n\t}\n\tif name == \"unix\" && unixOS[ctxt.GOOS] {\n\t\treturn true\n\t}\n\tif name == \"boringcrypto\" {\n\t\tname = \"goexperiment.boringcrypto\" // boringcrypto is an old name for goexperiment.boringcrypto\n\t}\n\n\t// other tags\n\tfor _, tag := range ctxt.BuildTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\ttoolTags := extractToolTags(ctxt)\n\tfor _, tag := range toolTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, tag := range ctxt.ReleaseTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (l *SLexicon) AddSemantic(word string, semantic string) {\n\tl.Lock() // one at a time\n\tdefer l.Unlock()\n\n\tl.Semantic[word] = strings.ToLower(semantic)\n\tl.testAndAddCompoundWord(word) // add this item to the compound word set\n}", "func main() 
{\n // perform the search for the specified term\n search.Run(\"president\")\n}", "func (ti *Index) Filter(textTags, metrics []string) []string {\n\tmatches := []string{}\n\tintersectionCounts := make([]int, len(metrics))\n\tfor _, tag := range textTags {\n\t\tsearch := strings.TrimPrefix(tag, ti.textMatchPrefix)\n\t\t// broken pin -> no possible matches -> empty intersection\n\t\tif search[0] == '$' || search[len(search)-1] == '^' {\n\t\t\treturn []string{}\n\t\t}\n\t\tcaret := search[0] == '^'\n\t\tdollar := search[len(search)-1] == '$'\n\t\tnonpositional := strings.Trim(search, \"^$\")\n\n\t\t// TODO: maybe this is slow? map based intersect and such\n\t\t// this case is a little silly, since you should probably just query\n\t\t// graphite for that metric directly\n\t\tif caret && dollar {\n\t\t\tfor i, rawMetric := range metrics {\n\t\t\t\tif rawMetric == nonpositional {\n\t\t\t\t\tintersectionCounts[i]++\n\t\t\t\t\tif intersectionCounts[i] == len(textTags) {\n\t\t\t\t\t\tmatches = append(matches, rawMetric)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if caret {\n\t\t\tfor i, rawMetric := range metrics {\n\t\t\t\tif strings.HasPrefix(rawMetric, nonpositional) {\n\t\t\t\t\tintersectionCounts[i]++\n\t\t\t\t\tif intersectionCounts[i] == len(textTags) {\n\t\t\t\t\t\tmatches = append(matches, rawMetric)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if dollar {\n\t\t\tfor i, rawMetric := range metrics {\n\t\t\t\tif strings.HasSuffix(rawMetric, nonpositional) {\n\t\t\t\t\tintersectionCounts[i]++\n\t\t\t\t\tif intersectionCounts[i] == len(textTags) {\n\t\t\t\t\t\tmatches = append(matches, rawMetric)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor i, rawMetric := range metrics {\n\t\t\t\tif strings.Contains(rawMetric, nonpositional) {\n\t\t\t\t\tintersectionCounts[i]++\n\t\t\t\t\tif intersectionCounts[i] == len(textTags) {\n\t\t\t\t\t\tmatches = append(matches, rawMetric)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn matches\n}", "func (r Dictionary) 
Search(word string) (string, error) {\n\tif word == \"\" {\n\t\treturn \"\", ErrKeyWordEmpty\n\t}\n\tdefinition, ok := r[word]\n\tif !ok {\n\t\treturn \"\", ErrWordNotFound\n\t}\n\treturn definition, nil\n}", "func termStr(term goraptor.Term) string {\n\tswitch t := term.(type) {\n\tcase *goraptor.Uri:\n\t\treturn string(*t)\n\tcase *goraptor.Blank:\n\t\treturn string(*t)\n\tcase *goraptor.Literal:\n\t\treturn t.Value\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (t *TST) Find(s string) bool {\n\treturn t.Get(s) != nil\n}", "func main() {\n\tt := Constructor()\n\tt.AddWord(\"bad\")\n\tt.AddWord(\"dad\")\n\tt.AddWord(\"mad\")\n\ts := \"pad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"dad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".ad\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"b..\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".adx\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".ax\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \".\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n\ts = \"d.\"\n\tfmt.Printf(\"find %q, get %t\\n\", s, t.Search(s))\n}", "func (ns nodes) find(name string, param bool) *node {\n\tfor _,n := range ns {\n\t\tif n.param && param {\n\t\t\treturn n\n\t\t}\n\t\tif n.name == name {\n\t\t\treturn n\n\t\t}\n\t}\n\treturn nil\n}", "func (d Dictionary) Search(word string) (string, error) {\n\tvalue, ok := d[word]\n\n\tif ok {\n\t\treturn value, nil\n\t}\n\n\treturn word, errNotFound\n}", "func (lr *Rule) Find(find string) []*Rule {\n\tvar res []*Rule\n\tlr.FuncDownMeFirst(0, lr.This(), func(k ki.Ki, level int, d any) bool {\n\t\tlri := k.(*Rule)\n\t\tif strings.Contains(lri.String, find) || strings.Contains(lri.Nm, find) {\n\t\t\tres = append(res, lri)\n\t\t}\n\t\treturn true\n\t})\n\treturn res\n}", "func (r Repository) GetIdeasByString(query string) Ideas {\n\tsession, err := mgo.Dial(SERVER)\n\n\tif err != nil 
{\n\t\tfmt.Println(\"Failed to establish connection to Mongo server:\", err)\n\t}\n\n\tdefer session.Close()\n\n\tc := session.DB(DBNAME).C(IdeaCollection)\n\tresult := Ideas{}\n\n\t// Logic to create filter\n\tqs := strings.Split(query, \" \")\n\tand := make([]bson.M, len(qs))\n\tfor i, q := range qs {\n\t\tand[i] = bson.M{\"title\": bson.M{\n\t\t\t\"$regex\": bson.RegEx{Pattern: \".*\" + q + \".*\", Options: \"i\"},\n\t\t}}\n\t}\n\tfilter := bson.M{\"$and\": and}\n\n\tif err := c.Find(&filter).Limit(10).All(&result); err != nil {\n\t\tfmt.Println(\"Failed to write result:\", err)\n\t}\n\n\treturn result\n}", "func (t *Trie) Find(word string) (Node, error) {\n\tif len(word) == 0 {\n\t\treturn nil, fmt.Errorf(\"no string to find\")\n\t}\n\n\trunes := []rune(word)\n\n\ttermNode, err := t.findAtNode(t.Root, runes, 0)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"word %s not found: %s\", word, err)\n\t}\n\n\treturn termNode, nil\n}", "func (term *Term) Get(name *Term) *Term {\n\tswitch v := term.Value.(type) {\n\tcase *object:\n\t\treturn v.Get(name)\n\tcase *Array:\n\t\treturn v.Get(name)\n\tcase interface {\n\t\tGet(*Term) *Term\n\t}:\n\t\treturn v.Get(name)\n\tcase Set:\n\t\tif v.Contains(name) {\n\t\t\treturn name\n\t\t}\n\t}\n\treturn nil\n}", "func (d *Definition) Search(pattern string) Resource {\n\tresource := make(chan Resource)\n\ttree := d.ResourceTree\n\n\tgo func() {\n\t\tdefer close(resource)\n\t\ttree.Traverse(func(r Resource) {\n\t\t\tresource <- r\n\t\t})\n\t}()\n\n\tfor resourceWanted := range resource {\n\t\tpattern := fmt.Sprint(d.Context, \"/\", pattern)\n\t\tif resourceWanted.ID() == pattern {\n\t\t\treturn resourceWanted\n\t\t}\n\t}\n\n\treturn nil\n}", "func FindWordsByTag(tag string, limit uint) ([]*Word, error) {\n\tvar words []*Word\n\texpectedTag := Tag{}\n\tdb := database.DB()\n\tdb.First(&expectedTag, \"name = ?\", tag)\n\tdb.Limit(limit).Order(\"RANDOM()\").Model(&expectedTag).Related(&words, \"Words\")\n\treturn words, db.Error\n}", 
"func (s *SpecDef) findEntity(metricName string) (EntityDef, bool) {\n\tfor _, e := range s.Entities {\n\t\tfor _, em := range e.Metrics {\n\t\t\tif metricName == em.Name {\n\t\t\t\treturn e, true\n\t\t\t}\n\t\t}\n\t}\n\treturn EntityDef{}, false\n}", "func LookupIdent(ident string) TokenType {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}", "func (f *CompiledFingerprints) matchString(data string, part part) []string {\n\tvar matched bool\n\tvar technologies []string\n\n\tfor app, fingerprint := range f.Apps {\n\t\tswitch part {\n\t\tcase jsPart:\n\t\t\tfor _, pattern := range fingerprint.js {\n\t\t\t\tif pattern.MatchString(data) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\t\t\t}\n\t\tcase scriptPart:\n\t\t\tfor _, pattern := range fingerprint.script {\n\t\t\t\tif pattern.MatchString(data) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\t\t\t}\n\t\tcase htmlPart:\n\t\t\tfor _, pattern := range fingerprint.html {\n\t\t\t\tif pattern.MatchString(data) {\n\t\t\t\t\tmatched = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If no match, continue with the next fingerprint\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the technologies as well as implied ones\n\t\ttechnologies = append(technologies, app)\n\t\tif len(fingerprint.implies) > 0 {\n\t\t\ttechnologies = append(technologies, fingerprint.implies...)\n\t\t}\n\t\tmatched = false\n\t}\n\treturn technologies\n}", "func LookupIdent(ident string) TokenType {\n\tif tok, ok := keywords[ident]; ok {\n\t\treturn tok\n\t}\n\n\treturn IDENT\n}", "func nameAndTag(id string) (name string, tag string, err error) {\n\tsegments := strings.Split(id, \":\")\n\tswitch len(segments) {\n\tcase 2:\n\t\tname = segments[0]\n\t\ttag = segments[1]\n\t\tif len(name) == 0 || len(tag) == 0 {\n\t\t\terr = errors.NewBadRequest(\"imageRepositoryTags must be retrieved with <name>:<tag>\")\n\t\t}\n\tdefault:\n\t\terr = errors.NewBadRequest(\"imageRepositoryTags must be retrieved with 
<name>:<tag>\")\n\t}\n\treturn\n}", "func getStringToNextTag(tokens []string) string {\n for i, s := range tokens {\n if isMetadataTag(s) {\n // found a metadata tag\n return strings.Join(tokens[:i], \" \")\n }\n }\n return strings.Join(tokens, \" \")\n}", "func contains(s []string, e string) *string {\n\tfor _, a := range s {\n\t\tif strings.Contains(e, a) {\n\t\t\treturn &a\n\t\t}\n\t}\n\treturn nil\n}", "func TagsContains(v string) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldTags), v))\n\t})\n}", "func FindTagForVersion(dir string, version string, gitter Gitter) (string, error) {\n\terr := gitter.FetchTags(dir)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"fetching tags for %s\", dir)\n\t}\n\tanswer := \"\"\n\ttags, err := gitter.FilterTags(dir, version)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"listing tags for %s\", version)\n\t}\n\tif len(tags) == 1 {\n\t\tanswer = tags[0]\n\t} else if len(tags) == 0 {\n\t\t// try with v\n\t\tfilter := fmt.Sprintf(\"v%s\", version)\n\t\ttags, err := gitter.FilterTags(dir, filter)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"listing tags for %s\", filter)\n\t\t}\n\t\tif len(tags) == 1 {\n\t\t\tanswer = tags[0]\n\t\t} else {\n\t\t\treturn \"\", errors.Errorf(\"cannot resolve %s to a single git object (searching for tag %s and tag %s), found %+v\", version, version, filter, tags)\n\t\t}\n\t} else {\n\t\treturn \"\", errors.Errorf(\"cannot resolve %s to a single git object, found %+v\", version, tags)\n\t}\n\treturn answer, nil\n}", "func findTerminfo(name string) (*terminfo.Terminfo, error) {\n\tcachedTerminfoMutex.Lock()\n\tif ti, ok := cachedTerminfo[name]; ok {\n\t\tcachedTerminfoMutex.Unlock()\n\t\treturn ti, nil\n\t}\n\tti, _, e := dynamic.LoadTerminfo(name)\n\tif e == nil {\n\t\tcachedTerminfo[name] = ti\n\t\tcachedTerminfoMutex.Unlock()\n\t\treturn ti, nil\n\t}\n\tti, e = terminfo.LookupTerminfo(name)\n\treturn ti, 
e\n}", "func FindWordsByTagWithMaximumLenght(tag string, limit uint, minWordLenght int) ([]*Word, error) {\n\tvar words []*Word\n\texpectedTag := Tag{}\n\tdb := database.DB()\n\tdb.First(&expectedTag, \"name = ?\", tag)\n\tdb.Where(\"length(value) <= ?\", minWordLenght).Limit(limit).Order(\"RANDOM()\").Model(&expectedTag).Related(&words, \"Words\")\n\treturn words, db.Error\n}", "func findImageInRepotags(search imageParts, images []*Image) (*storage.Image, error) {\n\t_, searchName, searchSuspiciousTagValueForSearch := search.suspiciousRefNameTagValuesForSearch()\n\tvar results []*storage.Image\n\tfor _, image := range images {\n\t\tfor _, name := range image.Names() {\n\t\t\td, err := decompose(name)\n\t\t\t// if we get an error, ignore and keep going\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, dName, dSuspiciousTagValueForSearch := d.suspiciousRefNameTagValuesForSearch()\n\t\t\tif dName == searchName && dSuspiciousTagValueForSearch == searchSuspiciousTagValueForSearch {\n\t\t\t\tresults = append(results, image.image)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// account for registry:/somedir/image\n\t\t\tif strings.HasSuffix(dName, searchName) && dSuspiciousTagValueForSearch == searchSuspiciousTagValueForSearch {\n\t\t\t\tresults = append(results, image.image)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\tif len(results) == 0 {\n\t\treturn &storage.Image{}, errors.Errorf(\"unable to find a name and tag match for %s in repotags\", searchName)\n\t} else if len(results) > 1 {\n\t\treturn &storage.Image{}, errors.Errorf(\"found multiple name and tag matches for %s in repotags\", searchName)\n\t}\n\treturn results[0], nil\n}", "func LookupTag(err error, name string) string {\n\tvar result string\n\tfor _, tag := range deepAppendTags(nil, err) {\n\t\tif tag.Name == name {\n\t\t\tresult = tag.Value\n\t\t}\n\t}\n\n\treturn result\n}", "func findStringInContent(b []byte, s string) bool {\n\n\tstringbody := string(b)\n\tif strings.Contains(stringbody, s) {\n\t\treturn 
true\n\t}\n\treturn false\n}", "func (term *Terminology) Resolve(ctx context.Context, id *apiv1.Identifier) (proto.Message, error) {\n\tsctID, err := snomed.ParseAndValidate(id.GetValue())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT: %w\", err)\n\t}\n\theader := metadata.New(map[string]string{\"accept-language\": \"en-GB\"})\n\tctx = metadata.NewOutgoingContext(ctx, header)\n\tif sctID.IsConcept() {\n\t\tec, err := term.client.GetExtendedConcept(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT concept '%d': %w\", sctID, err)\n\t\t}\n\t\treturn ec, nil\n\t}\n\tif sctID.IsDescription() {\n\t\td, err := term.client.GetDescription(ctx, &snomed.SctID{Identifier: sctID.Integer()})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT description '%d': %w\", sctID, err)\n\t\t}\n\t\treturn d, nil\n\t}\n\treturn nil, fmt.Errorf(\"could not resolve SNOMED CT entity '%d': only concepts and descriptions supported\", sctID)\n}", "func (tr translations) lookup(s string) (string, bool) {\n\tif tr != nil {\n\t\tt, ok := tr[strings.ToLower(s)]\n\t\treturn t, ok\n\t}\n\treturn \"\", false\n}", "func lexMetadataDirective(\n\tfin *bufio.Reader,\n) (name string, args []string, err error) {\n\terr = eatWhitespace(fin)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tr, _, err := fin.ReadRune()\n\tif r != '@' {\n\t\terr = errors.New(\"Expected directive\")\n\t\treturn\n\t}\n\n\tname, err = readWord(fin)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor name != \"begin\" && name != \"scene\" {\n\t\terr = eatWhitespace(fin)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tr, _, err = fin.ReadRune()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfin.UnreadRune()\n\t\tif r == '@' {\n\t\t\tbreak\n\t\t}\n\n\t\targ := \"\"\n\t\targ, err = readPlainText(fin)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\n\treturn\n}", "func (str 
String) Find(path Ref) (Value, error) {\n\tif len(path) == 0 {\n\t\treturn str, nil\n\t}\n\treturn nil, errFindNotFound\n}", "func (d Dictionary) Search(word string) (string, error) {\r\n\tvalue, exists := d[word] //word라는 키에 해당하는 값을 반환\r\n\tif exists {\r\n\t\treturn value, nil // 단어 있음 == 에러가 없다는 말임\r\n\t}\r\n\treturn \"\", errNotFound\r\n}", "func Test_Resolve(t *testing.T) {\n\tparser := NewDefault()\n\n\tresolved, err := parser.Resolve([]byte(have), \"test.tf\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, []byte(have), *resolved)\n}", "func exampleStartWith() {\n\t// Read text from file\n\ttext, err := streeng.StringFromFile(\"pp.txt\")\n\tif err != nil {\n\t\tfmt.Println(\"String from file error:\", err.Error())\n\t\treturn\n\t}\n\n\t// split text with whitespace seperator\n\twords := strings.Fields(text)\n\n\t// Make a new Streeng\n\ts := streeng.MakeStreeng(words)\n\n\t// Match string which start with given string in streeng\n\tlistOfIndex := s.StartWith(\"sta\")\n\n\tfmt.Println(listOfIndex)\n}", "func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, illegal bool) {\n\tss := strings.Split(query, \".\", 0);\n\n\t// check query syntax\n\tfor _, s := range ss {\n\t\tif !isIdentifier(s) {\n\t\t\tillegal = true;\n\t\t\treturn;\n\t\t}\n\t}\n\n\tswitch len(ss) {\n\tcase 1:\n\t\tmatch, alt = x.LookupWord(ss[0])\n\n\tcase 2:\n\t\tpakname := ss[0];\n\t\tmatch, alt = x.LookupWord(ss[1]);\n\t\tif match != nil {\n\t\t\t// found a match - filter by package name\n\t\t\tdecls := match.Decls.filter(pakname);\n\t\t\tothers := match.Others.filter(pakname);\n\t\t\tmatch = &LookupResult{decls, others};\n\t\t}\n\n\tdefault:\n\t\tillegal = true\n\t}\n\n\treturn;\n}", "func (analyzer *Analyzer) Concepts(flavor, payload string, options url.Values) (*ConceptsResponse, error) {\n\tif !entryPoints.hasFlavor(\"concepts\", flavor) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"concepts info for %s not available\", flavor))\n\t}\n\n\toptions.Add(flavor, 
payload)\n\turl := entryPoints.urlFor(analyzer.baseUrl, \"concepts\", flavor)\n\tdata, err := analyzer.analyze(url, options, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tresponse := new(ConceptsResponse)\n\t\terr := json.Unmarshal(data, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tif response.Status != \"OK\" {\n\t\t\t\treturn nil, errors.New(response.StatusInfo)\n\t\t\t} else {\n\t\t\t\treturn response, nil\n\t\t\t}\n\t\t}\n\t}\n}", "func (sem SEManager) Get(keyword string) (SearchEngine, error) {\n if len(keyword) < 1 {\n return SearchEngine{}, errors.New(\"Keyword too short\")\n }\n for _, se := range sem.Engines {\n if keyword == se.Keyword {\n return se, nil\n }\n }\n return SearchEngine{}, errors.New(fmt.Sprintf(\"No search engine with keyword %s found\", keyword))\n}", "func (f *FlagSet) lookup(name NormalizedName) *Flag {\n\treturn f.formal[name]\n}", "func (d Dictionary) Search(word string) (string, error) {\n\tkey, exists := d[word]\n\tif exists {\n\t\treturn key, nil\n\t}\n\treturn \"\", errNotFound\n}", "func (g *Game) findWord(ws []*scottpb.Word, w string) int {\n\tw = (w + \" \")[0:g.Current.Header.WordLength]\n\tfor i := 0; i < len(ws); i++ {\n\t\tif (ws[i].Word + \" \")[0:g.Current.Header.WordLength] != w {\n\t\t\tcontinue\n\t\t}\n\t\tfor j := i; j >= 0; j-- {\n\t\t\tif !ws[j].Synonym {\n\t\t\t\treturn j\n\t\t\t}\n\t\t}\n\t}\n\treturn UnknownWord\n}", "func Find(name string) (string, bool) { q, ok := queries[name]; return q, ok }", "func IndefiniteArticleFor(word string) string {\n\t// Handle special cases\n\tfor _, regex := range A_explicit_an {\n\t\tif regex.MatchString(word) {\n\t\t\treturn \"an\"\n\t\t}\n\t}\n\n\t// Handle abbreviations (disabled because Go regexps don't have lookahead)\n\t// if A_abbrev.MatchString(word) {\n\t//\treturn \"an\"\n\t// }\n\n\t// \"an A-frame\"\n\tif regexp.MustCompile(`(?i)^[aefhilmnorsx][.-]`).MatchString(word) {\n\t\treturn \"an\"\n\t}\n\n\t// \"a 
G-string\"\n\tif regexp.MustCompile(`(?i)^[a-z][.-]`).MatchString(word) {\n\t\treturn \"a\"\n\t}\n\n\t// Handle consonants\n\tif regexp.MustCompile(`(?i)^[^aeiouy]`).MatchString(word) {\n\t\treturn \"a\"\n\t}\n\n\t// Handle special vowel-forms\n\tif regexp.MustCompile(`(?i)^e[uw]`).MatchString(word) ||\n\t\tregexp.MustCompile(`(?i)^onc?e\\b`).MatchString(word) ||\n\t\tregexp.MustCompile(`(?i)^uni([^nmd]|mo)`).MatchString(word) ||\n\t\tregexp.MustCompile(`(?i)^u[bcfhjkqrst][aeiou]`).MatchString(word) {\n\t\treturn \"a\"\n\t}\n\n\t// Handle vowels\n\tif regexp.MustCompile(`(?i)^[aeiou]`).MatchString(word) {\n\t\treturn \"an\"\n\t}\n\n\t// Handle y...\n\t// (before certain consonants implies (unnaturalized) \"i..\" sound)\n\tif A_y_cons.MatchString(word) {\n\t\treturn \"an\"\n\t}\n\n\t// Otherwise, guess \"a\"\n\treturn \"a\"\n}", "func (v *VerbalExpression) Find(s string) *VerbalExpression {\n\treturn v.add(`(?:` + quote(s) + `)`)\n}" ]
[ "0.6151782", "0.58596826", "0.5414848", "0.51702994", "0.51702994", "0.50460577", "0.5022692", "0.49993387", "0.49353907", "0.49214068", "0.4907331", "0.49026054", "0.48358035", "0.48210332", "0.4817401", "0.480488", "0.47873816", "0.47856623", "0.47444916", "0.474016", "0.47362557", "0.4715367", "0.4715367", "0.47090843", "0.47051394", "0.4698096", "0.46945465", "0.46922663", "0.46797916", "0.4650516", "0.4639754", "0.46351942", "0.46309122", "0.46028945", "0.4601739", "0.4601426", "0.45833084", "0.45686352", "0.45661455", "0.45656848", "0.4539887", "0.45201847", "0.45099387", "0.4509719", "0.44995806", "0.44948497", "0.44833657", "0.44813755", "0.44809824", "0.4466135", "0.44588608", "0.44412574", "0.44303343", "0.44265175", "0.44174463", "0.4404549", "0.43956727", "0.4394042", "0.43918148", "0.43908054", "0.4390306", "0.43833634", "0.43821698", "0.43816012", "0.43690586", "0.43689162", "0.43680304", "0.43673682", "0.4354996", "0.4348547", "0.4339774", "0.43377852", "0.4336387", "0.43219796", "0.43162924", "0.43139192", "0.43130752", "0.43122762", "0.43042916", "0.43028042", "0.43016383", "0.43006408", "0.42976925", "0.4296688", "0.42917085", "0.4287441", "0.4285794", "0.42846802", "0.42831224", "0.4281457", "0.42804173", "0.42797607", "0.42776734", "0.42745477", "0.42728132", "0.42688274", "0.4264401", "0.4263584", "0.42611885", "0.42529" ]
0.6968239
0
Perform get request against an API URL
func lookup(url string) string { response, err := http.Get(url) if err != nil { fmt.Printf("The HTTP request failed with error %s\n", err) os.Exit(1) } data, _ := ioutil.ReadAll(response.Body) return string(data) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) Get(url string, headers map[string]string, params map[string]interface{}) (*APIResponse, error) {\n\tfinalURL := c.baseURL + url\n\tr, err := http.NewRequest(\"GET\", finalURL, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create request: %v\", err)\n\t}\n\n\treturn c.performRequest(r, headers, params)\n}", "func (c *Client) Get(ctx context.Context, url string, data ...interface{}) (*Response, error) {\n\treturn c.DoRequest(ctx, http.MethodGet, url, data...)\n}", "func (c *Client) get(url string, query url.Values) (json.RawMessage, error) {\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create get request\")\n\t}\n\treq.URL.RawQuery = query.Encode()\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"cound not get %v\", url)\n\t}\n\tdefer res.Body.Close()\n\tvar resp response\n\tif err := json.NewDecoder(res.Body).Decode(&resp); err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not decode response\")\n\t}\n\tif resp.Code != 0 {\n\t\treturn nil, errors.Errorf(\"get response code %d\", resp.Code)\n\t}\n\treturn resp.Data, nil\n}", "func (c *Client) Get(url string) (*Response, error) {\n\treq, err := c.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq = c.Config.AddRequestHeader(req)\n\tresponse, err := c.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (c *Client) get(url string, result interface{}) error {\n\treq, err := c.newRequest(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := c.http.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err = checkResponse(resp); err != nil {\n\t\treturn err\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = checkResults(b); err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(b, &result)\n\n\treturn 
err\n}", "func (c *Client) Get(url string) (*http.Response, error) {\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Do(req)\n}", "func (f5 *f5LTM) get(url string, result interface{}) error {\n\treturn f5.restRequest(\"GET\", url, nil, result)\n}", "func (rb *RequestBuilder) Get(url string) *Response {\n\treturn rb.DoRequest(http.MethodGet, url, nil)\n}", "func (c *Client) Get(url string, headers, queryParams map[string][]string) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodGet, headers, queryParams, nil)\n}", "func get(url string, qparms rest.QParms) ([]byte, error) {\n\theaders := rest.Headers{\"Authorization\": \"Bearer \" + token}\n\tfor k, v := range defaultHeaders {\n\t\theaders[k] = v\n\t}\n\tclient := rest.NewClient(headers, qparms)\n\n\tbody, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}", "func (cli *Client) Get(targetURL *url.URL) {\n\tvar resp *resty.Response\n\tvar err error\n\n\tif cli.Config.Oauth2Enabled {\n\t\tresp, err = resty.R().\n\t\t\tSetHeader(\"Authorization\", fmt.Sprintf(\"Bearer %s\", cli.AccessToken)).\n\t\t\tGet(targetURL.String())\n\t} else {\n\t\tresp, err = resty.R().Get(targetURL.String())\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"ERR: Could not GET request, caused by: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Print(resp)\n}", "func Get(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"GET\", url, data...)\n}", "func (g *Getter) Get(url string) (*http.Response, error) {\n\treturn g.Client.Get(url)\n}", "func Get (url string, args map[string]string) (*http.Response, error) {\n\t// create a client\n\tclient, req, _ := GetHttpClient(url)\n\t// build the query\n\tif len(args) > 0 {\n\t\treq = buildQuery(req, args)\n\t}\n\t// execute the request\n\t//fmt.Println(req.URL.String())\n\treturn client.Do(req)\n}", "func Get(url string) (resp *http.Response, err error) 
{\n\treturn DefaultClient.Get(url)\n}", "func (workCloud *WorkCloud) get(url string) (string, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treq.Header.Add(\"User-Agent\", workCloud.agent)\n\n\tres, err := workCloud.client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\tb, err := ioutil.ReadAll(res.Body)\n\n\treturn string(b), nil\n}", "func Get(url string) (*http.Response, error) {\n\treturn DefaultClient.Get(url)\n}", "func (c *Client) Get(url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\n\tresp, err := c.client.Do(req)\n\treturn resp, err\n}", "func (c *Client) Get(rawurl string, out interface{}) error {\n\treturn c.Do(rawurl, \"GET\", nil, out)\n}", "func Get(url string, key string) (Response, error) {\n\n\tres, err := http.Get(url + \"/jsonResult.php?test=\" + key)\n\n\tif err != nil {\n\t\treturn Response{}, err\n\t}\n\n\tdefer res.Body.Close()\n\treturn process(ioutil.ReadAll(res.Body))\n}", "func (client *HTTPClient) Get(url string, opts *RequestOptions) (resp *http.Response, err error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tresp, err = client.Do(req, opts)\n\treturn\n}", "func get(url string) (string, error) {\n\t//defer fetch.CatchPanic(\"Get()\")\n\tresp, err := httpClient.Get(url)\n\tif err != nil {\n\t\tpanic(\"Couldn't perform GET request to \" + url)\n\t}\n\tdefer resp.Body.Close()\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(\"Unable to read the response body\")\n\t}\n\ts := string(bytes)\n\treturn s, nil\n}", "func (c *Client) Get(url string, headers map[string][]string) (client.Status, map[string][]string, io.ReadCloser, error) {\n\treturn c.Do(\"GET\", url, headers, nil)\n}", "func (l *LegistarApi) doSimpleAPIGetRequest(URL string) 
(*http.Response, error) {\n\treq, err := http.NewRequest(http.MethodGet, URL, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create http request: %w\", err)\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := l.httpCli.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create get: %w\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(\"got bad status code \" + resp.Status)\n\t}\n\treturn resp, nil\n}", "func get(url string) (*http.Response, error) {\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tresp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"get failed: %s\\n\", resp.Status)\n\t}\n\n\treturn resp, nil\n}", "func (api *api) doGet(url string) (result []byte, err error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Error(\"this shouldn't happen: %v\", err)\n\t\treturn nil, err\n\t}\n\tresp, err := api.client.Do(req)\n\tif err != nil {\n\t\tlog.Debug(\"got error response for URL %s: %v\", url, err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tstatusCode, status := resp.StatusCode, resp.Status\n\tif statusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"IB returned an error: %s: %s\", status, url)\n\t}\n\n\tresult, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading response body: %v: %s\", err, url)\n\t}\n\n\tlog.Trace(\"%s : SUCCESS\", url)\n\treturn result, nil\n}", "func Get(url string) (resp *http.Response, err error) {\n\treturn do(\"GET\", url, nil)\n}", "func getWithCB(url string) (*http.Response, error) {\n\tresp, err := cb.Execute(func() (interface{}, error) {\n\t\tresp, err := http.Get(url)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif resp.StatusCode == http.StatusInternalServerError {\n\t\t\treturn nil, fmt.Errorf(\"Internal Server Error\")\n\t\t}\n\n\t\treturn resp, nil\n\t})\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resp.(*http.Response), nil\n}", "func (a *API) Get(path string) (resp *http.Response, err error) {\n\tu, err := url.ParseRequestURI(a.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu.Path = path\n\n\treturn a.Client.Get(u.String())\n}", "func doAPIRequest(httpClient *http.Client, httpUserAgentString string, apiURLString string, endpoint string, query url.Values) ([]byte, error) {\n\t// build the request\n\treq, err := http.NewRequest(\"GET\", apiURLString+endpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif query != nil {\n\t\treq.URL.RawQuery = query.Encode()\n\t}\n\treq.Header.Set(\"User-Agent\", httpUserAgentString)\n\n\t// make the request, return error if error\n\t// TODO: handle errors like client side timeouts\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check status code, return error if not 200\n\t// TODO: handle errors like server side timeouts, this is difficult because\n\t// the API is so sparsely documented.\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", resp.Status, respBody)\n\t}\n\n\treturn respBody, nil\n}", "func get(url string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"User-Agent\", userAgent)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got status code: %s\", resp.Status)\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (c *Client) Get(url string) (*http.Response, error) {\n\tb := c.breakerLookup(url)\n\tif b == nil {\n\t\treturn c.client.Get(url)\n\t}\n\n\tctx := getGetCtx()\n\tdefer releaseGetCtx(ctx)\n\n\tctx.Client = c.client\n\tctx.ErrorOnBadStatus = 
c.errOnBadStatus\n\tctx.URL = url\n\tif err := b.Call(ctx, breaker.WithTimeout(c.timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ctx.Response, ctx.Error\n}", "func Request(url string) ([]byte, error) {\n\tvar err error\n\tclient := resty.New()\n\n\tresponse, err := client.R().Get(url)\n\n\tbytes := []byte(response.Body())\n\n\treturn bytes, err\n}", "func Get(url string, response interface{}) error {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := json.NewDecoder(resp.Body).Decode(response); err != nil {\n\t\treturn fmt.Errorf(\"unable to unmarshal response\")\n\t}\n\n\treturn nil\n}", "func (c *Client) Get(url string, resType interface{}) error {\n\treturn c.CallAPI(\"GET\", url, nil, resType, true)\n}", "func (c *Client) get(endpoint string, queries map[string]string) (*http.Response, error) {\n\t// Assemble request\n\treq, err := c.buildRequest(\"GET\", endpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Add query strings\n\tif queries != nil {\n\t\tencodeQuery(req.URL, queries)\n\t}\n\n\tclient := buildHTTPSClient(c.verifySSL)\n\treturn client.Do(req)\n}", "func (client *Client) Get(action string, params url.Values, header http.Header) (*Response, error) {\r\n\treturn client.Request(\"GET\", action, params, header, nil)\r\n}", "func (client *Client) Get(\n\turl string,\n\tparams url.Values,\n\toptions ...interface{},\n) (io.ReadCloser, int, error) {\n\treply, err := client.request(\"GET\", url, params, nil, options...)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn reply.Body, reply.StatusCode, nil\n}", "func (c *Client) Get(path string) (io.ReadCloser, error) {\n\n\treq, err := http.NewRequest(\"GET\", c.url+\"/\"+path, nil)\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"URL: %v\\n\", req.URL)\n\n\treq.Header.Add(\"Authorization\", \"Bearer \"+c.apiKey)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil 
{\n\t\tlog.Fatalln(\"Failed: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn resp.Body, nil\n}", "func RequestForAPI(req *http.Request)(*http.Response, error) {\n\t\tclient := &http.Client{}\n\n\tclient = &http.Client{\n\t\tTimeout: time.Second * time.Duration(1500),\n\t}\n\tresp, clientErr := client.Do(req)\n\tif clientErr != nil {\n\t\tlogger.WithField(\"error from api\", clientErr.Error()).Error(\"Get Request Failed\")\n\t\treturn nil, clientErr\n\t}\n\tif resp.StatusCode != 200 {\n\t\tlogger.WithField(\"error from api\", resp).Error(\"Get Request Failed\")\n\t\treturn nil, clientErr\n\t}\n\treturn resp, clientErr\n}", "func Get(url, authToken string) (*http.Response, error) {\n\treturn get(url, authToken, 1)\n}", "func (c *Client) Get(endpoint string, params map[string]string) *grequests.Response {\n\turl := c.Endpoint + endpoint\n\tresp, err := grequests.Get(url, &grequests.RequestOptions{\n\t\tParams: params,\n\t})\n\tif err != nil {\n\t\tutilities.CheckError(resp.Error, \"Unable to make requests\")\n\t}\n\n\tif resp.Ok != true {\n\t\tlog.Println(\"Request did not return OK\")\n\t}\n\n\treturn resp\n}", "func (g *Github) Get(url string) (*http.Response, error) {\n\treturn g.Do(http.MethodGet, url, http.NoBody)\n}", "func Get(url string, data ...interface{}) (*Response, error) {\n\tr := NewRequest()\n\treturn r.Get(url, data...)\n}", "func (cl *Client) Get(c context.Context, url string, opts ...RequestOption) (*Response, error) {\n\treq, err := cl.NewRequest(c, http.MethodGet, url, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cl.Do(c, req)\n}", "func (c Client) get(path string, params url.Values, holder interface{}) error {\n\treturn c.request(\"GET\", path, params, &holder)\n}", "func (c *Client) get(rawURL string, authenticate bool, out interface{}) error {\n\terr := c.do(rawURL, \"GET\", authenticate, http.StatusOK, nil, out)\n\treturn errio.Error(err)\n}", "func Get(ctx context.Context, url string, options ...RequestOption) (*Response, 
error) {\n\tr, err := newRequest(ctx, http.MethodGet, url, nil, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn doRequest(http.DefaultClient, r)\n}", "func (c *Client) Get(url string, header map[string]string) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", url, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bytes, nil\n}", "func (c *Client) Get(headers map[string]string, queryParams map[string]string) ([]byte, error) {\n\n\t// add parameters to the url\n\tv := url.Values{}\n\tfor key, value := range queryParams {\n\t\tv.Add(key, value)\n\t}\n\turi, err := url.Parse(c.baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turi.RawQuery = v.Encode()\n\tc.baseURL = uri.String()\n\n\t// create a new get request\n\trequest, err := http.NewRequest(\"GET\", c.baseURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// add headers to the request\n\tfor key, value := range headers {\n\t\trequest.Header.Add(key, value)\n\t}\n\n\tresponse, err := c.sendRequestWithRetry(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// if response is an error (not a 200)\n\tif response.StatusCode > 299 {\n\t\treturn nil, errors.New(response.Status)\n\t}\n\t// read the body as an array of bytes\n\tresponseBody, err := ioutil.ReadAll(response.Body)\n\treturn responseBody, err\n}", "func fetchFromAPI(link string) (body []byte, err error) {\n\twikiClient := http.Client{\n\t\tTimeout: time.Second * 2, // Maximum of 2 secs\n\t}\n\tfakeBody := []byte{}\n\n\treq, err := http.NewRequest(http.MethodGet, link, nil)\n\tif err != nil {\n\t\treturn fakeBody, err\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"slack-wikipedia-bot\")\n\n\tres, getErr := wikiClient.Do(req)\n\tif getErr != nil {\n\t\treturn fakeBody, getErr\n\t}\n\n\tif res.Body != nil {\n\t\tdefer 
res.Body.Close()\n\t}\n\n\treturn ioutil.ReadAll(res.Body)\n}", "func (client *Client) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) {\n\tif opts == nil {\n\t\topts = new(RequestOpts)\n\t}\n\n\tif JSONResponse != nil {\n\t\topts.JSONResponse = JSONResponse\n\t}\n\n\treturn client.Request(\"GET\", url, opts)\n}", "func Get(url, token string) {\n\t// Create a Resty Client\n\tclient := resty.New()\n\tclient.SetAuthToken(token)\n\n\tresp, err := client.R().\n\t\tEnableTrace().\n\t\tGet(url)\n\n\t// Explore response object\n\tfmt.Println(\"Response Info:\")\n\tfmt.Println(\" Error :\", err)\n\tfmt.Println(\" Status Code:\", resp.StatusCode())\n\tfmt.Println(\" Status :\", resp.Status())\n\tfmt.Println(\" Proto :\", resp.Proto())\n\tfmt.Println(\" Time :\", resp.Time())\n\tfmt.Println(\" Received At:\", resp.ReceivedAt())\n\tfmt.Println(\" Body :\\n\", resp)\n\tfmt.Println()\n}", "func (api *API) Get(method string, params map[string]string, result interface{}) {\n\trequest := api.Request(method, result)\n\tif params != nil {\n\t\trequest.SetQueryParams(params)\n\t}\n\n\t_, err := request.Get(api.url + \"/\" + method)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (c *KeycloakClient) get(url string, v interface{}) error {\n\tvar body []byte\n\terr := c.getRaw(url, &body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(body, v)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (d *Doer) Get(url string, response interface{}) (*http.Response, error) {\n\treq, err := d.newRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d.do(req, response)\n}", "func (a *API) Get(u string) (*http.Response, []byte, error) {\n\tvar body []byte\n\tvar resp *http.Response\n\tif a.Verbose {\n\t\tfmt.Printf(\"Get: %v\\n\", u)\n\t}\n\treq, err := http.NewRequest(\"GET\", u, nil)\n\tif err == nil {\n\t\t// Salsa's API needs these cookies to verify authentication.\n\t\tfor 
_, c := range a.Cookies {\n\t\t\treq.AddCookie(c)\n\t\t}\n\t\tresp, err = a.Client.Do(req)\n\t\tif err == nil {\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\tm := fmt.Sprintf(\"invalid response code %v\", resp.Status)\n\t\t\t\terr = errors.New(m)\n\t\t\t\treturn resp, body, err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\t\tif a.Verbose {\n\t\t\t\tfmt.Printf(\"Get: %v\\n\", string(body))\n\t\t\t}\n\t\t}\n\t}\n\treturn resp, body, err\n}", "func (h *Client) Get(url string, values url.Values) (body []byte, statusCode int, err error) {\n\tif values != nil {\n\t\turl += \"?\" + values.Encode()\n\t}\n\tvar req *http.Request\n\treq, err = http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn h.do(req)\n}", "func Get(headers map[string]string, url string) (interface{}, error) {\n var responseBody interface{}\n\n client := &http.Client{}\n\n req, err := http.NewRequest(\"GET\", url, nil)\n if err != nil {\n return nil, err\n }\n\n for key, val := range headers {\n req.Header.Add(key, val)\n }\n\n resp, err := client.Do(req)\n if err != nil {\n return nil, err\n }\n\n defer resp.Body.Close()\n rawBody, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n return nil, err\n }\n\n return json.Unmarshal(rawBody, &responseBody), nil\n}", "func (c *httpClient) Get(url string,\n\theaders http.Header) (*Response, error) {\n\treturn c.do(http.MethodGet, url, headers, nil)\n}", "func (oc *OAuthConsumer) Get( url string, fparams Params, at *AccessToken) (r *http.Response, err os.Error) {\n\treturn oc.oAuthRequest(url, fparams, at, \"GET\")\n}", "func (downloader *DatabaseDownloader) doGETRequest(urlString string) (*http.Response, error) {\n\n\tparsedURL, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq := parsedURL.Query()\n\tq.Set(\"edition_id\", \"GeoLite2-City\")\n\tq.Set(\"license_key\", downloader.LicenseKey)\n\tparsedURL.RawQuery = q.Encode()\n\n\treq, err := 
http.NewRequest(\"GET\", parsedURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Encoding\", \"\")\n\treq.Header.Set(\"Connection\", \"close\")\n\treq.Header.Set(\"Accept-Encoding\", \"deflate, identity\")\n\n\tresp, err := downloader.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode == 401 {\n\t\treturn nil, errors.New(\"Invalid license key\")\n\t}\n\n\treturn resp, nil\n\n}", "func (c *Client) Get(path string, query map[string]string, response interface{}) error {\n\turl, err := c.createURL(path, query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"client: error creating http request: %s\", err.Error())\n\t}\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"token %s\", c.token))\n\treq.Header.Add(\"Accept\", c.AcceptHeader)\n\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"client: error making a http request: %s\", err.Error())\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"client: error reading api error response: %s\", err.Error())\n\t\t}\n\t\treturn fmt.Errorf(\"client: GitHub api error - status %d: %s\", res.StatusCode, body)\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\terr = decoder.Decode(&response)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"client: error unmarshaling json response: %s\", err.Error())\n\t}\n\treturn nil\n}", "func Get(url string) ([]byte, error) {\n\tclient := http.Client{\n\t\tTimeout: time.Second * 3,\n\t}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := ioutil.ReadAll(resp.Body)\n\n\treturn data, err\n}", "func Get(url string, ret interface{}) error {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"http 
get\")\n\t}\n\tdefer func() { _ = resp.Body.Close() }()\n\treturn dealResp(resp, ret)\n}", "func Get(dst []byte, url string) (statusCode int, body []byte, err error) {\n\treturn defaultClient.Get(dst, url)\n}", "func (c *JenkinsClient) Get(url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", c.BaseURL+url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Do(req)\n}", "func httpGet(t *testing.T, url string) ([]byte, error) {\n\tclient := &http.Client{}\n\tresp, err := invokeWithRetry(\n\t\tfunc() (response *http.Response, e error) {\n\t\t\treturn client.Get(url)\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\treturn handleHttpResp(t, resp)\n}", "func (c *TogglHttpClient) GetRequest(endpoint string) (*json.RawMessage, error) {\n\treturn request(c, \"GET\", endpoint, nil)\n}", "func (c *Client) Get(route string, queryValues map[string]string) (*RawResponse, error) {\n return c.doRequest(\"GET\", route, queryValues, nil)\n}", "func Get(url string, r io.Reader, w io.Writer, clientGenerator func() *http.Client, reqTuner ...func(*http.Request)) error {\n\treturn Request(\"GET\", url, r, w, clientGenerator, reqTuner...)\n}", "func (client *Client) Get(url string) (*http.Response, error) {\n\treturn client.Aws4Client.Get(url)\n}", "func HTTPGET(url string, auth bool, authToken string) (int, []byte, error) {\n\treturn httpRequest(\"GET\", url, auth, authToken, nil)\n}", "func Get(targetURL string, params map[string]string, authHeader string) ([]byte, error) {\n\tlog.Debugf(\"GET targetURL=%s, params=%+v, auth=%s\", targetURL, params, authHeader)\n\n\treq, _ := http.NewRequest(\"GET\", targetURL, nil)\n\treq.URL.RawQuery = convertToValues(params).Encode()\n\tif authHeader != \"\" {\n\t\treq.Header.Add(\"Authorization\", authHeader)\n\t}\n\treturn doRequest(req)\n}", "func (i *Instance) doRequest(ctx context.Context, url string) (map[string]interface{}, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, 
fmt.Sprintf(\"%s%s\", i.address, url), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := i.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tvar data map[string]interface{}\n\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tvar res ResponseError\n\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(res.Errors[0].Msg)\n\t}\n\n\treturn nil, fmt.Errorf(\"%v\", res)\n}", "func Get(query string) (*client.Response, error) {\n\tresponse, err := client.Get(APIBaseURL+query).Header(\"Accept\", acceptType).End()\n\treturn response, err\n}", "func (h *httpCloud) get(path string, resp interface{}) error {\n\trequestType := \"GET\"\n\tbody, err := h.sendHTTPRequest(requestType, path, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"HTTP request to cloudprovider failed: %v\", err)\n\t}\n\tif body != nil {\n\t\tif err := json.Unmarshal(body, resp); err != nil {\n\t\t\treturn fmt.Errorf(\"GET response Unmarshal for %s failed with error: %v\\n\", path, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (a *API) Get(path string, params map[string]string) ([]byte, error) {\n\tpairs := make([]string, 0)\n\tfor key, value := range params {\n\t\tpairs = append(pairs, key+\"=\"+url.QueryEscape(value))\n\t}\n\tquery := strings.Join(pairs, \"&\")\n\turl := BaseURL + \"/\" + path + \"?\" + query\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Client-ID\", a.ClientID)\n\n\tclient := http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}", "func SimpleGET(url string) 
([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn []byte(\"\"), err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte(\"\"), err\n\t}\n\tresp.Body.Close()\n\n\treturn body, nil\n}", "func Get(domain, url, token, tokenKey string) (*http.Response, error) {\n\t/*\n\t * First we will initalize the client\n\t * Then we will send the get request\n\t * Then we will return the response\n\t */\n\t//initalizing the client\n\tclient := heimdallC.NewClient(\n\t\theimdallC.WithHTTPClient(&myHTTPClient{\n\t\t\ttoken: token,\n\t\t\ttokenKey: tokenKey,\n\t\t\tdomain: domain,\n\t\t}),\n\t)\n\n\t//then we will make the request\n\tres, err := client.Get(url, http.Header{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//return the response\n\treturn res, nil\n}", "func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) {\n\treturn clientGetURL(dst, url, c)\n}", "func Get(url string, externalHeader ...map[string]string) ([]byte, error) {\n\t// check if request hit MaxParallel\n\tif cache.IsBurst(url) {\n\t\treturn nil, ErrMaxParallel\n\t}\n\tdefer cache.Release(url)\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\tfor _, v := range externalHeader {\n\t\tfor k := range v {\n\t\t\treq.Header.Set(k, v[k])\n\t\t}\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar bf bytes.Buffer\n\tpooledCopy(&bf, resp.Body)\n\treturn bf.Bytes(), nil\n}", "func Get(url string) ([]byte, error) {\n\trsp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\tdefer rsp.Body.Close()\n\treturn ioutil.ReadAll(rsp.Body)\n}", "func (c *Client) Get(URL string) (resp *Response, err error) {\n\turlObj, err := url.ParseRequestURI(URL)\n\tif err != nil {\n\t\treturn\n\t}\n\theader := 
make(map[string]string)\n\theader[HeaderContentLength] = \"0\"\n\theader[HeaderHost] = urlObj.Host\n\treq := &Request{\n\t\tMethod: MethodGet,\n\t\tURL: urlObj,\n\t\tProto: HTTPVersion,\n\t\tHeader: header,\n\t\tContentLength: 0,\n\t\tBody: strings.NewReader(\"\"),\n\t}\n\tresp, err = c.Send(req)\n\treturn\n}", "func (t *ApiTester) Get(route string) (*ApiTesterResponse, error) {\n\t// prepare the request here\n\trequest, err := http.NewRequest(\"GET\", \"http://\"+t.Server.Listener.Addr().String()+route, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// do the actual request here\n\tresponse, err := t.Client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbodyBuffer, _ := ioutil.ReadAll(response.Body)\n\n\treturn &ApiTesterResponse{\n\t\tRawResponse: response,\n\t\tStatusCode: response.StatusCode,\n\t\tBodyStr: string(bodyBuffer),\n\t}, nil\n}", "func (req *Req) Get(u string) ([]byte, error) {\n\treturn req.request(\"GET\", u)\n}", "func httpGet(url string) (string, error) {\n\terrMsg := fmt.Sprintf(\"error fetching data from episodate api for url: %s\", url)\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\terr = errors.Wrapf(err, errMsg)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = errors.New(fmt.Sprintf(\"%s: Got HTTP StatusCode: %d\", errMsg, resp.StatusCode))\n\t\treturn \"\", err\n\t}\n\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, errMsg)\n\t\treturn \"\", err\n\t}\n\n\treturn string(bodyBytes), nil\n}", "func performRequest(url string) ([]byte, error) {\n\tres, err := http.Get(url)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func getRequest(url string) ([]byte, error) {\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, 
err := makeRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp == nil {\n\t\treturn nil, ErrResponseNil\n\t}\n\tdefer resp.Body.Close()\n\n\tbodyBytes, err := getBody(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, types.NewErrServiceClient(resp.StatusCode, bodyBytes)\n\t}\n\n\treturn bodyBytes, nil\n}", "func Get(method, url string, params map[string]string, vPtr interface{}) error {\n\taccount, token, err := LoginWithSelectedAccount()\n\tif err != nil {\n\t\treturn LogError(\"Couldn't get account details or login token\", err)\n\t}\n\turl = fmt.Sprintf(\"%s%s\", account.ServerURL, url)\n\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif token != \"\" {\n\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t}\n\tq := req.URL.Query()\n\tfor k, v := range params {\n\t\tq.Add(k, v)\n\t}\n\treq.URL.RawQuery = q.Encode()\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer CloseTheCloser(resp.Body)\n\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\n\tif resp.StatusCode != 200 {\n\t\trespBody := map[string]interface{}{}\n\t\tif err := json.Unmarshal(data, &respBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_ = LogError(fmt.Sprintf(\"error while getting service got http status code %s - %s\", resp.Status, respBody[\"error\"]), nil)\n\t\treturn fmt.Errorf(\"received invalid status code (%d)\", resp.StatusCode)\n\t}\n\n\tif err := json.Unmarshal(data, vPtr); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (session *Session) Get(path string, params Params) (response []byte, err error) {\n\turlStr := session.getUrl(path, params)\n\tlog.Println(urlStr)\n\tresponse, err = session.sendGetRequest(urlStr)\n\treturn\n\n\t//res, err = MakeResult(response)\n\t//return\n\n}", "func (v *DCHttpClient) Get(url string, headers map[string]string) (response *DCHttpResponse, err error) 
{\n\treturn v.DoWithoutContent(http.MethodGet, url, headers)\n}", "func (req *Request) Get(url string) (*Response, error) {\n\tresponse, err := req.Request.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewResponse(response), nil\n}", "func performRequest(url string) (io.ReadCloser, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tdefer func() {\n\t\t\tif err := resp.Body.Close(); err != nil {\n\t\t\t\tlog.Warnf(\"Failed to close response body: %s\", err)\n\t\t\t}\n\t\t}()\n\n\t\tresponseBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"non success response code: %d, body: %s\", resp.StatusCode, string(responseBytes))\n\t}\n\n\treturn resp.Body, nil\n}", "func (m *MockClient) Get(url string) (*http.Response, error) {\n\treturn GetFunc(url)\n}", "func (s *Sender) SimpleGet() (*http.Response, error) {\n\treturn http.Get(s.URL)\n}", "func (s *DefaultClient) Get(endpoint string) ([]byte, *http.Response, error) {\n\treturn s.http(http.MethodGet, endpoint, nil)\n}", "func (a *APITest) Get(url string) *Request {\n\ta.request.method = http.MethodGet\n\ta.request.url = url\n\treturn a.request\n}", "func GETapi(w http.ResponseWriter, request *http.Request) {\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tURLs := mux.Vars(request)\n\tif len(URLs) != 0 {\n\t\thttp.Error(w, \"400 - Bad Request!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tmetaInfo := &MetaInfo{}\n\tmetaInfo.Uptime = FormatSince(startTime)\n\tmetaInfo.Info = \"Service for IGC tracks\"\n\tmetaInfo.Version = \"version 1.0\"\n\n\tjson.NewEncoder(w).Encode(metaInfo)\n}", "func (c *Client) get(urlStr string, form url.Values, data interface{}) error {\n\tresp, err := oauthClient.Get(c.HTTPClient, c.Credentials, urlStr, form)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\treturn c.decodeResponse(resp, data)\n}", 
"func (c *Client) get(path string) (string, error) {\n\turl := c.endpoint + path\n\tresp, err := c.httpClient.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tbodyBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbody := string(bodyBytes)\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn body, fmt.Errorf(\"response status was %d\", resp.StatusCode)\n\t}\n\treturn body, nil\n}" ]
[ "0.7583128", "0.7119745", "0.7059129", "0.70423204", "0.7026855", "0.70153975", "0.7013186", "0.6997614", "0.69913566", "0.69484425", "0.6936488", "0.68919736", "0.687983", "0.6803041", "0.679222", "0.67858833", "0.67665464", "0.6743607", "0.67274976", "0.6717777", "0.66881055", "0.6686413", "0.668632", "0.6670138", "0.6669864", "0.6634572", "0.6613961", "0.6599531", "0.6598894", "0.6589005", "0.6582178", "0.6578746", "0.65683407", "0.65667254", "0.6555997", "0.65527755", "0.6527986", "0.65239745", "0.6504694", "0.6503072", "0.64975667", "0.64893025", "0.64888453", "0.64787555", "0.6475441", "0.646841", "0.64658695", "0.6457821", "0.64445263", "0.6439283", "0.6415858", "0.64139724", "0.6405992", "0.6399692", "0.6394418", "0.63725483", "0.63705", "0.6367058", "0.63620967", "0.63571274", "0.6354262", "0.6343602", "0.6340308", "0.63273716", "0.6316897", "0.631611", "0.6315231", "0.6300054", "0.6294927", "0.62864405", "0.6285015", "0.6280881", "0.6273625", "0.6272711", "0.62563396", "0.6252733", "0.62512386", "0.6249204", "0.62489676", "0.62463796", "0.6241251", "0.62369674", "0.6234472", "0.6220337", "0.6211541", "0.6210916", "0.6205949", "0.6200918", "0.6169869", "0.6165746", "0.61637014", "0.61627173", "0.6162373", "0.6162206", "0.6160135", "0.6150395", "0.615013", "0.6145579", "0.61452997", "0.614488", "0.6142241" ]
0.0
-1
MatchString matches a string with either a regexp or direct string match
func (ri *RegexpInfo) MatchString(s string) bool { if ri.Regexp != nil { return ri.Regexp.MatchString(s) } else if ri.Path != "" { return strings.HasSuffix(s, ri.Path) } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (re *Regexp) MatchString(s string) bool\t{ return len(re.doExecute(s, nil, 0)) > 0 }", "func (re *RegexpStd) MatchString(s string) bool {\n\tif ok, err := re.p.MatchString(s); err == nil {\n\t\treturn ok\n\t}\n\treturn false\n}", "func (n *LibPcreRegexp) MatchString(s string) bool {\n\treturn n.re.MatcherString(s, 0).Matches()\n}", "func MatchString(pattern string, s string) (matched bool, error string) {\n\tre, err := CompileRegexp(pattern);\n\tif err != \"\" {\n\t\treturn false, err\n\t}\n\treturn re.MatchString(s), \"\";\n}", "func (re *Regexp) MatchString(s string) bool {\n\tm, err := re.run(true, -1, getRunes(s))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn m != nil\n}", "func MatchString(pattern string, s string) (matched bool, err error) {\n\tre, err := CompileStd(pattern)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn re.MatchString(s), nil\n}", "func MatchString(pattern, target string) (bool, error) {\n\tr, err := Convert(pattern)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn r.MatchString(target), nil\n}", "func MatchString(pattern string, str string) (bool, error) {\n\tabb, err := Compile(pattern)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn abb.MatchString(str), nil\n}", "func MatchString(pattern string, s string) bool {\n\tre, err := Compile(pattern)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn re.MatchString(s)\n}", "func TestMatchString(t *testing.T) {\n\tpattern, upper, lower := \"^(B|b)rian$\", \"Brian\", \"brian\"\n\n\tif match, err := regexp.MatchString(pattern, upper); match != true {\n\t\tt.Errorf(\"MatchString did not match %q %v\", upper, err)\n\t}\n\n\tif match, err := regexp.MatchString(pattern, lower); match != true {\n\t\tt.Errorf(\"MatchString did not match %q %v\", lower, err)\n\t}\n}", "func StringMatches(s string, params ...string) bool {\n\tif len(params) == 1 {\n\t\tpattern := params[0]\n\t\treturn Matches(s, pattern)\n\t}\n\treturn false\n}", "func (this *MatchString) 
matchStr(str string, mSrc string) bool {\n\tres, err := regexp.MatchString(mSrc, str)\n\treturn res == true && err == nil\n}", "func MatchString(infix, matchString string) bool {\n\tn := Compile(infix)\n\treturn n.Matches(matchString)\n}", "func Match(regexStr string, textStr string) (result bool, err error) {\n\tregex := []rune(regexStr)\n\ttext := []rune(textStr)\n\tif len(regex) > 0 && regex[0] == '^' {\n\t\treturn matchHere(regex[1:], text)\n\t}\n\tif len(text) == 0 {\n\t\treturn matchHere(regex, text)\n\t}\n\tfor i, _ := range text {\n\t\tr, e := matchHere(regex, text[i:])\n\t\tif r || e != nil {\n\t\t\treturn r, e\n\t\t}\n\t}\n\treturn result, err\n}", "func (r *Go) Match(s string) bool {\n\tt := time.Now()\n\tdefer MatchHistogram.With(\"string\", s, \"duration\", \"seconds\").Observe(time.Since(t).Seconds())\n\n\treturn r.reg.MatchString(s)\n}", "func matchFunc(a, b string) bool {\n\tmatched, _ := regexp.MatchString(b, a)\n\treturn matched\n}", "func (String) Matches(pattern string) bool { return boolResult }", "func Matches(str, pattern string) bool {\n\tmatch, _ := regexp.MatchString(pattern, str)\n\treturn match\n}", "func Match(t Testing, reg, str interface{}, formatAndArgs ...interface{}) bool {\n\tif !tryMatch(reg, str) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\"Expect string(%s) to match regexp(%s)\", fmt.Sprint(str), fmt.Sprint(reg)),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}", "func Match(patternStr, str string) (matched bool, err error) {\n\tif patternStr == \"\" {\n\t\treturn true, nil\n\t}\n\n\tpattern := preparePattern(patternStr)\n\tstrs, err := prepareStr(str)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn match(pattern, strs)\n}", "func matchStr(rgxp string, compare string) bool {\n\tr, err := regexp.Compile(rgxp)\n\tif err != nil {\n\t\tlog.Fatalf(\"invalid regexp: %s\", rgxp)\n\t}\n\treturn r.MatchString(strings.ToLower(compare))\n}", "func (m RegexpMatcher) Match(s string) bool {\n\treturn m.re.MatchString(s)\n}", 
"func (t *StringDataType) Match(r *regexp.Regexp) *StringDataType {\n\treturn t.Validate(func(s string) error {\n\t\tif !r.MatchString(s) {\n\t\t\treturn fmt.Errorf(\"value does not match passed in regex %s\", r.String())\n\t\t}\n\t\treturn nil\n\t})\n}", "func matchRegex(params models.RegexParamsProvider, str string) (bool, string) {\n\tregex := params.GetRegexp()\n\tif regex == nil {\n\t\treturn true, str\n\t}\n\n\tif !regex.MatchString(str) {\n\t\treturn false, \"\"\n\t}\n\n\tsubstrings := regex.FindStringSubmatch(str)\n\tif len(substrings) == 1 {\n\t\treturn true, str\n\t}\n\n\treturn true, substrings[1]\n}", "func regexpMatch(r *regexp.Regexp, s string) bool {\n\treturn r != nil && r.MatchString(s)\n}", "func (pattern targetPattern) MatchString(target string) bool {\n\tparts := strings.SplitN(target, \"/\", 2)\n\treturn len(parts) == 2 && parts[1] == pattern.name && pattern.namespace.MatchString(parts[0])\n}", "func OkMatchesString(label, val, regex string, t *testing.T) {\n\tre := regexp.MustCompile(regex)\n\tif re.MatchString(val) {\n\t\tt.Logf(\"ok - %s: '%s' matches '%s'\\n\", label, val, regex)\n\t} else {\n\t\tt.Logf(\"not ok - %s: String '%s' doesn't match '%s'\", label, val, regex)\n\t\tt.Fail()\n\t}\n}", "func RegexpString(re *syntax.Regexp,) string", "func m(t *testing.T, s, re string) {\n\tt.Helper()\n\tmatched, err := regexp.MatchString(re, s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !matched {\n\t\tt.Errorf(\"string does not match pattern %q:\\n%s\", re, s)\n\t}\n}", "func Regexp(str string, pattern string) bool {\n\tok, _ := regexp.MatchString(pattern, str)\n\treturn ok\n}", "func Regexp(str string, pattern string) bool {\n\tok, _ := regexp.MatchString(pattern, str)\n\treturn ok\n}", "func matchesRegex(pattern, str interface{}) bool {\n\tmatch, err := regexp.MatchString(fmt.Sprint(pattern), fmt.Sprint(str))\n\tif err != nil {\n\t\tlog.Errorf(\"bad regex expression %s\", fmt.Sprint(pattern))\n\t\treturn false\n\t}\n\tscope.Debugf(\"%v regex 
%v? %v\\n\", pattern, str, match)\n\treturn match\n}", "func Match(regx string, arg string) bool {\n\tmatched, err := regexp.MatchString(\"^(\"+regx+\")$\", arg)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn matched\n}", "func TestFindString(t *testing.T) {\n\tregex := regexp.MustCompile(\"Brian\")\n\tsubject := \"Hello Brian\"\n\tmatch := regex.FindString(subject)\n\tAssert(\"Brian\", match, t)\n}", "func StringMatchesPattern(re *regexp.Regexp, reDesc string) String {\n\treturn func(v string) error {\n\t\tif !re.MatchString(v) {\n\t\t\treturn fmt.Errorf(\"%s does not match the pattern: %s\",\n\t\t\t\tv, reDesc)\n\t\t}\n\t\treturn nil\n\t}\n}", "func isMatch(s string, p string) bool {\n\n}", "func TestMatch(t *testing.T) {\n\tpattern := \"^(B|b)rian$\"\n\tif match, err := regexp.Match(pattern, []byte(\"Brian\")); match != true {\n\t\tt.Errorf(\"Brian did not match %q %v\", pattern, err)\n\t}\n\n\tif match, err := regexp.Match(pattern, []byte(\"brian\")); match != true {\n\t\tt.Errorf(\"brian did not match %q %v\", pattern, err)\n\t}\n}", "func (f *Flow) MatchString(key string, predicate getter.StringPredicate) bool {\n\tif s, err := f.GetFieldString(key); err == nil {\n\t\treturn predicate(s)\n\t}\n\treturn false\n}", "func Match(str, pat string) bool {\n\tvar i, j int\n\tvar star, pms int = -1, -1\n\n\tif len(pat) == 0 {\n\t\treturn len(str) == 0\n\t}\n\n\tfor i < len(str) {\n\t\tif j < len(pat) {\n\t\t\tif pat[j] == '?' 
|| (pat[j] == str[i] && str[i] != '*') {\n\t\t\t\ti++\n\t\t\t\tj++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif pat[j] == '*' {\n\t\t\t\tstar = j\n\t\t\t\tpms = i\n\t\t\t\tj++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif star >= 0 {\n\t\t\tpms++\n\t\t\ti = pms\n\t\t\tj = star + 1\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\n\tfor j < len(pat) && pat[j] == '*' {\n\t\tj++\n\t}\n\n\treturn j == len(pat)\n}", "func Match(r Regex, s string) bool {\n\tfor _, c := range s {\n\t\tr = r.Derivative(c)\n\t}\n\treturn r.Accepting()\n}", "func assertStringRegexp(t *testing.T, pattern, s string) {\n\tmatches, _ := regexp.MatchString(pattern, s)\n\n\tif !matches {\n\t\tt.Errorf(\"%s is not in format %s\", s, pattern)\n\t}\n}", "func regexMatch(path, pattern string) (bool, error) {\n\treturn regexp.MatchString(pattern, path)\n}", "func BodyMatchString(pattern string) Func {\n\treturn func(res *http.Response, req *http.Request) error {\n\t\tbody, err := readBody(res)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif match, _ := regexp.MatchString(pattern, string(body)); !match {\n\t\t\treturn fmt.Errorf(\"Body mismatch: pattern not found '%s'\", pattern)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (MatchedText) Matches(pattern string) bool { return boolResult }", "func (t *dataType) Match(r *regexp.Regexp) *dataType {\n\tt.str.Match(r)\n\treturn t\n}", "func Match(regex string, text string) bool {\n runerx := compile(regex)\n runetxt := []rune(text)\n \n if len(runerx) > 0 && runerx[0] == '^' {\n return matchhere(runerx[1:], runetxt)\n }\n \n for {\n if matchhere(runerx, runetxt) {\n return true\n }\n if len(runetxt) == 0 {\n return false\n }\n runetxt = runetxt[1:]\n }\n }", "func TestMatchStringWithQuoteMeta(t *testing.T) {\n\tpattern, str := regexp.QuoteMeta(\"[foo]\"), \"[foo]\"\n\n\tif match, err := regexp.MatchString(pattern, str); match != true {\n\t\tt.Errorf(\"MatchString did not match %q %v\", str, err)\n\t}\n}", "func GetMatchString(str string) string {\n\tescapeStr := 
GetEscapeString(str)\n\tmatchString := make([]byte, 0, 10)\n\tmatchString = append(matchString, \"%\"...)\n\tmatchString = append(matchString, escapeStr...)\n\tmatchString = append(matchString, \"%\"...)\n\n\treturn string(matchString)\n}", "func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}", "func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}", "func TestFindStringSubmatch(t *testing.T) {\n\tregex := regexp.MustCompile(\"Hello.*(world)\")\n\tsubject := \"Hello brave new world\"\n\tmatches := regex.FindStringSubmatch(subject)\n\tAssert(\"world\", matches[1], t)\n}", "func (regex *Regex) Match(entire bool, str string) map[string]string {\n\tcompiled := regex.compiled\n\tcaptured := compiled.FindStringSubmatch(str)\n\tif captured == nil || (entire && len(captured[0]) != len(str)) {\n\t\treturn nil\n\t}\n\n\tresult := make(map[string]string, compiled.NumSubexp())\n\tfor i, name := range compiled.SubexpNames()[1:] {\n\t\tresult[name] = captured[i+1]\n\t}\n\treturn result\n}", "func CompileString(in string) (Matcher, error) {\n\tif in == \"\" {\n\t\treturn Matcher{(*emptyStringMatcher)(nil)}, nil\n\t}\n\treturn Matcher{&substringMatcher{in, []byte(in)}}, nil\n}", "func NormalMatch(regx string, arg string) bool {\n\tmatched, err := regexp.MatchString(regx, arg)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn matched\n}", "func match(pattern string, message string) bool {\n\treturn strings.Contains(message, pattern)\n}", "func IsMatchStrs(str string, regStrs []string) (bool, error) {\n\n\tvar err error\n\n\tif len(regStrs) == 0 {\n\t\treturn true, nil\n\t}\n\tre, err := CompileStrs(regStrs)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn re.MatchString(str), nil\n}", "func (m NoneMatcher) Match(s string) bool { return false }", "func (s *sqlStrConcat) Match(n 
ast.Node, c *gosec.Context) (*gosec.Issue, error) {\n\tif node, ok := n.(*ast.BinaryExpr); ok {\n\t\tif start, ok := node.X.(*ast.BasicLit); ok {\n\t\t\tif str, e := gosec.GetString(start); e == nil {\n\t\t\t\tif !s.MatchPatterns(str) {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\tif _, ok := node.Y.(*ast.BasicLit); ok {\n\t\t\t\t\treturn nil, nil // string cat OK\n\t\t\t\t}\n\t\t\t\tif second, ok := node.Y.(*ast.Ident); ok && s.checkObject(second, c) {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\treturn gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}", "func makeStringMatcher(arg interface{}) (stringMatcher, error) {\n\tif str, ok := arg.(string); ok {\n\t\treturn &stringLiteralMatcher{str: str}, nil\n\t} else if re, ok := arg.(*regexp.Regexp); ok {\n\t\treturn &stringRegexMatcher{pattern: re}, nil\n\t} else if sm, ok := arg.(func(string) bool); ok {\n\t\treturn &funcStringMatcher{fn: sm}, nil\n\t} else if c, ok := arg.(MatchConst); ok {\n\t\tif c == Any {\n\t\t\treturn &funcStringMatcher{\n\t\t\t\tfn: func(string) bool { return true },\n\t\t\t}, nil\n\t\t} else if c == None {\n\t\t\treturn &funcStringMatcher{\n\t\t\t\tfn: func(string) bool { return false },\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Cannot use value %v when matching against strings\", arg)\n}", "func match(a, b string) bool {\n\treturn strings.EqualFold(a, b)\n}", "func Matches(pattern string, operand string) (bool, error) {\n\treturn regexp.MatchString(pattern, operand)\n}", "func RegexpMatch(content, pattern string) bool {\n\tre, err := pool.Compile(pattern)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn re.MatchString(content)\n}", "func regexEqualMatch(mrb *oruby.MrbState, self oruby.Value) oruby.MrbValue {\n\tvar s string\n\targs := mrb.GetArgs()\n\tdest := args.Item(0)\n\tpos := oruby.MrbFixnum(args.ItemDef(1, oruby.MrbFixnumValue(0)))\n\n\tswitch dest.Type() {\n\tcase oruby.MrbTTSymbol:\n\t\ts = 
mrb.SymString(oruby.MrbSymbol(dest))\n\tcase oruby.MrbTTString:\n\t\ts = mrb.StrToCstr(dest)\n\tdefault:\n\t\treturn oruby.False\n\t}\n\n\tregx := mrb.Data(self).(*regexp.Regexp)\n\treturn oruby.Bool(regx.MatchString(s[pos:]))\n}", "func (abb *Abbrev) MatchString(str string) bool {\n\tpos := 0\n\tl := 0\n\tlength := 0\n\tfor i := 0; i < abb.size; i++ {\n\t\tlength += abb.length[i]\n\t}\nmatch:\n\tfor i := 0; i < abb.size; i++ {\n\t\tif len(str[pos:]) > length {\n\t\t\treturn false\n\t\t}\n\t\tif !strings.HasPrefix(str[pos:], abb.pre[i]) {\n\t\t\tif i == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tfind := false\n\t\t\tfor j := 1; j <= pos-len(abb.pre[i-1]); j++ {\n\t\t\t\tif strings.HasPrefix(str[pos-j:], abb.pre[i]) {\n\t\t\t\t\tfind = true\n\t\t\t\t\tpos -= j\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !find {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tpos += len(abb.pre[i])\n\t\tl = len(abb.follow[i])\n\t\tfor j, s := range str[pos:] {\n\t\t\tif j >= l {\n\t\t\t\tcontinue match\n\t\t\t}\n\t\t\tif rune(abb.follow[i][j]) != s {\n\t\t\t\tif i == abb.size-1 {\n\t\t\t\t\treturn false\n\t\t\t\t} else {\n\t\t\t\t\tcontinue match\n\t\t\t\t}\n\t\t\t}\n\t\t\tpos++\n\t\t}\n\t}\n\treturn true\n}", "func (m *Matcher) Match(message string) (matched bool, arg string, err error) {\n\tmatches := m.matchRegex.FindStringSubmatch(message)\n\tif matches == nil {\n\t\tmatched = false\n\t\treturn\n\t}\n\n\tmatched = true\n\n\tif len(m.arguments) == 0 {\n\t\treturn\n\t} else if len(matches) == 0 {\n\t\tlog.Fatal(\"Expected argument but did not set it up in the regex.\")\n\t}\n\n\targ = strings.TrimSpace(matches[1])\n\tif arg == \"\" {\n\t\terr = Error{name: m.name, arguments: m.arguments}\n\t}\n\treturn\n}", "func (m *Matcher) MatchAnyString(strs interface{}) bool {\n\treturn matchAnyStrings(m.stringMatcher, strs)\n}", "func TestPattern2(t *testing.T) {\n\tre := MustCompile(\"a$\")\n\tif !re.MatchString(\"a\") {\n\t\tt.Errorf(\"expect to match\\n\")\n\t}\n\tif re.MatchString(\"ab\") 
{\n\t\tt.Errorf(\"expect to mismatch\\n\")\n\t}\n}", "func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {\n\treturn Regexp(a.t, rx, str, msgAndArgs...)\n}", "func RegexMatchFunc(args ...interface{}) (interface{}, error) {\n\tname1 := args[0].(string)\n\tname2 := args[1].(string)\n\n\treturn (bool)(RegexMatch(name1, name2)), nil\n}", "func matches(regex string, input string) bool {\n\tif len(regex) == 0 && len(input) == 0 {\n\t\treturn true\n\t}\n\tvar fi, fr, sr, tr byte\n\tif len(regex) > 0 {\n\t\tfr = regex[0]\n\t}\n\tif len(regex) > 1 {\n\t\tsr = regex[1]\n\t}\n\tif len(regex) > 2 {\n\t\ttr = regex[2]\n\t}\n\tif len(input) > 0 {\n\t\tfi = input[0]\n\t}\n\tif sr == '*' {\n\t\tif fr == fi {\n\t\t\treturn matches(regex, input[1:])\n\t\t} else {\n\t\t\tfor i := 0; i < len(input); i++ {\n\t\t\t\tif input[i] == tr {\n\t\t\t\t\treturn matches(regex[2:], input[i:])\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tif fr == fi {\n\t\t\treturn matches(regex[1:], input[1:])\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n}", "func (t *Check) Matches(r, s string) (bool, error) {\n\treturn regexp.MatchString(r, s)\n}", "func Matches(pattern string, str string) (int, []int, bool) {\n\tpatternRunes := []rune(pattern)\n\tstrRunes := []rune(str)\n\treturn match(patternRunes, strRunes, strRunes, make([]int, 0), maxRecursions)\n}", "func (m *ExactMatcher) MatchAnyString(strs interface{}) bool {\n\treturn matchAnyStrings(m.stringMatcher, strs)\n}", "func Regex() {\n\n\t// This tests whether a pattern matches a string.\n\tmatch, _ := regexp.MatchString(\"p([a-z]+)ch\", \"peach\")\n\tfmt.Println(match)\n\n\t// Above we used a string pattern directly,\n\t// but for other regexp tasks you’ll need to Compile an optimized\n\t// Regexp struct.\n\tr, _ := regexp.Compile(\"p([a-z]+)ch\")\n\n\t// Many methods are available on these structs.\n\t// Here’s a match test like we saw earlier.\n\tfmt.Println(r.MatchString(\"peach\"))\n\n\t// 
This finds the match for the regexp.\n\tfmt.Println(r.FindString(\"peach punch\"))\n\n\t// This also finds the first match but returns the start\n\t// and end indexes for the match instead of the matching text.\n\tfmt.Println(r.FindStringIndex(\"peach punch\"))\n\n\t// The Submatch variants include information about both\n\t// the whole-pattern matches and the submatches within those matches.\n\t// For example this will return information for both p([a-z]+)ch and ([a-z]+).\n\tfmt.Println(r.FindStringSubmatch(\"peach punch\"))\n\n\t// Similarly this will return information about the indexes of matches and submatches.\n\tfmt.Println(r.FindStringSubmatchIndex(\"peach punch\"))\n\n\t// The All variants of these functions apply to all matches in the input,\n\t// not just the first. For example to find all matches for a regexp.\n\tfmt.Println(r.FindAllString(\"peach punch pinch azerty\", -1))\n\n\t// These All variants are available for the other functions we saw above as well.\n\tfmt.Println(r.FindAllStringSubmatchIndex(\"peach punch pinch azert\", -1))\n\n\t// Providing a non-negative integer as the second argument\n\t// to these functions will limit the number of matches.\n\tfmt.Println(r.FindAllString(\"peach punch pinch azert\", 2))\n\n\t// Our examples above had string arguments and used names like MatchString.\n\t// We can also provide []byte arguments and drop String from the function name.\n\tfmt.Println(r.Match([]byte(\"peach\")))\n\n\t// When creating constants with regular expressions\n\t// you can use the MustCompile variation of Compile.\n\t// A plain Compile won’t work for constants because it has 2 return values.\n\tr = regexp.MustCompile(\"p([a-z]+)ch\")\n\tfmt.Println(r)\n\n\t// The regexp package can also be used to replace subsets of strings with other values.\n\tfmt.Println(r.ReplaceAllString(\"a peach\", \"<fruit>\"))\n\n\t// The Func variant allows you to transform matched text with a given function.\n\tin := []byte(\"a peach\")\n\tout := 
r.ReplaceAllFunc(in, bytes.ToUpper)\n\tfmt.Println(string(out))\n}", "func (typ TokenType) MatchString() string {\n\ttokInfo, ok := TokenNameMap[typ]\n\t//u.Debugf(\"matchstring: '%v' '%v' '%v'\", tokInfo.T, tokInfo.Kw, tokInfo.Description)\n\tif ok {\n\t\tif tokInfo.HasSpaces {\n\t\t\treturn tokInfo.firstWord\n\t\t}\n\t\treturn tokInfo.Kw\n\t}\n\treturn \"not implemented\"\n}", "func (typ TokenType) MatchString() string {\n\ttokInfo, ok := TokenNameMap[typ]\n\t//u.Debugf(\"matchstring: '%v' '%v' '%v'\", tokInfo.T, tokInfo.Kw, tokInfo.Description)\n\tif ok {\n\t\tif tokInfo.HasSpaces {\n\t\t\treturn tokInfo.firstWord\n\t\t}\n\t\treturn tokInfo.Kw\n\t}\n\treturn \"not implemented\"\n}", "func (v *Value) Match(expr string) bool {\n\t// Compile the regular expression.\n\tre, err := v.script.compileRegexp(expr)\n\tif err != nil {\n\t\treturn false // Fail silently\n\t}\n\n\t// Return true if the expression matches the value, interpreted as a\n\t// string.\n\tloc := re.FindStringIndex(v.String())\n\tif loc == nil {\n\t\tv.script.RStart = 0\n\t\tv.script.RLength = -1\n\t\treturn false\n\t}\n\tv.script.RStart = loc[0] + 1\n\tv.script.RLength = loc[1] - loc[0]\n\treturn true\n}", "func (re *Regexp) Match(b []byte) bool\t{ return len(re.doExecute(\"\", b, 0)) > 0 }", "func regex(s string) (*regexp.Regexp, error) {\n\tif rawString.MatchString(s) {\n\t\ts = fmt.Sprintf(\"^%s$\", s)\n\t}\n\treturn regexp.Compile(s)\n}", "func (o HttpQueryParameterMatchOutput) RegexMatch() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HttpQueryParameterMatch) *string { return v.RegexMatch }).(pulumi.StringPtrOutput)\n}", "func MatchesRegex(s string) bool {\n\treturn reg.MatchString(s)\n}", "func (s StreamID) Match(pattern string) bool {\n\treturn wildcard.MatchSimple(pattern, s.str)\n}", "func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)", "func (rs RegexpSet) IsMatch(s string) bool {\n\tfor _, 
pattern := range rs.patterns {\n\t\tr := regexp.MustCompile(pattern)\n\t\tif r.MatchString(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (re *Regexp) MatchStrings(s string) (a []string) {\n\tr := re.doExecute(s, nil, 0);\n\tif r == nil {\n\t\treturn nil\n\t}\n\ta = make([]string, len(r)/2);\n\tfor i := 0; i < len(r); i += 2 {\n\t\tif r[i] != -1 {\t// -1 means no match for this subexpression\n\t\t\ta[i/2] = s[r[i]:r[i+1]]\n\t\t}\n\t}\n\treturn;\n}", "func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {}", "func (e Match) FindString(r string) (matches [1]string, pos int, ok bool) {\n\tvar bt [319]stateMatch // static storage for backtracking state\n\tmatches, pos, ok = e.doString(r, modeFirstMatch, bt[:0])\n\treturn\n}", "func (p *unicodePattern) DoMatch(str string) bool {\n\treturn stringutil.DoMatchInner(str, p.patChars, p.patTypes, func(a, b rune) bool {\n\t\tif a > 0xFFFF || b > 0xFFFF {\n\t\t\treturn a == b\n\t\t}\n\n\t\tar, br := mapTable[a], mapTable[b]\n\t\tif ar != br {\n\t\t\treturn false\n\t\t}\n\n\t\tif ar == longRune {\n\t\t\treturn a == b\n\t\t}\n\n\t\treturn true\n\t})\n}", "func (o HttpQueryParameterMatchResponseOutput) RegexMatch() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HttpQueryParameterMatchResponse) string { return v.RegexMatch }).(pulumi.StringOutput)\n}", "func wildcardMatch(pattern, text string) (bool, error) {\n\tpatternLen := len(pattern)\n\ttextLen := len(text)\n\tif patternLen == 0 {\n\t\treturn textLen == 0, nil\n\t}\n\n\tif pattern == \"*\" {\n\t\treturn true, nil\n\t}\n\n\tpattern = strings.ToLower(pattern)\n\ttext = strings.ToLower(text)\n\n\tmatch, err := regexp.MatchString(toRegexPattern(pattern), text)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"wildcardMatch: unable to perform regex matching: %w\", err)\n\t}\n\n\treturn match, nil\n}", "func TestPattern1(t *testing.T) {\n\tre := MustCompile(`b\\$a`)\n\tif 
!re.MatchString(\"b$a\") {\n\t\tt.Errorf(\"expect to match\\n\")\n\t}\n\tre = MustCompile(\"b\\\\$a\")\n\tif !re.MatchString(\"b$a\") {\n\t\tt.Errorf(\"expect to match 2\\n\")\n\t}\n}", "func matchPattern(pattern string) func(name string) bool {\n\tre := regexp.QuoteMeta(pattern)\n\tre = strings.Replace(re, `\\.\\.\\.`, `.*`, -1)\n\n\t// Special case: string ending in /\n\tif strings.HasSuffix(pattern, \"/\") {\n\t\tre = strings.TrimSuffix(re, `/`)\n\t}\n\n\t// Special case: foo/... matches foo too.\n\tif strings.HasSuffix(re, `/.*`) {\n\t\tre = re[:len(re)-len(`/.*`)] + `(/.*)?`\n\t}\n\n\treg := regexp.MustCompile(`^` + re + `$`)\n\treturn func(name string) bool {\n\t\treturn reg.MatchString(name)\n\t}\n}", "func Match(t TestingT, r, v interface{}, extras ...interface{}) bool {\n\treg, ok := tryMatch(r, v)\n\tif !ok {\n\t\t_, acts := toString(nil, v)\n\n\t\tErrorf(t, \"Expect to match regexp\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"-regexp\",\n\t\t\t\tcontent: fmt.Sprintf(\"%#v\", reg.String()),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"+value\",\n\t\t\t\tcontent: fmt.Sprintf(\"%#v\", acts),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn ok\n}", "func Match(pattern string, b []byte) (matched bool, error string) {\n\tre, err := CompileRegexp(pattern);\n\tif err != \"\" {\n\t\treturn false, err\n\t}\n\treturn re.Match(b), \"\";\n}", "func (g GiveCommand) Matches(str string) bool {\n\treturn giveReg.MatchString(str)\n}", "func ChildMatch(patternStr, str string) (matched bool, err error) {\n\tif patternStr == \"\" {\n\t\treturn true, nil\n\t}\n\n\tpattern := preparePattern(patternStr)\n\tstrs, err := prepareStr(str)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn childMatch(pattern, strs)\n}", "func RegexpEqual(x *syntax.Regexp, y *syntax.Regexp,) bool", "func patternMatch(pattern string, text string) bool {\n\t// Empty pattern only match empty text.\n\tif len(pattern) == 0 
{\n\t\treturn len(text) == 0\n\t}\n\n\tif pattern == wildcard {\n\t\treturn true\n\t}\n\n\tparts := strings.Split(pattern, wildcard)\n\n\tif len(parts) == 1 {\n\t\treturn pattern == text\n\t}\n\n\tif strings.HasPrefix(text, parts[0]) {\n\t\ttext = text[len(parts[0]):]\n\t} else {\n\t\treturn false\n\t}\n\n\tfor i := 1; i < len(parts); i++ {\n\t\t// If the last part is empty, we match.\n\t\tif i == len(parts)-1 {\n\t\t\tif parts[i] == \"\" || parts[i] == \"\\n\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tindex := strings.Index(text, parts[i])\n\t\tif index < 0 {\n\t\t\treturn false\n\t\t}\n\t\ttext = text[index+len(parts[i]):]\n\t}\n\n\treturn len(text) == 0\n}", "func main() {\n\tfmt.Println(isMatch(\"mississippi\", \"mis*is*p*.\"))\n\tfmt.Println(isMatch(\"aab\", \"c*a*b\"))\n}", "func Test(str, pattern string) bool {\n\tvar pa string\n\tswitch pattern {\n\tcase \"idcard\":\n\t\tpa = `(^\\d{15}$)|(^\\d{17}(\\d|x|X)$)`\n\tcase \"english\":\n\t\tpa = \"^[A-Za-z]+$\"\n\tcase \"chinese\":\n\t\tpa = \"^[\\u4e00-\\u9fa5]+$\"\n\tcase \"username\":\n\t\tpa = `^[a-z][a-z0-9]{4,19}$`\n\tcase \"email\":\n\t\tpa = `^\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*$`\n\tcase \"zip\":\n\t\tpa = `^[1-9]\\d{5}$`\n\tcase \"qq\":\n\t\tpa = `^[1-9]\\d{4,9}$`\n\tcase \"phone\":\n\t\tpa = `^((\\(\\d{2,3}\\))|(\\d{3}\\-))?(\\(0\\d{2,3}\\)|0\\d{2,3}-)?[1-9]\\d{6,7}(\\-\\d{1,4})?$`\n\tcase \"mobile\":\n\t\tpa = `^(13[0-9]|14[5|7]|15[0-9]|18[0-9]|199)\\d{8}$`\n\tcase \"url\":\n\t\tpa = `^((ht|f)tps?):\\/\\/[\\w\\-]+(\\.[\\w\\-]+)+([\\w\\-.,@?^=%&:\\/~+#]*[\\w\\-@?^=%&\\/~+#])?$`\n\tcase \"ip\":\n\t\tpa = `^\\d+\\.\\d+\\.\\d+\\.\\d+$`\n\tcase \"password\":\n\t\treturn isStrongPassword(str)\n\tdefault:\n\t\tpa = pattern\n\t}\n\treg := regexp.MustCompile(pa)\n\n\treturn reg.MatchString(str)\n}" ]
[ "0.81449896", "0.7982518", "0.7940541", "0.7932", "0.7912423", "0.7780218", "0.7763301", "0.77341", "0.767617", "0.7602793", "0.75557566", "0.74836355", "0.73276955", "0.7312978", "0.70129156", "0.6994904", "0.6981453", "0.69814", "0.6972209", "0.6921195", "0.69159573", "0.689114", "0.6841454", "0.681909", "0.66321033", "0.6612541", "0.6590945", "0.6590267", "0.6581095", "0.6550129", "0.6550129", "0.65498835", "0.6539197", "0.65311503", "0.6503715", "0.6500343", "0.6465358", "0.6442017", "0.64102745", "0.6390992", "0.63652307", "0.63603705", "0.6334163", "0.63031346", "0.6252684", "0.62283343", "0.6195386", "0.61859703", "0.61727476", "0.61727476", "0.61676866", "0.6107749", "0.60807055", "0.6070738", "0.60602987", "0.6031864", "0.60160524", "0.60107493", "0.5969952", "0.59617925", "0.59567726", "0.59536177", "0.59473044", "0.5937396", "0.593195", "0.5930797", "0.59153783", "0.5914414", "0.5913744", "0.58901006", "0.5886644", "0.587649", "0.58763677", "0.58686113", "0.58649814", "0.58649814", "0.58505434", "0.5850226", "0.58074605", "0.58048403", "0.57880014", "0.5750972", "0.57393974", "0.5706695", "0.5691477", "0.5677248", "0.5673096", "0.5672775", "0.5671747", "0.5667008", "0.56583077", "0.565799", "0.56548136", "0.56452316", "0.5643282", "0.56378216", "0.5632929", "0.5621337", "0.55932105", "0.5576345" ]
0.76706564
9
Globexp builds a regular express from from extended glob pattern and then returns a Regexp object.
func Globexp(glob string) *regexp.Regexp { var re bytes.Buffer re.WriteString("^") i, inGroup, L := 0, false, len(glob) for i < L { r, w := utf8.DecodeRuneInString(glob[i:]) switch r { default: re.WriteRune(r) case '\\', '$', '^', '+', '.', '(', ')', '=', '!', '|': re.WriteRune('\\') re.WriteRune(r) case '/': // TODO optimize later, string could be long rest := glob[i:] re.WriteRune('/') if strings.HasPrefix(rest, "/**/") { re.WriteString(zeroOrMoreDirectories) w *= 4 } else if rest == "/**" { re.WriteString(".*") w *= 3 } case '?': re.WriteRune('.') case '[', ']': re.WriteRune(r) case '{': if i < L-1 { if glob[i+1:i+2] == "{" { re.WriteString("\\{") w *= 2 break } } inGroup = true re.WriteRune('(') case '}': if inGroup { inGroup = false re.WriteRune(')') } else { re.WriteRune('}') } case ',': if inGroup { re.WriteRune('|') } else { re.WriteRune('\\') re.WriteRune(r) } case '*': rest := glob[i:] if strings.HasPrefix(rest, "**/") { re.WriteString(zeroOrMoreDirectories) w *= 3 } else { re.WriteString(anyRune) } } i += w } re.WriteString("$") //log.Printf("regex string %s", re.String()) return regexp.MustCompile(re.String()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RegexFromGlob(glob string) string {\n\treturn RegexFromGlobWithOptions(glob, Options{\n\t\tExtended: true,\n\t\tGlobStar: true,\n\t\tDelimiter: '/',\n\t})\n}", "func createGlobbingRegex(globbing string) *regexp.Regexp {\n\tif globbing == \"\" {\n\t\t// nil here as \"\" is fast-tracked elsewhere\n\t\treturn nil\n\t}\n\n\tg := regexp.QuoteMeta(globbing)\n\tg = strings.ReplaceAll(g, \"\\\\*\", \".*\")\n\tg = strings.ReplaceAll(g, \"\\\\?\", \".\")\n\t// (?i) forces case insensitive matches\n\tg = \"(?i)^\" + g + \"$\"\n\treturn regexp.MustCompile(g)\n}", "func RegexFromGlobWithOptions(glob string, config Options) string {\n\treStr := \"\"\n\n\tdelimiter := '/'\n\tif config.Delimiter != 0 {\n\t\tdelimiter = config.Delimiter\n\t}\n\n\tdelimiterOutsideClass, delimiterInsideClass := escapeDelimiter(delimiter)\n\n\tinGroup := false\n\n\tglobLen := len(glob)\n\n\tfor i := 0; i < globLen; i++ {\n\t\tc := glob[i]\n\n\t\tswitch c {\n\t\tcase '/', '$', '^', '+', '.', '(', ')', '=', '!', '|':\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase '?':\n\t\t\tif config.Extended {\n\t\t\t\treStr += \".\"\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase '[', ']':\n\t\t\tif config.Extended {\n\t\t\t\treStr += string(c)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase '{':\n\t\t\tif config.Extended {\n\t\t\t\tinGroup = true\n\t\t\t\treStr += \"(\"\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase '}':\n\t\t\tif config.Extended {\n\t\t\t\tinGroup = false\n\t\t\t\treStr += \")\"\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase ',':\n\t\t\tif inGroup {\n\t\t\t\treStr += \"|\"\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treStr += \"\\\\\" + string(c)\n\n\t\tcase '*':\n\t\t\t// Move over all consecutive \"*\"'s.\n\t\t\t// Also store the previous and next characters\n\t\t\tvar nextChar, prevChar rune\n\t\t\tif i > 0 {\n\t\t\t\tprevChar = rune(glob[i-1])\n\t\t\t}\n\t\t\tstarCount := 1\n\t\t\tfor i < 
globLen-1 && glob[i+1] == '*' {\n\t\t\t\tstarCount++\n\t\t\t\ti++\n\t\t\t}\n\n\t\t\tif i < globLen-1 {\n\t\t\t\tnextChar = rune(glob[i+1])\n\t\t\t}\n\n\t\t\tif !config.GlobStar {\n\t\t\t\t// globstar is disabled, so treat any number of \"*\" as one\n\t\t\t\treStr += \".*\"\n\t\t\t} else {\n\t\t\t\t// globstar is enabled, so determine if this is a globstar segment\n\t\t\t\tisGlobstar := starCount > 1 && // multiple \"*\"'s\n\t\t\t\t\t(prevChar == delimiter || prevChar == 0) && // from the start of the segment\n\t\t\t\t\t(nextChar == delimiter || nextChar == 0) // to the end of the segment\n\n\t\t\t\tif isGlobstar {\n\t\t\t\t\t// it's a globstar, so match zero or more path segments\n\t\t\t\t\treStr += \"(?:(?:[^\" + delimiterInsideClass + \"]*(?:\" + delimiterOutsideClass + \"|$))*)\"\n\t\t\t\t\ti++ // move over the delimiter\n\t\t\t\t} else {\n\t\t\t\t\t// it's not a globstar, so only match one path segment\n\t\t\t\t\treStr += \"(?:[^\" + delimiterInsideClass + \"]*)\"\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\treStr += string(c)\n\t\t}\n\t}\n\n\treturn \"^\" + reStr + \"$\"\n}", "func NewTestGlobRegexp(glob string) (*regexp.Regexp, error) {\n\tif _, err := validateGlob(glob); err != nil {\n\t\treturn nil, err\n\t}\n\treturn compileGlob(glob)\n}", "func compileGlob(glob string) (*regexp.Regexp, error) {\n\tglob = strings.Replace(glob, \".\", \"\\\\.\", -1)\n\tglob = strings.Replace(glob, \"*\", \".*\", -1)\n\tglob = \"^\" + glob + \"$\"\n\treturn regexp.Compile(glob)\n}", "func (e PiRegExp) exp() *regexp.Regexp { return regexp.MustCompile(string(e)) }", "func NewRuleGlob() *RuleGlob {\n\treturn &RuleGlob{\n\t\tRuleBase: RuleBase{\n\t\t\tname: \"glob\",\n\t\t\tdesc: \"Checks for glob syntax used in branch names, tags, and paths\",\n\t\t},\n\t}\n}", "func GlobMatch(patterns ...string) MatcherFunc { return GlobMatches(patterns) }", "func wildCardToRegexp(pattern string) string {\n\tvar result strings.Builder\n\tfor i, literal := range strings.Split(pattern, \"*\") 
{\n\n\t\t// Replace * with .*\n\t\tif i > 0 {\n\t\t\tresult.WriteString(\".*\")\n\t\t}\n\n\t\t// Quote any regular expression meta characters in the\n\t\t// literal text.\n\t\tresult.WriteString(regexp.QuoteMeta(literal))\n\t}\n\treturn result.String()\n}", "func wildCardToRegexp(pattern string) string {\n\tvar result strings.Builder\n\tresult.WriteString(\"(?i)\")\n\n\trpattern := strings.Replace(pattern, \"%\", \".*\", -1)\n\trpattern = strings.Replace(rpattern, \"_\", \".+\", -1)\n\tresult.WriteString(rpattern)\n\n\treturn result.String()\n}", "func (g *Group) Glob(patterns ...string) error {\n\tfiles := make([]string, 0, 8)\n\tfor _, pattern := range patterns {\n\t\tpattern = filepath.Join(g.dir, pattern)\n\t\tmatches, err := filepath.Glob(pattern)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, m := range matches {\n\t\t\tm, err = filepath.Rel(g.dir, m)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif filepath.Base(m)[0] == '.' {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfiles = append(files, m)\n\t\t}\n\t}\n\treturn g.Files(files...)\n}", "func regexpToGlobBestEffort(p string) (glob string, equiv bool) {\n\tif p == \"\" {\n\t\treturn \"*\", true\n\t}\n\n\tre, err := regexpsyntax.Parse(p, regexpsyntax.OneLine)\n\tif err != nil {\n\t\treturn \"\", false\n\t}\n\tswitch re.Op {\n\tcase regexpsyntax.OpLiteral:\n\t\treturn \"*\" + globQuoteMeta(re.Rune) + \"*\", true\n\tcase regexpsyntax.OpConcat:\n\t\tif len(re.Sub) < 2 {\n\t\t\treturn \"\", false\n\t\t}\n\t\tvar b strings.Builder\n\t\tif op := re.Sub[0].Op; op != regexpsyntax.OpBeginText && op != regexpsyntax.OpStar {\n\t\t\tb.WriteByte('*')\n\t\t}\n\t\tfor _, sub := range re.Sub {\n\t\t\tswitch sub.Op {\n\t\t\tcase regexpsyntax.OpBeginText, regexpsyntax.OpEndText:\n\t\t\t\t// ignore\n\t\t\tcase regexpsyntax.OpLiteral:\n\t\t\t\tb.WriteString(globQuoteMeta(sub.Rune))\n\t\t\tcase regexpsyntax.OpAnyCharNotNL:\n\t\t\t\tb.WriteByte('?')\n\t\t\tcase regexpsyntax.OpStar:\n\t\t\t\tif sub.Sub[0].Op != 
regexpsyntax.OpAnyCharNotNL { // only support .*\n\t\t\t\t\treturn \"\", false\n\t\t\t\t}\n\t\t\t\tb.WriteByte('*')\n\t\t\tdefault:\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t}\n\t\tif op := re.Sub[len(re.Sub)-1].Op; op != regexpsyntax.OpEndText && op != regexpsyntax.OpStar {\n\t\t\tb.WriteByte('*')\n\t\t}\n\t\tglob := b.String()\n\t\tif strings.HasPrefix(glob, \":\") { // leading : has special meaning\n\t\t\treturn \"\", false\n\t\t}\n\t\treturn glob, true\n\t}\n\treturn \"\", false\n}", "func NewRegexp(exp string) (*Regexp, error) {\n\tr, err := regexp.Compile(exp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Regexp{r}, nil\n}", "func BuildGlobPattern(paths []string) glob.Glob {\n\tvar globString string\n\tif len(paths) == 0 {\n\t\tglobString = \"\"\n\t} else if len(paths) == 1 {\n\t\tglobString = paths[0]\n\t} else {\n\t\tglobString = fmt.Sprintf(\"{%s}\", strings.Join(paths, \",\"))\n\t}\n\n\treturn glob.MustCompile(globString)\n}", "func Glob(filePattern string) ([]string, error) {\n\treturn zglob.Glob(filePattern)\n}", "func (badGlob6) Glob__() error { return nil }", "func builtinDirectoryGlob(node asti.NodeI, env *object.Environment, args ...object.ObjectI) object.ObjectI {\n\tif len(args) != 1 {\n\t\treturn object.NewError(node, \"wrong number of arguments. 
got=%d, want=1\",\n\t\t\tlen(args))\n\t}\n\tpattern := args[0].(*object.String).Value\n\n\tentries, err := filepath.Glob(pattern)\n\tif err != nil {\n\t\treturn object.NULL\n\t}\n\n\t// Create an array to hold the results and populate it\n\tl := len(entries)\n\tresult := make([]object.ObjectI, l)\n\tfor i, txt := range entries {\n\t\tresult[i] = &object.String{Value: txt}\n\t}\n\treturn &object.Array{Elements: result}\n}", "func expandRegex(r *regexp.Regexp, tmpl string, src string) string {\n\tvar res []byte\n\tfor _, submatches := range r.FindAllStringSubmatchIndex(src, 1) {\n\t\tres = r.ExpandString(res, tmpl, src, submatches)\n\t}\n\treturn string(res)\n}", "func (process *Process) evalGlobPatterns(patterns []cwl.Expression) ([]string, error) {\n\tvar out []string\n\n\tfor _, pattern := range patterns {\n\t\t// TODO what is \"self\" here?\n\t\tval, err := process.eval(pattern, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tswitch z := val.(type) {\n\t\tcase string:\n\t\t\tout = append(out, z)\n\t\tcase []string:\n\t\t\tout = append(out, z...)\n\t\tcase []interface{}:\n\t\t\tfor _, val := range z {\n\t\t\t\tz, ok := val.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"glob expression returned an invalid type. Only string or []string \"+\n\t\t\t\t\t\t\t\"are allowed. Got: %#v\", z)\n\t\t\t\t}\n\t\t\t\tout = append(out, z)\n\t\t\t}\n\t\tcase []cwl.Value:\n\t\t\tfor _, val := range z {\n\t\t\t\tz, ok := val.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\t\"glob expression returned an invalid type. Only string or []string \"+\n\t\t\t\t\t\t\t\"are allowed. Got: %#v\", z)\n\t\t\t\t}\n\t\t\t\tout = append(out, z)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"glob expression returned an invalid type. Only string or []string \"+\n\t\t\t\t\t\"are allowed. 
Got: %#v\", z)\n\t\t}\n\t}\n\treturn out, nil\n}", "func (p *Project) Glob(pattern string) (paths []string, err error) {\n\tprefix := p.BaseDir + string(filepath.Separator)\n\tfullPattern := prefix + pattern\n\tpaths, err = zglob.Glob(fullPattern)\n\tif err != nil {\n\t\treturn\n\t}\n\tprefixLen := len(prefix)\n\tfor n, fullpath := range paths {\n\t\tpaths[n] = fullpath[prefixLen:]\n\t}\n\treturn\n}", "func extensionPattern(pattern *regexp.Regexp) *regexp.Regexp {\n\treturn suffixPattern(regexp.MustCompile(\"(^|/)[^/]+.\" + pattern.String()))\n}", "func RegExp(pattern string) *RegExpMatcher {\n\treturn &RegExpMatcher{Pattern: pattern}\n}", "func Glob(pattern string) (matches []string, err error) {\n\tpattern = filepath.Clean(pattern)\n\tpattern = escape(pattern)\n\n\tif strings.Count(pattern, dirGlobOperator) > 1 {\n\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': the ** globbing operator may only be used once in a pattern\", pattern)\n\t}\n\n\tif !dirGlobOperatorUseValid(pattern) {\n\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': the ** globbing operator may only be used as path segment on its own, i.e. 
…/**/… or **/…\", pattern)\n\t}\n\n\tif strings.Contains(pattern, dirGlobOperator) {\n\t\tparts := strings.Split(pattern, dirGlobOperator)\n\t\tbasePattern, endPattern := filepath.Clean(parts[0]), filepath.Clean(parts[1])\n\n\t\tbaseCandidates, err := filepath.Glob(basePattern)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': %s\", pattern, err)\n\t\t}\n\n\t\tfor _, base := range directoriesOnly(baseCandidates) {\n\t\t\t_ = filepath.Walk(filepath.Clean(base), func(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif !info.IsDir() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tmatchesInBase, err := Glob(filepath.Join(path, endPattern))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tmatches = append(matches, matchesInBase...)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t} else {\n\t\tcandidates, err := filepath.Glob(pattern)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': %s\", pattern, err)\n\t\t}\n\t\tmatches = filesOnly(candidates)\n\t}\n\n\treturn matches, nil\n}", "func Regexp(expr *regexp.Regexp) Pattern {\n\treturn regexpMatch{expr}\n}", "func r(pattern string) *regexp.Regexp { return regexp.MustCompile(pattern) }", "func Glob(pattern string) (matches, dirs []string, err error) {\n\tmatches, err = filepath.Glob(pattern)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\twildIndices := wildElements(pattern)\n\n\tif len(wildIndices) > 0 {\n\t\tfor _, match := range matches {\n\t\t\tdir := filepath.Dir(match)\n\t\t\tdirElems := strings.Split(dir, string(filepath.Separator))\n\n\t\t\tfor _, index := range wildIndices {\n\t\t\t\tdirs = append(dirs, strings.Join(dirElems[:index],\n\t\t\t\t\tstring(filepath.Separator)))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func regExp(context interface{}, value string) (bson.RegEx, error) {\n\tidx := strings.IndexByte(value[1:], '/')\n\tif value[0] != '/' || idx == -1 {\n\t\terr := 
fmt.Errorf(\"Parameter %q is not a regular expression\", value)\n\t\tlog.Error(context, \"varLookup\", err, \"Regex parsing\")\n\t\treturn bson.RegEx{}, err\n\t}\n\n\tpattern := value[1 : idx+1]\n\tl := len(pattern) + 2\n\n\tvar options string\n\tif l < len(value) {\n\t\toptions = value[l:]\n\t}\n\n\treturn bson.RegEx{Pattern: pattern, Options: options}, nil\n}", "func parseGlobPatterns(globPatterns []string) []glob.Glob {\n\tresults := make([]glob.Glob, 0)\n\tfor _, exp := range globPatterns {\n\t\tglobP, err := glob.Compile(exp)\n\t\tif err != nil {\n\t\t\tutils.Log.Errorln(\"Ignoring invalid glob pattern provided as cache control matcher rule\", err)\n\t\t\tcontinue\n\t\t}\n\t\tresults = append(results, globP)\n\t}\n\treturn results\n}", "func glob(pattern string) []string {\n\tpattern, err := homedir.Expand(pattern)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpaths, err := filepath.Glob(pattern)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif len(paths) == 0 {\n\t\tlog.Fatalf(\"%s: no such file or directory\", pattern)\n\t}\n\treturn paths\n}", "func New(pattern string, style PathStyle, recursive bool) (*Glob, error) {\n\tif !recursive && strings.Contains(pattern, \"**\") {\n\t\treturn nil, errors.Errorf(\"Non-recursive glob pattern '%s' cannot contain '**'\", pattern)\n\t}\n\n\tvar directorySeparator rune\n\tswitch style {\n\tcase NativeStyle:\n\t\tdirectorySeparator = kNativeDirectorySeparator\n\tcase UnixStyle:\n\t\tdirectorySeparator = '/'\n\tcase WindowsStyle:\n\t\tdirectorySeparator = '\\\\'\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unexpected style %q\", style))\n\t}\n\n\ttokens, err := tokenizePattern(pattern, directorySeparator)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar wildcardPositions []int\n\n\thasTokenWithMultipleAnswers := false\n\tfor tokenIndex, token := range tokens {\n\t\tif token.IsWildcard() {\n\t\t\twildcardPositions = append(wildcardPositions, tokenIndex)\n\t\t}\n\t\tif token.CanHaveMultipleAnswers() 
{\n\t\t\thasTokenWithMultipleAnswers = true\n\t\t}\n\t}\n\n\treturn &Glob{\n\t\tpattern: pattern,\n\t\tdirectorySeparator: directorySeparator,\n\t\trecursiveAllowed: recursive,\n\t\thasTokenWithMultipleAnswers: hasTokenWithMultipleAnswers,\n\t\ttokens: tokens,\n\t\twildcardPositions: wildcardPositions,\n\t}, nil\n}", "func (badGlob4) Glob__(*context.T, rpc.GlobServerCall, *glob.Glob) {}", "func ParseGlob(pattern string) (*ParsedFile, error) {\n\treturn nil, utils.ErrNotImplemented\n}", "func (badGlob2) Glob__(*context.T) {}", "func (badGlob5) Glob__(*context.T, rpc.ServerCall, *glob.Glob) error { return nil }", "func (badGlob1) Glob__() {}", "func ParseGlobPattern(text string) (pattern []GlobPart) {\n\tfor {\n\t\tstar := strings.IndexByte(text, '*')\n\t\tif star < 0 {\n\t\t\tpattern = append(pattern, GlobPart{Prefix: text})\n\t\t\tbreak\n\t\t}\n\t\tcount := 1\n\t\tfor star+count < len(text) && text[star+count] == '*' {\n\t\t\tcount++\n\t\t}\n\t\twildcard := GlobAllExceptSlash\n\n\t\t// Allow both \"/\" and \"\\\" as slashes\n\t\tif count > 1 && (star == 0 || text[star-1] == '/' || text[star-1] == '\\\\') &&\n\t\t\t(star+count == len(text) || text[star+count] == '/' || text[star+count] == '\\\\') {\n\t\t\twildcard = GlobAllIncludingSlash // A \"globstar\" path segment\n\t\t}\n\n\t\tpattern = append(pattern, GlobPart{Prefix: text[:star], Wildcard: wildcard})\n\t\ttext = text[star+count:]\n\t}\n\treturn\n}", "func (c *Config) GlobPatterns() []string {\n\tif c.noPatterns {\n\t\treturn nil\n\t}\n\treturn c.globPatterns\n}", "func Grep(regex *regexp.Regexp, glob string) (bool, error) {\n\tmatches, err := filepath.Glob(glob)\n\tif err != nil {\n\t\treturn false, errors.WithStackTrace(err)\n\t}\n\n\tfor _, match := range matches {\n\t\tbytes, err := ioutil.ReadFile(match)\n\t\tif err != nil {\n\t\t\treturn false, errors.WithStackTrace(err)\n\t\t}\n\n\t\tif regex.Match(bytes) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (cs *copyState) 
glob(pattern string) (files []manager.RepositoryEntry) {\n\tif pattern == \"\" {\n\t\tcs.state.Exitf(\"empty path name\")\n\t}\n\n\t// Path on local machine?\n\tif isLocal(pattern) {\n\t\tfor _, repoPath := range cs.state.GlobLocal(subcmd.Tilde(pattern)) {\n\t\t\tfiles = append(files, manager.RepositoryEntry{\n\t\t\t\tName: path.Base(repoPath),\n\t\t\t\tEnabled: true,\n\t\t\t\tLastUpdate: time.Now(),\n\t\t\t\tPath: repoPath,\n\t\t\t})\n\t\t}\n\t\treturn files\n\t}\n\n\t// Extra check to catch use of relative path on local machine.\n\tif !strings.Contains(pattern, \"@\") {\n\t\tcs.state.Exitf(\"local pattern not qualified path: %s\", pattern)\n\t}\n\n\treturn files\n}", "func translateGlob(glob string) (string, error) {\n\tre := []byte{}\n\tfor i := 0; i < len(glob); i++ {\n\t\tch := glob[i]\n\t\tswitch ch {\n\t\tcase '*':\n\t\t\tif i+1 < len(glob) && glob[i+1] == '*' {\n\t\t\t\tre = append(re, \".*\"...)\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\tre = append(re, \"[^/]*\"...)\n\t\t\t}\n\t\tcase '?':\n\t\t\tre = append(re, \"[^/]\"...)\n\t\tcase '.':\n\t\t\tre = append(re, \"\\\\.\"...)\n\t\tcase '[':\n\t\t\tfor ; i < len(glob) && glob[i] != ']'; i++ {\n\t\t\t\tre = append(re, glob[i])\n\t\t\t}\n\t\t\tif i < len(glob) {\n\t\t\t\tre = append(re, ']')\n\t\t\t} else {\n\t\t\t\treturn \"\", errors.New(\"unterminated character range\")\n\t\t\t}\n\t\tdefault:\n\t\t\tre = append(re, ch)\n\t\t}\n\t}\n\tre = append(re, '$')\n\treturn string(re), nil\n}", "func Glob(path ...string) []string {\n\tf, _ := filepathx.Glob(filepath.Join(path...))\n\treturn f\n}", "func GlobPatterns(fSys filesys.FileSystem, patterns []string) ([]string, error) {\n\tvar result []string\n\tfor _, pattern := range patterns {\n\t\tfiles, err := fSys.Glob(pattern)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(files) == 0 {\n\t\t\tlog.Printf(\"%s has no match\", pattern)\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, files...)\n\t}\n\treturn result, nil\n}", "func 
getRegexpCompile(pattern string) *regexp.Regexp {\n\tr, err := regexp.Compile(pattern)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn r\n}", "func Regexp(pattern string) (*regexp.Regexp, error) {\n\treturn pool.Compile(pattern)\n}", "func WildcardToRegex(wildcard string) string {\n\tvar b strings.Builder\n\tb.WriteByte('^')\n\tfor {\n\t\tidx := strings.IndexByte(wildcard, '*')\n\t\tif idx < 0 {\n\t\t\tbreak\n\t\t}\n\t\tb.WriteString(regexp.QuoteMeta(wildcard[:idx]))\n\t\tb.WriteString(\"(.*)\")\n\t\twildcard = wildcard[idx+1:]\n\t}\n\tb.WriteString(regexp.QuoteMeta(wildcard))\n\tb.WriteByte('$')\n\treturn b.String()\n}", "func GlobPlusPartCompiler(partStr string, next NextPartFn) (Matcher, error) {\n\tchildMatcher, err := next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch partStr {\n\tcase \"**\":\n\t\treturn AnyRecursiveMatcher(childMatcher), nil\n\tcase \"*\":\n\t\treturn AnyMatcher(childMatcher), nil\n\t}\n\treturn GlobMatcherSm(partStr, childMatcher), nil\n}", "func GlobMatches(patterns []string) MatcherFunc {\n\treturn func(el Elem) bool {\n\t\tfor _, pattern := range patterns {\n\t\t\tif ok, _ := path.Match(pattern, el.Name()); ok {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func (badGlob3) Glob__(*context.T, rpc.GlobServerCall) {}", "func Parse(pattern string) (*Glob, error) {\n\tg, err := glob.Parse(pattern)\n\treturn &Glob{g}, err\n}", "func New(expr string) (*Regexp, error) {\n\treturn NewWithLimit(expr, DefaultLimit)\n}", "func expandGlobs(args []string) []string {\n\tvar allFiles []string\n\tfor _, glob := range args {\n\t\tglobbed, err := filepath.Glob(glob)\n\t\tif err != nil{\n\t\t\tlog.Printf(\"Glob Error: %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, file := range globbed {\n\t\t\tallFiles = append(allFiles, file)\n\t\t}\n\t}\n\treturn allFiles\n}", "func Glob(pattern string, lookup LookupFunc, ls ListFunc) ([]*upspin.DirEntry, error) {\n\tp, err := path.Parse(upspin.PathName(pattern))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\t// If there are no glob meta-characters in the pattern, just do a lookup.\n\tif !hasMeta(p.FilePath()) {\n\t\tde, err := lookup(unquote(p.String()))\n\t\tif de == nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// If the pattern we look up is just a plain file, and it's a link,\n\t\t// just return it. In effect this is equivalent to passing false as the\n\t\t// final argument to Client.Lookup.\n\t\tif err == upspin.ErrFollowLink && de.Name == p.Path() {\n\t\t\terr = nil\n\t\t}\n\t\treturn []*upspin.DirEntry{de}, err\n\t}\n\n\t// Look for the longest path prefix that does not contain a\n\t// metacharacter, so we know which level we need to start listing.\n\tfirstMeta := 0\n\ti := 0\n\tfor ; i < p.NElem(); i++ {\n\t\tfirstMeta = i\n\t\tif hasMeta(p.Elem(i)) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Path without the first meta component.\n\tbasePath := unquote(p.First(firstMeta).String())\n\t// Pattern including first meta component.\n\tbasePattern := p.First(firstMeta + 1).String()\n\t// Tail of the patterm starting with the first meta component.\n\tpatternTail := strings.TrimPrefix(p.String(), basePattern)\n\n\t// The return values of this function.\n\tvar result []*upspin.DirEntry\n\tvar errLink error\n\n\tvar toGlob []string // Additional patterns to glob.\n\n\tentries, err := ls(basePath)\n\tif err != nil {\n\t\tif err == upspin.ErrFollowLink {\n\t\t\treturn entries, err\n\t\t}\n\t\treturn nil, errors.E(basePath, err)\n\t}\n\tfor _, e := range entries {\n\t\t// Match the entire entry name against our base pattern as we\n\t\t// are listing the directory before the pattern meta component.\n\t\tmatch, err := goPath.Match(basePattern, string(e.Name))\n\t\tif err != nil {\n\t\t\treturn nil, errors.E(errors.Invalid, err)\n\t\t}\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\tif patternTail != \"\" {\n\t\t\t// If we haven't reached the end of the pattern...\n\t\t\tif e.IsDir() {\n\t\t\t\t// ...and this is a directory, then append the\n\t\t\t\t// 
pattern tail to this name and add it to the\n\t\t\t\t// list of globs yet to try.\n\t\t\t\ttoGlob = append(toGlob, string(path.Join(upspin.QuoteGlob(e.Name), patternTail)))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !e.IsLink() {\n\t\t\t\t// ...and this is not a directory or link,\n\t\t\t\t// then it's only a partial match of the full\n\t\t\t\t// pattern, so we skip it.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// ...and this is a link, we want to emit it as a\n\t\t\t// result but also return a 'must follow link' error.\n\t\t\terrLink = upspin.ErrFollowLink\n\t\t}\n\t\tresult = append(result, e)\n\t}\n\n\t// Perform any additional glob operations recursively.\n\tfor _, pattern := range toGlob {\n\t\tentries, err := Glob(pattern, lookup, ls)\n\t\tif errors.Is(errors.Private, err) ||\n\t\t\terrors.Is(errors.Permission, err) ||\n\t\t\terrors.Is(errors.NotExist, err) {\n\t\t\t// Ignore paths when access is restricted.\n\t\t\tcontinue\n\t\t}\n\t\tif err == upspin.ErrFollowLink {\n\t\t\terrLink = err\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, entries...)\n\t}\n\n\tupspin.SortDirEntries(result, false)\n\treturn result, errLink\n}", "func getRegexp(env *lisp.LEnv, v *lisp.LVal) (re *regexp.Regexp, lerr *lisp.LVal) {\n\tif v.Type == lisp.LString {\n\t\tre, err := regexp.Compile(v.Str)\n\t\tif err != nil {\n\t\t\treturn nil, invalidPatternError(env, err)\n\t\t}\n\t\treturn re, nil\n\t}\n\tif v.Type != lisp.LNative {\n\t\treturn nil, env.Errorf(\"argument is not a regexp: %v\", v.Type)\n\t}\n\tre, ok := v.Native.(*regexp.Regexp)\n\tif !ok {\n\t\treturn nil, env.Errorf(\"argument is not a regexp: %v\", v)\n\t}\n\treturn re, nil\n}", "func (allGlobberObject) Glob__(*context.T, rpc.GlobServerCall, *glob.Glob) error {\n\treturn nil\n}", "func R(pattern string) *regexp.Regexp {\n\treturn regexp.MustCompile(pattern)\n}", "func NewRegExpMux() *RegExpMux {\n\treturn &RegExpMux{\n\t\tentries: []*RegExpMuxEntry{},\n\t}\n}", "func G(pat Pattern) Pattern 
{\n\treturn &patternGrouping{pat: pat, grpname: \"\"}\n}", "func Glob(pattern string) (Files, error) {\n\tmatching := map[string][]byte{}\n\tfiles, err := filepath.Glob(pattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, name := range files {\n\t\tbytes, err := ioutil.ReadFile(name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatching[name] = bytes\n\t}\n\n\treturn matching, nil\n}", "func (s *perfSuite) openGlob(pattern string) (readers []io.Reader) {\n\tassert := s.NewAssert()\n\n\ts.Pause(func() {\n\t\tglob, err := filepath.Glob(pattern)\n\t\tassert.NoError(err)\n\t\treaders = make([]io.Reader, len(glob))\n\t\tfor i, m := range glob {\n\t\t\tr, err := os.Open(m)\n\t\t\tassert.NoError(err)\n\t\t\treaders[i] = r\n\t\t}\n\t})\n\treturn\n}", "func (m *Builder) Compile() (*MultiGlob, error) {\n\tvar final *parser.Node\n\tfor _, p := range m.patterns {\n\t\tif final == nil {\n\t\t\tfinal = p\n\t\t} else {\n\t\t\tfinal = parser.Merge(final, p)\n\t\t}\n\t}\n\n\tpatterns := make(map[string]*parser.Node)\n\tfor k, v := range m.patterns {\n\t\tpatterns[k] = v\n\t}\n\n\treturn &MultiGlob{\n\t\tnode: final,\n\t\tpatterns: patterns,\n\t}, nil\n}", "func HybridGlobRegexPartCompiler(partStr string, next NextPartFn) (Matcher, error) {\n\tchildMatcher, err := next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch partStr {\n\tcase \"**\":\n\t\treturn AnyRecursiveMatcher(childMatcher), nil\n\tcase \"*\":\n\t\treturn AnyMatcher(childMatcher), nil\n\t}\n\tif strings.HasPrefix(partStr, \"^\") || strings.HasSuffix(partStr, \"$\") {\n\t\treturn parseRegexToMatcher(partStr, childMatcher)\n\t}\n\treturn GlobMatcherSm(partStr, childMatcher), nil\n}", "func generatePatternForRegexp(pattern string) (string, error) {\n\tpattern = patternRegexp.ReplaceAllStringFunc(pattern, func(subMatch string) string {\n\t\t// The sub match string conforms the parameter pattern: `{parameter-name:regexp-expression}`.\n\t\tfoos := strings.SplitN(subMatch, \":\", 2)\n\t\tif 
len(foos) < 2 {\n\t\t\treturn `([^/]+)`\n\t\t} else {\n\t\t\treturn \"(\" + foos[1][0:len(foos[1])-1] + \")\"\n\t\t}\n\t})\n\t// Checking for abnormal patterns.\n\t_, err := regexp.Compile(pattern)\n\treturn pattern, err\n}", "func GobwasMatcherFromPatterns(patterns []string) ([]Matcher, error) {\n\tret := make([]Matcher, 0, len(patterns))\n\tfor _, p := range patterns {\n\t\tmatcher, err := glob.Compile(p)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"can not instantiate matcher\")\n\t\t}\n\t\tret = append(ret, matcher)\n\t}\n\treturn ret, nil\n}", "func (t UrnTargets) getMatcher(glob string) *regexp.Regexp {\n\tif r := t.globs[glob]; r != nil {\n\t\treturn r\n\t}\n\tsegmentGlob := strings.Split(glob, \"**\")\n\tfor i, v := range segmentGlob {\n\t\tpart := strings.Split(v, \"*\")\n\t\tfor i, v := range part {\n\t\t\tpart[i] = regexp.QuoteMeta(v)\n\t\t}\n\t\tsegmentGlob[i] = strings.Join(part, \"[^:]*\")\n\t}\n\n\t// Because we have quoted all input, this is safe to compile.\n\tr := regexp.MustCompile(\"^\" + strings.Join(segmentGlob, \".*\") + \"$\")\n\n\t// We cache and return the matcher\n\tt.globs[glob] = r\n\treturn r\n}", "func expression(res ...*regexp.Regexp) *regexp.Regexp {\n\tvar s string\n\tfor _, re := range res {\n\t\ts += re.String()\n\t}\n\n\treturn match(s)\n}", "func expression(res ...*regexp.Regexp) *regexp.Regexp {\n\tvar s string\n\tfor _, re := range res {\n\t\ts += re.String()\n\t}\n\n\treturn match(s)\n}", "func expression(res ...*regexp.Regexp) *regexp.Regexp {\n\tvar s string\n\tfor _, re := range res {\n\t\ts += re.String()\n\t}\n\n\treturn match(s)\n}", "func NewRegexPatternSet(ctx *pulumi.Context,\n\tname string, args *RegexPatternSetArgs, opts ...pulumi.ResourceOption) (*RegexPatternSet, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RegularExpressionList == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 
'RegularExpressionList'\")\n\t}\n\tif args.Scope == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Scope'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RegexPatternSet\n\terr := ctx.RegisterResource(\"aws-native:wafv2:RegexPatternSet\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CompileERE(pattern string) (*regexp.Regexp, error) { return regexp.CompilePOSIX(pattern) }", "func (l *LazyRegexp) Pattern() *regexp.Regexp {\n\tif l.regexp == nil { // No need to take a lock. Worst case, we'll just compile it multiple times.\n\t\tl.regexp = regexp.MustCompile(l.pattern)\n\t}\n\treturn l.regexp\n}", "func NewRegexpFile(r *regexp.Regexp) *File {\n\treturn NewFile(&regexpElement{Data: r})\n}", "func (b ValExprBuilder) RegExp(val interface{}) BoolExprBuilder {\n\treturn b.makeComparisonExpr(astRegExp, makeValExpr(val))\n}", "func (o *globObject) Glob__(ctx *context.T, call rpc.GlobServerCall, g *glob.Glob) error {\n\to.globLoop(call, \"\", g, o.n)\n\treturn nil\n}", "func NewMatcher(pats []string) (*Matcher, error) {\n\tif len(pats) == 1 && strings.HasPrefix(pats[0], \"(\") && strings.HasSuffix(pats[0], \")\") {\n\t\treturn compileExpr(pats[0][1 : len(pats[0])-1])\n\t}\n\treturn compileGlobs(pats)\n}", "func NewRegexpList(path string) *RegexpList {\n\tc := &ConfList{\n\t\tpath: path,\n\t}\n\tr := &RegexpList{\n\t\tConfList: c,\n\t}\n\tr.update()\n\treturn r\n}", "func (fsOnDisk) Glob(pattern string) ([]string, error) {\n\tvar result []string\n\tallFilePaths, err := filepath.Glob(pattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif IsHiddenFilePath(pattern) {\n\t\tresult = allFilePaths\n\t} else {\n\t\tresult = RemoveHiddenFiles(allFilePaths)\n\t}\n\treturn result, nil\n}", "func (v *VerbalExpression) Regex() *regexp.Regexp {\n\n\tif !v.compiled {\n\t\tv.regexp = regexp.MustCompile(\n\t\t\tstrings.Join([]string{\n\t\t\t\tstrings.Join(v.parts, 
\"\"),\n\t\t\t\t`(?` + v.getFlags() + `)`,\n\t\t\t\tv.prefixes,\n\t\t\t\tv.expression,\n\t\t\t\tv.suffixes}, \"\"))\n\t\tv.compiled = true\n\t}\n\treturn v.regexp\n}", "func GlobMany(filePatterns []string) ([]string, error) {\n\tvar files []string\n\tfor _, filePattern := range filePatterns {\n\t\tglobResult, err := Glob(filePattern)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfiles = append(files, globResult...)\n\t}\n\treturn files, nil\n}", "func (g *goGetter) SubDirGlob(destPath, subDir string) (string, error) {\n\treturn getter.SubdirGlob(destPath, subDir)\n}", "func GetNewRegExp(filename string) string {\n\tf, _ := os.Open(filename)\n\tre := regexp.MustCompile(`\\w+\\s*:\\s*\\d+`)\n\tscanner := bufio.NewScanner(f)\n\tfmt.Println(\"filename\", filename)\n\tnewstr := \"\"\n\tfor scanner.Scan() {\n\t\tln := strings.TrimSpace(scanner.Text())\n\t\tfmt.Println(\"Scannerl Text\", ln)\n\t\ttt := re.ReplaceAllString(ln, `(\\w+):\\s*(\\d+)`)\n\t\tnewstr = newstr + `\\s*` + tt\n\t}\n\n\treturn newstr\n}", "func NG(grpname string, pat Pattern) Pattern {\n\treturn &patternGrouping{pat: pat, grpname: grpname}\n}", "func glob(dir, pattern string) ([]string, error) {\n\tm := make([]string, 0)\n\tfi, err := pkger.Stat(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, err\n\t}\n\td, err := pkger.Open(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer d.Close()\n\n\tnames, _ := d.Readdir(-1)\n\n\tfor _, n := range names {\n\t\tmatched, err := filepath.Match(pattern, n.Name())\n\t\tif err != nil {\n\t\t\treturn m, err\n\t\t}\n\t\tif matched {\n\t\t\tm = append(m, dir+\"/\"+n.Name())\n\t\t}\n\t}\n\treturn m, nil\n}", "func globMatch(registry, glob string) bool {\n\tregistryParts := strings.Split(registry, \".\")\n\tglobParts := strings.Split(glob, \".\")\n\tif len(globParts) != len(registryParts) {\n\t\treturn false\n\t}\n\tfor i, globPart := range globParts {\n\t\tmatched, e := filepath.Match(globPart, 
registryParts[i])\n\t\tif e != nil {\n\t\t\treturn false\n\t\t}\n\t\tif !matched {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func rawPattern(s string) *regexp.Regexp {\n\treturn regexp.MustCompile(regexp.QuoteMeta(s))\n}", "func compile(regex string) (regslc []rune) {\n regslc = make([]rune, 0, len(regex) + 10)\n \n for _, r := range regex {\n if r == '+' {\n regslc = append(regslc, regslc[len(regslc) - 1], '*')\n } else {\n regslc = append(regslc, r)\n }\n } \n return regslc\n }", "func BuildRegex(cfg YamlConfig) *regexp.Regexp {\n\n\t// Create subPatterns slice from cfg.Definitions\n\tsubPatterns := make([]interface{}, len(cfg.Definitions))\n\tfor i, def := range cfg.Definitions {\n\t\tsubPatterns[i] = def.Pattern\n\t}\n\n\t// Interpolate subpatterns in main pattern, compile regex\n\tpattern := fmt.Sprintf(cfg.LogPattern, subPatterns...)\n\tregex := regexp.MustCompile(pattern)\n\n\treturn regex\n\n}", "func execParseGlob(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := template.ParseGlob(args[0].(string))\n\tp.Ret(1, ret, ret1)\n}", "func newPattern(pattern string) *Pattern {\n\treturn &Pattern{*newPath(pattern)}\n\n}", "func RegexpBuilder(spec FilterSpec) (Filter, error) {\n\tswitch strings.ToLower(spec.Type) {\n\tcase \"regexp\", \"regex\", \"re\":\n\tdefault:\n\t\treturn nil, nil\n\t}\n\n\tswitch len(spec.Args) {\n\tcase 0, 1:\n\t\treturn nil, errors.New(\"regular expression filter requires a subject and an expression\")\n\tcase 2:\n\t\tsubject, expression := spec.Args[0], spec.Args[1]\n\n\t\t// Force case-insensitive matching\n\t\tif !strings.HasPrefix(expression, \"(?i)\") {\n\t\t\texpression = \"(?i)\" + expression\n\t\t}\n\n\t\tre, err := regexp.Compile(expression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid expression \\\"%s\\\": %v\", expression, err)\n\t\t}\n\n\t\treturn regexpFilter{\n\t\t\tsubject: subject,\n\t\t\tre: re,\n\t\t}.Filter, nil\n\tdefault:\n\t\treturn nil, errors.New(\"regular expression filter 
has %d arguments when two are needed\")\n\t}\n}", "func (c *ConfigParser) regexp(regexp string) *regexp.Regexp {\n\treturn c.regexps[regexp]\n}", "func CompileRegex(pattern string) (*regexp.Regexp, error) { return regexp.Compile(pattern) }", "func NewLazyRegexp(pattern string) LazyRegexp {\n\treturn LazyRegexp{pattern, nil}\n}", "func NewGlobSourcer(pattern string, parser FileParser, configs ...GlobSourcerConfigFunc) Sourcer {\n\toptions := getGlobSourcerConfigOptions(configs)\n\n\tsourcers := []Sourcer{}\n\tif paths, err := options.fs.Glob(pattern); err == nil {\n\t\tfor _, path := range paths {\n\t\t\tsourcers = append(sourcers, NewFileSourcer(\n\t\t\t\tpath,\n\t\t\t\tparser,\n\t\t\t\tWithFileSourcerFS(options.fs),\n\t\t\t))\n\t\t}\n\t}\n\n\treturn NewMultiSourcer(sourcers...)\n}", "func CompileRegexp(pattern string) *regexp.Regexp {\n\tif pattern == \"\" {\n\t\treturn nil\n\t}\n\n\t// add ^...$ to all regexp when not given\n\tif !strings.HasPrefix(pattern, \"^\") {\n\t\tpattern = \"^\" + pattern\n\t}\n\tif !strings.HasSuffix(pattern, \"$\") {\n\t\tpattern += \"$\"\n\t}\n\n\t// make all regexp case insensitive by default\n\tif !strings.Contains(pattern, \"(?i)\") {\n\t\tpattern = \"(?i)\" + pattern\n\t}\n\n\treturn regexp.MustCompile(pattern)\n}", "func Subexp(r *regexp.Regexp, target string, subexp string) (val string) {\n\tmatches := r.FindStringSubmatch(target)\n\tfor i, name := range r.SubexpNames() {\n\t\tif i > len(matches) {\n\t\t\treturn\n\t\t}\n\t\tif name == subexp {\n\t\t\treturn matches[i]\n\t\t}\n\t}\n\treturn\n}", "func Regexp(expr string) (func(string) bool, error) {\n\tif expr == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty regex expression\")\n\t}\n\n\t// add the last $ if missing (and not wildcard(?))\n\tif i := expr[len(expr)-1]; i != '$' && i != '*' {\n\t\texpr += \"$\"\n\t}\n\n\tr, err := regexp.Compile(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.MatchString, nil\n}", "func ListFilesGlob(ctx context.Context, base string, 
pattern string) ([]string, error) {\n\tlist := make([]string, 0)\n\n\t// check if the version is hidden\n\tif isHidden(base) {\n\t\tlog.Printf(\"ignoring hidden version %s\\n\", base)\n\t\treturn list, nil\n\t}\n\n\tif _, err := os.Stat(base); os.IsNotExist(err) {\n\t\tlog.Printf(\"match %s in %s but doesn't exists\\n\", pattern, base)\n\t\treturn list, nil\n\t}\n\n\tcmd := exec.Command(\"/glob/index.js\", pattern)\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &out\n\tcmd.Dir = base\n\terr := cmd.Run()\n\tif err != nil {\n\t\tfmt.Printf(\"%s: %s\\n\", err, out.String())\n\t\treturn list, err\n\t}\n\n\tfor _, line := range strings.Split(out.String(), \"\\n\") {\n\t\tif strings.Trim(line, \" \") != \"\" {\n\t\t\tlist = append(list, line)\n\t\t}\n\n\t}\n\treturn list, nil\n}", "func execmTemplateParseGlob(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret, ret1 := args[0].(*template.Template).ParseGlob(args[1].(string))\n\tp.Ret(2, ret, ret1)\n}", "func Glob(str string) ([]string, error) {\n\tabs, err := filepath.Abs(str)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troot, glob := \"\", \"\"\n\t// Look for rightmost directory delimiter that's left of a wildcard. Use\n\t// that to split the 'root' from the match 'glob'.\n\tfor i, c := range abs {\n\t\tswitch c {\n\t\tcase '/':\n\t\t\troot, glob = abs[:i], abs[i+1:]\n\t\tcase '*', '?':\n\t\t\ttest, err := match.New(glob)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfiles, err := Scan(root, Config{Paths: searchRules{\n\t\t\t\tfunc(path string, cond bool) bool { return test(path) },\n\t\t\t}})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfor i, f := range files {\n\t\t\t\tfiles[i] = filepath.Join(root, f) // rel -> abs\n\t\t\t}\n\t\t\treturn files, nil\n\t\t}\n\t}\n\t// No wildcard found. 
Does the file exist at 'str'?\n\tif s, err := os.Stat(str); err != nil && !s.IsDir() {\n\t\treturn []string{str}, nil\n\t}\n\treturn []string{}, nil\n}", "func NewRegexp(s string) (Regexp, error) {\n\tregex, err := regexp.Compile(s)\n\treturn Regexp{\n\t\tRegexp: regex,\n\t\toriginal: s,\n\t}, err\n}" ]
[ "0.7698923", "0.7152533", "0.7130733", "0.6727072", "0.6700064", "0.64594734", "0.6450237", "0.64090204", "0.6368397", "0.6299874", "0.62363666", "0.62225", "0.6221954", "0.6196934", "0.61662436", "0.6144593", "0.61306083", "0.6106889", "0.61045927", "0.60943174", "0.6090603", "0.60340124", "0.6020899", "0.6019304", "0.5966898", "0.5941105", "0.59255064", "0.58692175", "0.58648354", "0.5862435", "0.58032817", "0.5792738", "0.5777451", "0.5684172", "0.5662712", "0.56523854", "0.564222", "0.5636483", "0.562418", "0.5623768", "0.5623305", "0.56232566", "0.56193095", "0.5604821", "0.5584303", "0.5575943", "0.5557002", "0.5548463", "0.5534231", "0.5533699", "0.5523257", "0.5515732", "0.5509525", "0.5498575", "0.5494094", "0.546646", "0.5433346", "0.54313684", "0.54299265", "0.54123294", "0.54062843", "0.5380155", "0.5365125", "0.532751", "0.5324803", "0.5324803", "0.5324803", "0.5322095", "0.5280475", "0.52739877", "0.5265261", "0.5265084", "0.5262995", "0.5261831", "0.52474654", "0.5239043", "0.52324724", "0.5229995", "0.522718", "0.5219754", "0.5219321", "0.5209915", "0.5199817", "0.5193839", "0.5192851", "0.51652867", "0.5161793", "0.51528454", "0.5146397", "0.5143077", "0.51382405", "0.5123412", "0.5113546", "0.51030034", "0.50926596", "0.50919956", "0.50899386", "0.507684", "0.5062092", "0.5033664" ]
0.7753381
0
hasMeta determines if a path has special chars used to build a Regexp.
// hasMeta reports whether path contains any wildcard character
// ("*?[{") that would require pattern matching rather than a
// literal directory lookup.
func hasMeta(path string) bool {
	return strings.ContainsAny(path, "*?[{")
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func hasMeta(path string) bool {\n\tmagicChars := `*?[`\n\tif runtime.GOOS != \"windows\" {\n\t\tmagicChars = `*?[\\`\n\t}\n\treturn strings.ContainsAny(path, magicChars)\n}", "func hasMeta(path string) bool {\n\tmagicChars := `*?[`\n\tif runtime.GOOS != \"windows\" {\n\t\tmagicChars = `*?[\\`\n\t}\n\treturn strings.ContainsAny(path, magicChars)\n}", "func hasMeta(path string) bool {\n\treturn strings.ContainsAny(path, \"*?[\")\n}", "func hasMeta(elem string) bool {\n\tesc := false\n\tfor _, r := range elem {\n\t\tif esc {\n\t\t\tesc = false\n\t\t\tcontinue\n\t\t}\n\t\tswitch r {\n\t\tcase '\\\\':\n\t\t\tesc = true\n\t\tcase '*', '[', '?':\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsMeta(p string) bool {\n\tbase := path.Base(p)\n\n\t// https://wiki.debian.org/RepositoryFormat#Compression_of_indices\n\tswitch {\n\tcase strings.HasSuffix(base, \".gz\"):\n\t\tbase = base[0 : len(base)-3]\n\tcase strings.HasSuffix(base, \".bz2\"):\n\t\tbase = base[0 : len(base)-4]\n\tcase strings.HasSuffix(base, \".xz\"):\n\t\tbase = base[0 : len(base)-3]\n\tcase strings.HasSuffix(base, \".lzma\"):\n\t\tbase = base[0 : len(base)-5]\n\tcase strings.HasSuffix(base, \".lz\"):\n\t\tbase = base[0 : len(base)-3]\n\t}\n\n\tswitch base {\n\tcase \"Release\", \"Release.gpg\", \"InRelease\":\n\t\treturn true\n\tcase \"Packages\", \"Sources\", \"Index\":\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (n *node) HasMeta() bool {\n\treturn n.meta != nil\n}", "func HasGlobChar(s string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif syntax.Special(s[i]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func extractMetaProperty(t html.Token, property string) (content string, isValid bool) {\n\tfor _, attr := range t.Attr {\n\t\tif (attr.Key == \"property\" && attr.Val == property) || (attr.Key == \"name\" && property == attr.Val) {\n\t\t\tisValid = true\n\t\t}\n\t\tif attr.Key == \"content\" {\n\t\t\tcontent = attr.Val\n\t\t}\n\t}\n\treturn content, isValid\n}", "func 
isValidMetadataPath(path parser.Path) bool {\n\t// Path must be metadata.annotations.something or metadata.labels.something\n\tif len(path.Nodes) != 3 ||\n\t\tpath.Nodes[0].Type() != parser.ObjectNode ||\n\t\tpath.Nodes[1].Type() != parser.ObjectNode ||\n\t\tpath.Nodes[2].Type() != parser.ObjectNode {\n\t\treturn false\n\t}\n\n\tif reflect.DeepEqual(path.Nodes[0:2], labelsValidSubPath) {\n\t\treturn true\n\t}\n\tif reflect.DeepEqual(path.Nodes[0:2], annotationValidSubPath) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (obj *match) HasPattern() bool {\n\treturn obj.pattern != \"\"\n}", "func (util copyHandlerUtil) containsSpecialChars(name string) bool {\n\tfor _, r := range name {\n\t\tif r == '\"' || r == '\\\\' || r == '<' ||\n\t\t\tr == '>' || r == '|' || r == '*' ||\n\t\t\tr == '?' || r == ':' {\n\t\t\treturn true\n\t\t}\n\t}\n\t// if the last character in the file / dir name is ' '\n\t// then it not accepted by OS.\n\t// 'test1 ' is created as 'test1'\n\tif len(name) > 0 && name[len(name)-1] == ' ' {\n\t\treturn true\n\t}\n\treturn false\n}", "func getMetaForPath(path string) string {\n\tfor _, mp := range metapaths {\n\t\tif mp.path == path {\n\t\t\treturn mp.name\n\t\t}\n\t}\n\n\t// Unknown, so use path\n\treturn path\n}", "func QuoteMeta(s string) string {\n\t// // A byte loop is correct because all metacharacters are ASCII.\n\t// var i int\n\t// for i = 0; i < len(s); i++ {\n\t// \tif special(s[i]) {\n\t// \t\tbreak\n\t// \t}\n\t// }\n\t// // No meta characters found, so return original string.\n\t// if i >= len(s) {\n\t// \treturn s\n\t// }\n\n\t// b := make([]byte, 2*len(s)-i)\n\t// copy(b, s[:i])\n\t// j := i\n\t// for ; i < len(s); i++ {\n\t// \tif special(s[i]) {\n\t// \t\tb[j] = '\\\\'\n\t// \t\tj++\n\t// \t}\n\t// \tb[j] = s[i]\n\t// \tj++\n\t// }\n\t// return string(b[:j])\n\tpanic(\"\")\n}", "func getPathForMeta(metaname string) string {\n\tfor _, mp := range metapaths {\n\t\tif strings.EqualFold(mp.name, metaname) {\n\t\t\treturn 
mp.path\n\t\t}\n\t}\n\n\t// Unknown, so use metaname\n\treturn metaname\n}", "func (o *UserActionNamingPlaceholderProcessingStep) HasRegularExpression() bool {\n\tif o != nil && o.RegularExpression != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (l BaseLink) IsRegexp() bool {\n\treturn strings.HasPrefix(l.Path, \"/\") && strings.HasSuffix(l.Path, \"/\")\n}", "func (o *SoftwarerepositoryCategoryMapper) HasRegexPattern() bool {\n\tif o != nil && o.RegexPattern != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *ccMetric) HasMeta(key string) bool {\n\t_, ok := m.meta[key]\n\treturn ok\n}", "func isMetadataFile(filename string) bool {\n\treturn strings.HasSuffix(filename, metaFileExt)\n}", "func registerMeta(n *html.Node, hn *headNodes) {\n\tif htmlnode.HasAttribute(n, \"\", \"charset\") {\n\t\thn.metaCharset = n\n\t\treturn\n\t}\n\thn.metaOther = append(hn.metaOther, n)\n}", "func isParentMetaExist(path string) (bool, error) {\n\n\t_, err := os.Stat(path)\n\tif err == nil { return true, nil }\n\tif os.IsNotExist(err) { return false, nil }\n\treturn true, err\n}", "func isMetadataFile(filename string) bool {\n\treturn metadataMatch.MatchString(filename)\n}", "func (o *PermissionOptionsPagination) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func QuoteMeta(s string) string {\n\tvar buf bytes.Buffer\n\n\tfor _, ch := range s {\n\t\tswitch ch {\n\t\tcase '.', '+', '\\\\', '(', '$', ')', '[', '^', ']', '*', '?':\n\t\t\tbuf.WriteRune('\\\\')\n\t\t}\n\n\t\tbuf.WriteRune(ch)\n\t}\n\n\treturn buf.String()\n}", "func validateMetaKey(key string) bool {\n\treturn metaKeyValidator.MatchString(key)\n}", "func (me TdtypeType) IsPath() bool { return me.String() == \"path\" }", "func (de *Node) doesMeta() bool {\n\tfor d := de; d != nil; d = d.Parent {\n\t\tif d.RefCount[RefMeta] > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsRegex() bool { 
return me.String() == \"regex\" }", "func (rxp *RegExParsed) ShowMeta(f io.Writer, indent string) {\n\tif rxp.Meta != nil {\n\t\tfor _, k := range KeyList(rxp.Meta) {\n\t\t\tfor _, s := range strings.Split(rxp.Meta[k], \"\\n\") {\n\t\t\t\tfmt.Fprintf(f, \"%s#%s: %s\\n\", indent, k, s)\n\t\t\t}\n\t\t}\n\t}\n}", "func (s stat) metaPath(prefix []string) []string {\n\treturn append(prefix, ElemLatency, ElemWindow, CompactDurationString(s.window), s.typ.String())\n}", "func checkAssetMetadata(meta string) (string, error) {\n\tif \"\" == meta {\n\t\treturn \"\", fault.AssetMetadataIsRequired\n\t}\n\tmeta, err := strconv.Unquote(`\"` + meta + `\"`)\n\tif nil != err {\n\t\treturn \"\", err\n\t}\n\tif 1 == len(strings.Split(meta, \"\\u0000\"))%2 {\n\t\treturn \"\", fault.AssetMetadataMustBeMap\n\t}\n\treturn meta, nil\n}", "func HasUsermetaViaMetaKey(iMetaKey string) bool {\n\tif has, err := Engine.Where(\"meta_key = ?\", iMetaKey).Get(new(Usermeta)); err != nil {\n\t\treturn false\n\t} else {\n\t\tif has {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n}", "func isReservedMetadataKey(k string) bool {\n\tswitch {\n\tcase strings.HasPrefix(k, \"X-Prpc-\"):\n\t\treturn true\n\n\tcase k == \"Accept\",\n\t\tk == \"Accept-Encoding\",\n\t\tk == \"Content-Encoding\",\n\t\tk == \"Content-Length\",\n\t\tk == \"Content-Type\",\n\t\tk == \"X-Content-Type-Options\":\n\t\treturn true\n\n\tdefault:\n\t\treturn false\n\t}\n}", "func (o *GetRecipeInformation200ResponseExtendedIngredientsInner) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *TimerTimersResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *CustomfieldCustomFieldsResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func HasSpecialCharacters(str string) bool {\n\tre := regexp.MustCompile(`^[a-zA-Z0-9\\.\\, ]+$`)\n\n\tif !re.MatchString(str) 
{\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isFSMetaValid(version, format string) bool {\n\treturn ((version == fsMetaVersion || version == fsMetaVersion100) &&\n\t\tformat == fsMetaFormat)\n}", "func IsMetaPackage(name string) bool {\n\treturn name == \"std\" || name == \"cmd\" || name == \"all\"\n}", "func isPathInvalid(path string) bool {\n\tfor strings.Contains(path, \"%\") {\n\t\texp := regexp.MustCompile(`(.*)%(.*)%(.*)`)\n\t\tparts := exp.FindStringSubmatch(path)\n\t\tpath = parts[1] + os.ExpandEnv(\"${\"+parts[2]+\"}\") + parts[3]\n\t}\n\t_, err := os.Stat(path)\n\treturn err != nil\n}", "func (set *ContentTypeSet) StringHas(mediaType string) bool {\n\tct, _, err := mime.ParseMediaType(mediaType)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn set.Has(ContentType(ct))\n}", "func HasProfanity(path []byte) bool {\n\tfor _, word := range StrongFilter {\n\t\tif bytes.Contains(path, word) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (md *MetaData) HadMetaData() bool {\n\treturn md.mdtype != MdInvalid\n}", "func (n *node) GetMeta(name string) (string, bool) {\n\tv, ok := n.meta[name]\n\treturn v, ok\n}", "func displayAsMeta(node *Node, level int) string {\n\tm := metaRegex.FindStringSubmatch(node.fullPath)\n\tvar n string\n\tif len(m) == 0 || m[1] == \"\" {\n\t\tn = node.fullPath\n\t} else {\n\t\tn = m[1]\n\t}\n\tret := fmt.Sprintf(\"%s%s\\n\", strings.Repeat(\" \", level), n)\n\t// No children to iterate over.\n\treturn ret\n}", "func (p *Password) HasSpecial() bool {\n\treturn p.ContainsSpecial\n}", "func needsParse(pi *res.PathInfo) (bool, error) {\n\tr, err := os.Open(pi.Path)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Unable to open file %s: %s\", pi.Path, err)\n\t}\n\tdefer r.Close()\n\n\treturn needsParseContents(pi, r), nil\n}", "func TestMatchStringWithQuoteMeta(t *testing.T) {\n\tpattern, str := regexp.QuoteMeta(\"[foo]\"), \"[foo]\"\n\n\tif match, err := regexp.MatchString(pattern, str); match != true 
{\n\t\tt.Errorf(\"MatchString did not match %q %v\", str, err)\n\t}\n}", "func (o *NotebookNotebooksResponse) HasMeta() bool {\n\tif o != nil && o.Meta != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isPathy(name string) bool {\n\treturn strings.Contains(name, string(filepath.Separator))\n}", "func Meta(text string) (string, error) {\n\treturn ds.Extract(text, metaL, metaR) // extract meta-data\n}", "func ValidPath(p string) bool {\n\tif len(p) < 3 {\n\t\treturn false\n\t}\n\tif len(p) > 64 {\n\t\treturn false\n\t}\n\tfor _, r := range p {\n\t\tif r == '-' {\n\t\t\tcontinue\n\t\t}\n\t\tif r >= '0' && r <= '9' {\n\t\t\tcontinue\n\t\t}\n\t\tif r >= 'a' && r <= 'z' {\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\n\treturn true\n}", "func isMdFile(path string) bool {\n\treturn strings.HasSuffix(path, \".md\")\n}", "func metadata(str string) (version string, wrap bool) {\n\tsB := strings.Split(pattern(\"~V(?:\\\\w*\\\\s*)*\\n\\\\s*\").Split(str, 2)[1], \"~\")[0]\n\tsw := removeComment(sB)\n\taccum := [][]string{}\n\tfor _, val := range sw {\n\t\tcurrent := pattern(\"\\\\s{2,}|\\\\s*:\").Split(val, -1)[0:2]\n\t\taccum = append(accum, current)\n\t}\n\tversion = accum[0][1]\n\tif strings.ToLower(accum[1][1]) == \"yes\" {\n\t\twrap = true\n\t} else {\n\t\twrap = false\n\t}\n\treturn\n}", "func Has(path string) bool { return mustGetDefaultProvider().Has(path) }", "func HasUsermetaViaMetaValue(iMetaValue string) bool {\n\tif has, err := Engine.Where(\"meta_value = ?\", iMetaValue).Get(new(Usermeta)); err != nil {\n\t\treturn false\n\t} else {\n\t\tif has {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n}", "func (m *MetaSpec) MetaFilePath() string {\n\treturn filepath.Join(m.MetaSpace, m.MetaFile+\".json\")\n}", "func isPath(path string) bool {\n\treturn strings.HasPrefix(path, \"~\") ||\n\t\tstrings.HasPrefix(path, \".\") ||\n\t\tstrings.HasPrefix(path, \"/\")\n}", "func hasSpecialChar(word string) (bool, error) {\n\treturn regexp.MatchString(`[^a-zA-Z'!?,. 
-]`, word)\n}", "func validPath(name string) bool {\n\treturn fs.ValidPath(name) && !strings.ContainsRune(name, '\\\\')\n}", "func IsMetaDataValid(XQSMetaData *map[string]string) error {\n\tXQSMetaDataIsValid := true\n\twrongKey := \"\"\n\twrongValue := \"\"\n\n\tmetadataValuelength := 0\n\tmetadataKeylength := 0\n\n\tfor k, v := range *XQSMetaData {\n\t\tmetadataKeylength += len(k)\n\t\tmetadataValuelength += len(v)\n\t\tstartstr := strings.Split(k, \"-\")\n\t\tif len(startstr) < 4 {\n\t\t\twrongKey = k\n\t\t\twrongValue = v\n\t\t\tXQSMetaDataIsValid = false\n\t\t\tbreak\n\t\t}\n\t\tif startstr[0] != \"x\" || startstr[1] != \"qs\" || startstr[2] != \"meta\" || startstr[3] == \"\" {\n\t\t\twrongKey = k\n\t\t\twrongValue = v\n\t\t\tXQSMetaDataIsValid = false\n\t\t\tbreak\n\t\t}\n\n\t\tfor i := 0; i < len(k); i++ {\n\t\t\tch := k[i]\n\t\t\tif !(ch >= 65 && ch <= 90 || ch >= 97 && ch <= 122 || ch <= 57 && ch >= 48 || ch == 45 || ch == 46) {\n\t\t\t\twrongKey = k\n\t\t\t\twrongValue = v\n\t\t\t\tXQSMetaDataIsValid = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < len(v); i++ {\n\t\t\tch := v[i]\n\t\t\tif ch < 32 || ch > 126 {\n\t\t\t\twrongKey = k\n\t\t\t\twrongValue = v\n\t\t\t\tXQSMetaDataIsValid = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif metadataKeylength > 512 {\n\t\t\twrongKey = k\n\t\t\twrongValue = v\n\t\t\tXQSMetaDataIsValid = false\n\t\t\tbreak\n\t\t}\n\t\tif metadataValuelength > 2048 {\n\t\t\twrongKey = k\n\t\t\twrongValue = v\n\t\t\tXQSMetaDataIsValid = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !XQSMetaDataIsValid {\n\t\treturn errors.ParameterValueNotAllowedError{\n\t\t\tParameterName: \"XQSMetaData\",\n\t\t\tParameterValue: \"map[\" + wrongKey + \"]=\" + wrongValue,\n\t\t\tAllowedValues: []string{\"https://docs.qingcloud.com/qingstor/api/common/metadata.html\"},\n\t\t}\n\t}\n\treturn nil\n}", "func (sc *Scavenger) RegexpExists(pat string) (yes bool) {\n\t_, _, yes = sc.Finder().FindRegexp(pat)\n\treturn\n}", "func (m *Media) IsValid() bool {\n 
if ext := filepath.Ext(m.FullPath); len(ext) > 0 {\n for _, pattern := range extpatterns {\n match, err := filepath.Match(\".\"+pattern, ext)\n if err != nil {\n fmt.Println(\"malfoemd pattern?\")\n return false\n }\n if match {\n return true\n }\n }\n }\n\n return false\n}", "func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {\n\terr = f.pacer.Call(func() (bool, error) {\n\t\tentry, err = f.srv.GetMetadata(&files.GetMetadataArg{\n\t\t\tPath: f.opt.Enc.FromStandardPath(objPath),\n\t\t})\n\t\treturn shouldRetry(ctx, err)\n\t})\n\tif err != nil {\n\t\tswitch e := err.(type) {\n\t\tcase files.GetMetadataAPIError:\n\t\t\tif e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {\n\t\t\t\tnotFound = true\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func isIndex(path string) bool {\n\treturn strings.HasSuffix(path, \"_index.md\")\n}", "func hasUnsupportedChar(str string) bool {\n\treg := `[^0-9a-zA-Z-_\\.\\s]` // add some characters that contain in the vm names\n\treturn regexp.MustCompile(reg).Match([]byte(str))\n}", "func (mod Mod) Has(p string) bool {\n\tfor _, v := range mod.All() {\n\t\tif filepath.Clean(p) == filepath.Clean(v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestSnapshotGetMeta(t *testing.T) {\n\tts := validSnapshotTemplate()\n\tf, err := ts.GetMeta(CanonicalRootRole)\n\trequire.NoError(t, err)\n\trequire.IsType(t, &FileMeta{}, f)\n\n\t// now one that doesn't exist\n\tf, err = ts.GetMeta(\"targets/a/b\")\n\trequire.Error(t, err)\n\trequire.IsType(t, ErrMissingMeta{}, err)\n\trequire.Nil(t, f)\n}", "func (proc *SProcess) HasProperty(prop string) bool {\n\n\t// first, check if a File is open.\n\tif proc.files[prop] != nil {\n\t\treturn true\n\t}\n\n\t// otherwise, do a dirty check and see if the file exists.\n\t_, err := os.Lstat(\"/system/process/\" + strconv.Itoa(proc.pid))\n\tif err != nil {\n\t\treturn 
false\n\t}\n\n\treturn true\n}", "func RegularFileExists(filePath string) (bool, error) {\n\tfilePath = strings.Replace(filePath, \"~\", HomeDir(), 1)\n\n\tif stat, err := os.Stat(filePath); err == nil {\n\t\tif stat.Mode().IsRegular() {\n\t\t\treturn true, nil\n\t\t} else {\n\t\t\treturn false, errors.New(filePath + \" is not a regular file\")\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, err\n\t}\n}", "func (f Unstructured) HasPath(fieldPath ...string) bool {\n\tif f.IsUndefined() || len(fieldPath) == 0 {\n\t\treturn true\n\t}\n\tif !f.HasByName(fieldPath[0]) {\n\t\treturn false\n\t}\n\treturn f.Field(fieldPath[0]) != nil\n}", "func KeyMetaType(s string) (mt MetaType, ok bool) {\n\tif len(s) < 7 {\n\t\treturn mt, false\n\t}\n\tmt = MetaType(s[len(s)-7:])\n\t_, ok = metaTypes[mt]\n\treturn mt, ok\n}", "func has(m map[string]string, required []string) bool {\n\tfor _, r := range required {\n\t\tif m[r] == \"\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (mod Mod) Has(p string) bool {\n\tfor _, v := range mod.All() {\n\t\tif path.Clean(p) == path.Clean(v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isServerProxyCookiePathDirective(directive string) bool {\n\tif isEqualString(directive, ServerProxyCookiePathDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func HasTemplateExt(paths string) bool {\n\tfor _, v := range templateExt {\n\t\tif strings.HasSuffix(paths, \".\"+v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func hasMountinfoOption(opts string, vals ...string) bool {\n\tfor _, opt := range strings.Split(opts, \" \") {\n\t\tfor _, val := range vals {\n\t\t\tif strings.HasPrefix(opt, val) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func needsParseContents(pi *res.PathInfo, r io.Reader) bool {\n\tif pi.Type == res.Raw {\n\t\treturn false\n\t}\n\tif filepath.Ext(pi.Path) == \".xml\" {\n\t\treturn true\n\t}\n\tif filepath.Ext(pi.Path) == \"\" 
{\n\t\tvar header [5]byte\n\t\t_, err := io.ReadFull(r, header[:])\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Fatal(\"Unable to read file %s: %s\", pi.Path, err)\n\t\t}\n\t\tif string(header[:]) == \"<?xml\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Page) MustHasR(selector, regex string) bool {\n\thas, _, err := p.HasR(selector, regex)\n\tp.e(err)\n\treturn has\n}", "func (me TxsdRecordPatternSimpleContentExtensionType) IsXpath() bool { return me.String() == \"xpath\" }", "func isPathToPrecompiledObjectFile(path string) bool {\n\treturn strings.Count(path, string(os.PathSeparator)) == separatorsNumber && !isDir(path)\n}", "func isServerProxyTempPathDirective(directive string) bool {\n\tif isEqualString(directive, ServerProxyTempPathDirective) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TAttlistGeneralNoteOwner) IsHmd() bool { return me.String() == \"HMD\" }", "func (p *pathIterator) seenTrailingSlash() bool {\n\treturn p.ts\n}", "func (o *OpenapiProcessFileAllOf) HasSpecFilePath() bool {\n\tif o != nil && o.SpecFilePath != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isnamedPipePath(p string) bool {\n\treturn strings.HasPrefix(p, `\\\\.\\pipe\\`)\n}", "func (me TAttlistMedlineCitationOwner) IsHmd() bool { return me.String() == \"HMD\" }", "func (parser EhentaiParser) GetMeta() Meta {\r\n\treturn Meta{\r\n\t\tURLRgx: `^https://e-hentai.org/g/\\w+/\\w+/?$`,\r\n\t\tPriority: 9,\r\n\r\n\t\tName: \"Ehentai下载\",\r\n\t\tInternalName: \"ehentai-parser\",\r\n\t\tVersion: \"0.1\",\r\n\t\tDescription: \"可解析下载Ehentai相册\",\r\n\t\tAuthor: \"\",\r\n\t\tLink: \"\",\r\n\t}\r\n}", "func (mi *MinIOInstance) HasMetadata() bool {\n\treturn mi.Spec.Metadata != nil\n}", "func (z *Zone) HasMetadata() bool {\n\tj, _ := json.Marshal(z)\n\tj2, _ := json.Marshal(&Zone{Domain: z.Domain})\n\treturn string(j) != string(j2)\n}", "func HasMetaInfo(ctx context.Context) bool {\n\treturn getKV(ctx) != nil\n}", "func isHeader(line string) bool 
{\n\treturn strings.HasPrefix(line, \"/\")\n}", "func validRelPath(p string) bool {\n\tif p == \"\" || strings.Contains(p, `\\`) || strings.HasPrefix(p, \"/\") || strings.Contains(p, \"../\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func validRelPath(p string) bool {\n\tif p == \"\" || strings.Contains(p, `\\`) || strings.HasPrefix(p, \"/\") || strings.Contains(p, \"../\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func (tp Protocol) WithMeta() bool {\n\treturn tp != PurePayload && tp != Framed\n}", "func (ps Paths) Has(path string) bool {\n\tfor _, p := range ps {\n\t\tif p == path {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *SchemaDefinitionRestDto) HasMetadata() bool {\n\tif o != nil && o.Metadata != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ViewMetaPage) HasHasMore() bool {\n\tif o != nil && o.HasMore != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func needsEscaping(b byte) bool {\n\t// If it isn't an alphanumeric character or a ., -, or a _, then it always\n\t// needs to be escaped.\n\treturn !isAlphaNumSym(b)\n}", "func loadMeta(ctx context.Context, bucket objstore.BucketReader, id ulid.ULID) (*metadata.Meta, bool, error) {\n\tsrc := path.Join(id.String(), thanosblock.MetaFilename)\n\n\tr, err := bucket.Get(ctx, src)\n\tif bucket.IsObjNotFoundErr(err) {\n\t\treturn nil, true, fmt.Errorf(\"get meta file: %w\", err)\n\t}\n\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"get meta file: %w\", err)\n\t}\n\n\tdefer r.Close()\n\n\tmetaContent, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"read meta file: %w\", err)\n\t}\n\n\tvar m metadata.Meta\n\tif err := json.Unmarshal(metaContent, &m); err != nil {\n\t\treturn nil, true, fmt.Errorf(\"unmarshal meta: %w\", err)\n\t}\n\n\tif m.Version != metadata.MetaVersion1 {\n\t\treturn nil, false, errors.Errorf(\"unexpected meta file version %d\", m.Version)\n\t}\n\n\treturn &m, false, nil\n}" ]
[ "0.8183802", "0.8183802", "0.8055467", "0.68387294", "0.66377515", "0.5728693", "0.561423", "0.55432343", "0.5530025", "0.53491795", "0.5326753", "0.5290416", "0.52896106", "0.5280442", "0.5273996", "0.5268894", "0.5263377", "0.52493787", "0.5227846", "0.52121854", "0.51940775", "0.5085831", "0.5068895", "0.5037393", "0.49796185", "0.49746335", "0.4959018", "0.49458033", "0.49451151", "0.4929671", "0.490117", "0.48962584", "0.4894654", "0.48926356", "0.48863304", "0.4879474", "0.48703867", "0.4865142", "0.4835133", "0.48334745", "0.4807303", "0.47869664", "0.47669283", "0.47656265", "0.4760451", "0.4749484", "0.47016665", "0.46961835", "0.4670048", "0.46583143", "0.46365514", "0.462754", "0.46235093", "0.4620803", "0.46163464", "0.46138936", "0.4610197", "0.46094748", "0.46063304", "0.46029565", "0.45933422", "0.45810482", "0.4579398", "0.4560372", "0.4556497", "0.45554805", "0.45540392", "0.45523143", "0.45446393", "0.45427778", "0.45426002", "0.4541702", "0.45325163", "0.4528918", "0.45277876", "0.45245138", "0.45234054", "0.4519711", "0.45171103", "0.4516923", "0.45147425", "0.45137414", "0.4503939", "0.4500707", "0.44936872", "0.44847977", "0.4478511", "0.4465964", "0.44615075", "0.44509232", "0.44495752", "0.44416472", "0.4440045", "0.4440045", "0.4433024", "0.44272974", "0.4424116", "0.4415427", "0.4413558", "0.44052526" ]
0.78990114
3
PatternRoot gets a real directory root from a pattern. The directory returned is used as the start location for globbing.
func PatternRoot(s string) string { if isDir(s) { return s } // No directory in pattern parts := strings.Split(s, "/") if len(parts) == 1 { return "." } // parts returns an empty string at positio 0 if the s starts with "/" root := "" // Build path until a dirname has a char used to build regex for i, part := range parts { if hasMeta(part) { break } if i > 0 { root += "/" } root += part } // Default to cwd if root == "" { root = "." } return root }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetFileLocationFromPattern(root, pattern string) (string, error) {\n\tvar match string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {\n\t\t\treturn err\n\t\t} else if matched {\n\t\t\tmatch, err = filepath.Abs(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn io.EOF\n\t\t}\n\t\treturn nil\n\t})\n\treturn match, err\n}", "func rootDir(dir string) string {\n\tpkgIndex := -1\n\tparts := strings.Split(dir, string(filepath.Separator))\n\tfor i, d := range parts {\n\t\tif d == \"pkg\" {\n\t\t\tpkgIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkgIndex == -1 {\n\t\treturn dir\n\t}\n\treturn strings.Join(parts[:pkgIndex], string(filepath.Separator))\n}", "func (o TransferJobTransferSpecPosixDataSourcePtrOutput) RootDirectory() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecPosixDataSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RootDirectory\n\t}).(pulumi.StringPtrOutput)\n}", "func (util copyHandlerUtil) getRootPathWithoutWildCards(path string) (string, string) {\n\tif len(path) == 0 {\n\t\treturn path, \"*\"\n\t}\n\t// if no wild card exists, then root directory is the given directory\n\t// pattern is '*' i.e to include all the files inside the given path\n\twIndex := util.firstIndexOfWildCard(path)\n\tif wIndex == -1 {\n\t\treturn path, \"*\"\n\t}\n\tpathWithoutWildcard := path[:wIndex]\n\t// find the last separator in path without the wildCards\n\t// result will be content of path till the above separator\n\t// for Example: source = C:\\User\\a*\\a1*\\*.txt pathWithoutWildcard = C:\\User\\a\n\t// sepIndex = 7\n\t// rootDirectory = C:\\User and pattern = a*\\a1*\\*.txt\n\tsepIndex := strings.LastIndex(pathWithoutWildcard, common.AZCOPY_PATH_SEPARATOR_STRING)\n\tif sepIndex == -1 
{\n\t\treturn \"\", path\n\t}\n\treturn pathWithoutWildcard[:sepIndex], path[sepIndex+1:]\n}", "func ResolveRootDir(p string) string {\n\tparts := strings.Split(path.Dir(p), \"/\")\n\tvar roots []string\n\tfor _, part := range parts {\n\t\tif HasGlobChar(part) {\n\t\t\tbreak\n\t\t}\n\t\troots = append(roots, part)\n\t}\n\n\tif len(roots) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn strings.Join(roots, \"/\")\n}", "func (s *DjangoEngine) RootDir(root string) *DjangoEngine {\n\tif s.fs != nil && root != \"\" && root != \"/\" && root != \".\" && root != s.rootDir {\n\t\tsub, err := fs.Sub(s.fs, s.rootDir)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ts.fs = sub // here so the \"middleware\" can work.\n\t}\n\n\ts.rootDir = filepath.ToSlash(root)\n\treturn s\n}", "func (o TransferJobTransferSpecPosixDataSinkPtrOutput) RootDirectory() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecPosixDataSink) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RootDirectory\n\t}).(pulumi.StringPtrOutput)\n}", "func (o TransferJobTransferSpecPosixDataSourceOutput) RootDirectory() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecPosixDataSource) string { return v.RootDirectory }).(pulumi.StringOutput)\n}", "func getStartDir(pattern string) string {\n\tvar dir string\n\n\ti := strings.Index(pattern, \"...\")\n\tif i != -1 {\n\t\tdir, _ = path.Split(pattern[:i])\n\t} else {\n\t\tdir = pattern\n\t}\n\n\treturn dir\n}", "func (o TransferJobTransferSpecPosixDataSinkOutput) RootDirectory() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecPosixDataSink) string { return v.RootDirectory }).(pulumi.StringOutput)\n}", "func BasePath(pattern string) string {\n\tsplit := strings.IndexAny(pattern, \"*{}?[]\")\n\tif split >= 0 {\n\t\tpattern = pattern[:split]\n\t}\n\tdir, _ := path.Split(pattern)\n\treturn path.Clean(dir)\n}", "func RootDir() string {\n\t_, b, _, _ := runtime.Caller(0)\n\td := 
path.Join(path.Dir(b))\n\treturn filepath.Dir(d)\n}", "func rootDir() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlastdir := \"\"\n\n\tfor lastdir != dir {\n\t\tif _, err = os.Stat(filepath.Join(dir, \".gong\")); err == nil {\n\t\t\treturn dir, nil\n\t\t}\n\n\t\tlastdir = dir\n\t\tdir = filepath.Dir(dir)\n\t}\n\n\treturn \"\", ErrNoRootDir\n}", "func (r *RelativePath) RootPath() string {\n\treturn \"/\" + strings.Join(r.stack[:r.limit], \"/\")\n}", "func (c *Container) RootDirectory() (string, error) {\n\t// The root directory of this container's runtime.\n\trootDir := fmt.Sprintf(\"/var/run/docker/runtime-%s/moby\", c.runtime)\n\t_, err := os.Stat(rootDir)\n\tif err == nil {\n\t\treturn rootDir, nil\n\t}\n\t// In docker v20+, due to https://github.com/moby/moby/issues/42345 the\n\t// rootDir seems to always be the following.\n\tconst defaultDir = \"/var/run/docker/runtime-runc/moby\"\n\t_, derr := os.Stat(defaultDir)\n\tif derr == nil {\n\t\treturn defaultDir, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"cannot stat %q: %v or %q: %v\", rootDir, err, defaultDir, derr)\n}", "func TemplateRootDir() (string, error) {\n\tconfig, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get UserConfigDir\")\n\t}\n\n\ttmplPath := filepath.Join(config, \"suborbital\", \"templates\")\n\n\tif os.Stat(tmplPath); err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\tif err := os.MkdirAll(tmplPath, os.ModePerm); err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to MkdirAll template directory\")\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", errors.Wrap(err, \"failed to Stat template directory\")\n\t\t}\n\t}\n\n\treturn tmplPath, nil\n}", "func getRootDir() (string, error) {\n\t//TODO: fix this!! 
think it's a tad dodgy!\n\tpwd, _ := os.Getwd()\n\tlog.Printf(\"[DEBUG] getRootDir pwd is: %v\", pwd)\n\n\tb := strings.Contains(pwd, rootDirName)\n\tif !b {\n\t\treturn \"\", fmt.Errorf(\"could not find '%v' root directory in %v\", rootDirName, pwd)\n\t}\n\n\ts := strings.SplitAfter(pwd, rootDirName)\n\tlog.Printf(\"[DEBUG] path(s) after splitting: %v\\n\", s)\n\n\tif len(s) < 1 {\n\t\t//expect at least one result\n\t\treturn \"\", fmt.Errorf(\"could not split out '%v' from directory in %v\", rootDirName, pwd)\n\t}\n\n\tif !strings.HasSuffix(s[0], rootDirName) {\n\t\t//the first path should end with \"probr\"\n\t\treturn \"\", fmt.Errorf(\"first path after split (%v) does not end with '%v'\", s[0], rootDirName)\n\t}\n\n\treturn s[0], nil\n}", "func rootDir(path string) string {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(`c:\\`, path)\n\t}\n\treturn filepath.Join(\"/\", path)\n}", "func (c DirCollector) Root() *VDir {\n\t// do we have a single slashed directory path /\n\tif c.Has(\"/\") {\n\t\treturn c.Get(\"/\")\n\t}\n\n\t// do we have a single dot directory path .\n\tif c.Has(\".\") {\n\t\treturn c.Get(\".\")\n\t}\n\n\t//else fallback to search for root boolean set\n\tvar vdir *VDir\n\n\tfor _, dir := range c {\n\t\tif dir.root {\n\t\t\tvdir = dir\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn vdir\n}", "func RootDir() string {\n\treturn environ.GetValueStrOrPanic(\"ROOT_DIR\")\n}", "func (w *World) RootDir() string {\n\treturn w.rootdir\n}", "func getRootPath() string {\n\tp, _ := filepath.Abs(\"../../\")\n\treturn p + string(filepath.Separator)\n}", "func findRootDir(path string) string {\n\tpathItems := strings.Split(path, sepStr)\n\tout := sepStr\n\tfor i, item := range pathItems {\n\t\tif i == len(pathItems)-1 {\n\t\t\tbreak\n\t\t}\n\t\tif item == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif hasMeta(item) {\n\t\t\tbreak\n\t\t}\n\t\tout += item + sepStr\n\t}\n\tif out != sepStr {\n\t\tout = strings.TrimSuffix(out, sepStr)\n\t\tif runtime.GOOS == \"windows\" 
{\n\t\t\tout = strings.TrimPrefix(out, sepStr)\n\t\t}\n\t}\n\treturn out\n}", "func Root(name, path string) *TRoot {\n\tvar tmpl = &Template{template.New(name), name}\n\tvar t = &TRoot{tmpl, path}\n\n\treturn t\n}", "func (r *Repository) RootPath() string {\n\treturn r.root\n}", "func getProjectRoot() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error getting pwd: %s\", err)\n\t}\n\tfor {\n\t\tparent, name := filepath.Split(dir)\n\t\tif name == \"acr-builder\" {\n\t\t\tbreak\n\t\t}\n\t\tparent = filepath.Clean(parent)\n\t\tif parent == \"\" {\n\t\t\tpanic(\"no acr-builder directory find on pwd\")\n\t\t}\n\t\tdir = parent\n\t}\n\treturn dir, nil\n}", "func Root(directory string) (string, error) {\n\tcmd := exec.Command(\"git\", \"rev-parse\", \"--show-toplevel\")\n\tcmd.Dir = filepath.Dir(directory)\n\tresult, err := cmd.CombinedOutput()\n\ttrimmed := strings.TrimSpace(string(result))\n\tif err != nil {\n\t\treturn \"\", errors.New(trimmed)\n\t}\n\treturn strings.TrimSpace(trimmed), nil\n}", "func (o *Project) GetRootDirectory() string {\n\tif o == nil || o.RootDirectory == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RootDirectory\n}", "func RootPkg(pkgs []*pkgs.Package) string {\n\troot := \"\"\n\tfor _, pkg := range pkgs {\n\t\tif root == \"\" {\n\t\t\troot = pkg.PkgPath\n\t\t} else {\n\t\t\troot = commonPrefix(root, pkg.PkgPath)\n\t\t}\n\t}\n\treturn root\n}", "func (r *Root) Root() (fs.Node, error) {\n\treturn newDir(nil, r.registry), nil\n}", "func (fs HgmFs) Root() (fs.Node, error) {\n\treturn &HgmDir{hgmFs: fs, localDir: \"/\"}, nil\n}", "func (r Repo) ImportPathPattern() string {\n\treturn r.Root + \"/...\"\n}", "func GetGoRootDir() string {\n\troseDir := GetRosieDir()\n\treturn path.Join(roseDir, goDirName)\n}", "func (f *Fs) rootSlash() string {\n\tif f.root == \"\" {\n\t\treturn f.root\n\t}\n\treturn f.root + \"/\"\n}", "func ProjectRoot() (path string) {\n\t_, err := 
ioutil.ReadFile(RootConfigFile)\n\tif err != nil {\n\t\tpath = setroot.Set(RootConfigFile, GlobalConfigDir())\n\t} else {\n\t\tdata, err := ioutil.ReadFile(RootConfigFile)\n\t\tif err != nil {\n\t\t\tstatuser.Error(\"Failed to read from global config file\", err, 1)\n\t\t}\n\t\tglobalConfig := struct {\n\t\t\tPath string `yaml:\"path\"`\n\t\t}{}\n\t\terr = yaml.Unmarshal(data, &globalConfig)\n\t\tif err != nil {\n\t\t\tstatuser.Error(\"Failed to parse yaml from global config file\", err, 1)\n\t\t}\n\t\tpath = globalConfig.Path\n\t}\n\treturn path\n}", "func WalkMatch(root, pattern string) ([]string, error) {\n\tvar matches []string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {\n\t\t\treturn err\n\t\t} else if matched {\n\t\t\tmatches = append(matches, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn matches, nil\n}", "func (h *Handler) AbsRoot() (string, error) {\n\tvar absRoot string\n\n\tif len(h.Root) > 0 {\n\t\tabsRoot = h.Root\n\t} else {\n\t\tabsRoot = filepath.Dir(h.Procfile)\n\t}\n\n\treturn filepath.Abs(absRoot)\n}", "func (h *Handler) AbsRoot() (string, error) {\n\tvar absRoot string\n\n\tif len(h.Root) > 0 {\n\t\tabsRoot = h.Root\n\t} else {\n\t\tabsRoot = filepath.Dir(h.Procfile)\n\t}\n\n\treturn filepath.Abs(absRoot)\n}", "func splitPattern(pattern string) (\n\trecursive bool,\n\tprefix, tail string,\n\terr error) {\n\tidx := strings.Index(pattern, \"**\")\n\tif idx == -1 {\n\t\trecursive = false\n\t\treturn\n\t}\n\tif idx > 0 && pattern[idx-1] != '/' {\n\t\t// XXX assumes patterns have been normalized to Unix syntax\n\t\terr = errors.New(\n\t\t\t\"recursive glob pattern ** may only occur \" +\n\t\t\t\t\"at the start of a pattern or immediately after /\")\n\t\treturn\n\t}\n\tif idx > len(pattern)-4 
|| pattern[idx+2] != '/' {\n\t\t// the minimum valid pattern is \"**/x\": \"**/\" and \"**\" are invalid\n\t\terr = errors.New(\n\t\t\t\"recursive glob pattern ** must be followed \" +\n\t\t\t\t\"by / and at least one more character\")\n\t\treturn\n\t}\n\trecursive = true\n\tif idx == 0 {\n\t\tprefix = \".\"\n\t} else {\n\t\tprefix = pattern[0 : idx-1]\n\t}\n\ttail = pattern[idx+3:]\n\treturn\n}", "func detectRoot(startDir string) (string, bool, error) {\n\tpath := startDir\n\tmaxParts := 256\n\tfor i := 0; i < maxParts; i++ {\n\t\traw, found, err := raw.LoadFromDir(path, path) // \"Guess\" path is the root dir.\n\t\tif err == nil && found {\n\t\t\tisRoot, err := raw.IsRoot()\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", false, err\n\t\t\t}\n\t\t\tif isRoot {\n\t\t\t\treturn path, true, nil\n\t\t\t}\n\t\t}\n\n\t\tif path == \"/\" {\n\t\t\tbreak\n\t\t}\n\t\tpath = filepath.Dir(path)\n\t}\n\n\treturn \"\", false, nil\n}", "func Glob(pattern string) (matches, dirs []string, err error) {\n\tmatches, err = filepath.Glob(pattern)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\twildIndices := wildElements(pattern)\n\n\tif len(wildIndices) > 0 {\n\t\tfor _, match := range matches {\n\t\t\tdir := filepath.Dir(match)\n\t\t\tdirElems := strings.Split(dir, string(filepath.Separator))\n\n\t\t\tfor _, index := range wildIndices {\n\t\t\t\tdirs = append(dirs, strings.Join(dirElems[:index],\n\t\t\t\t\tstring(filepath.Separator)))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (f *FileList) Root() string {\n\treturn f.root\n}", "func (mcm *MinioChunkManager) RootPath() string {\n\treturn mcm.rootPath\n}", "func (crypto LocalCryptomatorVault) GetRootDirectory() Directory {\n\tvar dir LocalDirectory\n\n\tdir.crypto = &crypto\n\tdir.decryptedPath = \"\"\n\tdir.uuid = \"\"\n\tdir.encryptedPath = crypto.getFilePath(dir.uuid)\n\n\tdir.updateDirectory()\n\n\treturn Directory(&dir)\n}", "func getProjectRoot(t *testing.T) (rootPath string) {\n\troot, err := 
os.Getwd()\n\trequire.NoError(t, err, \"could not get current working directory\")\n\tfor root != \"/\" { // Walk up path to find dir containing go.mod\n\t\tif _, err := os.Stat(filepath.Join(root, \"go.mod\")); os.IsNotExist(err) {\n\t\t\troot = filepath.Dir(root)\n\t\t} else {\n\t\t\treturn root\n\t\t}\n\t}\n\tt.Fatal(\"could not find project root\")\n\treturn\n}", "func GoModRootPath(path string) (string, error) {\n\tif path == \"\" {\n\t\treturn \"\", &PathIsNotSetError{}\n\t}\n\n\tpath = filepath.Clean(path)\n\n\tfor {\n\t\tif fi, err := os.Stat(filepath.Join(path, goModFilename)); err == nil && !fi.IsDir() {\n\t\t\treturn path, nil\n\t\t}\n\n\t\td := filepath.Dir(path)\n\t\tif d == path {\n\t\t\tbreak\n\t\t}\n\n\t\tpath = d\n\t}\n\n\treturn \"\", nil\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (c Cache) Root() Path {\n\treturn c.Join(\"root\")\n}", "func (p Path) BaseDir() Path {\r\n\tret := Path(filepath.Dir(string(p)))\r\n\treturn ret\r\n}", "func (pctx *processContext) ModuleRoot(ctx context.Context) (string, error) {\n\tfor dir, prevDir := pctx.workdir, \"\"; ; dir = filepath.Dir(dir) {\n\t\tif dir == prevDir {\n\t\t\treturn \"\", xerrors.Errorf(\"couldn't find a Go CDK project root at or above %s\", pctx.workdir)\n\t\t}\n\t\tprevDir = dir\n\t\tif _, err := os.Stat(filepath.Join(dir, \"go.mod\")); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := os.Stat(biomesRootDir(dir)); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn dir, nil\n\t}\n}", "func walkMatch(root, pattern string) ([]string, []string, error) {\r\n\tvar matches []string\r\n\tvar matchesRaw []string\r\n\terr := filepath.Walk(root, func(path string, info 
os.FileInfo, err error) error {\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tif info.IsDir() {\r\n\t\t\treturn nil\r\n\t\t}\r\n\t\tif matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {\r\n\t\t\treturn err\r\n\t\t} else if matched {\r\n\t\t\tmatches = append(matches, path)\r\n\t\t\tmatchesRaw = append(matchesRaw, filepath.Base(path))\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\tif err != nil {\r\n\t\treturn nil, nil, err\r\n\t}\r\n\treturn matches, matchesRaw, nil\r\n}", "func RootedPath(elem ...string) string { return filesys.RootedPath(elem...) }", "func Glob(pattern string) (matches []string, err error) {\n\tpattern = filepath.Clean(pattern)\n\tpattern = escape(pattern)\n\n\tif strings.Count(pattern, dirGlobOperator) > 1 {\n\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': the ** globbing operator may only be used once in a pattern\", pattern)\n\t}\n\n\tif !dirGlobOperatorUseValid(pattern) {\n\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': the ** globbing operator may only be used as path segment on its own, i.e. 
…/**/… or **/…\", pattern)\n\t}\n\n\tif strings.Contains(pattern, dirGlobOperator) {\n\t\tparts := strings.Split(pattern, dirGlobOperator)\n\t\tbasePattern, endPattern := filepath.Clean(parts[0]), filepath.Clean(parts[1])\n\n\t\tbaseCandidates, err := filepath.Glob(basePattern)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': %s\", pattern, err)\n\t\t}\n\n\t\tfor _, base := range directoriesOnly(baseCandidates) {\n\t\t\t_ = filepath.Walk(filepath.Clean(base), func(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif !info.IsDir() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tmatchesInBase, err := Glob(filepath.Join(path, endPattern))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tmatches = append(matches, matchesInBase...)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t} else {\n\t\tcandidates, err := filepath.Glob(pattern)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid pattern '%s': %s\", pattern, err)\n\t\t}\n\t\tmatches = filesOnly(candidates)\n\t}\n\n\treturn matches, nil\n}", "func (d *DHCPv4) RootPath() string {\n\treturn GetString(OptionRootPath, d.Options)\n}", "func GetRootPath(name string) string {\n\tfor prefix, num := range setting.RootPathPairs {\n\t\tif strings.HasPrefix(name, prefix) {\n\t\t\treturn joinPath(name, num)\n\t\t}\n\t}\n\n\tif strings.HasPrefix(name, \"gopkg.in\") {\n\t\tm := gopkgPathPattern.FindStringSubmatch(strings.TrimPrefix(name, \"gopkg.in\"))\n\t\tif m == nil {\n\t\t\treturn name\n\t\t}\n\t\tuser := m[1]\n\t\trepo := m[2]\n\t\tif len(user) == 0 {\n\t\t\tuser = \"go-\" + repo\n\t\t}\n\t\treturn path.Join(\"gopkg.in\", user, repo+\".\"+m[3])\n\t}\n\treturn name\n}", "func (this *SearchDir) SearchDirs(root, pattern string) {\n this.search(root, pattern, false, true)\n}", "func (d *FQDN) PathToRoot() []string {\n\tres\t:= make([]string, 1)\n\tfqdn\t:= string(*d)\n\tend\t:= len(fqdn)\n\tlast\t:= 0\n\tres[0]\t= 
fqdn\n\n\tquote\t:= false\n\tfor pos,char := range fqdn {\n\t\tswitch char {\n\t\t\tcase '\\\\':\n\t\t\t\tquote = !quote\n\n\t\t\tcase '.':\n\t\t\t\tif !quote {\n\t\t\t\t\tres\t= append(res, fqdn[pos:end] )\n\t\t\t\t\tlast\t= pos\n\t\t\t\t}\n\t\t\t\tquote = false\n\n\t\t\tdefault:\n\t\t\t\tquote = false\n\t\t}\n\t}\n\n\tif last < end-1 {\n\t\tres\t= append(res, \".\" )\n\t}\n\n\treturn res\n}", "func (cfg *Config) SetRoot(root string) *Config {\n\tcfg.RootDir = root\n\treturn cfg\n}", "func HostDirFromRoot(root string) func(string) (string, error) {\n\treturn func(host string) (string, error) {\n\t\tfor _, p := range hostPaths(root, host) {\n\t\t\tif _, err := os.Stat(p); err == nil {\n\t\t\t\treturn p, nil\n\t\t\t} else if !os.IsNotExist(err) {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\treturn \"\", errdefs.ErrNotFound\n\t}\n}", "func GetRootProjectDir() (string, error) {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor !strings.HasSuffix(wd, \"git2consul-go\") {\n\t\tif wd == \"/\" {\n\t\t\treturn \"\", errors.New(`cannot find project directory, \"/\" reached`)\n\t\t}\n\t\twd = filepath.Dir(wd)\n\t}\n\treturn wd, nil\n}", "func (db *Database) Root() *Group {\n\treturn db.root\n}", "func (o BlobReferenceInputDataSourceOutput) PathPattern() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BlobReferenceInputDataSource) *string { return v.PathPattern }).(pulumi.StringPtrOutput)\n}", "func RootPath() string {\n\treturn configRootPath\n}", "func Package(dir string) string {\n\tabs, err := filepath.Abs(dir)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"filepath.Abs: %s\", err))\n\t}\n\n\treturn filepath.Base(abs)\n}", "func (p *Project) Root() string {\n\treturn p.root\n}", "func GetRootlessDir() string {\n\treturn rootlessDir\n}", "func WatchRootDir(rootDir string, watchInterval time.Duration, open OpenFunc) error {\n\twatcher := newDirWatcher(open)\n\tt := time.Now().UTC()\n\tfor {\n\t\tif !dirExists(rootDir) 
{\n\t\t\ttime.Sleep(watchInterval)\n\t\t\tcontinue\n\t\t}\n\t\twatcher.poll(rootDir)\n\t\tsubDirs, err := filepath.Glob(path.Join(rootDir, \"*\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, subDir := range subDirs {\n\t\t\twatcher.poll(subDir)\n\t\t}\n\t\tif delay := watchInterval - time.Since(t); delay > 0 {\n\t\t\ttime.Sleep(delay)\n\t\t}\n\t\tt = time.Now().UTC()\n\t}\n}", "func (o *Project) SetRootDirectory(v string) {\n\to.RootDirectory = &v\n}", "func GetRootURL() string {\n\n\t// Alles nach / verwerfen\n\turlArray := strings.Split(PageURL, \"/\")\n\n\t// Altes Format:\n\t// https://blablabla.bla/blabla\n\t// Alles nach drittem / verwerfen\n\t// wird zu\n\t// https://blablabla.bla\n\n\t// die ersten beiden / wieder einsetzen und Wert zurückgeben\n\treturn urlArray[0] + \"//\" + urlArray[1] + urlArray[2]\n\n}", "func (p *Project) Glob(pattern string) (paths []string, err error) {\n\tprefix := p.BaseDir + string(filepath.Separator)\n\tfullPattern := prefix + pattern\n\tpaths, err = zglob.Glob(fullPattern)\n\tif err != nil {\n\t\treturn\n\t}\n\tprefixLen := len(prefix)\n\tfor n, fullpath := range paths {\n\t\tpaths[n] = fullpath[prefixLen:]\n\t}\n\treturn\n}", "func (fs *FS) Root() (fspkg.Node, error) {\n\tte, ok := fs.r.Lookup(\"\")\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to find root in stargz\")\n\t}\n\treturn &node{fs, te}, nil\n}", "func (k *Kademlia) FindRoot(req FindDirRequest, res *FindDirResult) {\n res.MsgID = CopyID(req.MsgID)\n res.Inode = req.StartInode\n res.Key = req.StartKey\n return\n}", "func (w *RootWalker) Root() *Root {\n\treturn w.r\n}", "func (tp *Template) Root(name string) *Template {\n\ttp.root = name\n\treturn tp\n}", "func (c ContainerInfo) RootDir() string {\n\treturn fmt.Sprintf(\"/var/lib/lxc/%s\", c.Name)\n}", "func (r *Ruby) BaseDir() string {\n\treturn path.Join(BaseDir(), \"ruby\")\n}", "func (o BlobStreamInputDataSourceOutput) PathPattern() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
BlobStreamInputDataSource) *string { return v.PathPattern }).(pulumi.StringPtrOutput)\n}", "func (l *fileLoader) Root() string {\n\treturn l.root\n}", "func findGitRoot(dir string) string {\n\torig := dir\n\tfor dir != \"\" && dir != \".\" && dir != \"/\" {\n\t\t_, err := os.Stat(filepath.Join(dir, \".git\"))\n\t\tif err == nil {\n\t\t\t// Found dir/.git, return dir.\n\t\t\treturn dir\n\t\t} else if !os.IsNotExist(err) {\n\t\t\t// Error finding .git, return original input.\n\t\t\treturn orig\n\t\t}\n\t\tdir, _ = filepath.Split(dir)\n\t\tdir = strings.TrimSuffix(dir, \"/\")\n\t}\n\treturn orig\n}", "func (o BlobOutputDataSourceOutput) PathPattern() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BlobOutputDataSource) *string { return v.PathPattern }).(pulumi.StringPtrOutput)\n}", "func (s *FSxWindowsFileServerVolumeConfiguration) SetRootDirectory(v string) *FSxWindowsFileServerVolumeConfiguration {\n\ts.RootDirectory = &v\n\treturn s\n}", "func (pstFile *File) GetRootFolder(formatType string) (Folder, error) {\n\tnodeBTreeOffset, err := pstFile.GetNodeBTreeOffset(formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\trootFolderNode, err := pstFile.FindBTreeNode(nodeBTreeOffset, IdentifierTypeRootFolder, formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\trootFolderNodeDataIdentifier, err := rootFolderNode.GetDataIdentifier(formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\tblockBTreeOffset, err := pstFile.GetBlockBTreeOffset(formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\trootFolderDataNode, err := pstFile.FindBTreeNode(blockBTreeOffset, rootFolderNodeDataIdentifier, formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\trootFolderNodeDataNodeHeapOnNode, err := pstFile.GetHeapOnNode(rootFolderDataNode, formatType)\n\n\tif err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\tpropertyContextItems, err := pstFile.GetPropertyContext(rootFolderNodeDataNodeHeapOnNode, formatType)\n\n\tif 
err != nil {\n\t\treturn Folder{}, err\n\t}\n\n\treturn Folder{\n\t\tIdentifier: IdentifierTypeRootFolder,\n\t\tPropertyContext: propertyContextItems,\n\t}, nil\n}", "func (q *pathCompression) Root(r int) int {\n\tfor {\n\t\tif r == q.IDs[r] {\n\t\t\tbreak\n\t\t}\n\t\tq.IDs[r] = q.IDs[q.IDs[r]]\n\t\tr = q.IDs[r]\n\t}\n\treturn r\n}", "func GetRootPath() string {\n\t_, filename, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\tpanic(\"Cannot get file into for env\")\n\t}\n\n\treturn path.Dir(path.Dir(filename))\n}", "func SetRootDirectory(rootDirectory string) {\n\troot = rootDirectory\n\tif root[len(root)-1] != '/' {\n\t\troot += \"/\"\n\t}\n\tlog.Println(\"Set root directory to : '\" + root + \"'\")\n}", "func applicationRoot(baseURL string) (base string, err error) {\n\tb, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif b.RawQuery != \"\" {\n\t\treturn \"\", fmt.Errorf(\"query component is not allowed: %s\", baseURL)\n\t}\n\tif b.Fragment != \"\" {\n\t\treturn \"\", fmt.Errorf(\"fragment component is not allowed: %s\", baseURL)\n\t}\n\tb.Path = strings.TrimRight(b.Path, \"/\")\n\treturn b.String(), nil\n}", "func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {\n\tif prefix, root, has := sm.rootxt.LongestPrefix(ip); has {\n\t\t// The non-matching tail of the import path could still be malformed.\n\t\t// Validate just that part, if it exists\n\t\tif prefix != ip {\n\t\t\t// TODO(sdboyer) commented until i find a proper description of how\n\t\t\t// to validate an import path\n\t\t\t//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+\"/\")) {\n\t\t\t//return \"\", fmt.Errorf(\"%q is not a valid import path\", ip)\n\t\t\t//}\n\t\t\t// There was one, and it validated fine - add it so we don't have to\n\t\t\t// revalidate it later\n\t\t\tsm.rootxt.Insert(ip, root)\n\t\t}\n\t\treturn root, nil\n\t}\n\n\tft, err := sm.deducePathAndProcess(ip)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tr, err := ft.rootf()\n\treturn 
ProjectRoot(r), err\n}", "func (c Config) RootOrDefault() string {\n\tif c.Root != \"\" {\n\t\treturn c.Root\n\t}\n\treturn \".\"\n}", "func (cs *copyState) glob(pattern string) (files []manager.RepositoryEntry) {\n\tif pattern == \"\" {\n\t\tcs.state.Exitf(\"empty path name\")\n\t}\n\n\t// Path on local machine?\n\tif isLocal(pattern) {\n\t\tfor _, repoPath := range cs.state.GlobLocal(subcmd.Tilde(pattern)) {\n\t\t\tfiles = append(files, manager.RepositoryEntry{\n\t\t\t\tName: path.Base(repoPath),\n\t\t\t\tEnabled: true,\n\t\t\t\tLastUpdate: time.Now(),\n\t\t\t\tPath: repoPath,\n\t\t\t})\n\t\t}\n\t\treturn files\n\t}\n\n\t// Extra check to catch use of relative path on local machine.\n\tif !strings.Contains(pattern, \"@\") {\n\t\tcs.state.Exitf(\"local pattern not qualified path: %s\", pattern)\n\t}\n\n\treturn files\n}", "func pathPattern(pattern *regexp.Regexp) *regexp.Regexp {\n\treturn suffixPattern(regexp.MustCompile(\"(^|/)\" + pattern.String()))\n}", "func (l *Loader) AppRoot() string {\n\tif appRoot, ok := l.lookUp(l.EnvironmentPrefix() + _appRoot); ok {\n\t\treturn appRoot\n\t}\n\n\tif cwd, err := os.Getwd(); err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to get the current working directory: %q\", err.Error()))\n\t} else {\n\t\treturn cwd\n\t}\n}", "func parsePattern(path string, stash Stash) string {\n\t// Standard placeholders\n\t// XXX: Add relaxed and wildcard placeholders\n\t// XXX: Add restricted placeholders\n\tpathPattern := \"\"\n\tlastIndex := 0\n\tfor _, v := range stdPlaceholder.FindAllStringSubmatchIndex(path, -1) {\n\t\t// v is an array of pairs of ints. Each pair is start and end\n\t\t// indexes of the match in the string. 
The first pair is the\n\t\t// entire match, and other pairs are the corresponding\n\t\t// submatches\n\t\tgap := path[lastIndex:v[0]]\n\t\tlastIndex = v[1]\n\n\t\tstart := path[v[2]:v[3]]\n\t\tif start != \"/\" {\n\t\t\tstart = \"\"\n\t\t}\n\n\t\t//placeType := path[v[4]:v[5]]\n\t\tplaceName := path[v[6]:v[7]]\n\t\t//end := path[v[8]:v[9]] // unused\n\n\t\tmatchType := \"+\" // required\n\t\tif _, ok := stash[placeName]; ok {\n\t\t\tmatchType = \"*\" // optional\n\t\t\tif start == \"/\" {\n\t\t\t\tstart += \"?\"\n\t\t\t}\n\t\t}\n\n\t\tpathPattern += gap + fmt.Sprintf(\"%s(?P<%s>[^/.]%s)\", start, placeName, matchType)\n\t}\n\n\t// If we never matched, there were no placeholders\n\tif pathPattern == \"\" {\n\t\treturn path\n\t}\n\n\treturn pathPattern\n}", "func FindRootRepoPath() (string, error) {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Error getting pwd: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tparts := strings.SplitAfter(pwd, string(os.PathSeparator))\n\tfor i, _ := range parts {\n\t\ttestPath := path.Join(parts[:i+1]...)\n\t\tif IsRepo(testPath) {\n\t\t\treturn testPath, nil\n\t\t}\n\t}\n\n\t// Return pwd in case we're cloning into pwd.\n\treturn pwd, fmt.Errorf(\"No .git found in %s or any parent dir.\", pwd)\n}", "func SplitAtDirGlobOperator(path, pattern string) (pathStart, patternStart, pathEnd, patternEnd string, err error) {\n\tif !dirGlobOperatorUseValid(pattern) {\n\t\treturn \"\", \"\", \"\", \"\", fmt.Errorf(\"invalid pattern '%s': the ** globbing operator may only be used as path segment on its own, i.e. 
…/**/… or **/…\", pattern)\n\t}\n\n\tparts := strings.Split(pattern, string(filepath.Separator)+dirGlobOperator+string(filepath.Separator))\n\tpatternStart = parts[0]\n\tif len(parts) == 2 {\n\t\tpatternEnd = parts[1]\n\t}\n\n\tnumSegmentsStart := len(Segments(patternStart))\n\tnumSegmentsEnd := len(Segments(patternEnd))\n\n\tsegments := Segments(path)\n\n\tpathStart = filepath.Join(segments[:numSegmentsStart]...)\n\tpathEnd = filepath.Join(segments[len(segments)-numSegmentsEnd:]...)\n\n\tif strings.HasPrefix(path, \"/\") && pathStart != \"\" {\n\t\tpathStart = \"/\" + pathStart\n\t}\n\n\treturn\n}" ]
[ "0.62859017", "0.5933551", "0.59157205", "0.58893263", "0.5866653", "0.58485776", "0.5830294", "0.58021134", "0.5781904", "0.5775506", "0.57523704", "0.56884545", "0.56441873", "0.5570755", "0.54796016", "0.542803", "0.5410576", "0.5341367", "0.5301002", "0.5298957", "0.523072", "0.5210841", "0.5151026", "0.5088709", "0.5057161", "0.49974915", "0.49967945", "0.49924946", "0.4953245", "0.4917012", "0.48858806", "0.48801282", "0.4856136", "0.48485997", "0.48458496", "0.48419255", "0.48187625", "0.48187625", "0.481259", "0.48091108", "0.479438", "0.47909528", "0.47863546", "0.47859526", "0.47843936", "0.4764737", "0.47631845", "0.47631845", "0.47631845", "0.47631845", "0.47631845", "0.47631845", "0.47631845", "0.47527534", "0.47371024", "0.4733632", "0.47335643", "0.46908638", "0.46815512", "0.466703", "0.46638998", "0.46599913", "0.46532393", "0.4648505", "0.46418592", "0.46374163", "0.46182197", "0.4614138", "0.4593422", "0.45668837", "0.45568216", "0.45557877", "0.45529652", "0.45498234", "0.45455146", "0.4541177", "0.45276585", "0.45048112", "0.4491656", "0.44898385", "0.44871294", "0.44859052", "0.4485452", "0.44694546", "0.4454736", "0.44493702", "0.44482931", "0.4445768", "0.44381088", "0.44352648", "0.4429717", "0.4429074", "0.44263014", "0.4423344", "0.4422036", "0.44039464", "0.43987373", "0.4398611", "0.43980202", "0.43918693" ]
0.78038275
0
walkFiles walks a directory starting at root returning all directories and files include those found in subdirectories.
func walkFiles(root string) ([]*FileAsset, error) { fileAssets := []*FileAsset{} var lock sync.Mutex visitor := func(path string, info os.FileInfo, err error) error { // if err != nil { // fmt.Println("visitor err", err.Error(), "root", root) // } if err == nil { lock.Lock() fileAssets = append(fileAssets, &FileAsset{FileInfo: info, Path: filepath.ToSlash(path)}) lock.Unlock() } return nil } err := walk.Walk(root, visitor) if err != nil { return nil, err } return fileAssets, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Walk(dir string, includeDir bool) ([]File, error) {\n\tvar files []File\n\tcleanDir := dir\n\n\t// Strip out ./ from the beginning of path\n\t// if it exists\n\tif strings.Index(dir, \"./\") == 0 {\n\t\tcleanDir = dir[2:]\n\t}\n\n\t// Build a list of files from cleanDir\n\terr := filepath.Walk(cleanDir, func(path string, info os.FileInfo, err error) error {\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// We don't have to include the root directory\n\t\tif cleanDir == path {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Don't include directories\n\t\tif !includeDir && info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tfiles = append(files, File{Path: path, Info: info})\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn files, err\n}", "func walkFiles(done <-chan struct{}, root string) (<-chan string, <-chan error) {\n\tpaths := make(chan string)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\t// Close the paths channel after return\n\t\tdefer close(paths)\n\t\t// No select needed since errc is buffered\n\t\terrc <- filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase paths <- path:\n\t\t\tcase <-done:\n\t\t\t\treturn errors.New(\"walk canceled\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}()\n\t// Return channel with paths\n\treturn paths, errc\n}", "func walkFiles(done <-chan struct{}, root string) (<-chan string, <-chan error) {\n\tpaths := make(chan string)\n\terrc := make(chan error, 1)\n\tgo func() { // HL\n\t\t// Close the paths channel after Walk returns.\n\t\tdefer close(paths) // HL\n\t\t// No select needed for this send, since errc is buffered.\n\t\terrc <- filepath.Walk(root, func(path string, info os.FileInfo, err error) error { // HL\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase paths <- path: // HL\n\t\t\tcase <-done: // HL\n\t\t\t\treturn errors.New(\"walk canceled\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}()\n\treturn paths, errc\n}", "func WalkFilesInPath(dirPath string, callback filepath.WalkFunc) error {\n \n fullPath, err := filepath.Abs(dirPath)\n\n if err != nil {\n return err\n }\n\n\tcb := func(path string, fi os.FileInfo, err error) error { \n\n goodFile, res := TestFile(fi)\n\n if !goodFile {\n return res\n }\n\n\t\treturn callback(path, fi, err)\n\t}\n \n return filepath.Walk(fullPath, cb) \n}", "func (s *Scraper) filePathWalkDir(root string) ([]string, error) {\n\tvar files []string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn files, err\n}", "func (gen *Generator) WalkFiles(inPath string, file os.FileInfo, err error) error {\n\t// identify template files\n\tisTemplate, err := gen.Template.FileIsTemplate(inPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// skip all directories\n\tif file.IsDir() {\n\t\treturn nil\n\t}\n\n\t// skip template files if we are only generating statics.\n\tif isTemplate && gen.Options.StaticOnly == true {\n\t\treturn nil\n\t}\n\n\tLog.Info(\"walk_files\", fmt.Sprintf(\"source %s\", inPath))\n\n\toutPath := strings.Replace(inPath, gen.Template.Files, gen.Project.Directory, 1)\n\n\tif err := os.MkdirAll(filepath.Dir(outPath), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tif isTemplate {\n\t\tLog.Info(\"walk_files\", fmt.Sprintf(\"expanding template %s\", outPath))\n\t\toutPath = strings.Replace(outPath, \".tmpl\", \"\", 1)\n\n\t\tgenerated, err := templates.ExpandFile(inPath, gen.Project.helpers)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tLog.Info(\"walk_files\", fmt.Sprintf(\"writing %s\", outPath))\n\t\treturn utils.WriteFile(outPath, generated)\n\t}\n\n\treturn gen.Copy(inPath, outPath)\n}", "func 
FilePathWalkDir(root string) ([]string, error) {\n\tvar files []string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn files, err\n}", "func FilePathWalkDir(root string) ([]string, error) {\n\tvar files []string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn files, err\n}", "func Walk(root File, depth int, visit func(f File, depth int) error) (err error) {\n\tif err := visit(root, depth); err != nil {\n\t\treturn err\n\t}\n\n\tif root.IsDirectory() {\n\t\tfor {\n\t\t\tf, err := root.NextFile()\n\t\t\tif err != nil {\n\t\t\t\tif err.Error() == \"EOF\" {\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err := Walk(f, depth+1, visit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func collectFiles(root string) {\n\n\tobjectsInPath, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\t// handling file in root path\n\t\tif err.Error() == \"readdirent: not a directory\" {\n\t\t\tprocessFile(root)\n\t\t\treturn\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n\n\t// walk throug all objects\n\tfor _, object := range objectsInPath {\n\t\tfullPath := utils.JoinPaths(root, object.Name())\n\n\t\t// skip hidden\n\t\tif !searchHidden == true && strings.HasPrefix(object.Name(), \".\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// excluded paths\n\t\tif len(excludedPaths) > 0 && utils.IsThisIn(fullPath, excludedPaths) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// enter directories or append file\n\t\tswitch object.IsDir() {\n\t\tcase true:\n\t\t\tcollectFiles(fullPath)\n\t\tcase false:\n\t\t\tallFiles = append(allFiles, fullPath)\n\t\t}\n\t}\n}", "func ListFiles(root, pattern string) (files []string, err error) {\r\n\t// Check if path exists.\r\n\texists, err := PathExists(root)\r\n\tif 
err != nil {\r\n\t\treturn files, err\r\n\t}\r\n\tif !exists {\r\n\t\treturn files, fmt.Errorf(\"filehelper.ListFiles: path %s does not exist\", root)\r\n\t}\r\n\r\n\terr = filepath.Walk(root, func(file string, info os.FileInfo, walkErr error) error {\r\n\t\t// Convert file path to /, otherwise match will not work (for some reason).\r\n\t\tfile = filepath.ToSlash(file)\r\n\t\tmatch, matchErr := filepath.Match(pattern, file)\r\n\t\tif matchErr != nil {\r\n\t\t\treturn fmt.Errorf(\"filehelper.ListFiles: match error %s\", matchErr.Error())\r\n\t\t}\r\n\t\tif match && !info.IsDir() {\r\n\t\t\trelpath, relErr := filepath.Rel(root, file)\r\n\t\t\tif relErr != nil {\r\n\t\t\t\t// Theoretically this shouldn't happen because we are only parsing\r\n\t\t\t\t// files under root, every file path should be relative to root.\r\n\t\t\t\t// But if it does move on.\r\n\t\t\t\t// files = append(files, file)\r\n\t\t\t\treturn nil\r\n\t\t\t}\r\n\t\t\tfiles = append(files, relpath)\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\treturn files, err\r\n}", "func WalkFiles(hfs fs.FS, prefixPath string, walker FileWalkerFunc) error {\n\twalkErr := fs.WalkDir(hfs, prefixPath, func(filePath string, d fs.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"fs walk error\")\n\t\t}\n\t\tif d.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tf, err := hfs.Open(filePath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"wrong filepath: %q\", filePath)\n\t\t}\n\t\tdefer f.Close()\n\n\t\tdata, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"read error for %q\", filePath)\n\t\t}\n\t\tsubPath := strings.TrimPrefix(filePath, prefixPath+\"/\")\n\t\terr = walker(data, subPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn walkErr\n}", "func walkFiles(p Pipes) {\n\tdefer p.Done.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase root_ := <-p.Input:\n\t\t\tif root_ == nil {\n\t\t\t\t// Channel is 
closed:\n\t\t\t\treturn\n\t\t\t}\n\t\t\troot := root_.(string)\n\t\t\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase p.Output <- path:\n\t\t\t\tcase <-p.Quit:\n\t\t\t\t\treturn fmt.Errorf(\"done\")\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t//log.Fatal(err)\n\t\t\t}\n\t\tcase <-p.Quit:\n\t\t\treturn\n\t\t}\n\t}\n}", "func walkFiles(dir string) ([]Page, string) {\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, fmt.Sprintf(\"Failed to read pages dir (%s)\", err)\n\t}\n\tpgs := []Page{}\n\tfor _, f := range files {\n\t\tif f.IsDir() {\n\t\t\tpgs2, errMsg := walkFiles(dir + \"/\" + f.Name())\n\t\t\tif errMsg != \"\" {\n\t\t\t\treturn nil, errMsg\n\t\t\t}\n\t\t\tpgs = append(pgs, pgs2...)\n\t\t} else {\n\t\t\tpgs = append(pgs, buildPage(dir, f.Name()))\n\t\t}\n\t}\n\treturn pgs, \"\"\n}", "func WalkFileTree(directory Directory, consumer func(file File)) {\n\tfor _, file := range directory.Files() {\n\t\tconsumer(file)\n\t}\n\n\tfor _, dir := range directory.Directories() {\n\t\tWalkFileTree(dir, consumer)\n\t}\n}", "func FindAllFiles(ctx context.Context, startPath, matchGlob string) (output []string, err error) {\n\terr = filepath.Walk(startPath, func(path string, info os.FileInfo, walkErr error) error {\n\t\tif walkErr != nil {\n\t\t\treturn walkErr\n\t\t}\n\t\tif info.IsDir() {\n\t\t\tif path == startPath {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif strings.HasPrefix(info.Name(), \"_\") {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tif strings.HasPrefix(info.Name(), \".\") {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tif info.Name() == \"node_modules\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tif strings.HasPrefix(path, \"vendor/\") {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\treturn 
nil\n\t\t}\n\n\t\tmatched, err := filepath.Match(matchGlob, info.Name())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif matched {\n\t\t\tif !strings.HasPrefix(path, \"./\") {\n\t\t\t\tpath = \"./\" + path\n\t\t\t}\n\t\t\toutput = append(output, path)\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}", "func (this *SearchDir) SearchFiles(root, pattern string) {\n this.search(root, pattern, true, false)\n}", "func Walk(hfs http.FileSystem, root string, walkFn filepath.WalkFunc) error {\n\tdh, err := hfs.Open(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdi, err := dh.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfis, err := dh.Readdir(-1)\n\tdh.Close()\n\tif err = walkFn(root, di, err); err != nil {\n\t\tif err == filepath.SkipDir {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfor _, fi := range fis {\n\t\tfn := path.Join(root, fi.Name())\n\t\tif fi.IsDir() {\n\t\t\tif err = Walk(hfs, fn, walkFn); err != nil {\n\t\t\t\tif err == filepath.SkipDir {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err = walkFn(fn, fi, nil); err != nil {\n\t\t\tif err == filepath.SkipDir {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func walk(basePath string, files chan<- file) {\n\tfilepath.Walk(basePath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\t// skip hidden directories like .git\n\t\t\tif strings.HasPrefix(info.Name(), \".\") {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t} else {\n\t\t\tif info.Name() == \".DS_Store\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tabs, err := filepath.Abs(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel, err := filepath.Rel(basePath, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel = filepath.ToSlash(rel)\n\t\t\tfiles <- file{path: rel, absPath: abs, size: info.Size(), lastModified: info.ModTime()}\n\t\t}\n\t\treturn 
nil\n\t})\n\tclose(files)\n}", "func walk(dir string) (files []string, err error) {\n\tdir, err = filepath.Abs(dir)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = filepath.Walk(dir,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\trel, err := filepath.Rel(dir, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfiles = append(files, rel)\n\n\t\t\treturn nil\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn files, nil\n}", "func readFiles(rootpath string) {\n\tvar realpath, symname string\n\t// path walker error channel\n\terrorChannel := make(chan os.Error, 64)\n\n\t// check if this is a symlink\n\tif dir, err := os.Stat(rootpath); err == nil {\n\t\tif dir.FollowedSymlink {\n\t\t\trealpath, _ = os.Readlink(rootpath)\n\t\t\tif realpath[0] != '/' {\n\t\t\t\trealpath = rootpath[0:strings.LastIndex(rootpath, \"/\")+1] + realpath\n\t\t\t}\n\t\t\tsymname = rootpath[len(rootPath)+1:]\n\t\t} else {\n\t\t\trealpath = rootpath\n\t\t}\n\t} else {\n\t\tlogger.Warn(\"%s\\n\", err)\n\t}\n\n\t// visitor for the path walker\n\tvisitor := &goFileVisitor{rootpath, realpath, symname}\n\n\tpath.Walk(visitor.realpath, visitor, errorChannel)\n\n\tselect {\n\tcase err := <-errorChannel:\n\t\tlogger.Error(\"Error while traversing directories: %s\\n\", err)\n\tdefault:\n\t}\n}", "func gatherFiles(root string, cfg Config) ([]string, error) {\n\tfiles := []string{}\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\trel, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\trel = path\n\t\t}\n\n\t\tswitch rel {\n\t\tcase \".git\":\n\t\t\treturn filepath.SkipDir\n\t\tcase ConfigFileName:\n\t\t\treturn nil\n\t\t}\n\n\t\tif !cfg.shouldExamine(root, path) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, rel)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}", "func Scan(root string, cfg Config) ([]string, error) {\n\tfiles := []string{}\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\trel, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\trel = path\n\t\t}\n\n\t\tif rel == \".git\" {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif !cfg.shouldExamine(root, path) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, rel)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}", "func (this *SearchDir) search(root, pattern string, files bool, dirs bool) {\n err := filepath.Walk(\n root,\n func(path string, info os.FileInfo, err error) error {\n // err chek\n if err != nil {\n return err\n }\n\n // type matching\n if info.IsDir() && !dirs {\n return nil\n }\n\n if !info.IsDir() && !files {\n return nil\n }\n\n // name matching\n matched, err := filepath.Match(pattern, info.Name())\n if !matched {\n return nil\n }\n\n if err != nil {\n return err\n }\n\n // relevant result\n this.FileChan <- path\n return nil\n },\n )\n\n if err != nil {\n this.ErrChan <- fmt.Sprintf(\n \"SearchDir.search error: %v (%v)\",\n root,\n err,\n )\n }\n\n this.DoneChan <- true\n}", "func Pathwalk(root string, walkFn filepath.WalkFunc) error {\n\tfileInfo, err := os.Lstat(root)\n\n\terr = walkFn(root, fileInfo, err)\n\tif err == filepath.SkipDir {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t} else if !fileInfo.IsDir() {\n\t\treturn nil\n\t}\n\n\tdir, err := os.Open(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dir.Close()\n\n\tchildren, err := dir.Readdirnames(-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range children {\n\t\terr = Pathwalk(root+\"/\"+name, walkFn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func findDirectoriesIn(dir string) (dirs []string) {\n\n\t// Get all files in current 
dir\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tlog.Fatal(\"Error reading dir\", err)\n\t}\n\n\t// Investigate each file...\n\tfor _, file := range files {\n\n\t\t// Create the path starting from the starting dir (current directory)\n\t\tpath := path.Join(dir, file.Name())\n\n\t\t// We want non hidden directories (no .git)...\n\t\tif file.IsDir() && !strings.HasPrefix(file.Name(), DIR_TO_WATCH) {\n\n\t\t\t// Aggregate them and go deeper...\n\t\t\tdirs = append(dirs, path)\n\t\t\tdirs = append(dirs, findDirectoriesIn(path)...)\n\t\t}\n\t}\n\treturn\n}", "func walk(dir string) ([]string, error) {\n\n\tfileList := []string{}\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, e error) error {\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\t// check if it is a regular file (not dir)\n\t\tif info.Mode().IsRegular() {\n\t\t\tfileList = append(fileList, path)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn fileList, err\n}", "func (di *directoryInfo) walk() {\n\tif di.regexp == nil {\n\t\treturn\n\t}\n\n\tdi.mu.Lock()\n\tdefer di.mu.Unlock()\n\n\tdi.root.Walk(func(fi fileinfo.FileInfo) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\t// append\n\t\tif _, _, err := di.addPath(fi); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func FileIterateDir(path, filter string, childrendir bool, callback func(file string) bool) error {\n\tisSelf := true\n\terr := filepath.Walk(path, func(path string, f os.FileInfo, err error) error {\n\t\tif f == nil {\n\t\t\treturn err\n\t\t}\n\t\tif f.IsDir() {\n\t\t\tif childrendir || isSelf {\n\t\t\t\tisSelf = false\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif filter != \"\" {\n\t\t\tfs := strings.Split(filter, \"|\")\n\t\t\tisMatch := false\n\t\t\tfor _, v := range fs {\n\t\t\t\tif strings.HasSuffix(path, v) {\n\t\t\t\t\tisMatch = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isMatch {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tif !callback(path) 
{\n\t\t\treturn fmt.Errorf(\"walk file over\")\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn err\n}", "func listFiles(root string) ([]string, error) {\n var paths = make([]string, 0)\n var walker filepath.WalkFunc = func(path string, info os.FileInfo, err error) error {\n if err != nil {\n return err\n } else {\n if ! info.IsDir() {\n path, _ = filepath.Rel(root, path)\n paths = append(paths, path)\n }\n }\n return nil\n }\n err := filepath.Walk(root, walker)\n\n return paths, err\n}", "func walk(path string, info os.FileInfo, pathFilter PathFilter, walkFn filepath.WalkFunc) (fileList []string, direcotyList []string, err error) {\n\terr = walkFn(path, info, nil)\n\tif err != nil {\n\t\tif info.IsDir() && err == filepath.SkipDir {\n\t\t\treturn fileList, direcotyList, nil\n\t\t}\n\t\treturn fileList, direcotyList, err\n\t}\n\n\tif !info.IsDir() {\n\t\tfileListLen := len(fileList)\n\t\tinfoName := info.Name()\n\t\tif fileListLen < fileListDefault {\n\t\t\tfileList[fileListLen] = infoName\n\t\t} else {\n\t\t\tfileList = append(fileList, infoName)\n\t\t}\n\t\treturn fileList, direcotyList, nil\n\t}\n\n\tfilterResult := filterPath(path, info, pathFilter) //filter\n\n\tif filterResult == false { //filtered\n\t\treturn fileList, direcotyList, filepath.SkipDir\n\t}\n\n\tdirecotyListLen := len(direcotyList)\n\tinfoName := info.Name()\n\tif direcotyListLen < direcotyListDefault {\n\t\tdirecotyList[direcotyListLen] = infoName\n\t} else {\n\t\tdirecotyList = append(direcotyList, infoName)\n\t}\n\n\tnames, err := readDirNames(path)\n\tif err != nil {\n\t\terr = walkFn(path, info, err)\n\t\treturn fileList, direcotyList, err\n\t}\n\n\tfor _, name := range names {\n\t\tfilename := filepath.Join(path, name)\n\t\tfileInfo, err := os.Lstat(filename)\n\t\tif err != nil {\n\t\t\tif err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {\n\t\t\t\treturn fileList, direcotyList, err\n\t\t\t}\n\t\t} else {\n\t\t\tpathFilterResult := filterPath(path, fileInfo, 
pathFilter) //filter\n\t\t\tif pathFilterResult {\n\t\t\t\tfileList, direcotyList, err = walk(filename, fileInfo, pathFilter, walkFn)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif !fileInfo.IsDir() || err != filepath.SkipDir {\n\t\t\t\t\treturn fileList, direcotyList, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn fileList, direcotyList, nil\n}", "func walkRecursive(files chan *os.File, fname string) {\n\tfile, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Println(\"bad\", err)\n\t\tos.Exit(1)\n\t}\n\tif strings.HasSuffix(fname, \".txt\") {\n\t\tfiles <- file\n\t\treturn\n\t}\n\tdirContents, err := file.Readdirnames(0)\n\tif err != nil {\n\t\t// this is a non-txt file, non-directory, ignore\n\t\tfile.Close()\n\t\treturn\n\t}\n\t// fname is a directory, its contents are in 'dirContents'\n\t{\n\t\terr := os.Chdir(fname)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"bad\", err)\n\t\t}\n\t}\n\tfor _, name := range dirContents {\n\t\twalkRecursive(files, name)\n\t}\n\tos.Chdir(\"..\")\n\tfile.Close()\n}", "func Walk(client *goftp.Client, root string, walkFn filepath.WalkFunc) (ret error) {\n\tdirsToCheck := make(chan string, 100)\n\n\tvar workCount int32 = 1\n\tdirsToCheck <- root\n\n\tfor dir := range dirsToCheck {\n\t\tgo func(dir string) {\n\t\t\tfiles, err := client.ReadDir(dir)\n\n\t\t\tif err != nil {\n\t\t\t\tif err = walkFn(dir, nil, err); err != nil && err != filepath.SkipDir {\n\t\t\t\t\tret = err\n\t\t\t\t\tclose(dirsToCheck)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, file := range files {\n\t\t\t\tp := path.Join(dir, file.Name())\n\n\t\t\t\tif err = walkFn(p, file, nil); err != nil {\n\t\t\t\t\tif file.IsDir() && err == filepath.SkipDir {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tret = err\n\t\t\t\t\tclose(dirsToCheck)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif file.IsDir() {\n\t\t\t\t\tatomic.AddInt32(&workCount, 1)\n\t\t\t\t\tdirsToCheck <- path.Join(dir, file.Name())\n\t\t\t\t} else {\n\t\t\t\t\tbuf := 
new(bytes.Buffer)\n\t\t\t\t\tclient.Retrieve(p, buf)\n\t\t\t\t\tunzip(buf)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tatomic.AddInt32(&workCount, -1)\n\t\t\tif workCount == 0 {\n\t\t\t\tclose(dirsToCheck)\n\t\t\t}\n\t\t}(dir)\n\t}\n\n\treturn ret\n}", "func getAllFileNamesInDir(root string) ([]string, error) {\n\tvar allFiles []string\n\terr := filepath.Walk(root,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"walk into %q: %w\",\n\t\t\t\t\tpath, err)\n\t\t\t}\n\t\t\tif !info.IsDir() {\n\t\t\t\tallFiles = append(allFiles, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\treturn allFiles, err\n}", "func AllFilePathsIn(dirPath string, ignoreSubPath string, fileName ustr.Pat) (allFilePaths []string) {\n if ignoreSubPath != \"\" && !ustr.Pref(ignoreSubPath, dirPath) {\n ignoreSubPath = filepath.Join(dirPath, ignoreSubPath)\n }\n ok1, ok2 := ignoreSubPath == \"\", fileName == \"\"\n WalkAllFiles(dirPath, func(curfilepath string, _ os.FileInfo) (keepwalking bool) {\n if (ok1 || !ustr.Pref(curfilepath, ignoreSubPath)) && (ok2 || fileName.Match(filepath.Base(curfilepath))) {\n allFilePaths = append(allFilePaths, curfilepath)\n }\n return true\n })\n return\n}", "func FindFilesRecursive(dir string, deep int) (files []string, err error) {\n\tentries, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read dir %s failed: %v\", dir, err)\n\t}\n\tfor _, e := range entries {\n\t\tif e.IsDir() && deep > 0 {\n\t\t\tnestedDir := filepath.Join(dir, e.Name())\n\t\t\tif nested, err := FindFilesRecursive(nestedDir, deep-1); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tfiles = append(files, nested...)\n\t\t\t}\n\t\t} else if !e.IsDir() && strings.HasSuffix(e.Name(), APIFileExtension) {\n\t\t\tfiles = append(files, filepath.Join(dir, e.Name()))\n\t\t}\n\t}\n\treturn files, nil\n}", "func WalkDirs(name string, includeDirsInList bool, files ...string) ([]string, error) {\n\tf, err := FS.OpenFile(CTX, 
name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range fileInfos {\n\t\tfilename := path.Join(name, info.Name())\n\n\t\tif includeDirsInList || !info.IsDir() {\n\t\t\tfiles = append(files, filename)\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfiles, err = WalkDirs(filename, includeDirsInList, files...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn files, nil\n}", "func WalkDirs(name string, includeDirsInList bool, files ...string) ([]string, error) {\n\tf, err := FS.OpenFile(CTX, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range fileInfos {\n\t\tfilename := path.Join(name, info.Name())\n\n\t\tif includeDirsInList || !info.IsDir() {\n\t\t\tfiles = append(files, filename)\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfiles, err = WalkDirs(filename, includeDirsInList, files...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn files, nil\n}", "func WalkDirs(name string, includeDirsInList bool, files ...string) ([]string, error) {\n\tf, err := FS.OpenFile(CTX, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range fileInfos {\n\t\tfilename := path.Join(name, info.Name())\n\n\t\tif includeDirsInList || !info.IsDir() {\n\t\t\tfiles = append(files, filename)\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfiles, err = WalkDirs(filename, includeDirsInList, files...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn files, nil\n}", "func 
WalkDirs(name string, includeDirsInList bool, files ...string) ([]string, error) {\n\tf, err := FS.OpenFile(CTX, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range fileInfos {\n\t\tfilename := path.Join(name, info.Name())\n\n\t\tif includeDirsInList || !info.IsDir() {\n\t\t\tfiles = append(files, filename)\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tfiles, err = WalkDirs(filename, includeDirsInList, files...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn files, nil\n}", "func Walk(client *Client, root string, walkFn filepath.WalkFunc) (ret error) {\n\tdirsToCheck := make(chan string, 100)\n\n\tvar workCount int32 = 1\n\tdirsToCheck <- root\n\n\tfor dir := range dirsToCheck {\n\t\tgo func(dir string) {\n\t\t\tfiles, err := client.ReadDir(dir)\n\n\t\t\tif err != nil {\n\t\t\t\tif err = walkFn(dir, nil, err); err != nil && err != filepath.SkipDir {\n\t\t\t\t\tret = err\n\t\t\t\t\tclose(dirsToCheck)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, file := range files {\n\t\t\t\tif err = walkFn(path.Join(dir, file.Name()), file, nil); err != nil {\n\t\t\t\t\tif file.IsDir() && err == filepath.SkipDir {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tret = err\n\t\t\t\t\tclose(dirsToCheck)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif file.IsDir() {\n\t\t\t\t\tatomic.AddInt32(&workCount, 1)\n\t\t\t\t\tdirsToCheck <- path.Join(dir, file.Name())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tatomic.AddInt32(&workCount, -1)\n\t\t\tif workCount == 0 {\n\t\t\t\tclose(dirsToCheck)\n\t\t\t}\n\t\t}(dir)\n\t}\n\n\treturn ret\n}", "func directoryWalk(files *[]string) filepath.WalkFunc {\n\treturn func(path string, info os.FileInfo, err error) error {\n\t\t*files = append(*files, path)\n\t\treturn err\n\t}\n}", "func searchForFiles(pathToDir string) {\n\tfiles, err := 
ioutil.ReadDir(pathToDir)\n\tif err != nil {\n\t\t//fmt.Println(err)\n\t\treturn\n\t}\n\t// loop all files in current dir, throw away the index var\n\tfor _, file := range files {\n\t\tif stringLooper(file.Name(), ignoreNames) {\n\t\t\t//fmt.Printf(\"the file %s%s, matched for an ignore file name! excluding file!!\", pathToDir, file.Name())\n\t\t} else {\n\t\t\t//fmt.Println(file.Name())\n\t\t\tif file.IsDir() {\n\t\t\t\t//fmt.Println(\"--DEBUG-- File is a dir, recurse time!\")\n\t\t\t\t// Need to add the tailing slash for new base directory\n\t\t\t\tdirName := file.Name() + \"/\"\n\t\t\t\tfullPath := strings.Join([]string{pathToDir, dirName}, \"\")\n\t\t\t\t// Recurse into the new base directory (note, this makes it a depth first search)\n\t\t\t\tsearchForFiles(fullPath)\n\t\t\t} else {\n\t\t\t\t// If we find what we are looking for\n\t\t\t\tif searchFileForCriteria(pathToDir, file.Name()) {\n\t\t\t\t\tfullPath := strings.Join([]string{pathToDir, file.Name()}, \"\")\n\t\t\t\t\t//fmt.Printf(\"--DEBUG-- The file at %s, is worth taking\\n\", fullPath)\n\t\t\t\t\tKeyz = append(Keyz, fullPath)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func recurse(fromDir string, config Config, repo Repository,\n\timports map[string]string) error {\n\n\twalk := func(sub string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif info.Name()[0] == '.' 
{\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\treturn dir(sub, config, repo, imports)\n\t}\n\treturn filepath.Walk(fromDir, walk)\n}", "func walk(fs http.FileSystem, path string, info os.FileInfo, walkFn WalkFunc) error {\n\tif !info.IsDir() {\n\t\treturn walkFn(path, info, nil)\n\t}\n\tnames, err := readDirNames(fs, path)\n\terr1 := walkFn(path, info, err)\n\t// If err != nil, walk can't walk into this directory.\n\t// err1 != nil means walkFn want walk to skip this directory or stop walking.\n\t// Therefore, if one of err and err1 isn't nil, walk will return.\n\tif err != nil || err1 != nil {\n\t\t// The caller's behavior is controlled by the return value, which is decided\n\t\t// by walkFn. walkFn may ignore err and return nil.\n\t\t// If walkFn returns SkipDir, it will be handled by the caller.\n\t\t// So walk should return whatever walkFn returns.\n\t\treturn err1\n\t}\n\tfor _, name := range names {\n\t\tfilename := filepath.Join(path, name)\n\t\tfileInfo, err := fsstat(fs, filename)\n\t\tif err != nil {\n\t\t\tif err := walkFn(filename, fileInfo, err); err != nil && err != SkipDir {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = walk(fs, filename, fileInfo, walkFn)\n\t\t\tif err != nil {\n\t\t\t\tif !fileInfo.IsDir() || err != SkipDir {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func walkDir(path string) ([]string, []string) {\n\tvar dirs []string\n\tvar files []string\n\terr := filepath.Walk(path, func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() {\n\t\t\tdirs = append(dirs, path)\n\t\t} else {\n\t\t\tfiles = append(files, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\treturn dirs, files\n}", "func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {\n\tmatchedFiles := make(map[string]os.FileInfo)\n\twalkfn := func(path string, info os.FileInfo, _ error) error {\n\t\tif info != nil && g.Match(path) {\n\t\t\tmatchedFiles[path] = 
info\n\t\t}\n\t\treturn nil\n\t}\n\tfilepath.Walk(root, walkfn)\n\treturn matchedFiles\n}", "func ListFiles(rootDir string, predicate FileInfoPredicate) []string {\n\n\tvar files []string\n\n\terr := filepath.Walk(rootDir, func(filePath string, fileInfo os.FileInfo, err error) error {\n\n\t\tif predicate == nil || predicate(fileInfo) {\n\t\t\tfiles = append(files, filePath)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn files\n}", "func walk(path string) ([]io.Reader, error) {\n\tp := filepath.Clean(path)\n\tstat, err := os.Stat(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !stat.IsDir() {\n\t\tfile, err := os.Open(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn []io.Reader{file}, nil\n\t}\n\n\tvar in []io.Reader\n\twerr := filepath.Walk(p, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tfile, err := os.Open(filepath.Clean(path))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tin = append(in, file)\n\t\treturn nil\n\t})\n\n\tif werr != nil {\n\t\treturn nil, werr\n\t}\n\n\treturn in, nil\n}", "func ListFilesRecursively(dir string) (files []File, err error) {\n\tdir = filepath.Clean(dir)\n\tfiles = []File{}\n\terr = filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\tfile := File{\n\t\t\tPath: path,\n\t\t\tSize: f.Size(),\n\t\t\tMode: f.Mode(),\n\t\t\tModTime: f.ModTime(),\n\t\t\tIsDir: f.IsDir(),\n\t\t}\n\t\tif ComputeHashes {\n\t\t\th, err := hashstructure.Hash(file, nil)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfile.Hash = h\n\t\t}\n\t\tfiles = append(files, file)\n\t\treturn nil\n\t})\n\treturn\n}", "func findGoFiles(ctx context.Context, basedir string, paths chan<- string) func() error {\n\treturn func() error {\n\t\tdefer close(paths)\n\t\treturn filepath.Walk(basedir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif info == 
nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\tdirname := info.Name()\n\t\t\t\tif dirname == \".git\" || dirname == \"vendor\" || dirname == \"internal\" {\n\t\t\t\t\treturn filepath.SkipDir\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif isMainGoFile(info) {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase paths <- path:\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn ctx.Err()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func (ros RealOS) GetFiles(dir string, includeBaseFiles bool) ([]FileInfo, error) {\n\tfiles := []FileInfo{}\n\terr := filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !info.IsDir() {\n\t\t\tif !includeBaseFiles {\n\t\t\t\tif path.Dir(p) != path.Clean(dir) {\n\t\t\t\t\tfiles = append(files, File(p))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfiles = append(files, File(p))\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}", "func walk_file(path string) Entries {\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tentries := Entries{}\n\tentries.path = path\n\n\tfor _, fi := range files {\n\t\tif strings.HasPrefix(fi.Name(), \".\") || contains(ignores, fi.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\tentries.entries = append(entries.entries, walk_file(filepath.Join(path, fi.Name())))\n\t\t} else {\n\t\t\tentries.contents = append(entries.contents, Content{title: fi.Name()})\n\n\t\t}\n\t}\n\n\treturn entries\n}", "func walk() (err error) {\n if 0 == len(goPath) {\n return errUnsetGoPath\n }\n\n err = filepath.Walk(goPath, func(path string, info os.FileInfo, err error) error {\n if info.IsDir() && strings.HasSuffix(path, PACKAGE_NAME) {\n directories = append(directories, path)\n }\n\n return err\n })\n\n return\n}", "func (l Util) Walk(root string, fn func(path string, d fs.DirEntry, err error) error) {\n\tfs.WalkDir(l, l.path(root), fn)\n}", 
"func scanLocalFiles(root string) (ff []string) {\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() {\n\t\t\tff = append(ff, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"scanLocalFiles | filepath.Walk [%s]\", err)\n\t}\n\treturn\n}", "func (i *Iterator) Walk(f IteratorFunc) error {\n\tfor _, p := range i.Entries {\n\t\tif !i.AllowPath(p) {\n\t\t\tcontinue\n\t\t}\n\t\tinfo, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\terr = filepath.Walk(p, func(s string, info os.FileInfo, err error) error {\n\t\t\t\tif info.IsDir() || err != nil {\n\t\t\t\t\tif !i.AllowPath(s) {\n\t\t\t\t\t\treturn filepath.SkipDir\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !i.AllowPath(s) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\t// chop off walk root and filepath separator\n\t\t\t\treturn f(s, s[len(p)+1:], info)\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\terr = f(p, filepath.Base(p), info)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func GetFiles(searchDir, ignoreFile string, verbose bool, maxFileSize int64) (fileContext Context, err error) {\n\tignorePatterns = getIgnorePatterns(searchDir, ignoreFile, verbose)\n\tfileList := make([]scan.File, 0)\n\tvar curFile scan.File\n\terr = filepath.Walk(searchDir, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error reading directory: \", err)\n\t\t}\n\t\tif !isIgnoredFile(path, searchDir) {\n\t\t\t// Ignore the path if it's a directory\n\t\t\tpathIsDirectory, isDirErr := isDirectory(path)\n\t\t\tif !pathIsDirectory {\n\t\t\t\tif isDirErr != nil && verbose {\n\t\t\t\t\tlog.Println(\"Error checking if path is directory\")\n\t\t\t\t}\n\t\t\t\tif getFileSizeOK(path, maxFileSize) {\n\t\t\t\t\tcurFile.Name = f.Name()\n\t\t\t\t\tcurFile.Path = path\n\t\t\t\t\tfileList = append(fileList, 
curFile)\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Println(\"Reading file \", curFile.Path)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfileContext.SkippedFiles = append(fileContext.SkippedFiles, path)\n\t\t\t\t\tif verbose {\n\t\t\t\t\t\tlog.Println(\"Ignoring\", path, \". Filesize is too large.\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfileContext.SkippedFiles = append(fileContext.SkippedFiles, path)\n\t\t\tif verbose {\n\t\t\t\tlog.Println(\"Ignoring\", path, \". File blacklisted.\")\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn fileContext, err\n\t}\n\n\tvar compressList, convertList []scan.File\n\tcompressList, fileList = separateCompressedAndUncompressed(fileList)\n\tcompressList, fileContext.CompressPaths, err = GetCompressedFiles(compressList, searchDir) //Get the files within our compressed list\n\tif err != nil {\n\t\treturn fileContext, err\n\t}\n\tfileContext.Files = append(fileList, compressList...)\n\tconvertList, fileContext.ConvertPaths = GetConvertedFiles(fileContext.Files) //Get the files that need to be converted and convert them to plaintext\n\tfileContext.Files = append(fileContext.Files, convertList...)\n\tfileContext.IgnorePatterns = ignorePatterns\n\treturn fileContext, nil\n}", "func (s *Synchronizer) traverseFiles(filesChan contracts.FilesChan, sync contracts.SyncChan) {\n\troot, err := s.fr.GetRootFolder()\n\tif nil != err {\n\t\ts.log.Error(\"Error getting root folder.\", err)\n\t\tclose(filesChan)\n\t\treturn\n\t}\n\troot.PrevPath = root.PrevRemoteName\n\troot.CurPath = root.CurRemoteName\n\tfilesChan <- root\n\t<-sync\n\n\tif err = s.getFilesByParentRecursively(root.Id, filesChan, sync); err != nil {\n\t\ts.log.Error(\"Error getting files by parent\", err)\n\t\tclose(filesChan)\n\t\treturn\n\t}\n\tclose(filesChan)\n}", "func walkDir(root string, dirFunc func(string) error) error {\n\treturn filepath.Walk(root, func(subPath string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tif !info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\treturn dirFunc(subPath)\n\t})\n}", "func ScanFiles(dir, regex string) (files []string) {\n\tlog.Logger.Debug(\"ScanFiles...\", zap.String(\"dir\", dir), zap.String(\"regex\", regex))\n\n\tif err := filepath.Walk(dir, func(fname string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"scan files got error for dir %v\", dir)\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif ok, err := IsFileReadyToUpload(regex, info.Name(), time.Now()); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Check file name error\")\n\t\t} else if !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tabsPath, err := filepath.Abs(filepath.Join(dir, info.Name()))\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"get absolute file path error\")\n\t\t}\n\t\tfiles = append(files, absPath)\n\t\treturn nil\n\t}); err != nil {\n\t\tlog.Logger.Error(\"scan files got error\", zap.Error(err))\n\t}\n\n\treturn\n}", "func FindFiles(searchPath string, log *zap.SugaredLogger) []string {\n\n\tvar result []string\n\n\t// this function will handle each object inside the Walk()\n\tvar searchFunc = func(pathX string, infoX os.FileInfo, errX error) error {\n\n\t\t// check for errors\n\t\tif errX != nil {\n\t\t\t//log.Warnw(\"FindFiles error\",\n\t\t\t//\t\"path\", pathX,\n\t\t\t//\t\"err\", errX,\n\t\t\t//)\n\t\t\treturn errX\n\t\t}\n\n\t\tif common.IsFile(pathX, log) {\n\t\t\tlog.Debugw(\"FindFiles found file\",\n\t\t\t\t\"fileName\", infoX.Name(),\n\t\t\t)\n\n\t\t\t// TODO more expressive way to ignore certain files (in git) that users may want.. 
eg helm charts\n\t\t\text := filepath.Ext(pathX)\n\t\t\tswitch ext {\n\t\t\tcase \".yml\":\n\t\t\t\tresult = append(result, pathX)\n\t\t\tcase \".yaml\":\n\t\t\t\tresult = append(result, pathX)\n\t\t\tdefault:\n\t\t\t\tlog.Warnw(\"file not yaml, ignoring\",\n\t\t\t\t\t\"path\", pathX)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\trealPath := common.GetFileAbsPath(searchPath, log)\n\terr := filepath.Walk(realPath, searchFunc)\n\n\tif err != nil {\n\t\tlog.Debugw(\"file error\",\n\t\t\t\"path\", searchPath,\n\t\t\t\"error\", err,\n\t\t)\n\t}\n\n\treturn result\n}", "func walkDir(dname string) []string {\n\t//list for all found files0\n\tvar fileList []string\n\t// walk files\n\tfilepath.Walk(dname, func(path string, f os.FileInfo, err error) error {\n\t\tif err == nil {\n\t\t\tif f.Mode().IsRegular() {\n\t\t\t\tfileList = append(fileList, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn fileList\n}", "func Walk(root string, walkFn filepath.WalkFunc) error {\n\tif start, err := os.Open(root); err != nil {\n\t\treturn walkFn(root, nil, err)\n\t} else {\n\t\tinfo := unixFileInfo{name: path.Base(root)}\n\t\tif err := fstat(int(start.Fd()), &info.sys); err != nil {\n\t\t\tstart.Close()\n\t\t\tinfo.fill()\n\t\t\treturn walkFn(root, &info, err)\n\t\t}\n\t\tinfo.fill()\n\t\tif err := walkFn(root, &info, nil); err == filepath.SkipDir {\n\t\t\tstart.Close()\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\tstart.Close()\n\t\t\treturn err\n\t\t} else if info.IsDir() {\n\t\t\treturn walkInternal(root, start, walkFn)\n\t\t} else {\n\t\t\tstart.Close()\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func walkDir(\n\tdone <-chan interface{},\n\tsrc string,\n\tsrcExts []string,\n) (<-chan string, <-chan error) {\n\tpathChan := make(chan string)\n\terrChan := make(chan error, 1)\n\n\tgo func() {\n\t\t// Close the paths channel after Walk returns.\n\t\tdefer close(pathChan)\n\n\t\terrChan <- filepath.Walk(src, func(path string, f os.FileInfo, err error) error {\n\t\t\tisFileSupported := 
false\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !f.Mode().IsRegular() {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Get the file base name.\n\t\t\tfx := filepath.Ext(f.Name())\n\t\t\tfor _, ext := range srcExts {\n\t\t\t\tif ext == fx {\n\t\t\t\t\tisFileSupported = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isFileSupported {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\treturn errors.New(\"directory walk cancelled\")\n\t\t\t\tcase pathChan <- path:\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}()\n\treturn pathChan, errChan\n}", "func walkDir(root string, n *node, fs chan<- map[string]int64) {\n\tdefer n.wg.Done()\n\tfor _, entry := range dirents(n.path) {\n\t\tif entry.IsDir() {\n\t\t\tsubdir := filepath.Join(n.path, entry.Name())\n\t\t\tn := &node{path: subdir, wg: n.wg}\n\t\t\tn.wg.Add(1)\n\t\t\tgo walkDir(root, n, fs)\n\t\t} else {\n\t\t\tfs <- map[string]int64{root: entry.Size()}\n\t\t}\n\t}\n}", "func scanDirRec(rootDir string) error {\n\n\terr := filepath.Walk(rootDir, fileWalker)\n\tif err != nil {\n\t\treturn errors.New(err.Error())\n\t}\n\treturn nil\n}", "func WalkMatch(root, pattern string) ([]string, error) {\n\tvar matches []string\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {\n\t\t\treturn err\n\t\t} else if matched {\n\t\t\tmatches = append(matches, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn matches, nil\n}", "func (f *Finder) FindFiles(searchDir string) []Element {\n\tfilepath.Walk(searchDir, func(path string, file os.FileInfo, _ error) error {\n\t\tif f.NeedToExclude(file) {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tif !f.IsValidName(path) {\n\t\t\treturn nil\n\t\t}\n\n\t\t// if is a file valid add in list\n\t\tf.list = append(f.list, 
*NewElement(path, file, len(f.list)))\n\n\t\treturn nil\n\t})\n\n\treturn f.list\n}", "func Scan(startDir string, inspector Inspector) (*DirScan, error) {\n\t// Are we scanning a directory?\n\tstat, err := os.Stat(startDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !stat.Mode().IsDir() {\n\t\treturn nil, errors.New(startDir + \" is not a directory\")\n\t}\n\n\t// Setup default if not specified\n\tif inspector == nil {\n\t\tinspector = DefaultInspector{}\n\t}\n\n\t// Track results of directory scan\n\td := DirScan{Name: startDir}\n\n\t// Closure to walk directory\n\twalkFn := func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Do we want to traverse this directory?\n\t\tif info.IsDir() {\n\t\t\terr := inspector.acceptDir(path, info)\n\n\t\t\tif err != nil {\n\t\t\t\tinfof(\"Ignoring directory %s\", path)\n\t\t\t\td.IgnoredDirCount++\n\t\t\t} else if path != startDir { // Don't count the initial directory\n\t\t\t\td.DirCount++\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tif inspector.acceptFile(path, info) {\n\t\t\t// Found a file to process\n\t\t\td.FileCount++\n\n\t\t\tif d.FileCount % 500 == 0 {\n\t\t\t\tdebugf(\"Found %d files\", d.FileCount)\n\t\t\t}\n\n\t\t\t// Will need file path relative to the starting directory\n\t\t\trelPath, err := filepath.Rel(startDir, path)\n\t\t\tif err != nil {\n\t\t\t\tinfof(\"Unable to calculate relative path for file %s\", path)\n\t\t\t}\n\n\t\t\t// Perform optional further processing of file\n\t\t\tfile := inspector.process(&File{Name: relPath, AbsolutePath: path, Size: info.Size(), Metadata: make(map[string]string)})\n\n\t\t\t// Update directory scan results\n\t\t\td.Files = append(d.Files, file)\n\t\t} else {\n\t\t\tdebugf(\"Ignoring file %s\", path)\n\t\t\td.IgnoredFileCount++\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tinfof(\"Scanning %s\", startDir)\n\n\tstart := time.Now()\n\terr = filepath.Walk(startDir, walkFn)\n\telapsed := time.Since(start)\n\n\tinfof(\"Found %d 
files in %s (ignored %d directories, %d files)\", d.FileCount, elapsed, d.IgnoredDirCount, d.IgnoredFileCount)\n\n\treturn &d, err\n}", "func WalkDirRecurse(searchDir string, walkFn walkFunc) ([]string, error) {\n\twalkedDirs := []string{}\n\ttoBeWalked, err := filepath.Glob(path.Join(searchDir, \"*\"))\n\tif err != nil {\n\t\treturn walkedDirs, err\n\t}\n\n\tvar errs []error\n\t// call walkFn for every sub folder traversed (deepest sub-folder first)\n\tfor _, walkPath := range toBeWalked {\n\t\tfi, err := os.Stat(walkPath)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tsuccessSubs, err2 := WalkDirRecurse(walkPath, walkFn)\n\t\twalkedDirs = append(walkedDirs, successSubs...)\n\t\tif err2 != nil {\n\t\t\treturn walkedDirs, err2\n\t\t}\n\t\tif walkFn(walkPath) {\n\t\t\twalkedDirs = append(walkedDirs, walkPath)\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\terr = fmt.Errorf(\"err stat: %v\", errs)\n\t}\n\n\treturn walkedDirs, err\n}", "func walkDir(dir string, matchPatterns stringSlice, excludeDirs stringSlice, n *sync.WaitGroup, fileSizes chan<- int64, fileNames chan<- string) {\n\tdefer n.Done()\n\n\tfor _, entry := range dirents(dir) {\n\t\t// If Entry Is Directory And Not In excludedDirs Recursively Walk It\n\t\tif entry.IsDir() && contains(excludeDirs, entry.Name()) == false {\n\t\t\tn.Add(1)\n\t\t\tsubdir := filepath.Join(dir, entry.Name())\n\t\t\tgo walkDir(subdir, matchPatterns, excludeDirs, n, fileSizes, fileNames)\n\t\t} else {\n\t\t\t// If Entry Is Not A Directory, Test For Pattern Match. 
Exclude Files with Size 0\n\t\t\t// Those don't need to be decoded.\n\t\t\tfor _, pattern := range matchPatterns {\n\t\t\t\tif match, _ := filepath.Match(pattern, entry.Name()); match == true && entry.Size() > 0 {\n\t\t\t\t\tfileSizes <- entry.Size()\n\t\t\t\t\tfileNames <- filepath.Join(dir, entry.Name())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func walker(files *[]string, excludedDirNames stringList) filepath.WalkFunc {\n return func(path string, info os.FileInfo, err error) error {\n if err != nil {\n log.Fatal(\"ERROR: rx error on the walk function err param:\", err)\n }\n\n if info.IsDir() {\n\n // *nix hidden dir should be skipped completely\n // Not accounting for any Windows stuff in this app.\n if info.Name()[0:1] == \".\" {\n // fmt.Println(\"## --skipping hidden dir path: \", path, \", filename: \", info.Name())\n\n // skip the directory and do not descend into it\n return filepath.SkipDir\n }\n\n // If this is an excluded directory name, skip it and don't\n // decend any further.\n for _, dirName := range excludedDirNames {\n if dirName == info.Name() {\n return filepath.SkipDir\n }\n }\n\n // fmt.Println(\"## --skipping dir path: \", path, \", filename: \", info.Name())\n\n // skip the directory file but go ahead and descend into the directory\n return nil\n }\n\n // skip hidden files\n if info.Name()[0:1] == \".\" {\n return nil\n }\n\n // Add this file to the found file list\n *files = append(*files, path)\n\n return nil\n }\n}", "func FilePathWalker(pattern string) []string {\n\n\tvar files []string\n\tvar res []string\n\n\terr := filepath.Walk(\".\", func(path string, info os.FileInfo, err error) error {\n\t\tfiles = append(files, path)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, file := range files {\n\t\t_, fileName := filepath.Split(file)\n\n\t\tif fileName == pattern {\n\t\t\tres = append(res, file)\n\t\t}\n\n\t}\n\treturn res\n}", "func findDirPathsRecursively(dirPath string, ignoreDirs []string) (dirs []string) 
{\n\terr := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {\n\t\tfor _, ignoreDir := range ignoreDirs {\n\t\t\tif strings.HasPrefix(path, ignoreDir) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tdirs = append(dirs, path)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tpanic(\"Unable to automatically register types due to a directory reading error: \" + err.Error())\n\t}\n\n\treturn\n}", "func collectFiles(dir string, extensions []string) ([]string, error) {\n\n\tfiles := []string{}\n\n\terr := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error {\n\t\t// If we have an err pass it up\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Deal with files only\n\t\tif !info.IsDir() {\n\t\t\t// Check for go files\n\t\t\tname := filepath.Base(file)\n\t\t\tif !strings.HasPrefix(name, \".\") && strings.HasSuffix(name, \".go\") {\n\t\t\t\tfiles = append(files, file)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn files, err\n\t}\n\n\treturn files, nil\n\n}", "func walkDir(root, dir string, n *sync.WaitGroup, fileSizes chan<- size) {\n\tdefer n.Done()\n\tfor _, entry := range dirents(dir) {\n\t\tif entry.IsDir() {\n\t\t\tn.Add(1)\n\t\t\tsubdir := filepath.Join(dir, entry.Name())\n\t\t\tgo walkDir(root, subdir, n, fileSizes)\n\t\t} else {\n\t\t\tfileSizes <- size{\n\t\t\t\troot: root,\n\t\t\t\tsz: entry.Size(),\n\t\t\t}\n\t\t}\n\t}\n}", "func watchDirAndChildren(w *fsnotify.Watcher, path string, depth int) error {\n\tif err := w.Watch(path); err != nil {\n\t\treturn err\n\t}\n\tbaseNumSeps := strings.Count(path, string(os.PathSeparator))\n\treturn filepath.Walk(path, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\tpathDepth := strings.Count(path, string(os.PathSeparator)) - baseNumSeps\n\t\t\tif pathDepth > depth {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tif opts.Verbose {\n\t\t\t\tfmt.Fprintln(os.Stderr, \"Watching\", 
path)\n\t\t\t}\n\t\t\tif err := w.Watch(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func FindFilesIn(dirname string) []string {\n\tvar list []string\n\twalkFunc := func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tp, err := filepath.Rel(dirname, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlist = append(list, p)\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t}\n\n\t_ = filepath.Walk(dirname, walkFunc)\n\treturn list\n}", "func (w *Walker) TraverseDirectory() {\n\t_l := metrics.StartLogDiff(\"traverse-directory\")\n\n\t// option --prefix makes the directory walk shorter.\n\tif w.prefix != \"\" {\n\t\tw.dirPath = filepath.Join(w.dirPath, w.prefix)\n\t}\n\n\t// Walk all files in directory\n\tfilepath.Walk(w.dirPath, w.processFile)\n\n\tmetrics.StopLogDiff(\"traverse-directory\", _l)\n}", "func addRecursiveWatch(watcher *fsnotify.Watcher, rootPath string, path string, ignores []string) error {\n\n\tfsys := filesystem.DefaultFs{}\n\n\tfile, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"error introspecting path %s: %v\", path, err)\n\t}\n\n\tignoreMatcher := gitignore.CompileIgnoreLines(ignores...)\n\n\tmode := file.Mode()\n\tif mode.IsRegular() {\n\t\tvar rel string\n\t\trel, err = filepath.Rel(rootPath, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmatched := ignoreMatcher.MatchesPath(rel)\n\t\tif !matched {\n\t\t\tklog.V(4).Infof(\"adding watch on path %s\", path)\n\n\t\t\t// checking if the file exits before adding the watcher to it\n\t\t\tif !util.CheckPathExists(fsys, path) {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\terr = watcher.Add(path)\n\t\t\tif err != nil {\n\t\t\t\tklog.V(4).Infof(\"error adding watcher for path %s: %v\", path, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tfolders := []string{}\n\terr = filepath.Walk(path, func(newPath 
string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\t// Ignore the error if it's a 'path does not exist' error, no need to walk a non-existent path\n\t\t\tif !util.CheckPathExists(fsys, newPath) {\n\t\t\t\tklog.V(4).Infof(\"Walk func received an error for path %s, but the path doesn't exist so this is likely not an error. err: %v\", path, err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unable to walk path: %s: %w\", newPath, err)\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\t// If the current directory matches any of the ignore patterns, ignore them so that their contents are also not ignored\n\t\t\trel, err := filepath.Rel(rootPath, newPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmatched := ignoreMatcher.MatchesPath(rel)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to addRecursiveWatch on %s: %w\", newPath, err)\n\t\t\t}\n\t\t\tif matched {\n\t\t\t\tklog.V(4).Infof(\"ignoring watch on path %s\", newPath)\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\t// Append the folder we just walked on\n\t\t\tfolders = append(folders, newPath)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, folder := range folders {\n\n\t\trel, err := filepath.Rel(rootPath, folder)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmatched := ignoreMatcher.MatchesPath(rel)\n\n\t\tif matched {\n\t\t\tklog.V(4).Infof(\"ignoring watch for %s\", folder)\n\t\t\tcontinue\n\t\t}\n\n\t\t// checking if the file exits before adding the watcher to it\n\t\tif !util.CheckPathExists(fsys, path) {\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(4).Infof(\"adding watch on path %s\", folder)\n\t\terr = watcher.Add(folder)\n\t\tif err != nil {\n\t\t\t// Linux \"no space left on device\" issues are usually resolved via\n\t\t\t// $ sudo sysctl fs.inotify.max_user_watches=65536\n\t\t\t// BSD / OSX: \"too many open files\" issues are ussualy resolved via\n\t\t\t// $ sysctl variables \"kern.maxfiles\" and 
\"kern.maxfilesperproc\",\n\t\t\tklog.V(4).Infof(\"error adding watcher for path %s: %v\", folder, err)\n\t\t}\n\t}\n\treturn nil\n}", "func GetFileScanners(rootpath string) (scanners []*bufio.Scanner, funcerr error) {\n\tfuncerr = filepath.Walk(rootpath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tif info.IsDir() == true {\n\t\t\treturn nil\n\t\t}\n\t\tf, _ := os.Open(path)\n\t\tscanner := bufio.NewScanner(f)\n\t\tscanners = append(scanners, scanner)\n\t\treturn nil\n\t})\n\treturn\n}", "func walk(path string, precedence []string, info os.FileInfo, walkFn filepath.WalkFunc) error {\n\tif !info.IsDir() {\n\t\treturn walkFn(path, info, nil)\n\t}\n\n\tnames, err := readDirNames(path)\n\terr1 := walkFn(path, info, err)\n\t// If err != nil, walk can't walk into this directory.\n\t// err1 != nil means walkFn want walk to skip this directory or stop walking.\n\t// Therefore, if one of err and err1 isn't nil, walk will return.\n\tif err != nil || err1 != nil {\n\t\t// The caller's behavior is controlled by the return value, which is decided\n\t\t// by walkFn. 
walkFn may ignore err and return nil.\n\t\t// If walkFn returns SkipDir, it will be handled by the caller.\n\t\t// So walk should return whatever walkFn returns.\n\t\treturn err1\n\t}\n\n\tpreferredNames := applyPrecdence(path, names, precedence)\n\n\tfor _, name := range preferredNames {\n\t\tfilename := filepath.Join(path, name)\n\t\tfileInfo, err := os.Lstat(filename)\n\t\tif err != nil {\n\t\t\tif err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = walk(filename, precedence, fileInfo, walkFn)\n\t\t\tif err != nil {\n\t\t\t\tif !fileInfo.IsDir() || err != filepath.SkipDir {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func findUntrackedFilesFromDir(c *Client, opts LsFilesOptions, root, parent, dir File, tracked map[IndexPath]bool, recursedir bool, ignorePatterns []IgnorePattern) (untracked []*IndexEntry) {\n\tfiles, err := ioutil.ReadDir(dir.String())\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, ignorefile := range opts.ExcludePerDirectory {\n\t\tignoreInDir := ignorefile\n\t\tif dir != \"\" {\n\t\t\tignoreInDir = dir + \"/\" + ignorefile\n\t\t}\n\n\t\tif ignoreInDir.Exists() {\n\t\t\tlog.Println(\"Adding excludes from\", ignoreInDir)\n\n\t\t\tpatterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tignorePatterns = append(ignorePatterns, patterns...)\n\t\t}\n\t}\nfiles:\n\tfor _, fi := range files {\n\t\tfname := File(fi.Name())\n\t\tif fi.Name() == \".git\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, pattern := range ignorePatterns {\n\t\t\tvar name File\n\t\t\tif parent == \"\" {\n\t\t\t\tname = fname\n\t\t\t} else {\n\t\t\t\tname = parent + \"/\" + fname\n\t\t\t}\n\t\t\tif pattern.Matches(name.String(), fi.IsDir()) {\n\t\t\t\tcontinue files\n\t\t\t}\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\tif !recursedir {\n\t\t\t\t// This isn't very efficient, but lets us implement git ls-files 
--directory\n\t\t\t\t// without too many changes.\n\t\t\t\tindexPath, err := (parent + \"/\" + fname).IndexPath(c)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tdirHasTracked := false\n\t\t\t\tfor path := range tracked {\n\t\t\t\t\tif strings.HasPrefix(path.String(), indexPath.String()) {\n\t\t\t\t\t\tdirHasTracked = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !dirHasTracked {\n\t\t\t\t\tif opts.Directory {\n\t\t\t\t\t\tif opts.NoEmptyDirectory {\n\t\t\t\t\t\t\tif files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tindexPath += \"/\"\n\t\t\t\t\t}\n\t\t\t\t\tuntracked = append(untracked, &IndexEntry{PathName: indexPath})\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar newparent, newdir File\n\t\t\tif parent == \"\" {\n\t\t\t\tnewparent = fname\n\t\t\t} else {\n\t\t\t\tnewparent = parent + \"/\" + fname\n\t\t\t}\n\t\t\tif dir == \"\" {\n\t\t\t\tnewdir = fname\n\t\t\t} else {\n\t\t\t\tnewdir = dir + \"/\" + fname\n\t\t\t}\n\n\t\t\trecurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)\n\t\t\tuntracked = append(untracked, recurseFiles...)\n\t\t} else {\n\t\t\tvar filePath File\n\t\t\tif parent == \"\" {\n\t\t\t\tfilePath = File(strings.TrimPrefix(fname.String(), root.String()))\n\n\t\t\t} else {\n\t\t\t\tfilePath = File(strings.TrimPrefix((parent + \"/\" + fname).String(), root.String()))\n\t\t\t}\n\t\t\tindexPath, err := filePath.IndexPath(c)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tindexPath = IndexPath(filePath)\n\n\t\t\tif _, ok := tracked[indexPath]; !ok {\n\t\t\t\tuntracked = append(untracked, &IndexEntry{PathName: indexPath})\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func walkDir(dir string, fileSizes chan<- int64) {\n\tfor _, entry := range dirents(dir) {\n\t\tif entry.IsDir() {\n\t\t\tsubdir := filepath.Join(dir, entry.Name())\n\t\t\twalkDir(subdir, 
fileSizes)\n\t\t} else {\n\t\t\tfileSizes <- entry.Size()\n\t\t}\n\t}\n}", "func walkMatch(root, pattern string) ([]string, []string, error) {\r\n\tvar matches []string\r\n\tvar matchesRaw []string\r\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tif info.IsDir() {\r\n\t\t\treturn nil\r\n\t\t}\r\n\t\tif matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {\r\n\t\t\treturn err\r\n\t\t} else if matched {\r\n\t\t\tmatches = append(matches, path)\r\n\t\t\tmatchesRaw = append(matchesRaw, filepath.Base(path))\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\tif err != nil {\r\n\t\treturn nil, nil, err\r\n\t}\r\n\treturn matches, matchesRaw, nil\r\n}", "func (j *juiceFS) walk(path string, info *fs.FileStat, isSymlink bool, walkFn WalkFunc) syscall.Errno {\n\terr := walkFn(path, info, isSymlink, 0)\n\tif err != 0 {\n\t\tif info.IsDir() && err == skipDir {\n\t\t\treturn 0\n\t\t}\n\t\treturn err\n\t}\n\n\tif !info.IsDir() {\n\t\treturn 0\n\t}\n\n\tentries, err := j.readDirSorted(path)\n\tif err != 0 {\n\t\treturn walkFn(path, info, isSymlink, err)\n\t}\n\n\tfor _, e := range entries {\n\t\tp := path + e.name\n\t\terr = j.walk(p, e.fi, e.isSymlink, walkFn)\n\t\tif err != 0 && err != skipDir && err != syscall.ENOENT {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn 0\n}", "func (p *FileMap) loadFilesRecursively(cwd string) error {\n\tfileList, err := ioutil.ReadDir(cwd)\n\n\tif err != nil {\n\t\terr = StringError{s: \"ERROR: Can't open \\\"\" + cwd + \"\\\" directory!\"}\n\t\treturn err\n\t}\n\n\tfor _, f := range fileList {\n\t\tfileName := f.Name()\n\n\t\tif f.IsDir() {\n\t\t\terr := p.loadFilesRecursively(cwd + fileName + \"/\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tbaseName, ext := getBaseAndExt(fileName)\n\n\t\t\t_, err := p.load(cwd+baseName, ext)\n\t\t\tif err != nil 
{\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Println(\"INFO: Loaded file: \" + cwd + filepath.Base(fileName))\n\t\t}\n\t}\n\treturn nil\n}", "func (this *SearchDir) SearchDirs(root, pattern string) {\n this.search(root, pattern, false, true)\n}", "func (f *FakeFileSystem) Walk(root string, walkFn filepath.WalkFunc) error {\n\treturn filepath.Walk(root, walkFn)\n}", "func FilePathWalker(root string, index *Index, hasher PathHasher) filepath.WalkFunc {\n\treturn func(path string, f os.FileInfo, err error) error {\n\t\tif !f.IsDir() {\n\t\t\tdoLog(\"Add in file to index: %s\", path)\n\t\t\tkey := normalisePath(path)\n\t\t\tif root != \"\" {\n\t\t\t\tkey = fmt.Sprintf(\"%s/%s\", root, key)\n\t\t\t}\n\t\t\thash, errHash := hasher(path)\n\t\t\tif err != nil {\n\t\t\t\treturn errHash\n\t\t\t}\n\t\t\tindex.Files[path] = Sourcefile{\n\t\t\t\tKey: key,\n\t\t\t\tHash: hash,\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n}", "func (fs *FileSystem) Walk(root string, fn func(string, os.FileInfo, error) error) error {\n\n\tdir, filename := fs.cleanPath(root)\n\tparent, node := fs.loadParentChild(dir, filename)\n\troot = filepath.Join(dir, filename)\n\tif node == nil {\n\t\tnode = parent\n\t}\n\n\tif !node.IsDir() {\n\t\treturn fn(root, inodeinfo{root, node}, nil)\n\t}\n\tino := node.Ino\n\n\tvar recurse func(string, uint64) error\n\n\treturn fs.db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"inodes\"))\n\n\t\trecurse = func(path string, ino uint64) error {\n\t\t\tnode := new(iNode)\n\t\t\terr := decodeNode(b, ino, node)\n\n\t\t\terr = fn(path, inodeinfo{filepath.Base(path), node}, err)\n\n\t\t\tif err != nil {\n\t\t\t\tif err == walkpath.SkipDir {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif node == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfor _, child := range node.Children {\n\t\t\t\terr := recurse(filepath.Join(path, child.Name), child.Ino)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn recurse(root, ino)\n\t})\n}", "func walkDirs(done <-chan struct{}, root string) (<-chan string, <-chan error) {\n\tdirs := make(chan string)\n\terrc := make(chan error, 1)\n\tgo func() { // HL\n\t\t// Close the paths channel after Walk returns.\n\t\tdefer close(dirs) // HL\n\t\t// No select needed for this send, since errc is buffered.\n\t\terrc <- filepath.Walk(root, func(dir string, info os.FileInfo, err error) error { // HL\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase dirs <- dir:\n\t\t\tcase <-done:\n\t\t\t\treturn errors.New(\"walk canceled\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}()\n\treturn dirs, errc\n}", "func WalkPathAndUploadFindings(root, dname string) {\n\tdrivename = dname\n\terr := filepath.Walk(root, walk)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\treturn\n}", "func walkTree(workspace string) ([]*finalizeFileInfo, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get pwd: %v\", err)\n\t}\n\t// make everything relative to the workspace\n\t_ = os.Chdir(workspace)\n\tdirMap := make(map[string]*finalizeFileInfo)\n\tfileList := make([]*finalizeFileInfo, 0)\n\tvar entry *finalizeFileInfo\n\t_ = filepath.Walk(\".\", func(fp string, fi os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tisRoot := fp == \".\"\n\t\tname := fi.Name()\n\t\tm := fi.Mode()\n\t\tvar fType fileType\n\t\tswitch {\n\t\tcase m&os.ModeSocket == os.ModeSocket:\n\t\t\tfType = fileSocket\n\t\tcase m&os.ModeSymlink == os.ModeSymlink:\n\t\t\tfType = fileSymlink\n\t\tcase m&os.ModeNamedPipe == os.ModeNamedPipe:\n\t\t\tfType = fileFifo\n\t\tcase m&os.ModeDir == os.ModeDir:\n\t\t\tfType = fileDirectory\n\t\tcase m&os.ModeDevice == os.ModeDevice && m&os.ModeCharDevice == os.ModeCharDevice:\n\t\t\tfType = fileChar\n\t\tcase m&os.ModeDevice == os.ModeDevice && 
m&os.ModeCharDevice != os.ModeCharDevice:\n\t\t\tfType = fileBlock\n\t\tdefault:\n\t\t\tfType = fileRegular\n\t\t}\n\t\txattrNames, err := xattr.List(fp)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to list xattrs for %s: %v\", fp, err)\n\t\t}\n\t\txattrs := map[string]string{}\n\t\tfor _, name := range xattrNames {\n\t\t\tval, err := xattr.Get(fp, name)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to get xattr %s for %s: %v\", name, fp, err)\n\t\t\t}\n\t\t\txattrs[name] = string(val)\n\t\t}\n\t\tnlink, uid, gid := getFileProperties(fi)\n\n\t\tentry = &finalizeFileInfo{\n\t\t\tpath: fp,\n\t\t\tname: name,\n\t\t\tisDir: fi.IsDir(),\n\t\t\tisRoot: isRoot,\n\t\t\tmodTime: fi.ModTime(),\n\t\t\tmode: m,\n\t\t\tfileType: fType,\n\t\t\tsize: fi.Size(),\n\t\t\txattrs: xattrs,\n\t\t\tuid: uid,\n\t\t\tgid: gid,\n\t\t\tlinks: nlink,\n\t\t}\n\n\t\t// we will have to save it as its parent\n\t\tparentDir := filepath.Dir(fp)\n\t\tparentDirInfo := dirMap[parentDir]\n\n\t\tif fi.IsDir() {\n\t\t\tentry.children = make([]*finalizeFileInfo, 0, 20)\n\t\t\tdirMap[fp] = entry\n\t\t} else {\n\t\t\t// calculate blocks\n\t\t\tentry.size = fi.Size()\n\t\t}\n\t\tif !isRoot {\n\t\t\tparentDirInfo.children = append(parentDirInfo.children, entry)\n\t\t\tdirMap[parentDir] = parentDirInfo\n\t\t}\n\t\tfileList = append(fileList, entry)\n\t\treturn nil\n\t})\n\t// reset the workspace\n\t_ = os.Chdir(cwd)\n\n\treturn fileList, nil\n}", "func (fd *finder) findAllGoFiles(dir string) ([]string, error) {\n\tvar err error\n\tvar names []string\n\toncer.Do(fd.key(\"findAllGoFiles\", dir), func() {\n\t\tplog.Debug(fd, \"findAllGoFiles\", \"dir\", dir)\n\n\t\tcallback := func(path string, do *godirwalk.Dirent) error {\n\t\t\text := filepath.Ext(path)\n\t\t\tif ext != \".go\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//check if path is a dir\n\t\t\tfi, err := os.Stat(path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif fi.IsDir() {\n\t\t\t\treturn 
nil\n\t\t\t}\n\n\t\t\tnames = append(names, path)\n\t\t\treturn nil\n\t\t}\n\t\terr = godirwalk.Walk(dir, &godirwalk.Options{\n\t\t\tFollowSymbolicLinks: true,\n\t\t\tCallback: callback,\n\t\t})\n\t})\n\n\treturn names, err\n}", "func (c *walkerContext) directoryWalker(path string, info os.FileInfo, err error) error {\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.setCurrentDir(path)\n\n\tif info.IsDir() {\n\t\t// Don't add the base directory as a sub directory. Otherwise our base\n\t\t// directory has itself as a sub directory in it's list of sub directories.\n\t\tif c.baseDir != path {\n\t\t\tc.addSubdir(path, info)\n\t\t}\n\t} else {\n\t\tc.addFile(path, info)\n\t}\n\n\treturn nil\n}", "func Walk(startPath string, filterFn func(p string, info os.FileInfo) bool, outputFn func(s string, info os.FileInfo) error) error {\n\terr := filepath.Walk(startPath, func(p string, info os.FileInfo, err error) error {\n\t\t// Are we interested in this path?\n\t\tif filterFn(p, info) == true {\n\t\t\t// Yes, so send to output function.\n\t\t\tif err := outputFn(p, info); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn err\n}", "func GetAllFiles(target string) ([]string, error) {\n\tfileList := []string{}\n\n\t// the following tw lines adds the \"/\" to the end of path\n\t// this is helpful to create relative path\n\ttarget = filepath.Join(target, \"/\")\n\ttarget = target + \"/\"\n\n\terr := filepath.Walk(target, func(path string, f os.FileInfo, err error) error {\n\t\tif !f.IsDir() {\n\t\t\tfileList = append(fileList, strings.Replace(path, target, \"\", -1))\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fileList, nil\n}", "func FindFiles(dir string, r *regexp.Regexp) ([]os.FileInfo, error) {\n\tvar result []os.FileInfo\n\tfor _, f := range ScanDir(dir) {\n\t\tif r.MatchString(f.Name()) {\n\t\t\tresult = append(result, f)\n\t\t}\n\t}\n\treturn result, nil\n}" ]
[ "0.6946612", "0.67929393", "0.67067945", "0.6680786", "0.6658487", "0.65556496", "0.6537987", "0.6537987", "0.6511727", "0.6485101", "0.6457912", "0.641355", "0.641266", "0.63754463", "0.6352275", "0.62927413", "0.6269232", "0.6261703", "0.625812", "0.61913836", "0.617507", "0.61704725", "0.6164544", "0.6162656", "0.6152244", "0.6150203", "0.6118253", "0.6108125", "0.6107578", "0.6089666", "0.6047669", "0.60427153", "0.6039247", "0.60187835", "0.6005788", "0.5999122", "0.59947616", "0.59947616", "0.59947616", "0.59947616", "0.5982941", "0.59783024", "0.5974076", "0.5942681", "0.59407806", "0.59323657", "0.5922522", "0.58576393", "0.58537394", "0.58531934", "0.58375484", "0.58293444", "0.5819022", "0.5809378", "0.5805899", "0.58048344", "0.575993", "0.57554513", "0.5718126", "0.57164735", "0.57139134", "0.5700653", "0.5694658", "0.56919587", "0.5676319", "0.56499135", "0.5630954", "0.5596812", "0.5595941", "0.5588044", "0.55873847", "0.55828315", "0.55775857", "0.55766845", "0.5573861", "0.556916", "0.5567618", "0.55644894", "0.5546106", "0.55083877", "0.5499413", "0.5470587", "0.5470571", "0.5462877", "0.546022", "0.5452052", "0.54416335", "0.54415476", "0.5436077", "0.5432899", "0.54324055", "0.54310083", "0.5419754", "0.5419745", "0.5415044", "0.5398609", "0.5393208", "0.5381693", "0.5381128", "0.53786963" ]
0.67595935
2
TODO implement (and update the Solutions list, above) nolint:deadcode,unused //golangcilint Delete Middle Node: Implement an algorithm to delete a node in the middle (i.e., any node but the first and last node, not necessarily the exact middle) of a singly linked list, given only access to that node. EXAMPLE Input: the node c from the linked list a>b>c>d>e>f Result: nothing is returned, but the new linked list looks like a>b>d>e>f Hints (Cracking the Coding Interview 6): 72
func removeMiddle1(node *ds.Node) error { return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RunRemoveMid() {\n\troot := ListNode{\n\t\tVal: 1,\n\t\tNext: &ListNode{\n\t\t\tVal: 2,\n\t\t\tNext: &ListNode{\n\t\t\t\tVal: 3,\n\t\t\t\tNext: &ListNode{\n\t\t\t\t\tVal: 4,\n\t\t\t\t\tNext: &ListNode{\n\t\t\t\t\t\tVal: 5,\n\t\t\t\t\t\tNext: nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tremoveNthFromEnd(&root, 2)\n}", "func middleNodeBrute(head *ListNode) *ListNode {\n\t// brute force\n\tnode := head\n\tslice := []*ListNode{}\n\n\tfor node != nil {\n\t\tslice = append(slice, node)\n\n\t\tnode = node.Next\n\t}\n\n\treturn slice[len(slice)/2]\n}", "func middleNode(head *ListNode) *ListNode {\r\n\tvar (\r\n\t\tp *ListNode\r\n\t\tl, i, m int\r\n\t)\r\n\tfor l, p = 0, head; p != nil; p = p.Next {\r\n\t\tl += 1\r\n\t}\r\n\t\r\n\tfor m, i, p = l / 2, 0, head; i < m; i++ {\r\n\t\tp = p.Next\r\n\t}\r\n\treturn p\r\n}", "func middleOfLinkedList(head *Node) *Node {\n\tsingleP := head\n\tdoubleP := singleP\n\tif singleP == nil {\n\t\treturn singleP\n\t}\n\tfor doubleP != nil && doubleP.Next != nil {\n\t\tsingleP = singleP.Next\n\t\tdoubleP = doubleP.Next.Next\n\t}\n\treturn singleP\n}", "func middleNode(head *ListNode) *ListNode {\n\tslow,fast := head,head\n\n\tfor fast.Next !=nil && fast.Next.Next != nil {\n\t\tslow = slow.Next\n\t\tfast = fast.Next.Next\n\t}\n\n\treturn slow\n}", "func (l *Slist) RemoveFirst(d interface{}) {\n\tif l.len == 0 {\n\t\treturn\n\t}\n\n\t// the case the head data is equal d\n\tif l.head.data == d {\n\t\tl.head = l.head.next\n\t\tl.len--\n\t\treturn\n\t}\n\n\tprev := l.head\n\tcurrent := prev.next\n\tfor current != nil {\n\t\tif current.data == d {\n\t\t\t// headを持っているのがprevなので、prev.nextをcurrent.nextにする\n\t\t\t// current = current.nextだと、currentはheadを持っていないので、リムーブできない。\n\t\t\tprev.next = current.next\n\t\t\tl.len--\n\t\t\treturn\n\t\t}\n\t\tprev = current\n\t\tcurrent = current.next\n\t}\n}", "func RmByMidRatio(head *ListNode, a int, b int) *ListNode {\n\tif a < 1 || a > b {\n\t\treturn head\n\t}\n\t// Get l\n\tcur := head\n\tl 
:= 0\n\tfor cur != nil {\n\t\tl++\n\t\tcur = cur.Next\n\t}\n\t// 获取要删除的元素\n\tl = int(math.Ceil(float64(a*l) / float64(b)))\n\n\tif l == 1 {\n\t\thead = head.Next\n\t}\n\tif l > 1 {\n\t\tcur = head\n\t\t// 不等于 1 就是因为 cur 是要删除元素的前一个元素\n\t\tfor l != 1 {\n\t\t\tl--\n\t\t\tcur = cur.Next\n\t\t}\n\t\tcur.Next = cur.Next.Next\n\t}\n\treturn head\n}", "func FindMiddleNode(head *ListNode) *ListNode {\n\tfast, slow := head, head\n\tfor fast != nil && fast.Next != nil {\n\t\tfast = fast.Next.Next\n\t\tslow = slow.Next\n\t}\n\treturn slow\n}", "func (s *SinglyLinkedList) Remove(n *Node) {\n if s.front == n {\n s.front = n.next\n s.length--\n } else {\n currentNode := s.front\n\n // search for node n\n for currentNode != nil && currentNode.next != nil && currentNode.next != n {\n currentNode = currentNode.next\n }\n\n // see if current's next node is n\n // if it's not n, then node n wasn't found in list s\n if currentNode.next == n {\n currentNode.next = currentNode.next.next\n s.length--\n }\n }\n}", "func FindMiddle(head *linkedlist.Node) int {\n\n\tslowNode, fastNode := head, head\n\n\tif head != nil {\n\t\tfor fastNode != nil && fastNode.Next != nil {\n\t\t\tfastNode = fastNode.Next.Next\n\t\t\tslowNode = slowNode.Next\n\t\t}\n\t}\n\treturn slowNode.Data\n}", "func (list *DoublyLinkedList) DeleteNode(index int) {\n\n\tif list.isEmpty() {\n\t\treturn\n\t}\n\tnode, _ := list.GetNodeAt(index)\n\tfmt.Println(node)\n\tfmt.Println(node.data.Name)\n\n\tif list.lenght == 1 {\n\t\tlist.head = nil\n\t} else if node == list.GetLastNode() { // Si es el ultimo al anterior se le apunta a nil\n\t\tnode.previous.next = nil\n\t} else if node == list.head { // Si es el primero al siguiente se le apunta a nil\n\t\ttemp := node.next\n\t\tlist.head = temp\n\t\tnode.next.previous = nil\n\n\t} else {\n\t\tnode.previous.next = node.next\n\t\tnode.next.previous = node.previous\n\t}\n\n\tlist.lenght--\n\n}", "func (head *Node) DeleteFirst() (*Node, error) {\n\tif head == nil {\n\t\treturn nil, 
fmt.Errorf(\"empty list\")\n\t}\n\t// simply send whatever is there at 2nd pos\n\treturn head.Next, nil\n}", "func (list *List) DeleteFromBeginning() {\n // 1. Provide message to user if the list is empty and return\n if list.Size() == 0 {\n fmt.Println(\"Nothing to delete, the list is empty\")\n return\n }\n\n // 2. Get the current head and save it in temp\n oldHead := list.Head()\n\n // 3. Update the list's head to next element in list\n list.head = oldHead.next\n\n // 4. Remove the link from the old head\n oldHead.next = nil\n\n // 5. Decrement the list size\n list.size--\n}", "func (c *CirLnLs) DeleteNode(index int) (temp *CirLnLs, delnode *CirLnLs, err error) {\n\ttemp = c\n\thelper := c.NewHelper()\n\t//如果只有一個結點 且Index是要刪除的值\n\tif temp.Next == c && temp.Index == index {\n\t\ttemp.Next = nil\n\t\treturn c, c, nil\n\t}\n\n\tfor {\n\t\t//找到最後 而且沒有正確的index\n\t\tif temp.Next == c && temp.Index != index {\n\t\t\terr = errors.New(\"Index not found\")\n\t\t\treturn\n\t\t}\n\t\tif c.Index == index {\n\t\t\tc = c.Next\n\t\t\thelper.Next = c\n\t\t\treturn c, c, nil\n\t\t}\n\t\tif temp.Index == index {\n\t\t\tfmt.Println(\"找到\", index)\n\t\t\thelper.Next = temp.Next\n\t\t\treturn c, temp, nil\n\t\t}\n\n\t\ttemp = temp.Next\n\t\thelper = helper.Next\n\t}\n}", "func josephusKill(head *ListNode, m int) *ListNode {\n\tif head == nil || head.Next == head || m < 1 {\n\t\treturn head\n\t}\n\tlast := head\n\tfor last.Next != head {\n\t\tlast = last.Next\n\t}\n\tcnt := 0\n\tfor head != last {\n\t\tcnt++\n\t\tif cnt == m {\n\t\t\tlast.Next = head.Next\n\t\t\tcnt = 0\n\t\t} else {\n\t\t\tlast = last.Next\n\t\t}\n\t\thead = last.Next\n\t}\n\treturn head\n}", "func (l *LinkedList) DeleteFromEnd() (*Node, error) {\n\tif l.head == nil {\n\t\treturn nil, fmt.Errorf(\"Can't delete from empty list\")\n\t}\n\tl.size--\n\tif l.head.next == nil {\n\t\tdeleted := l.head\n\t\tl.head = nil\n\t\treturn deleted, nil\n\t}\n\tprevious := l.head\n\tfor previous.next.next != nil {\n\t\tprevious = 
previous.next\n\t}\n\tdeleted := previous.next\n\tprevious.next = nil\n\treturn deleted, nil\n}", "func (t *SkipList) Delete(e ListElement) {\n\n\tif t == nil || t.IsEmpty() || e == nil {\n\t\treturn\n\t}\n\n\tkey := e.ExtractKey()\n\n\tindex := t.findEntryIndex(key, 0)\n\n\tvar currentNode *SkipListElement\n\tnextNode := currentNode\n\n\tfor {\n\n\t\tif currentNode == nil {\n\t\t\tnextNode = t.startLevels[index]\n\t\t} else {\n\t\t\tnextNode = currentNode.next[index]\n\t\t}\n\n\t\t// Found and remove!\n\t\tif nextNode != nil && math.Abs(nextNode.key-key) <= t.eps {\n\n\t\t\tif currentNode != nil {\n\t\t\t\tcurrentNode.next[index] = nextNode.next[index]\n\t\t\t}\n\n\t\t\tif index == 0 {\n\t\t\t\tif nextNode.next[index] != nil {\n\t\t\t\t\tnextNode.next[index].prev = currentNode\n\t\t\t\t}\n\t\t\t\tt.elementCount--\n\t\t\t}\n\n\t\t\t// Link from start needs readjustments.\n\t\t\tif t.startLevels[index] == nextNode {\n\t\t\t\tt.startLevels[index] = nextNode.next[index]\n\t\t\t\t// This was our currently highest node!\n\t\t\t\tif t.startLevels[index] == nil {\n\t\t\t\t\tt.maxLevel = index - 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Link from end needs readjustments.\n\t\t\tif nextNode.next[index] == nil {\n\t\t\t\tt.endLevels[index] = currentNode\n\t\t\t}\n\t\t\tnextNode.next[index] = nil\n\t\t}\n\n\t\tif nextNode != nil && nextNode.key < key {\n\t\t\t// Go right\n\t\t\tcurrentNode = nextNode\n\t\t} else {\n\t\t\t// Go down\n\t\t\tindex--\n\t\t\tif index < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (sl *SkipList) Delete(d int) *SkipNode {\n\tf := sl.head.getTop().find(d)\n\n\tn := f\n\tfor n != nil {\n\t\tn.prev.next = n.next\n\t\tn.next.prev = n.prev\n\t\tn = n.higher\n\t}\n\n\treturn f\n}", "func (cll *CircularLinkedList) DeleteBeginning() int {\n\t//check if list if empty\n\tif !(cll.CheckIfEmpty()) {\n\t\thead := cll.Start\n\t\tdeletedElem := head.Data\n\t\tif cll.Len == 1 {\n\t\t\tcll.Start = nil\n\t\t\tcll.Len--\n\t\t\treturn deletedElem\n\t\t}\n\t\tprevStart 
:= cll.Start\n\t\tcll.Start = head.Next\n\t\t// traverse till end and update last node's next to updated start\n\t\tfor {\n\t\t\tif head.Next == prevStart {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\thead = head.Next\n\t\t}\n\t\thead.Next = cll.Start\n\t\tcll.Len--\n\t\treturn deletedElem\n\t}\n\treturn -1\n}", "func (s *SinglyLinkedList) RemoveBefore(before *Node) {\n if s.front != nil && s.front != before {\n if s.front.next == before {\n s.front = before\n } else {\n currentNode := s.front\n for currentNode.next.next != nil && currentNode.next.next != before {\n currentNode = currentNode.next\n }\n if currentNode.next.next == before {\n currentNode.next = before\n }\n }\n }\n}", "func (head *Node) DeleteLast() (*Node, error) {\n\n\tif head == nil {\n\t\treturn nil, fmt.Errorf(\"empty list\")\n\t}\n\t// if head has only one element, then it would be last\n\tif head.Next == nil {\n\t\treturn nil, nil\n\t}\n\t// reach till second last node\n\tfor head.Next.Next != nil {\n\t\thead = head.Next\n\t}\n\t//then remove it's next, which is pointing to last element\n\thead.Next = nil\n\treturn head, nil\n}", "func (l *List) DeleteList(name string) {\n\n\tl.size--\n\t/* single item in list */\n \tif l.head == l.tail {\n \t\tl.head = nil\n \t \tl.tail = nil\n \t\treturn\n \t} \n\n \t/* Find the entry to delete */\n\tcurrentNode := l.head\n\tvar prev *Node = nil\n\tfor currentNode != nil && \n\t\tstrings.Compare(strings.ToUpper(name), strings.ToUpper(currentNode.emp.name)) != 0 {\n\t\tprev = currentNode\n\t\tcurrentNode = currentNode.next\n\t}\n\n\t/* If entry not found */\n\tif currentNode == nil {\n\t\tfmt.Println(\"Node not found for name: %s\", name)\n\t\tl.size++\n\t\treturn\n\t}\n\n\t/*If the last entry to be removed */\n\tif (currentNode == l.tail) {\n\t\tprev.next = nil;\n\t\tl.tail = prev\n\t}\n\n\t/* if the first entry to be removed */\n\tif (currentNode == l.head) {\n\t\tl.head = currentNode.next\n\t\tcurrentNode.next = nil\n\t} else { /* middle entry to be removed 
*/\n\t\tprev.next = currentNode.next\n\t}\n}", "func RmLastKthNode(head *DNode, k int) *DNode {\n\tif k < 1 || head == nil {\n\t\treturn head\n\t}\n\tcurr := head\n\tfor curr != nil {\n\t\tk--\n\t\tcurr = curr.Next\n\t}\n\t// after a loop\n\tif k == 0 {\n\t\thead = head.Next\n\t\thead.Prev = nil\n\t}\n\tif k < 0 {\n\t\tcurr = head\n\t\tfor k != 0 {\n\t\t\tk++\n\t\t\tcurr = curr.Next\n\t\t}\n\t\t// ! mind the diff\n\t\ttemp := curr.Next.Next\n\t\tcurr.Next = temp\n\t\tif temp != nil {\n\t\t\ttemp.Prev = curr\n\t\t}\n\t}\n\treturn head\n}", "func removeNthFromEnd(head *ListNode, n int) *ListNode {\n\tdummyHead := ListNode{Next: head}\n\n\tvar length int\n\tfor cursor := &dummyHead; cursor.Next != nil; cursor = cursor.Next {\n\t\tlength++\n\t}\n\n\tif n <= 0 || n > length {\n\t\treturn dummyHead.Next\n\t}\n\n\tpreIdx := length - n\n\tpreNode := &dummyHead\n\tfor i := 0; i < preIdx; i++ {\n\t\tpreNode = preNode.Next\n\t}\n\n\tdelNode := preNode.Next\n\tpreNode.Next = delNode.Next\n\tdelNode.Next = nil // avoid memory leaks\n\n\treturn dummyHead.Next\n}", "func (l *list) delete(i int) {\n\tif i < 0 || i >= l.size {\n\t\tpanic(\"list index out of bounds\")\n\t}\n\n\tvar n *node\n\tmid := l.size/2\n\tif mid >= i {\n\t\tn = l.root\n\t\tfor ; i!=0; i-- {\n\t\t\tn = n.next\n\t\t}\n\t} else {\n\t\tn = l.tail\n\t\tfor i=l.size-i-1; i!=0; i-- {\n\t\t\tn = n.prev\n\t\t}\n\t}\n\tif n.prev != nil {\n\t\tn.prev.next = n.next\n\t} else {\n\t\tl.root = n.next\n\t}\n\tif n.next != nil {\n\t\tn.next.prev = n.prev\n\t} else {\n\t\tl.tail = n.prev\n\t}\n\tl.size--\t\n}", "func (l *LinkedList) DeleteFromFront() (*Node, error) {\n\tif l.head == nil {\n\t\treturn nil, fmt.Errorf(\"Can't delete from empty list\")\n\t}\n\tdeleted := l.head\n\tl.head = l.head.next\n\tl.size--\n\treturn deleted, nil\n}", "func (l *LinkedList) DeleteAtFirst() error {\n\t// validate the position\n\tif l.len == 0 {\n\t\tfmt.Println(\"No nodes in list\")\n\t\treturn errors.New(\"No nodes in list\")\n\t} else if 
l.len == 1 {\n\t\tl.head = nil\n\t\tl.tail = nil\n\t\tl.len--\n\t\treturn nil\n\t}\n\tl.head = l.head.next\n\tl.head.prev = nil\n\tl.len--\n\treturn nil\n}", "func deleteTreeNode(stack *stack, node *Node, item compare.Lesser) (*Node, interface{}) {\n\troot := node\n\n\tvar ret interface{}\n\n\t// find the node\nFOR:\n\tfor node != nil {\n\t\tswitch {\n\t\tcase item.Less(node.Item):\n\t\t\tstack.push(node, Left)\n\t\t\tnode = node.Left\n\t\tcase node.Item.Less(item):\n\t\t\tstack.push(node, Right)\n\t\t\tnode = node.Right\n\t\tdefault:\n\t\t\tret = node.Item\n\t\t\tbreak FOR\n\t\t}\n\t}\n\n\t// not find\n\tif node == nil {\n\t\treturn root, nil\n\t}\n\n\tvar inorderSuccessor *Node\n\n\t// find the inorder successor\n\tif node.Right != nil {\n\t\tstack.push(node, Right)\n\n\t\tinorderSuccessor = node.Right\n\n\t\tfor inorderSuccessor.Left != nil {\n\t\t\tstack.push(inorderSuccessor, Left)\n\n\t\t\tinorderSuccessor = inorderSuccessor.Left\n\t\t}\n\n\t\tnode.Item = inorderSuccessor.Item\n\t\tnode.Item = inorderSuccessor.Item\n\n\t\tnode = inorderSuccessor\n\t}\n\n\t// get the child of node\n\tc := node.Left\n\tif c == nil {\n\t\tc = node.Right\n\t}\n\n\t// N has no child\n\tif c == nil {\n\t\t// delete N\n\t\tstack.bindChild(nil)\n\n\t\tif node.Color == Red {\n\t\t\treturn root, ret\n\t\t}\n\n\t\tdeleteTreeNodeBalance(stack)\n\t\troot = stack.root()\n\t\tif root != nil {\n\t\t\troot.Color = Black\n\t\t}\n\t\treturn root, ret\n\t}\n\n\t// N has one next\n\t// then copy key/value from next to N\n\tnode.Item = c.Item\n\n\t// delete the next\n\tnode.Left = nil\n\tnode.Right = nil\n\n\t// N has diff color with next\n\tif node.Color != c.Color {\n\t\t// set color of N to black\n\t\tnode.Color = Black\n\n\t\treturn root, ret\n\t}\n\n\t// the color of N and next are both Black\n\tdeleteTreeNodeBalance(stack)\n\n\troot.Color = Black\n\treturn root, ret\n}", "func (s *Service) DelMid(mid int64) (err error) {\n\tvar exist bool\n\tif exist, err = s.existMid(mid); err != nil 
{\n\t\treturn\n\t}\n\tif !exist {\n\t\treturn ecode.TvUpperNotInList\n\t}\n\tif err = s.DB.Model(&model.Upper{}).Where(\"mid = ?\", mid).Update(map[string]int{\"deleted\": _deleted, \"toinit\": _removeArcs}).Error; err != nil {\n\t\tlog.Error(\"DelMid %d, Error %v\", mid, err)\n\t}\n\treturn\n}", "func (l *LinkedList) Delete(index int) error {\n // lock searchers\n // lock inserters\n\n var prev *LLNode\n n := l.Root\n\n if index == 0 {\n if l.Root.Next != nil {\n l.Root = l.Root.Next\n } else {\n l.Root = nil\n }\n\n return nil\n }\n\n i := 0\n\n for ; n != nil && i != index; i++ {\n prev = n\n n = n.Next\n }\n\n if i != index {\n return errors.New(\"index does not exist\")\n fmt.Println(\"error\")\n }\n\n if n.Next != nil {\n prev.Next = n.Next\n } else {\n prev.Next = nil\n }\n\n return nil\n}", "func (head *Node) DeleteNode(data int) {\n\tnode := head\n\tif node.Data == data {\n\t\t*head = *head.Next\n\t\treturn\n\t}\n\tfor node.Next != nil {\n\t\tif node.Next.Data == data {\n\t\t\tnode.Next = node.Next.Next\n\t\t\treturn\n\t\t}\n\t\tnode = node.Next\n\t}\n}", "func (list *LinkedList) DeleteNode(element interface{}) interface{} {\n\tif list.size == 0 {\n\t\treturn nil\n\t}\n\tif list.head.value == element {\n\t\tdeleted := list.head.value\n\t\tlist.head = list.head.next\n\t\tlist.size--\n\t\tif list.size == 0 {\n\t\t\tlist.tail = nil\n\t\t}\n\t\treturn deleted\n\t}\n\tresult := list.head.SearchPrevElement(element)\n\tif result.value == nil {\n\t\treturn nil\n\t}\n\tfmt.Printf(\"result = %+v\\n\", result)\n\tdeleted := result.next.value\n\t// if the deleted node is tail\n\tif result.next.next == nil {\n\t\tlist.tail = result\n\t}\n\tresult.next = result.next.next\n\tlist.size--\n\treturn deleted\n}", "func (p *LinkedList) DelNode(node *Cell) {\n\n\tif node == nil {\n\t\tcolor.Yellow.Println(\"Possibility list is empty or has reached the end.\")\n\t\treturn\n\t}\n\n\tif node == p.Head {\n\t\tp.Head = node.Next\n\t} else {\n\t\tif node != nil {\n\t\t\tif node.Next 
!= nil {\n\t\t\t\tnode.Prev.Next = node.Next\n\t\t\t\tnode.Next.Prev = node.Prev\n\t\t\t} else {\n\t\t\t\tnode.Prev.Next = nil\n\t\t\t}\n\t\t}\n\t}\n}", "func removeNode(node *doubleListNode) {\n\tnode.right.left, node.left.right = node.left, node.right\n}", "func (s *SinglyLinkedList) RemoveNthFromEnd(n int) *Node {\n\tdummy := &Node{0, nil}\n\tdummy = s.Head\n\tfirst := dummy\n\tsecond := dummy\n\tfor i := 1; i <= n+1; i++ {\n\t\tfirst = first.Next\n\t}\n\n\tfor first != nil {\n\t\tfirst = first.Next\n\t\tsecond = second.Next\n\t}\n\tsecond.Next = second.Next.Next\n\treturn dummy.Next\n}", "func (list *List) DeleteFromEnd() {\n // 1. Provide message to user if the list is empty and return\n if list.Size() == 0 {\n fmt.Println(\"Nothing to delete, the list is empty\")\n return\n }\n\n // 2. Get the head of the list as current iterator\n current := list.Head()\n\n // 3. Traverse the list until the second last element is reached\n for current.next.next != nil {\n current = current.next\n }\n\n // 4. Update next pointer of second last element such that last element is removed\n current.next = nil\n\n // 5. 
Decrement the list size\n list.size--\n\n}", "func (hn *HeadNode) Delete(key interface{}) Node {\n\t// if !hn.Search(key) {\n\t// \treturn nil\n\t// }\n\tflag := false\n\tcur := hn.head\n\ti := hn.level\n\tvar level int\n\tvar node Node\n\tpath := make([]*IndexNode, i+1)\n\tfor ; i >= 0; i-- {\n\t\tfor cur.index[i] != nil {\n\t\t\tres := cur.index[i].Node.Compare(key)\n\t\t\tif res == 0 {\n\t\t\t\tflag = true\n\t\t\t\tlevel = i\n\t\t\t\tnode = cur.index[i].Node\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif res > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcur = cur.index[i]\n\t\t}\n\t\tpath[i] = cur\n\t}\n\tif !flag {\n\t\treturn nil\n\t}\n\n\tj := level\n\tfor ; j >= 0; j-- {\n\t\tif path[j].index[j].index[j] != nil {\n\t\t\tpath[j].index[j] = path[j].index[j].index[j]\n\t\t} else {\n\t\t\tpath[j].index[j] = nil\n\t\t}\n\t\thn.para[j]--\n\t}\n\n\treturn node\n}", "func (l *LinkedList) Remove(index int) (*LLNode, error) {\n\tswitch {\n\tcase index > l.Size-1:\n\t\treturn nil, fmt.Errorf(\"Index %d out of range\", index)\n\n\tcase index == 0:\n\t\treturn l.Shift(), nil\n\n\tcase index == l.Size-1:\n\t\treturn l.Pop(), nil\n\n\tdefault:\n\t\tvar removedNode *LLNode\n\t\tcurrentNode, NextNode := l.Head, l.Head.Next\n\t\tfor i := 0; i < l.Size-1; i++ {\n\t\t\tif i+1 == index {\n\t\t\t\tremovedNode = NextNode\n\t\t\t\tcurrentNode.Next = NextNode.Next\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcurrentNode = NextNode\n\t\t\tNextNode = NextNode.Next\n\t\t}\n\n\t\tl.Size--\n\n\t\treturn removedNode, nil\n\t}\n}", "func (dl *DoublyLinkedList) shift() *Node {\n\thead := dl.head\n\tif head == nil {\n\t\treturn nil\n\t}\n\tif dl.length == 1 {\n\t\tdl.head = nil\n\t\tdl.tail = nil\n\t} else {\n\t\tdl.head.next.previous = nil\n\t\tdl.head = dl.head.next\n\t\thead.next = nil\n\t}\n\tdl.length--\n\treturn head\n}", "func (l *LinkedList) Delete(data interface{}) *LLNode {\n\tvar deletedNode *LLNode\n\n\tswitch l.Size {\n\tcase 0:\n\t\treturn deletedNode\n\tcase 1:\n\t\tif l.Head.Data == data {\n\t\t\tdeletedNode 
= l.Head\n\t\t\tl.Head, l.Tail = nil, nil\n\t\t}\n\tdefault:\n\t\tif l.Head.Data == data {\n\t\t\tdeletedNode = l.Head\n\t\t\tl.Head = l.Head.Next\n\t\t} else {\n\t\t\tcurrentNode, NextNode := l.Head, l.Head.Next\n\t\t\tfor NextNode != nil {\n\t\t\t\tif NextNode.Data == data {\n\t\t\t\t\tdeletedNode = NextNode\n\t\t\t\t\tcurrentNode.Next = NextNode.Next\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tcurrentNode = NextNode\n\t\t\t\tNextNode = NextNode.Next\n\t\t\t}\n\n\t\t\tif deletedNode == l.Tail {\n\t\t\t\tl.Tail = currentNode\n\t\t\t}\n\t\t}\n\t}\n\n\tif deletedNode != nil {\n\t\tl.Size--\n\t}\n\n\treturn deletedNode\n}", "func RemoveBeginning(list List) List {\n\tlist.FirstNode = list.FirstNode.Next\n\treturn list\n}", "func (ll *LinkedList) deleteNode(n *node) {\n\tif n == nil {\n\t\treturn\n\t}\n\n\tif n.previous != nil {\n\t\tn.previous.next = n.next\n\t}\n\n\tif n.next != nil {\n\t\tn.next.previous = n.previous\n\t}\n\n\tll.length--\n}", "func removeNthFromEnd(head *ListNode, n int) *ListNode {\n\tif head == nil || n == 0 {\n\t\treturn nil\n\t}\n\tvar (\n\t\tend = head\n\t\ti = 1\n\t)\n\tfor ; i <= n && end.Next != nil; i++ {\n\t\tend = end.Next\n\t}\n\tpre := head\n\tfor end.Next != nil {\n\t\tend = end.Next\n\t\tpre = pre.Next\n\t}\n\tif pre.Next != nil && i != n {\n\t\tpre.Next = pre.Next.Next\n\t} else {\n\t\treturn head.Next\n\t}\n\treturn head\n}", "func removeNthFromEnd(head *ListNode, n int) *ListNode {\n\ttemp := head\n\tlist := []*ListNode{}\n\tfor temp != nil {\n\t\tlist = append(list, temp)\n\t\ttemp = temp.Next\n\t}\n\tlength := len(list)\n\tthisOne := list[length - n]\n\tbeforeIdx := length - n - 1;\n\tif beforeIdx < 0 {\n\t\treturn thisOne.Next\n\t}\n\tlist[beforeIdx].Next = thisOne.Next\n\treturn head\n}", "func removeNthFromEndSlice(head *ListNode, n int) *ListNode {\n\tif head == nil || head.Next == nil {\n\t\treturn nil\n\t}\n\ts := []*ListNode{}\n\tfor {\n\t\ts = append(s, head)\n\t\thead = head.Next\n\t\tif head == nil 
{\n\t\t\tbreak\n\t\t}\n\t}\n\tx := len(s) - n\n\tif x > 0 {\n\t\ts[x-1].Next = s[x].Next\n\t\treturn s[0]\n\t}\n\treturn s[1]\n}", "func RemoveKthNodeFromEnd(head Node, k int) {\n\tfirst, second := &head, &head\n\tfor i := 0; i < k; i++ {\n\t\tsecond = second.Next\n\t}\n\n\t// edge case: the Kth node from end is the head node\n\tif second == nil {\n\t\thead.Value = head.Next.Value\n\t\thead.Next = head.Next.Next\n\t\treturn\n\t}\n\n\tfor second.Next != nil {\n\t\tfirst = first.Next\n\t\tsecond = second.Next\n\t}\n\n\t// now second pointer points to last node in linked list, first pointer\n\t// points to the previous node of the target node to be removed\n\t// and the skipped node will be garbage collected\n\tfirst.Next = first.Next.Next\n}", "func RemoveNode(ll *List, node *Node) (*Node, error) {\n\tll.Mux.Lock()\n\tif ll.Size == 0 {\n\t\tll.Mux.Unlock()\n\t\treturn nil, errors.New(\"List is empty\")\n\t} else if ll.Size == 1 {\n\t\tll.Mux.Unlock()\n\t\treturnNode, _ := RemoveLast(ll)\n\t\treturn returnNode, nil\n\t}\n\tll.Mux.Unlock()\n\n\tll.Mux.Lock()\n\tdefer ll.Mux.Unlock()\n\n\tprevNode := node.Prev\n\tnextNode := node.Next\n\n\tprevNode.Next = node.Next\n\tnextNode.Prev = node.Prev\n\n\tatomic.AddInt32(&ll.Size, -1)\n\n\treturn node, nil\n}", "func deleteDuplicates(head *ListNode) *ListNode {\n dummyHead := new(ListNode)\n dummyHead.Next = head \n \n curr := head \n \n for (curr != nil && curr.Next != nil) {\n if (curr.Val == curr.Next.Val) {\n // Skip next node if found duplicate\n curr.Next = curr.Next.Next\n } else {\n curr = curr.Next\n }\n }\n \n return dummyHead.Next\n \n}", "func deleteNode(node *linkedlist.Node) {\n\t*node = *node.Next\n}", "func Delete(x int, l linklist.List) {\n\tp := FindPrevious(x, l)\n\tif p.Next != nil {\n\t\ttemp := p.Next\n\t\tp.Next = temp.Next\n\t}\n}", "func (l *SinglyLinkedList) remove(node *Node) *Node {\n\tvar previous *Node\n\tif l.head.next != nil {\n\t\tfor previous = l.head.next; previous.next != nil && 
previous.next != node; previous = previous.next {\n\n\t\t}\n\t}\n\n\tprevious.next = node.next\n\tnode.next = nil // avoid memory leaks\n\tnode.list = nil\n\tl.size--\n\tif previous.next == nil {\n\t\tl.last = previous\n\t} else {\n\t\tl.last = previous.next\n\t}\n\treturn node\n}", "func (l *DoublyLinkedList) Remove(i int) {\n\tcur := l.head\n\n\tif cur == nil {\n\t\tpanic(\"DoublyLinkedList is empty!\")\n\t}\n\n\tif cur.value == i {\n\t\tl.head = &Node{}\n\t\treturn\n\t}\n\n\tfor cur.next != nil {\n\t\tcur = cur.next\n\n\t\tif cur.value == i {\n\t\t\tcur.prev.next = cur.next\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ll *LinkedList) Delete(element int) {\n\tif ll.IsEmpty() {\n\t\treturn\n\t}\n\n\tif ll.start.value == element {\n\t\tll.PopLeft()\n\n\t\treturn\n\t}\n\n\tif ll.end.value == element {\n\t\tll.PopRight()\n\n\t\treturn\n\t}\n\n\tcurrent := ll.start.next\n\tfor current != nil {\n\t\tif current.value == element {\n\t\t\tll.deleteNode(current)\n\t\t\treturn\n\t\t}\n\n\t\tcurrent = current.next\n\t}\n}", "func (l *LinkedList) Shift() *LLNode {\n\tvar shiftedNode *LLNode\n\n\tif l.Head == nil {\n\t\treturn nil\n\t}\n\n\tshiftedNode = l.Head\n\tif l.Head == l.Tail {\n\t\tl.Head, l.Tail = nil, nil\n\t} else {\n\t\tl.Head = l.Head.Next\n\t}\n\n\tshiftedNode.Next = nil\n\tl.Size--\n\n\treturn shiftedNode\n}", "func TestDeleteFromHeadDLL(testCase *testing.T) {\n\ttestCase.Log(\"To test the doubly linked list node is deleted from the head of the linked list\")\n\thead := NewDoublyLinkedListNode(-1)\n\n\thead.InsertAtHeadDLL(1)\n\thead.InsertAtHeadDLL(2)\n\thead.InsertAtHeadDLL(3)\n\n\telement, err := head.DeleteFromHeadDLL()\n\tif element != 3 || err != nil {\n\t\ttestCase.Errorf(\"Doubly LList Error: Wrong Node was deleted\")\n\t}\n\tif head.next.data != 2 || head.next.prev != head {\n\t\ttestCase.Errorf(\"Doubly LList Error: Delete operation wasn't successful\")\n\t}\n\n\t_, _ = head.DeleteFromHeadDLL()\n\t_, _ = head.DeleteFromHeadDLL()\n\n\tif _, err = 
head.DeleteFromHeadDLL() ; err == nil {\n\t\ttestCase.Errorf(\"Doubly LList Error: Linked List underflow not reported\")\n\t}\n}", "func (l *LinkedList) Delete(index int) {\n\t// Abort if index is not valid\n\tif index > l.length || index <= 0 {\n\t\treturn\n\t}\n\n\tif index == 1 {\n\t\tl.head = l.head.next\n\t\tl.length--\n\t\treturn\n\t}\n\n\tp := l.head\n\t// loop until the place right before the node that is going away\n\tfor i := 0; i < index-2; i++ {\n\t\tp = p.next\n\t}\n\tp.next = p.next.next\n\tl.length--\n}", "func (r *RGASS) LocalDelete(tarID ID, pos int, delLen int) ([]*Node, int, error) {\n\ttarNode, ok := r.Model.Get(tarID)\n\tnodeList := []*Node{tarNode}\n\teffectiveLen := delLen\n\n\tif !ok {\n\t\treturn nodeList, effectiveLen, errors.New(\"Node not found in model\")\n\t}\n\n\tif pos == 0 && delLen == tarNode.Length() {\n\t\ttarNode.DeleteWhole()\n\t}\n\n\tif pos == 0 && delLen < tarNode.Length() {\n\t\tfNode, lNode := tarNode.DeletePrior(delLen)\n\t\tr.Model.Replace(tarNode, fNode, lNode)\n\t}\n\n\tif pos > 0 && pos+delLen == tarNode.Length() {\n\t\tfNode, lNode := tarNode.DeleteLast(delLen)\n\t\tr.Model.Replace(tarNode, fNode, lNode)\n\t}\n\n\tif pos > 0 && pos+delLen < tarNode.Length() {\n\t\tfNode, mNode, lNode := tarNode.DeleteMiddle(pos, delLen)\n\t\tr.Model.Replace(tarNode, fNode, mNode, lNode)\n\t}\n\n\tif pos > 0 && pos+delLen > tarNode.Length() {\n\t\tremainingLen := delLen - (tarNode.Length() - pos)\n\t\tfNode, lNode := tarNode.DeleteLast(pos)\n\t\tr.Model.Replace(tarNode, fNode, lNode)\n\n\t\tnode := lNode.Next\n\n\t\tfor remainingLen > 0 {\n\t\t\tif remainingLen > node.Length() {\n\t\t\t\tremainingLen -= node.Length()\n\t\t\t\tnode.DeleteWhole()\n\t\t\t} else {\n\t\t\t\tfNode, lNode := node.DeletePrior(remainingLen)\n\t\t\t\tr.Model.Replace(node, fNode, lNode)\n\t\t\t\tremainingLen = 0\n\t\t\t}\n\n\t\t\tif remainingLen > 0 {\n\t\t\t\tnode = node.Next\n\t\t\t\tfor node.Hidden {\n\t\t\t\t\teffectiveLen += node.Length()\n\t\t\t\t\tnode = 
node.Next\n\t\t\t\t}\n\t\t\t\tnodeList = append(nodeList, node)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodeList, effectiveLen, nil\n}", "func (b *Skiplist) Delete(searchKey interface{}) error {\n\tupdateList := make([]*Skipnode, b.MaxLevel)\n\tcurrentNode := b.Header\n\n\t//Quick search in forward list\n\tfor i := b.Header.Level - 1; i >= 0; i-- {\n\t\tfor currentNode.Forward[i] != nil && currentNode.Forward[i].compare(b.Comparator,searchKey) < 0 {\n\t\t\tcurrentNode = currentNode.Forward[i]\n\t\t}\n\t\tupdateList[i] = currentNode\n\t}\n\n\t//Step to next node. (which is the target delete location)\n\tcurrentNode = currentNode.Forward[0]\n\n\tif currentNode.compare(b.Comparator,searchKey) == 0 {\n\t\tfor i := 0; i <= currentNode.Level-1; i++ {\n\t\t\tif updateList[i].Forward[i] != nil && updateList[i].Forward[i].compare(b.Comparator,currentNode.Key) != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tupdateList[i].Forward[i] = currentNode.Forward[i]\n\t\t}\n\n\t\tfor currentNode.Level > 1 && b.Header.Forward[currentNode.Level] == nil {\n\t\t\tcurrentNode.Level--\n\t\t}\n\n\t\t//free(currentNode) //no need for Golang because GC\n\t\tcurrentNode = nil\n\t\treturn nil\n\t}\n\treturn errors.New(\"Not found\")\n}", "func (tree *SplayTree) DeleteMin() Item {\n\tnode := tree.root\n\tif node == nil {\n\t\treturn nil\n\t}\n\tif node.left == nil {\n\t\ttree.root = node.right\n\t} else {\n\t\tvar parent = node\n\t\tnode = node.left\n\t\tfor node.left != nil {\n\t\t\tparent = node\n\t\t\tnode = node.left\n\t\t}\n\t\tparent.left = node.right\n\t\ttree.splay(parent.item)\n\t}\n\treturn node.item\n}", "func (s *SinglyLinkedList) DeleteNode(val interface{}) bool {\n\t// if list is empty return false\n\tif s.Count == 0 {\n\t\treturn false\n\t}\n\tif val == s.Head.Val {\n\t\ts.Head = s.Head.Next\n\t\ts.Count -= 1\n\t\treturn true\n\t}\n\n\tfor node := s.Head; node != nil; node = node.Next {\n\t\tif node.Next.Val == val {\n\t\t\tnode.Next = node.Next.Next\n\t\t\ts.Count -= 1\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}", "func (l *LinkedList) Delete(val fmt.Stringer) (*Node, error) {\n\tif l.head == nil {\n\t\treturn nil, fmt.Errorf(\"Can't find %d in list\", val)\n\t}\n\tif l.head.data == val {\n\t\treturn l.DeleteFromFront()\n\t}\n\tnode := l.head\n\tfor node.next != nil {\n\t\tif node.next.data == val {\n\t\t\tdeleted := node.next\n\t\t\tnode.next = node.next.next\n\t\t\tl.size--\n\t\t\treturn deleted, nil\n\t\t}\n\t\tnode = node.next\n\t}\n\treturn nil, fmt.Errorf(\"Can't find %d in list\", val)\n}", "func (l *LinkedList) DeleteFirst() {\n\tl.Head = l.Head.Next\n\tl.Size--\n}", "func DeleteNode(n *Node) error {\n\tm := n.next\n\tif m == nil {\n\t\treturn errors.New(\"node is not in a list\")\n\t}\n\tn.next = m.next\n\tn.data = m.data\n\treturn nil\n}", "func DelSlistNode(i *slist.Item) bool {\n\tif i == nil {\n\t\treturn false\n\t}\n\n\tn := i.Next\n\tif n == nil { // 如果i是单链表最后一个节点就无法处理\n\t\treturn false\n\t}\n\n\ti.Data = n.Data\n\ti.Next = n.Next\n\treturn true\n}", "func moveToHead(head, node *doubleListNode) {\n\tif node != nil {\n\t\tremoveNode(node)\n\t\taddToHead(head, node)\n\t}\n}", "func (l *List) remove(n *Node) {\n\tn.prev.next = n.next\n\tn.next.prev = n.prev\n\tn.next = nil\n\tn.prev = nil\n\tn.list = nil\n\tl.Size--\n}", "func TestRemoveNthFromEnd(t *testing.T) {\n\thead := GenListNode([]int{1, 2, 3, 4, 5})\n\tresult := removeNthFromEnd(head, 2)\n\tLogListNode(result)\n}", "func findMidInList(start, last *listNode) *listNode {\n\tif start == nil || last == nil {\n\t\treturn nil\n\t}\n\tslow := start\n\tfast := start.next\n\n\tfor fast != last {\n\t\tfast = fast.next\n\t\tif fast != last {\n\t\t\tslow = slow.next\n\t\t\tfast = fast.next\n\t\t}\n\t}\n\treturn slow\n}", "func (l SList) Remove(prev int64, free func(off int64) error) error {\n\tif free != nil {\n\t\tif err := free(l.DataOff()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif prev != 0 {\n\t\tnext, err := l.Next()\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tp, err := l.OpenSList(prev)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := p.setNext(next); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn l.Free(l.Off)\n}", "func RemoveNthFromEnd(head *ListNode, n int) *ListNode {\n\thead = &ListNode{ // append a starter to the head\n\t\tVal: -1,\n\t\tNext: head,\n\t}\n\t// find the first n+1 th node\n\tstartNode, node := head, head\n\tfor i := 0; i < n; i++ { // we want find the node before n, the first node is the current node\n\t\tif node == nil {\n\t\t\tbreak\n\t\t}\n\t\t// move to the next head\n\t\tnode = node.Next\n\t}\n\tif node == nil {\n\t\t// not exists\n\t\treturn nil\n\t}\n\t// exists\n\tendNode := node\n\n\t// moving both start and end node, until the end node reach the end of linked list\n\tfor endNode.Next != nil {\n\t\tendNode = endNode.Next\n\t\tstartNode = startNode.Next\n\t}\n\n\t// now, the start node is the node a node before n-th node in the linked list\n\tstartNode.Next = startNode.Next.Next\n\n\treturn head.Next\n}", "func (l *LinkedList) DeleteWithValue(v interface{}) {\n\t// Abort if list is empty\n\tif l.length == 0 {\n\t\treturn\n\t}\n\n\t// Head is not value to delete\n\tfor l.head.value == v {\n\t\tl.head = l.head.next\n\t\tl.length--\n\n\t\t// If there are no more items exit function\n\t\tif l.length == 0 {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Two pointers to jump point previous node.next to curent node.next\n\tp := l.head\n\tp2 := l.head.next\n\n\t// Loop until end of the list\n\tfor p2 != nil {\n\t\t// If current value is v jump over unwanted node & decrease list length by 1\n\t\tif p2.value == v {\n\t\t\tp.next = p2.next\n\t\t\tp2 = p2.next\n\t\t\tl.length--\n\t\t}\n\n\t\t// Move both pointer a step forward\n\t\tp = p2\n\t\tif p2 != nil {\n\t\t\tp2 = p2.next\n\t\t}\n\t}\n}", "func (ll *DoubleLinkedList) removeNode(node *node) {\n\tif node == ll.head {\n\t\tll.head = node.next\n\t\tll.head.prev = nil\n\t} else if node.next == nil {\n\t\tll.tail = 
node.prev\n\t\tll.tail.next = nil\n\t} else {\n\t\ttemp := ll.head\n\t\tfor temp.next != node {\n\t\t\ttemp = temp.next\n\t\t}\n\n\t\ttemp.next = node.next\n\t\tnode.next.prev = temp\n\t}\n\n\tnode = nil\n\tll.count--\n}", "func (l *List) Delete(i interface{}) {\n\tl = GetHead(l)\n\tfor l != nil {\n\t\tif l.value == i {\n\t\t\tif l.prev != nil {\n\t\t\t\tl.prev.next = l.next\n\t\t\t}\n\t\t\tif l.next != nil {\n\t\t\t\tl.next.prev = l.prev\n\t\t\t}\n\t\t\tl = nil\n\t\t\tbreak\n\t\t}\n\t\tl = l.next\n\t}\n}", "func (t *RedBlackTree) DeleteNode(node *Node) {\n\tif node.predecessor != nil {\n\t\tnode.predecessor.successor = node.successor\n\t} else {\n\t\tt.min = node.successor\n\t}\n\tif node.successor != nil {\n\t\tnode.successor.predecessor = node.predecessor\n\t} else {\n\t\tt.max = node.predecessor\n\t}\n\n\tif node.left != nil && node.right != nil {\n\t\tpred := node.left.Max()\n\t\tnode.key = pred.key\n\t\tnode.value = pred.value\n\t\tnode = pred\n\t}\n\n\tvar child *Node\n\tif node.left == nil || node.right == nil {\n\t\tif node.right == nil {\n\t\t\tchild = node.left\n\t\t} else {\n\t\t\tchild = node.right\n\t\t}\n\t\tif node.isBlack {\n\t\t\tnode.isBlack = child.IsBlack()\n\t\t\tt.deleteCase1(node)\n\t\t}\n\t\tt.swapNodes(node, child)\n\t\tif node.parent == nil && child != nil {\n\t\t\tchild.isBlack = true\n\t\t}\n\t}\n\tt.size--\n}", "func (list *List) DeleteElement(data int) {\n // 1. Provide message to user if the list is empty and return\n if list.Size() == 0 {\n fmt.Println(\"Nothing to delete, the list is empty\")\n return\n }\n\n // 2. Get the current head of the list\n current := list.Head()\n\n // 3. Update the head if current head is the requested element and return\n if current.data == data {\n list.head = current.next\n current.next = nil\n list.size--\n return\n }\n\n // 4. 
Traverse the list, remove the requested element and return\n for current.next != nil {\n if current.next.data == data {\n tmp := current.next.next\n current.next.next = nil\n current.next = tmp\n list.size--\n return\n }\n current = current.next\n }\n\n // 5. Provide a message to user if the requested element is not found in list\n fmt.Println(\"Could not delete since the element requested does not exist in list\")\n}", "func (list *Turn) RemoveHead() ( /*head*/ *Turn /*newList*/, *Turn) {\n\tassert.True(!list.IsEmpty(), \"Cannot remove from empty list.\")\n\n\thead := list.next\n\n\tif list.next == list {\n\t\tlist = Empty\n\t\thead.next = nil\n\t} else {\n\t\tlist.next = head.next\n\t\thead.next = nil\n\t}\n\n\tlog.V(3).Infof(\"%v: RemoveHead %v\", list, head)\n\treturn head, list\n}", "func (l *List) InsertInMiddle(value interface{}) {\n\tl = GetHead(l)\n\tlistSize := l.ListSize()\n\tmiddleIndex := listSize / 2\n\tfor i := 0; i <= middleIndex; i++ {\n\t\tl = l.next\n\t}\n\tnewElement := List{value: value, next: l, prev: l.prev}\n\tl.prev.next = &newElement\n\tl.prev = &newElement\n}", "func (f * LinkedList) delFirst() (*Element) {\n\t// will return deleted element\n\tif (f.length == 0) {\n\t\treturn nil\n\t} else {\n\t\tcurrentElmt := f.start\n\t\tf.start = f.start.next\n\t\tf.start.prev = nil //Remove previous pointer\n\t\tf.length--\n\t\treturn currentElmt\n\t}\n}", "func (l *List) Remove(n uint16) error {\n\n\tif l.Root == nil {\n\t\treturn fmt.Errorf(\"value does not exist in List or the list is empty\")\n\t}\n\n\tprev, curr := l.Root, l.Root\n\n\tif curr.Value == n {\n\t\tl.Root = curr.Next\n\t\tcurr = nil\n\t\treturn nil\n\t}\n\n\tfor {\n\t\t// 1. Move cur to Next\n\t\tcurr = curr.Next\n\n\t\t// 2. if cur is nil we have reached the end of the list and the value has\n\t\t// not been found; thus we error. If cur is not nil continue to step 3.\n\t\tif curr == nil {\n\t\t\treturn fmt.Errorf(\"value did not exist in List\")\n\t\t}\n\t\t// 3. 
if cur equals our n value, we found the value we wanted to remove. We\n\t\t// need to set prev.Next to cur.Next and cur.Next to nil; we have now\n\t\t// removed the value and can return. If cur did not equal our n value we\n\t\t// have not foud the value we wish to remove; proceed to step 4.\n\t\tif curr.Value == n {\n\t\t\tprev.Next = curr.Next\n\t\t\tcurr.Next = nil\n\t\t\treturn nil\n\t\t}\n\t\t// 4. We need to get prev and current to the same node (this was our initial\n\t\t// conditions for looping) - thus we make prev prev.Next. Prev and Cur\n\t\t// are now pointing at the same thing and we can loop.\n\t\tprev = prev.Next\n\n\t}\n}", "func Delete(head *Node, data int) (bool, int) {\n\tif head != nil && head.Next != nil {\n\t\t// get the first node\n\t\tcur := head.Next\n\t\tprev := cur\n\t\tfound := false\n\t\tdeleted := -1\n\t\t// iternate the list\n\t\tfor {\n\t\t\tif data == cur.Data {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t} else if cur.Next != nil {\n\t\t\t\tprev = cur\n\t\t\t\tcur = cur.Next\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif found {\n\t\t\tdeleted = cur.Data\n\t\t\t// remove the reference of cur node\n\t\t\tprev.Next = cur.Next\n\t\t\tcur.Next = nil\n\t\t\treturn true, deleted\n\t\t}\n\t}\n\treturn false, -1\n}", "func TestDoubleLinkedNode(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t}\n\n\tl := linkedlist.NewDoubleLinkedNode()\n\tassert.Equal(t, true, l.IsEmpty(), \"they should be equal\")\n\tassert.Equal(t, 0, l.Len(), \"they should be equal\")\n\n\tl.Append(1)\n\tl.Append(2)\n\tl.Append(3)\n\tl.Insert(2, 4)\n\tl.Insert(3, 5)\n\tassert.Equal(t, 5, l.Len(), \"they should be equal\")\n\n\te, err := l.Get(2)\n\tassert.Equal(t, nil, err, \"they should be equal\")\n\tassert.Equal(t, 4, e.(int), \"they should be equal\")\n\n\tassert.Equal(t, []interface{}{1, 4, 5, 2, 3}, l.Data(), \"they should be equal\")\n\n\tl.Del(2)\n\tassert.Equal(t, 4, l.Len(), \"they should be equal\")\n\n\te, err 
= l.Get(2)\n\tassert.Equal(t, nil, err, \"they should be equal\")\n\tassert.Equal(t, 5, e.(int), \"they should be equal\")\n\n\tl.Clear()\n\tassert.Equal(t, true, l.IsEmpty(), \"they should be equal\")\n\tassert.Equal(t, 0, l.Len(), \"they should be equal\")\n\n\terr = l.Insert(1, 1)\n\tassert.Equal(t, nil, err, \"they should be equal\")\n\tassert.Equal(t, 1, l.Len(), \"they should be equal\")\n\te, err = l.Get(1)\n\tassert.Equal(t, nil, err, \"they should be equal\")\n\tassert.Equal(t, 1, e.(int), \"they should be equal\")\n\tl.Del(1)\n\tassert.Equal(t, true, l.IsEmpty(), \"they should be equal\")\n}", "func (nl *NodeList) Remove(n *Node) *Node {\n\tif n == nil || nl.length == 0 {\n\t\treturn nil\n\t}\n\n\tif n.prev == nil {\n\t\tif nl.front == n {\n\t\t\tnl.front = n.Next\n\t\t}\n\t} else {\n\t\tn.prev.Next = n.Next\n\t}\n\n\tif n.Next == nil {\n\t\tif nl.back == n {\n\t\t\tnl.back = n.prev\n\t\t}\n\t} else {\n\t\tn.Next.prev = n.prev\n\t}\n\n\tn.Next = nil\n\tn.prev = nil\n\n\tnl.length--\n\treturn n\n}", "func deleteNode_237(node *ListNode) {\n\tnode.Val = node.Next.Val\n\tnode.Next = node.Next.Next\n}", "func (l *LinkedList) Delete(val int) {\n\t// check if head is exist\n\tif l.Head == nil {\n\t\treturn\n\t}\n\n\t// if element that will be deleted is head, delegate head to next element\n\tif l.Head.Data == val {\n\t\tl.Head = l.Head.Next\n\t\treturn\n\t}\n\n\t// loop until element is found, then removes previos and next connection of deleted element\n\tcurrent := l.Head\n\tfor current.Next != nil {\n\t\tif current.Next.Data == val {\n\t\t\tcurrent.Next = current.Next.Next\n\t\t\treturn\n\t\t}\n\t\tcurrent = current.Next\n\t}\n\n}", "func (s *SkipList) Delete(key interface{}) (value interface{}, ok bool) {\n\tif key == nil {\n\t\tpanic(\"goskiplist: nil keys are not supported\")\n\t}\n\n\tupdate := make([]*snode, s.level()+1, s.effectiveMaxLevel())\n\tcandidate := s.getPath(s.header, update, key)\n\tif candidate == nil || candidate.key != key {\n\t\treturn 
nil, false\n\t}\n\n\tprevious := candidate.backward\n\tif s.footer == candidate {\n\t\ts.footer = previous\n\t}\n\n\t// 设置节点的前指针\n\tnext := candidate.next()\n\tif next != nil {\n\t\tnext.backward = previous\n\t}\n\n\t// 设置节点levelN的后指针\n\tfor i := 0; i <= s.level() && update[i].forward[i] == candidate; i++ {\n\t\tupdate[i].forward[i] = candidate.forward[i]\n\t}\n\n\t// 删除节点后,levelN链表为空的情况\n\tfor s.level() > 0 && s.header.forward[s.level()] == nil {\n\t\ts.header.forward = s.header.forward[:s.level()]\n\t}\n\n\ts.length--\n\n\treturn candidate.value, true\n}", "func del(node *Node, val int) *Node {\n\tif node == nil {\n\t\treturn nil\n\t}\n\tif node.val == val {\n\t\tif node.left == nil && node.right == nil {\n\t\t\treturn nil\n\t\t} else if node.left == nil {\n\t\t\treturn node.right\n\t\t} else if node.right == nil {\n\t\t\treturn node.left\n\t\t}\n\t\tvar leftMost *Node\n\t\tfor leftMost = node.left; leftMost.left != nil; leftMost = leftMost.left {\n\t\t}\n\t\tleftMost.left = node.right\n\t\treturn node.left\n\t}\n\tnode.left, node.right = del(node.left, val), del(node.right, val)\n\treturn node\n}", "func (s *SinglyLinkedList) RemoveAfter(after *Node) {\n if s.front != nil && s.front.next != nil {\n currentNode := s.front\n for currentNode != after && currentNode.next != nil {\n currentNode = currentNode.next\n }\n\n if currentNode == after {\n currentNode.next = currentNode.next.next\n }\n }\n}", "func (node *circularLinkedNode) delete() {\n\tnode.prev.next, node.next.prev = node.next, node.prev\n}", "func (l *LinkedList) RemoveNth(n int) {\n\tif l.head == nil {\n\t\treturn\n\t}\n\n\tvar prev, cur *node = nil, l.head\n\ti := 0\n\tfor {\n\t\tif i == n {\n\t\t\tprev.next = cur.next\n\t\t\tbreak\n\t\t} else {\n\t\t\tif cur.next == nil {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tprev = cur\n\t\t\t\tcur = cur.next\n\t\t\t\ti++\n\t\t\t}\n\t\t}\n\t}\n}", "func (list *LinkedList) remove(index int) interface{}{\n\tfor i:=0; i<index-1;i++{\n\t\tif(list==nil){\n\t\t\treturn 
errors.New(\"Out of Bounds\")\n\t\t}\n\t\tlist=list.next\n\t}\n\tpivot:=list.next\n\tif pivot==nil{\n\t\treturn nil\n\t}\n\tlist.next = pivot.next\n\tpivot.next=nil\n\treturn pivot.data\n\n\n}", "func (list *LinkedList) Remove() {\n\tlink := list.firstLink\n\tif list.firstLink != nil {\n\t\tlist.firstLink = link.next\n\t}\n}", "func DeleteFront(head *Node) (bool, int) {\n\tif head != nil && head.Next != nil {\n\t\t// get the 2nd element from head\n\t\tnext := head.Next.Next\n\t\tdeleted := head.Next.Data\n\t\t// remove reference from head\n\t\thead.Next = nil\n\t\t// add new reference to head's next,\n\t\t// so we are letting GC to know that element should be GC'd\n\t\thead.Next = next\n\t\t// deleted the first element\n\t\treturn true, deleted\n\t}\n\treturn false, -1\n}", "func (t *Tree) Del(s string) {\n\tif t.Safe {\n\t\tdefer t.mtx.Unlock()\n\t\tt.mtx.Lock()\n\t}\n\n\ttnode := t.root\n\n\tif tnode.IsLeaf() {\n\t\treturn\n\t}\n\n\tleaf := tnode\n\tcount := uint(0)\n\n\tfor i := 0; i < len(s); i++ {\n\t\tfor j := uint8(8); j > 0; j-- {\n\t\t\texp := byte(1 << (j - 1))\n\t\t\tmask := s[i] & exp\n\t\t\tbit := uint8(0)\n\n\t\t\tif mask > 0 {\n\t\t\t\tbit = 1\n\t\t\t}\n\n\t\t\tif tnode.edges[bit] == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttnode = tnode.edges[bit].node\n\t\t\tcount++\n\n\t\t\tif i == len(s)-1 && j-1 == 0 && tnode.IsLeaf() {\n\t\t\t\tleaf.edges = [uint8(2)]*edge{}\n\t\t\t\tt.size -= count\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tnode != nil && tnode.Value != nil {\n\t\t\t\tleaf = tnode\n\t\t\t\tcount = 0\n\t\t\t}\n\t\t}\n\t}\n}", "func removeTail(head, tail *doubleListNode) *doubleListNode {\n\tif head.right != tail {\n\t\tret := tail.left\n\t\tremoveNode(tail.left)\n\t\treturn ret\n\t}\n\treturn nil\n}", "func removeN(head *node, n int) *node {\n\tret := head.next\n\n\tcur := head\n\tfor i := 0; i < n; i++ {\n\t\tcur = cur.next\n\t}\n\t// cur is now the last node we need to extract\n\thead.next = cur.next\n\tcur.next = ret\n\n\treturn ret\n}", "func 
DeleteKthLast(node *Node, k int) bool {\n\tif node == nil {\n\t\treturn false\n\t}\n\n\tfirst := node\n\tsecnd := node\n\n\tfor first.Next != nil && k > 0 {\n\t\tfirst = first.Next\n\t\tk--\n\t}\n\n\tif k > 0 {\n\t\t// the list is not atleast k nodes long\n\t\treturn false\n\t}\n\n\tfor first.Next != nil {\n\t\tfirst = first.Next\n\t\tsecnd = secnd.Next\n\t}\n\n\tsecnd.Next = secnd.Next.Next\n\n\treturn true\n}", "func (l *list) delete(i int) {\n\n\tif l.begin == nil {\n\t\tpanic(\"list empty\")\n\t}\n\n\t// List over/underflow\n\tif i > l.nodes || i < 0 {\n\t\tpanic(\"not exists\")\n\t}\n\n\t// Removing the last node\n\tif l.nodes == 1 && i == 0 {\n\t\tl.begin = nil\n\t\tl.nodes = 0\n\t\treturn\n\t}\n\n\t// Removing at the end of the list\n\tif i == l.nodes-1 {\n\t\tn := l.begin\n\n\t\tfor j := 0; j < l.nodes-1; j++ {\n\t\t\tn = n.right\n\t\t}\n\n\t\tn.left.right = nil\n\t\tn = nil\n\t\tl.nodes--\n\t\treturn\n\t}\n\n\t// Removing the first node\n\tif i == 0 {\n\t\tn := l.begin.right\n\t\tl.begin = n\n\t\tl.begin.left = nil\n\t\tl.nodes--\n\t\treturn\n\t}\n\n\n\t// Removing in somewhere between\n\tc := l.begin\n\n\tfor j := 0; j < i; j++ {\n\t\tc = c.right\n\t}\n\n\tc.left.right, c.right.left = c.right, c.left\n\tl.nodes--\n}", "func (l *LinkedList) Delete(elem string) {\n\tprevious := l.Head\n\tcurrent := l.Head\n\tfor current != nil {\n\t\tif current.Value == elem {\n\t\t\tprevious.Next = current.Next\n\t\t}\n\t\tprevious = current\n\t\tcurrent = current.Next\n\t}\n}", "func removeNodeFromList(id string) {\n\ti := 0\n\tfor i < len(nodes) {\n\t\tcurrentNode := nodes[i]\n\t\tif currentNode.Id == id {\n\t\t\tnodes = append(nodes[:i], nodes[i+1:]...)\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n}", "func RemoveLast(ll *List) (*Node, error) {\n\t// Lock access\n\tll.Mux.Lock()\n\tdefer ll.Mux.Unlock()\n\n\tif ll.Size == 0 {\n\t\treturn nil, errors.New(\"List is empty\")\n\t}\n\t// Update reference pointers\n\tnodeToRemove := ll.Tail.Prev\n\n\tnodeToRemove.Prev.Next = 
ll.Tail\n\tll.Tail.Prev = nodeToRemove.Prev\n\n\tatomic.AddInt32(&ll.Size, -1)\n\n\treturn nodeToRemove, nil\n}" ]
[ "0.68582207", "0.66443104", "0.64381874", "0.6419752", "0.62867576", "0.6033936", "0.59821177", "0.59769464", "0.59676826", "0.5905013", "0.58688986", "0.5760188", "0.57408834", "0.573256", "0.57003593", "0.56589913", "0.5623653", "0.5570273", "0.5549566", "0.5538682", "0.5527279", "0.5519698", "0.55002475", "0.54991806", "0.5485603", "0.547786", "0.54720867", "0.54611427", "0.54561275", "0.54429656", "0.54355806", "0.5431028", "0.54276735", "0.5408334", "0.53934157", "0.5370094", "0.536897", "0.53642243", "0.5351036", "0.5341678", "0.53372455", "0.53321326", "0.5326562", "0.53005606", "0.5298878", "0.52889353", "0.5283959", "0.5267193", "0.5263029", "0.52600044", "0.52565444", "0.52545106", "0.5250207", "0.5243539", "0.5240921", "0.5227917", "0.52266943", "0.5224261", "0.5211064", "0.51963925", "0.5195828", "0.51801515", "0.51766837", "0.51764417", "0.5156495", "0.51551265", "0.5153772", "0.51340336", "0.51236165", "0.5123294", "0.5116744", "0.5109561", "0.5106672", "0.5100118", "0.50995845", "0.5093142", "0.5092384", "0.5089295", "0.5087462", "0.50781614", "0.5077889", "0.507639", "0.5076272", "0.5075357", "0.506919", "0.5045051", "0.5040629", "0.5039804", "0.50222456", "0.5020316", "0.5013371", "0.5001309", "0.49919918", "0.49865672", "0.4977117", "0.4976673", "0.4974486", "0.49620795", "0.49618825", "0.49605802" ]
0.6530984
2
CustomToken creates a signed custom authentication token with the specified user ID. The resulting JWT can be used in a Firebase client SDK to trigger an authentication flow. See for more details on how to use custom tokens for client authentication.
func (c *Client) CustomToken(ctx context.Context, uid string) (string, error) { client := firebase.FirebaseAuth return client.CustomToken(ctx, uid) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) GenerateCustomToken(uid string) (string, error) {\n\ttoken, err := c.AuthClient.CustomToken(context.Background(), uid)\n\tif err != nil {\n\t\tlog.Fatalf(\"error minting custom token: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tlog.Printf(\"Got custom token: %v\\n\", token)\n\treturn token, err\n}", "func (c *MockClient) CustomToken(uid string) (string, error) {\n\treturn \"abc\", nil\n}", "func GenerateCustomToken(payload map[string]interface{}, secret string, expire time.Duration) (string, error) {\n\tif len(payload) == 0 {\n\t\treturn \"\", ErrPayloadEmpty\n\t}\n\n\tif secret == \"\" {\n\t\tsecret = defaultSecret\n\t}\n\n\tif expire == 0 {\n\t\texpire = defaultExpireTime\n\t}\n\n\t// Set claims\n\tclaims := jwt.MapClaims{}\n\tfor k, v := range payload {\n\t\tclaims[k] = v\n\t}\n\tclaims[\"exp\"] = time.Now().Add(expire).Unix()\n\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response\n\treturn token.SignedString([]byte(secret))\n}", "func CreateCustomToken(ID string) (string, error) {\n\tclient, err := fbApp.Auth(context.Background())\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error getting Auth client\")\n\t}\n\n\ttoken, err := client.CustomToken(ID)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error minting custom token\")\n\t}\n\n\treturn token, nil\n}", "func (engine ssoEngineImpl) generateJWTToken(authenticatedUser *authenticatedUser) (*common.CustomClaims, string, error) {\n\n\t// Build the claims\n\tclaims := &common.CustomClaims{\n\t\tUser: authenticatedUser.UserName,\n\t\tRoles: authenticatedUser.Roles,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + engine.tokenSecondsToLive,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"EasySSO Server\",\n\t\t},\n\t}\n\t// Build the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)\n\n\t// Convert the token to a string\n\ttokenString, err := 
token.SignedString(engine.privateKey)\n\tif err != nil {\n\t\tlog.Error(\"Unable to sign generated token\", err)\n\t\treturn nil, \"\", err\n\t}\n\treturn claims, tokenString, nil\n}", "func (a *Auth) SignInWithCustomToken(token string) (*User, error) {\n\tclient := &http.Client{}\n\n\turl := a.App.Prefix + \"https://identitytoolkit.googleapis.com/v1/accounts:signInWithCustomToken?key=\" + a.App.APIKey\n\n\tdata := map[string]interface{}{\"token\": token, \"returnSecureToken\": true}\n\treqdata, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(string(reqdata)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar respdata map[string]interface{}\n\terr = json.Unmarshal(body, &respdata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, exists := respdata[\"error\"]\n\tif exists {\n\t\treturn nil, errors.New(respdata[\"error\"].(map[string]interface{})[\"message\"].(string))\n\t}\n\n\ttimeLength, err := strconv.Atoi(respdata[\"expiresIn\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &User{\n\t\tIDToken: respdata[\"idToken\"].(string),\n\t\tRefreshToken: respdata[\"refreshToken\"].(string),\n\t\tExpiresIn: time.Duration(timeLength),\n\t\tOtherData: OtherData{},\n\t}, nil\n}", "func generateUserToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tiat := time.Now().Unix()\n\tclaims[\"exp\"] = 0\n\tclaims[\"iat\"] = iat\n\tclaims[\"typ\"] = \"Bearer\"\n\tclaims[\"preferred_username\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"email\"] = identity.Email\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}", "func (middleware *Middleware) 
GenerateToken(field interface{}) (string, error) {\n\treturn middleware.CreateToken(CustomClaims{\n\t\tCustomField: field,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tNotBefore: time.Now().Unix() - 10,\n\t\t\tExpiresAt: time.Now().Unix() + middleware.ExpireSecond,\n\t\t\tIssuer: middleware.SigningKeyString,\n\t\t},\n\t})\n}", "func (a *Service) GenerateJweToken(customClaims map[string]interface{}) (string, *time.Time, *error_utils.ApiError) {\n\n\tenc, err := jose.NewEncrypter(\n\t\tjose.ContentEncryption(a.encryptionAlgorithm),\n\t\tjose.Recipient{Algorithm: jose.DIRECT, Key: a.encryptionKey},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\"),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\texpire := a.timeFunc().UTC().Add(a.timeout)\n\n\tclaims := map[string]interface{} { }\n\tclaims[\"exp\"] = expire.Unix()\n\tclaims[\"orig_iat\"] = a.timeFunc().Unix()\n\tclaims[\"iss\"] = a.issuer\n\n\tif customClaims != nil {\n\t\tfor key, value := range customClaims {\n\t\t\tclaims[key] = value\n\t\t}\n\t}\n\n\ttoken, err := jwt.Encrypted(enc).Claims(claims).CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\treturn token, &expire, nil\n}", "func NewCustomTokenFactory(token string) TokenFactory {\n\treturn &customTokenfactory{tokenString: token}\n}", "func verifyCustomToken(t *testing.T, ct, uid string) *auth.Token {\n\tidt, err := signInWithCustomToken(ct)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer deleteUser(uid)\n\n\tvt, err := client.VerifyIDToken(context.Background(), idt)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif vt.UID != uid {\n\t\tt.Errorf(\"UID = %q; want UID = %q\", vt.UID, uid)\n\t}\n\tif vt.Firebase.Tenant != \"\" {\n\t\tt.Errorf(\"Tenant = %q; want = %q\", vt.Firebase.Tenant, \"\")\n\t}\n\treturn vt\n}", "func CreateToken(userId primitive.ObjectID) (tokenString string, err error) {\n\n\t// Get config file\n\tconfig, err := 
ConfigHelper.GetConfig()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\ttype MyCustomClaims struct {\n\t\tUserId primitive.ObjectID `json:\"userId\"`\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, MyCustomClaims{\n\t\tuserId,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + (config.JwtExpHours * 3600),\n\t\t},\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err = token.SignedString([]byte(config.JwtSecret))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\treturn\n}", "func createJwtToken(u user.User) (string, error) {\n\t// Set custom claims\n\tclaims := &middleware.LoginCustomClaims{\n\t\tu.Username,\n\t\tfalse,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 72).Unix(),\n\t\t},\n\t}\n\n\t// Create token with claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response.\n\tkey := viper.GetString(\"auth.signkey\")\n\tt, err := token.SignedString([]byte(key))\n\treturn t, err\n\n}", "func (t *Jwt) GenerateToken(userID uint, expiredAt time.Duration) (accessToken string, err error) {\n\texp := time.Now().Add(expiredAt)\n\t// jwt token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\"exp\": exp.Unix(), \"userID\": userID})\n\t// sign the jwt token\n\taccessToken, err = token.SignedString(t.PrivateKey)\n\tif err != nil {\n\t\t// todo: log error\n\t}\n\treturn\n}", "func GenerateToken(username string, isAdmin bool, expires int, signingKey []byte) (string, error) {\n\tiat := time.Now()\n\texpirationTime := iat.Add(time.Duration(expires) * time.Second)\n\t// Create the JWT claims, which includes the username and expiry time\n\tclaims := &CustomClaims{\n\t\tUsername: username,\n\t\tIsAdmin: isAdmin,\n\t\tIssuedAt: iat.Unix(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// In JWT, the expiry time is expressed as unix 
milliseconds\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\t// Declare the token with the algorithm used for signing, and the claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t// Create the JWT string.\n\treturn token.SignedString(signingKey)\n}", "func (j *JWT) GenerateToken(user models.User) (string, error) {\n\texpirationTime := time.Now().Add(7 * 24 * time.Hour)\n\tclaims := &requset.CustomClaims{\n\t\tTelephone: user.Telephone,\n\t\tUserName: user.Username,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"y\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(j.JwtSecret)\n}", "func GenerateToken(m *models.User) (*AuthToken, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(24 * time.Hour)\n\n\tclaims := userStdClaims{\n\t\tUser: m,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"gin-server-api\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\tauthToken := &AuthToken{Token: token, ExpiresAt: expireTime.Format(\"2006-01-02 15:04:05\")}\n\treturn authToken, err\n}", "func (m *manager) GenerateToken(userID string, username string, roles []string) (string, error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(m.expireTime * time.Second)\n\n claims := Token{\n UserID: userID,\n Name: m.hashService.Make(username),\n Roles: roles,\n StandardClaims: &jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n Issuer: m.issuer,\n Audience: m.audience,\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err := tokenClaims.SignedString(m.jwtSecret)\n\n return token, err\n}", "func (t *jwtMgr) createJWTToken(user *auth.User, privateClaims map[string]interface{}) (string, 
time.Time, error) {\n\tcurrTime := time.Now()\n\texp := currTime.Add(t.expiration)\n\tif user == nil || user.Name == \"\" {\n\t\tt.logger.Errorf(\"User information is required to create a JWT token\")\n\t\treturn \"\", exp, ErrMissingUserInfo\n\t}\n\t// standard jwt claims like sub, iss, exp\n\tclaims := jwt.Claims{\n\t\tSubject: user.Name,\n\t\tIssuer: issuerClaimValue,\n\t\tExpiry: jwt.NewNumericDate(exp),\n\t\tIssuedAt: jwt.NewNumericDate(currTime),\n\t}\n\t// venice custom claims\n\tif privateClaims == nil {\n\t\tprivateClaims = make(map[string]interface{})\n\t}\n\tprivateClaims[TenantClaim] = user.GetTenant()\n\tprivateClaims[RolesClaim] = user.Status.GetRoles()\n\t// create signed JWT\n\ttoken, err := jwt.Signed(t.signer).Claims(claims).Claims(privateClaims).CompactSerialize()\n\tif err != nil {\n\t\tt.logger.Errorf(\"Unable to create JWT token: Err: %v\", err)\n\t\treturn \"\", exp, err\n\t}\n\treturn token, exp, err\n}", "func Token(user *User, secretSignKey []byte) (string, error) {\n\t// \ttoken := jwt.New(jwt.SigningMethodHS256)\n\t// FooFoo\n\n\t// Create the Claims\n\tclaims := &Claims{\n\t\tuser.ID,\n\t\tuser.Email,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(TokenDuration).Unix(),\n\t\t\tIssuer: \"gorth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(secretSignKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func (c *RESTClient) ExchangeCustomTokenForIDAndRefreshToken(firebaseAPIKey, token string) (*TokenAndRefreshToken, error) {\n\t// build the URL including Query params\n\tv := url.Values{}\n\tv.Set(\"key\", firebaseAPIKey)\n\turi := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"www.googleapis.com\",\n\t\tPath: \"identitytoolkit/v3/relyingparty/verifyCustomToken\",\n\t\tForceQuery: false,\n\t\tRawQuery: v.Encode(),\n\t}\n\n\t// build and execute the request\n\treqBody := verifyCustomTokenRequest{\n\t\tToken: 
token,\n\t\tReturnSecureToken: true,\n\t}\n\tbuf := new(bytes.Buffer)\n\tjson.NewEncoder(buf).Encode(reqBody)\n\treq, err := http.NewRequest(\"POST\", uri.String(), buf)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating new POST request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == 400 {\n\t\tvar badReqRes badRequestResponse\n\t\terr = json.NewDecoder(res.Body).Decode(&badReqRes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"decode failed: %w\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%d %s\", badReqRes.Error.Code, badReqRes.Error.Message)\n\t} else if res.StatusCode > 400 {\n\t\treturn nil, fmt.Errorf(\"%s\", res.Status)\n\t}\n\n\ttokenResponse := verifyCustomTokenResponse{}\n\terr = json.NewDecoder(res.Body).Decode(&tokenResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"json decode failed: %w\", err)\n\t}\n\treturn &TokenAndRefreshToken{\n\t\tIDToken: tokenResponse.IDToken,\n\t\tRefreshToken: tokenResponse.RefreshToken,\n\t}, nil\n}", "func (asap *ASAP) SignCustomClaims(audience string, customClaims jws.Claims, privateKey cr.PrivateKey) (token []byte, err error) {\n\tvar signingMethod crypto.SigningMethod\n\n\tswitch privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tsigningMethod = crypto.SigningMethodRS256\n\tcase *ecdsa.PrivateKey:\n\t\tsigningMethod = crypto.SigningMethodES256\n\tdefault:\n\t\treturn nil, errors.New(\"bad private key\")\n\t}\n\n\tasap.setAsapClaims(customClaims, audience)\n\treturn asap.signClaims(customClaims, privateKey, signingMethod)\n}", "func GenerateToken(c *gin.Context, user *models.UserResource) string {\n\tclaims := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.StandardClaims{\n\t\tIssuer: user.ID,\n\t\tExpiresAt: jwt.NewTime(float64(time.Now().Add(24 * time.Hour).UnixNano())),\n\t})\n\n\ttoken, err 
:= claims.SignedString([]byte(SecretKey))\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Unable to authonticate\"})\n\t\treturn \"\"\n\t}\n\tc.SetCookie(\n\t\t\"jwt\", token, int(time.Now().Add(24*time.Hour).UnixNano()), \"/\", \"localhost\", false, true,\n\t)\n\treturn token\n}", "func (this *Token) CreateJWTToken(typeUser string, user interface{}) string {\n\n\t// Create new JWT token for the newly registered account\n\tvar id uint64\n\tswitch typeUser {\n\tcase \"user_buyers\":\n\t\tid = user.(*UserBuyers).ID\n\t}\n\n\ttk := &Token{UserId: id, UserType: typeUser, UserDetail: user}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"TOKEN_PASSWORD\")))\n\n\treturn tokenString\n}", "func GenToken(id uint) string {\n\tjwt_token := jwt.New(jwt.GetSigningMethod(\"HS256\"))\n\t// Set some claims\n\tjwt_token.Claims = jwt.MapClaims{\n\t\t\"id\": id,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\t// Sign and get the complete encoded token as a string\n\ttoken, _ := jwt_token.SignedString([]byte(NBSecretPassword))\n\treturn token\n}", "func GenerateToken(user string) (string, error) {\n\tvar err error\n\tsecret := \"secret\"\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": user,\n\t\t\"iss\": strconv.FormatInt(GetCurrentTimeMillis(), 10),\n\t})\n\ttokenString, err := token.SignedString([]byte(secret))\n\n\treturn tokenString, err\n}", "func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: 
auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}", "func CreateToken(user model.User, jwtKey string) (string, error) {\n\n\texpireToken := time.Now().Add(time.Hour * 48).Unix()\n\n\t// Set-up claims\n\tclaims := model.TokenClaims{\n\t\tID: user.ID,\n\t\tUsername: user.Username,\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t\tIssuer: \"smartdashboard-backend-auth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(jwtKey))\n\n\treturn tokenString, err\n}", "func GenerateAuthToken(userID *uuid.UUID) (string, error) {\n\t//compute the expiration\n\texpiration := time.Now().Unix() + JWTExpirationSec\n\n\t//create the claims\n\tclaims := &AuthClaims{\n\t\tUserID: userID.String(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expiration,\n\t\t},\n\t}\n\n\t//create the token\n\talgorithm := jwt.GetSigningMethod(JWTSigningAlgorithm)\n\ttoken := jwt.NewWithClaims(algorithm, claims)\n\n\t//create the signed string\n\ttokenStr, err := 
token.SignedString([]byte(GetJWTKey()))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"sign auth token\")\n\t}\n\treturn tokenStr, nil\n}", "func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func GenerateToken(userID uint) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userID\": userID,\n\t})\n\n\ttokenStr, err := token.SignedString([]byte(secret))\n\n\treturn tokenStr, err\n}", "func (user *User) GenerateToken() {\n\n\tvalue, _ := strconv.Atoi(os.Getenv(\"token_exp\"))\n\n\t//Create new JWT token for the newly registered account\n\ttk := &Token{UserID: uint(user.ID), ExpirationTime: time.Now().Add(time.Duration(value) * time.Second).Unix()}\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"token_password\")))\n\tuser.Token = tokenString\n\n}", "func (j *JWTUtil) CreateToken(userID uint) (string, error) {\n\n\tclaims := jwt.MapClaims{}\n\n\tvar duration time.Duration\n\tdurationStr := os.Getenv(\"JWT_LIFESPAN_MINUTES\")\n\tif durationStr == \"\" {\n\t\tduration = 
DefaultTokenLifeSpan\n\t} else {\n\t\td, _ := strconv.ParseInt(durationStr, 10, 64)\n\t\tduration = time.Duration(d) * time.Minute\n\t}\n\n\tclaims[USER_ID] = userID\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(duration).Unix()\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsecret := os.Getenv(\"JWT_SECRET\")\n\tif secret == \"\" {\n\t\treturn \"\", errors.New(\"missing jwt token secret\")\n\t}\n\ttoken, err := jwtToken.SignedString([]byte(secret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func ParseCustomToken(tokenStr, secret string) (interface{}, error) {\n\ttoken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t// Validate the alg expect\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, ErrSignatureInvalid\n\t\t}\n\t\treturn []byte(secret), nil\n\t})\n\tif err != nil {\n\t\tswitch err.(*jwt.ValidationError).Errors {\n\t\tcase jwt.ValidationErrorExpired:\n\t\t\treturn nil, ErrExpiredToken\n\t\tcase jwt.ValidationErrorSignatureInvalid:\n\t\t\treturn nil, ErrSignatureInvalid\n\t\tdefault:\n\t\t\treturn nil, ErrUnknown\n\t\t}\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif ok && token.Valid {\n\t\tresult := make(map[string]interface{}, len(claims))\n\t\tfor k, v := range claims {\n\t\t\tresult[k] = v\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, ErrUnknown\n}", "func NewMytoken(\n\toidcSub, oidcIss, name string, r restrictions.Restrictions, c api.Capabilities, rot *api.Rotation,\n\tauthTime unixtime.UnixTime,\n) (*Mytoken, error) {\n\tnow := unixtime.Now()\n\tid, err := mtid.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmt := &Mytoken{\n\t\tMytoken: api.Mytoken{\n\t\t\tVersion: api.TokenVer,\n\t\t\tTokenType: api.TokenType,\n\t\t\tSeqNo: 1,\n\t\t\tName: name,\n\t\t\tIssuer: config.Get().IssuerURL,\n\t\t\tSubject: utils.CreateMytokenSubject(oidcSub, oidcIss),\n\t\t\tAudience: 
config.Get().IssuerURL,\n\t\t\tOIDCIssuer: oidcIss,\n\t\t\tOIDCSubject: oidcSub,\n\t\t\tCapabilities: c,\n\t\t},\n\t\tID: id,\n\t\tIssuedAt: now,\n\t\tNotBefore: now,\n\t\tAuthTime: authTime,\n\t\tRotation: rot,\n\t}\n\tr.EnforceMaxLifetime(oidcIss)\n\tif len(r) > 0 {\n\t\tmt.Restrictions = r\n\t\texp := r.GetExpires()\n\t\tif exp != 0 {\n\t\t\tmt.ExpiresAt = exp\n\t\t}\n\t\tnbf := r.GetNotBefore()\n\t\tif nbf != 0 && nbf > now {\n\t\t\tmt.NotBefore = nbf\n\t\t}\n\t}\n\treturn mt, nil\n}", "func DecodeWithCustomClaims(tokenString string) (*CustomClaims, error) {\n\n // Parse the token\n token, err := jwt.ParseWithClaims(tokenString, &CustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n return key, nil\n })\n\n // Validate the token and return the custom claims\n if claims, ok := token.Claims.(*CustomClaims); ok && token.Valid {\n return claims, nil\n } else {\n return nil, err\n }\n}", "func prepareToken(user *interfaces.User) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"expiry\": time.Now().Add(time.Minute ^ 60).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\thelpers.HandleErr(err)\n\n\treturn token\n\n}", "func NewChatkitUserToken(appID string, keyID string, keySecret string, userID string, expiryDuration time.Duration) (tokenString string, expiry time.Time, err error) {\n\tjwtClaims, tokenExpiry := getGenericTokenClaims(appID, keyID, expiryDuration)\n\n\tjwtClaims[\"sub\"] = userID\n\n\ttokenString, err = signToken(keySecret, jwtClaims)\n\treturn tokenString, tokenExpiry, err\n}", "func NewToken(claims map[string]interface{}, privatekeyFilename string) (*jwt.Token, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, jwt.MapClaims(claims))\n\t// use the test private key to sign the token\n\tkey, err := PrivateKey(privatekeyFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsigned, err := 
token.SignedString(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttoken.Raw = signed\n\tlog.Debug(nil, map[string]interface{}{\"signed_token\": signed, \"claims\": claims}, \"generated test token with custom sub\")\n\treturn token, nil\n}", "func GenerateToken(username, dept_id string) (string, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(330 * 24 * time.Hour)\n\n\tclaims := CustomClaims{\n\t\tusername,\n\t\tdept_id,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuer: \"dingtalk\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func GenerateJWT(user string) *jwtgo.Token {\n\n\ttoken := jwtgo.New(jwtgo.SigningMethodRS512)\n\tin10m := time.Now().Add(time.Duration(10) * time.Minute).Unix()\n\ttoken.Claims = jwtgo.MapClaims{\n\t\t\"iss\": \"Issuer\", // who creates the token and signs it\n\t\t\"aud\": \"Audience\", // to whom the token is intended to be sent\n\t\t\"exp\": in10m, // time when the token will expire (10 minutes from now)\n\t\t\"jti\": uuid.Must(uuid.NewV4()).String(), // a unique identifier for the token\n\t\t\"iat\": time.Now().Unix(), // when the token was issued/created (now)\n\t\t\"nbf\": 2, // time before which the token is not yet valid (2 minutes ago)\n\t\t\"sub\": \"subject\", // the subject/principal is whom the token is about\n\t\t\"scopes\": \"api:access\", // token scope - not a standard claim\n\t\t\"user\": user, // username\n\t}\n\treturn token\n}", "func CreateUserIDToken(key []byte, userID string) (string, error) {\n\tnow := time.Now()\n\texp := now.Add(90 * 24 * time.Hour)\n\n\tjsonToken := paseto.JSONToken{\n\t\tAudience: \"recruitment.empirica.app\",\n\t\tIssuer: \"recruitment.empirica.app\",\n\t\tJti: xid.New().String(),\n\t\tSubject: userID,\n\t\tIssuedAt: now,\n\t\tExpiration: exp,\n\t\tNotBefore: now,\n\t}\n\n\ttoken, err := paseto.Encrypt(key, jsonToken, \"\")\n\t// 
token = \"v2.local.E42A2iMY9SaZVzt-WkCi45_aebky4vbSUJsfG45OcanamwXwieieMjSjUkgsyZzlbYt82miN1xD-X0zEIhLK_RhWUPLZc9nC0shmkkkHS5Exj2zTpdNWhrC5KJRyUrI0cupc5qrctuREFLAvdCgwZBjh1QSgBX74V631fzl1IErGBgnt2LV1aij5W3hw9cXv4gtm_jSwsfee9HZcCE0sgUgAvklJCDO__8v_fTY7i_Regp5ZPa7h0X0m3yf0n4OXY9PRplunUpD9uEsXJ_MTF5gSFR3qE29eCHbJtRt0FFl81x-GCsQ9H9701TzEjGehCC6Bhw.c29tZSBmb290ZXI\"\n\n\treturn token, err\n}", "func GenerateToken(payload interface{}) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"payload\": payload,\n\t\t\"exp\": time.Now().Add(time.Second * TokenExpiredTime).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\tif err != nil {\n\t\tlogger.Error(\"Failed to generate token: \", err)\n\t\treturn \"\"\n\t}\n\n\treturn token\n}", "func (ts *TokenService) Decode(tokenString string) (*MyCustomClaims, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{},\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\tkey, err := getKeyFromVault()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn key, nil\n\t\t})\n\n\tif token == nil {\n\t\treturn nil, jwt.NewValidationError(glErr.AuthNilToken(), jwt.ValidationErrorUnverifiable)\n\t}\n\n\tif claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid {\n\t\treturn claims, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func NewMytoken(oidcSub, oidcIss string, r restrictions.Restrictions, c, sc api.Capabilities, rot *api.Rotation) *Mytoken {\n\tnow := unixtime.Now()\n\tmt := &Mytoken{\n\t\tVersion: api.TokenVer,\n\t\tTokenType: api.TokenType,\n\t\tID: mtid.New(),\n\t\tSeqNo: 1,\n\t\tIssuedAt: now,\n\t\tNotBefore: now,\n\t\tIssuer: config.Get().IssuerURL,\n\t\tSubject: utils.CreateMytokenSubject(oidcSub, oidcIss),\n\t\tAudience: config.Get().IssuerURL,\n\t\tOIDCIssuer: oidcIss,\n\t\tOIDCSubject: oidcSub,\n\t\tCapabilities: c,\n\t\tSubtokenCapabilities: sc,\n\t\tRotation: 
rot,\n\t}\n\tr.EnforceMaxLifetime(oidcIss)\n\tif len(r) > 0 {\n\t\tmt.Restrictions = r\n\t\texp := r.GetExpires()\n\t\tif exp != 0 {\n\t\t\tmt.ExpiresAt = exp\n\t\t}\n\t\tnbf := r.GetNotBefore()\n\t\tif nbf != 0 && nbf > now {\n\t\t\tmt.NotBefore = nbf\n\t\t}\n\t}\n\treturn mt\n}", "func (v *verifierPrivate) ValidateTokenAndGetClaims(tokenString string, customClaims interface{}) (*Token, error) {\n\n\t// let us check if the verifier is already expired. If it is just return verifier expired error\n\t// The caller has to re-initialize the verifier.\n\ttoken := Token{}\n\ttoken.standardClaims = &jwt.StandardClaims{}\n\tparsedToken, err := jwt.ParseWithClaims(tokenString, token.standardClaims, func(token *jwt.Token) (interface{}, error) {\n\n\t\tif keyIDValue, keyIDExists := token.Header[\"kid\"]; keyIDExists {\n\n\t\t\tkeyIDString, ok := keyIDValue.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kid (key id) in jwt header is not a string : %v\", keyIDValue)\n\t\t\t}\n\n\t\t\tif matchPubKey, found := v.pubKeyMap[keyIDString]; !found {\n\t\t\t\treturn nil, &MatchingCertNotFoundError{keyIDString}\n\t\t\t} else {\n\t\t\t\t// if the certificate just expired.. we need to return appropriate error\n\t\t\t\t// so that the caller can deal with it appropriately\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.After(matchPubKey.expTime) {\n\t\t\t\t\treturn nil, &MatchingCertJustExpired{keyIDString}\n\t\t\t\t}\n\t\t\t\t// if the verifier expired, we need to use a new instance of the verifier\n\t\t\t\tif time.Now().After(v.expiration) {\n\t\t\t\t\treturn nil, &VerifierExpiredError{v.expiration}\n\t\t\t\t}\n\t\t\t\treturn matchPubKey.pubKey, nil\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"kid (key id) field missing in token. 
field is mandatory\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tif jwtErr, ok := err.(*jwt.ValidationError); ok {\n\t\t\tswitch e := jwtErr.Inner.(type) {\n\t\t\tcase *MatchingCertNotFoundError, *VerifierExpiredError, *MatchingCertJustExpired:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t\treturn nil, jwtErr\n\t\t}\n\t\treturn nil, err\n\t}\n\ttoken.jwtToken = parsedToken\n\t// so far we have only got the standardClaims parsed. We need to now fill the customClaims\n\n\tparts := strings.Split(tokenString, \".\")\n\t// no need check for the number of segments since the previous ParseWithClaims has already done this check.\n\t// therefor the following is redundant. If we change the implementation, will need to revisit\n\t//if len(parts) != 3 {\n\t//\treturn nil, \"jwt token to be parsed seems to be in \"\n\t//}\n\n\t// parse Claims\n\tvar claimBytes []byte\n\n\tif claimBytes, err = jwt.DecodeSegment(parts[1]); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode claims part of the jwt token\")\n\t}\n\tdec := json.NewDecoder(bytes.NewBuffer(claimBytes))\n\terr = dec.Decode(customClaims)\n\ttoken.customClaims = customClaims\n\n\treturn &token, nil\n}", "func GenerateToken(payload map[string]interface{}) (string, error) {\n\treturn GenerateCustomToken(payload, defaultSecret, defaultExpireTime)\n}", "func CreateToken(user *models.User, ExpiresAt int64) (string, error) {\n\n\tclaims := &models.Claims{\n\t\tID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: ExpiresAt,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(\"pingouin123\"))\n}", "func (ts *TokenService) Encode(user *pb.User) (string, error) {\n\n\t// Build Claim\n\tcurrentTime := time.Now()\n\texpireTime := currentTime.Add(TokenValidityPeriod)\n\n\tclaims := MyCustomClaims{\n\t\tUser: user,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: currentTime.Unix(),\n\t\t\tIssuer: 
ClaimIssuer,\n\t\t},\n\t}\n\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Build signed string with our key\n\tkey, err := getKeyFromVault()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tss, err := token.SignedString(key)\n\n\treturn ss, nil\n}", "func CreateToken(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {\n\n\t// Get user from context\n\tuser := ctx.GetUser()\n\tif user == nil {\n\t\tctx.Unauthorized(\"missing user, please login first\")\n\t\treturn\n\t}\n\n\t// Read request body\n\tdefer func() { _ = req.Body.Close() }()\n\n\treq.Body = http.MaxBytesReader(resp, req.Body, 1048576)\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tctx.BadRequest(fmt.Sprintf(\"unable to read request body : %s\", err))\n\t\treturn\n\t}\n\n\t// Create token\n\ttoken := common.NewToken()\n\n\t// Deserialize json body\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, token)\n\t\tif err != nil {\n\t\t\tctx.BadRequest(fmt.Sprintf(\"unable to deserialize request body : %s\", err))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Generate token uuid and set creation date\n\ttoken.Initialize()\n\ttoken.UserID = user.ID\n\n\t// Save token\n\terr = ctx.GetMetadataBackend().CreateToken(token)\n\tif err != nil {\n\t\tctx.InternalServerError(\"unable to create token : %s\", err)\n\t\treturn\n\t}\n\n\t// Print token in the json response.\n\tvar bytes []byte\n\tif bytes, err = utils.ToJson(token); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to serialize json response : %s\", err))\n\t}\n\n\t_, _ = resp.Write(bytes)\n}", "func GenerateAuthToken(claims *JWTClaims, expiry time.Duration, jwtKey []byte) (string, time.Time, error) {\n\tissuedTime := time.Now()\n\texpirationTime := issuedTime.Add(expiry)\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\tExpiresAt: expirationTime.Unix(),\n\t\t// Can be used to blacklist in the future. 
Needs to hold state\n\t\t// in that case :/\n\t\tId: uuid.NewV4().String(),\n\t\tIssuedAt: issuedTime.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tres, err := token.SignedString(jwtKey)\n\treturn res, expirationTime, err\n}", "func GenerateJWTToken(userName, jwtAccSecretKey string) (string, error) {\n\tclaims := jwt.MapClaims{\n\t\t\"username\": userName,\n\t\t\"ExpiresAt\": jwt.TimeFunc().Add(1 * time.Minute).Unix(),\n\t\t\"IssuedAt\": jwt.TimeFunc().Unix(),\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(jwtAccSecretKey))\n}", "func GenerateToken(payload PayLoad, expireTime int64) (string, error) {\n\n\tclaims := Claims{\n\t\tpayload.ID,\n\t\tpayload.Account,\n\t\tEncodeMD5(payload.Password),\n\t\tpayload.Scope,\n\t\tpayload.IsSuper,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime,\n\t\t\tIssuer: \"liaoliao\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func AuthCreateToken(c map[string]interface{}) (string, error) {\n\t// Create token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tfor k, v := range c {\n\t\tclaims[k] = v\n\t}\n\tclaims[\"exp\"] = time.Now().Add(viper.GetDuration(\"http.token.lifetime\")).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(viper.GetString(\"http.token.key\")))\n\treturn t, err\n}", "func (u *User) Token(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.Token\")\n\tdefer span.End()\n\n\tv, ok := ctx.Value(web.KeyValues).(*web.Values)\n\tif !ok {\n\t\treturn web.NewShutdownError(\"web value missing from context\")\n\t}\n\n\topt := option.WithCredentialsFile(u.authenticator.GoogleKeyFile)\n\t// Initialize default 
app\n\tapp, err := firebase.NewApp(context.Background(), nil, opt)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\t// Access auth service from the default app\n\tclient, err := app.Auth(context.Background())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\ttoken, err := client.VerifyIDToken(ctx, params[\"id\"])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"verifying token with firebase\")\n\t}\n\n\tuserRecord, err := client.GetUser(ctx, token.UID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fetching user from token UID\")\n\t}\n\n\tclaims, err := user.Authenticate(ctx, u.db, v.Now, userRecord.Email, token.UID)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase user.ErrAuthenticationFailure:\n\t\t\treturn web.NewRequestError(err, http.StatusUnauthorized)\n\t\tdefault:\n\t\t\treturn errors.Wrap(err, \"authenticating\")\n\t\t}\n\t}\n\n\tvar tkn struct {\n\t\tToken string `json:\"token\"`\n\t}\n\ttkn.Token, err = u.authenticator.GenerateToken(claims)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"generating token\")\n\t}\n\n\t//dbuser, err := model.CreateNewUserIfNotExists(name, email, phone, avatar, provider, uid, token.Expires, token.IssuedAt, emailVerified)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\treturn web.Respond(ctx, w, tkn, http.StatusOK)\n}", "func GenerateToken(key []byte, userID int64, credential string) (string, error) {\n\n\t//new token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"user_id\"] = userID\n\tclaims[\"credential\"] = credential\n\tclaims[\"exp\"] = time.Now().Add(time.Hour*720).UnixNano() / int64(time.Millisecond)\n\ttoken.Claims = claims\n\n\t// Sign and get as a string\n\ttokenString, err := token.SignedString(key)\n\treturn tokenString, err\n}", "func (m *JWTManager) CreateToken(u model.User, permissions *[]string) (string, error) {\n\n\tb, _ := json.Marshal(tokenFormat{\n\t\tUserID: u.ID,\n\t\tPermissions: 
permissions,\n\t})\n\n\tnow := time.Now()\n\t// set claims\n\tclaims := jwt.StandardClaims{\n\t\tIssuedAt: now.Unix(),\n\t\tExpiresAt: now.Add(m.OP.Expiration).Unix(),\n\t\tSubject: string(b),\n\t\tId: string(generateRandomKey(32)),\n\t}\n\tt := jwt.NewWithClaims(jwt.GetSigningMethod(m.OP.SigningMethod), claims)\n\n\treturn t.SignedString(m.OP.PrivateKey)\n}", "func (c *EcomClient) ExchangeCustomTokenForIDAndRefreshToken(firebaseAPIKey, token string) (*configmgr.TokenAndRefreshToken, error) {\n\t// build the URL including Query params\n\tv := url.Values{}\n\tv.Set(\"key\", firebaseAPIKey)\n\turi := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"www.googleapis.com\",\n\t\tPath: \"identitytoolkit/v3/relyingparty/verifyCustomToken\",\n\t\tForceQuery: false,\n\t\tRawQuery: v.Encode(),\n\t}\n\n\t// build and execute the request\n\treqBody := verifyCustomTokenRequest{\n\t\tToken: token,\n\t\tReturnSecureToken: true,\n\t}\n\tbuf := new(bytes.Buffer)\n\tjson.NewEncoder(buf).Encode(reqBody)\n\treq, err := http.NewRequest(\"POST\", uri.String(), buf)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := &http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating new POST request failed: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == 400 {\n\t\tvar badReqRes struct {\n\t\t\tError struct {\n\t\t\t\tCode int64 `json:\"code\"`\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t\tErrors []struct {\n\t\t\t\t\tMessage string `json:\"message\"`\n\t\t\t\t\tDomain string `json:\"domain\"`\n\t\t\t\t\tReason string `json:\"reason\"`\n\t\t\t\t} `json:\"errors\"`\n\t\t\t\tStatus string `json:\"status\"`\n\t\t\t} `json:\"error\"`\n\t\t}\n\t\terr = json.NewDecoder(res.Body).Decode(&badReqRes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"decode failed: %w\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%d %s\", 
badReqRes.Error.Code, badReqRes.Error.Message)\n\t} else if res.StatusCode > 400 {\n\t\treturn nil, fmt.Errorf(\"%s\", res.Status)\n\t}\n\n\ttokenResponse := verifyCustomTokenResponse{}\n\terr = json.NewDecoder(res.Body).Decode(&tokenResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"json decode failed: %w\", err)\n\t}\n\treturn &configmgr.TokenAndRefreshToken{\n\t\tIDToken: tokenResponse.IDToken,\n\t\tRefreshToken: tokenResponse.RefreshToken,\n\t}, nil\n}", "func GenerateToken(jwtSecret string, claims InvoicesClaims) string {\n\thmacSampleSecret := []byte(jwtSecret)\n\n\ttype Claims struct {\n\t\tInvoicesClaims\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{\n\t\tInvoicesClaims{\n\t\t\tGetInvoices: true,\n\t\t\tGetInvoice: true,\n\t\t\tCreateInvoice: true,\n\t\t},\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: getExpiry(),\n\t\t},\n\t})\n\n\ttokenString, err := token.SignedString(hmacSampleSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn tokenString\n}", "func generateServiceAccountToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"service_accountname\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tclaims[\"iat\"] = time.Now().Unix()\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}", "func (a *Auth) GenerateToken(userID string) (TokenInfo, error) {\n\taccessToken, err := a.generateAccess(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trefreshToken, err := a.generateRefresh(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenInfo := &tokenInfo{\n\t\tTokenType: a.opts.tokenType,\n\t\tAccessToken: accessToken,\n\t\tRefreshToken: refreshToken,\n\t}\n\treturn tokenInfo, nil\n}", "func createToken(user *models.User) string {\n\tvar store models.Store\n\tvar storeID uint\n\n\tif user.HaveStore == true {\n\t\tif 
config.DB.First(&store, \"user_id = ?\", user.ID).RecordNotFound() {\n\t\t\tstoreID = 0\n\t\t}\n\t\tstoreID = store.ID\n\t} else {\n\t\tstoreID = 0\n\t}\n\t// to send time expire, issue at (iat)\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"user_role\": user.Role,\n\t\t\"user_store\": user.HaveStore,\n\t\t\"store_id\": storeID,\n\t\t\"exp\": time.Now().AddDate(0, 0, 7).Unix(),\n\t\t\"iat\": time.Now().Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := jwtToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn tokenString\n}", "func setToken(u *models.User) (string, error) {\n\tt := time.Now()\n\tclaims := customClaims{\n\t\tRole: u.Role,\n\t\tActive: u.Active,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tSubject: strconv.Itoa(u.ID),\n\t\t\tExpiresAt: t.Add(expireDelay).Unix(),\n\t\t\tIssuedAt: t.Unix(),\n\t\t\tIssuer: iss}}\n\treturn getTokenString(&claims)\n}", "func CreateToken(user model.User) (string, error) {\n\t//Creating token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t//Adding claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"name\"] = user.Name\n\tclaims[\"mobile\"] = user.Mobile\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\ttoken.Claims = claims\n\n\t//Signing the token\n\tsignedToken, ok := token.SignedString([]byte(signingkey))\n\n\treturn signedToken, ok\n}", "func New(user *user.Model) *jwt.Token {\n\ttoken := jwt.New(jwt.GetSigningMethod(\"RS256\"))\n\ttoken.Claims[\"uid\"] = user.Id.Int64\n\ttoken.Claims[\"user\"] = user\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Minute * tokenExpTime).Unix()\n\treturn token\n}", "func GenerateSignedUserToken(identity *Identity) (string, *jwt.Token) {\n\ttoken := generateUserToken(identity)\n\ttokenStr := signToken(token)\n\n\treturn tokenStr, token\n}", "func (a *AuthService) CreateToken(userDto 
*dtos.UserDto) (*models.Token, error) {\n\ttoken := &models.Token{}\n\n\ttoken.AtExp = time.Now().Add(time.Minute * 10).Unix()\n\ttoken.AccessUUID = uuid.NewV4().String()\n\n\ttoken.RtExp = time.Now().Add(time.Hour * 24 * 7).Unix()\n\ttoken.RefreshUUID = uuid.NewV4().String()\n\n\t//Generate Access Token\n\tatclaims := jwt.MapClaims{}\n\n\tatclaims[\"authorized\"] = true\n\tatclaims[\"access_uuid\"] = token.AccessUUID\n\tatclaims[\"id\"] = userDto.ID\n\tatclaims[\"name\"] = userDto.Name\n\tatclaims[\"email\"] = userDto.Email\n\tatclaims[\"exp\"] = token.AtExp\n\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atclaims)\n\n\tif accessToken, err := at.SignedString([]byte(config.AccessSecret)); err != nil {\n\t\ttoken.AccessToken = accessToken\n\t} else {\n\t\treturn nil, err\n\t}\n\n\t//Generate Refresh Token\n\n\trtClaims := jwt.MapClaims{}\n\n\trtClaims[\"authorized\"] = true\n\trtClaims[\"refresh_uuid\"] = token.RefreshUUID\n\trtClaims[\"id\"] = userDto.ID\n\trtClaims[\"name\"] = userDto.Name\n\trtClaims[\"email\"] = userDto.Email\n\trtClaims[\"exp\"] = token.RtExp\n\n\trt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)\n\n\tif refreshToken, err := rt.SignedString([]byte(config.RefreshSecret)); err != nil {\n\t\ttoken.RefreshToken = refreshToken\n\t} else {\n\t\treturn nil, err\n\t}\n\n\treturn token, nil\n}", "func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}", "func (a *authSvc) BuildToken(user User) (*string, *int64, error) {\n\ttoken := 
jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t})\n\tsignedToken, err := token.SignedString(a.authSecret) // sign the token\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tnow := time.Now() // get current time\n\tnowPlusExpiry := now.Add(tokenExpiryMin * time.Minute) // add 60 minutes to current time to get token expiry\n\tnowPlusExpiryTimestamp := nowPlusExpiry.UnixNano() // get the expiry timestamp\n\treturn &signedToken, &nowPlusExpiryTimestamp, nil\n}", "func GenAuthTokenHandler(c *gin.Context) {\r\n\t// Create a new token object, specifying signing method and the claims\r\n\t// you would like it to contain.\r\n\r\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\r\n\t\t\"foo\": \"bar\",\r\n\t\t\"expire\": func() int64 {\r\n\t\t\tnow := time.Now()\r\n\t\t\tduration, _ := time.ParseDuration(\"14d\")\r\n\t\t\tm1 := now.Add(duration)\r\n\t\t\treturn m1.Unix()\r\n\t\t}(),\r\n\t})\r\n\r\n\t// Sign and get the complete encoded token as a string using the secret\r\n\ttokenString, err := token.SignedString([]byte(utils.AppConfig.Server.SecretKey))\r\n\r\n\tfmt.Println(tokenString, err)\r\n\tc.String(http.StatusOK, tokenString)\r\n}", "func NewCustomClaim(username string, ownedFarmIDs []int, workingFarmIDs []int) CustomClaim {\n\treturn CustomClaim{\n\t\tUsername: username,\n\t\tOwnedFarmIDs: ownedFarmIDs,\n\t\tWorkingFarmIDs: workingFarmIDs,\n\t}\n}", "func GenerateJWT(user models.User) (resp LoginResponse, err error) {\n\tclaims := jwt.MapClaims{}\n\n\t// set our claims\n\tclaims[\"User\"] = user\n\tclaims[\"Name\"] = user.Name\n\n\t// set the expire time\n\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24 * 30 * 12).Unix() //24 hours inn a day, in 30 days * 12 months = 1 year in milliseconds\n\n\t// create a signer for rsa 256\n\tt := jwt.NewWithClaims(jwt.GetSigningMethod(\"RS256\"), claims)\n\n\tpub, err := jwt.ParseRSAPrivateKeyFromPEM(config.GetConf().Encryption.Private)\n\tif err != nil 
{\n\t\treturn\n\t}\n\ttokenString, err := t.SignedString(pub)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp = LoginResponse{\n\t\tUser: user,\n\t\tMessage: \"Token succesfully generated\",\n\t\tToken: tokenString,\n\t}\n\n\treturn\n\n}", "func (aw *AuthWriter) CreateToken(userID uint64) (*TokenDetails, error) {\n\ttd := &TokenDetails{userID: userID}\n\n\tvar err error\n\n\t// Creating Access Token\n\ttd.AccessExpires = time.Now().Add(aw.ATExpiry).Unix()\n\ttd.AccessUUID = uuid.NewV4().String()\n\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"access_uuid\"] = td.AccessUUID\n\tatClaims[\"user_id\"] = userID\n\tatClaims[\"exp\"] = td.AccessExpires\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttd.AccessToken, err = at.SignedString([]byte(aw.ATSecret))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creating Refresh Token\n\ttd.RefreshExpires = time.Now().Add(aw.RTExpiry).Unix()\n\ttd.RefreshUUID = uuid.NewV4().String()\n\n\trtClaims := jwt.MapClaims{}\n\trtClaims[\"refresh_uuid\"] = td.RefreshUUID\n\trtClaims[\"user_id\"] = userID\n\trtClaims[\"exp\"] = td.RefreshExpires\n\trt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)\n\ttd.RefreshToken, err = rt.SignedString([]byte(aw.RTSecret))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn td, nil\n}", "func NewToken(uid int32) (string, string, error) {\n\ttoken := jwtgo.New(jwtgo.SigningMethodES256)\n\n\tclaims := token.Claims.(jwtgo.MapClaims)\n\tclaims[claimUID] = uid\n\tclaims[claimExpire] = time.Now().Add(time.Hour * tokenExpireInHour).Unix()\n\n\tt, err := token.SignedString([]byte(TokenHMACKey))\n\treturn respTokenKey, t, err\n}", "func GenerateToken(info Jwt) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": info.ID,\n\t\t\"email\": info.Email,\n\t\t\"name\": info.Name,\n\t\t\"nbf\": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\treturn 
token.SignedString(secret)\n}", "func (a *AuthService) SignJWT(customClaims *builder.CustomClaims) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, customClaims)\n\ttokenString, err := token.SignedString([]byte(*a.signedSecret))\n\treturn tokenString, err\n}", "func GenerateToken(secret []byte, aud, sub string) (string, error) {\n\n\ttok := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.RegisteredClaims{\n\t\tIssuer: TokenIssuer,\n\t\tAudience: []string{aud},\n\t\tSubject: sub,\n\t\tIssuedAt: jwt.NewNumericDate(time.Now()),\n\t\tNotBefore: jwt.NewNumericDate(time.Now().Add(-15 * time.Minute)),\n\t})\n\n\treturn tok.SignedString(secret)\n}", "func (c *Client) AuthenticateToken(ctx context.Context, customToken string) (string, error) {\n\tclient := firebase.FirebaseAuth\n\ttoken, err := client.VerifyIDToken(ctx, customToken)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token.UID, nil\n}", "func (c *Client) CreateToken(ctx context.Context, user *models.User) (*auth.APIToken, *models.Token, error) {\n\t// If the user provides no user, we will make a token for the current session user\n\tif user == nil {\n\t\ti, err := c.Me(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tuser = i\n\t}\n\n\tvariables := make(map[string]interface{})\n\tvariables[\"user_id\"] = user.ID\n\n\tresp := &CreateTokenResponse{}\n\terr := c.transport.Raw(ctx, `\n\t\tmutation CreateToken($user_id: String!) 
{\n\t\t\tcreateToken(input: { user_id: $user_id }) {\n\t\t\t\tsecret\n\t\t\t\ttoken {\n\t\t\t\t\tid\n\t\t\t\t}\n\t\t\t}\n }\n `, variables, resp)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsecret, err := auth.FromPassword(resp.Response.Secret)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttoken, err := auth.NewAPIToken(secret, resp.Response.Token.ID)\n\treturn token, resp.Response.Token, err\n}", "func (j *Jwt) GenerateToken() string {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"exp\": json.Number(strconv.FormatInt(time.Now().AddDate(0, 0, 1).Unix(), 10)),\n\t\t\"iat\": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),\n\t\t\"uid\": j.UID,\n\t\t\"name\": j.Name,\n\t\t\"username\": j.Username,\n\t})\n\n\ttokenStr, err := token.SignedString(JWTSecret)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tokenStr\n}", "func (middleware *Middleware) CreateToken(claims CustomClaims) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(middleware.SigningKey)\n}", "func UserToken(w http.ResponseWriter, r *http.Request, c router.Context) error {\n\tdb, ok := c.Meta[\"db\"].(*sqlx.DB)\n\tif !ok {\n\t\treturn errors.New(\"db not set in context\")\n\t}\n\ttokenSecret, ok := c.Meta[\"tokenSecret\"].([]byte)\n\tif !ok {\n\t\treturn errors.New(\"token secret not set in context\")\n\t}\n\n\tif r.FormValue(\"grant_type\") != \"password\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"unsupported_grant_type\", \"supports only password grant type\"})\n\t}\n\n\tlogin := r.FormValue(\"login\")\n\tif login == \"\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_request\", \"login required\"})\n\t}\n\n\tpassword := r.FormValue(\"password\")\n\tif password == \"\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_request\", \"password required\"})\n\t}\n\n\tu := data.User{}\n\tif err := u.GetByLogin(db, login); err != nil {\n\t\tif e, ok := err.(*data.Error); ok 
{\n\t\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_grant\", e.Desc})\n\t\t}\n\t\treturn err\n\t}\n\n\tif !u.VerifyPassword(password) {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_grant\", \"failed to authenticate user\"})\n\t}\n\n\t// Since all is well, generate token and add to database\n\tt := data.Token{\n\t\tUserID: u.ID,\n\t\tExpiresIn: (30 * 24 * time.Hour).Nanoseconds(), // 30 days\n\t}\n\tif err := t.Insert(db); err != nil {\n\t\treturn err\n\t}\n\n\t// get the encoded JSON Web token\n\tjwt, err := t.EncodeJWT(tokenSecret)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// prepare oAuth2 access token payload\n\tpayload := struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tTokenType string `json:\"token_type\"`\n\t\tExpiresIn string `json:\"expires_in\"`\n\t}{\n\t\tjwt,\n\t\t\"bearer\",\n\t\ttime.Duration(t.ExpiresIn).String(),\n\t}\n\n\treturn res.OK(w, payload)\n}", "func (repo *TokenRepository) GenerateAuthToken(claims *BaseClaims) (string, error) {\n\ttoken := jwtGo.NewWithClaims(jwtGo.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(repo.authKey)\n\n\treturn tokenString, err\n}", "func GenerateJWTToken(username string) (string, error) {\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": username,\n\t\t\"exp\": time.Now().Add(time.Minute * 5).Unix(),\n\t})\n\n\tt, err := token.SignedString([]byte(jwtsecret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t, err\n}", "func (client *RedisClient) GenerateToken(userID string) (string, error) {\n\tid := uuid.NewV4()\n\texp := time.Duration(600 * time.Second) // 10 minutes\n\n\terr := client.redisdb.Set(id.String(), userID, exp).Err()\n\treturn id.String(), err\n}", "func (middleware *Middleware) GenerateTokenWithRefreshToken(field interface{}) (string, string, error) {\n\tc := CustomClaims{\n\t\tCustomField: field,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tNotBefore: time.Now().Unix() - 
10,\n\t\t\tExpiresAt: time.Now().Unix() + middleware.ExpireSecond,\n\t\t\tIssuer: middleware.SigningKeyString,\n\t\t},\n\t}\n\tcs, err := middleware.CreateToken(c)\n\tif err != nil {\n\t\treturn \"\", \"\", nil\n\t}\n\trs, err := middleware.CreateToken(CustomClaims{\n\t\tCustomField: field,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tNotBefore: time.Now().Unix() - 10,\n\t\t\tExpiresAt: time.Now().Unix() + middleware.RefreshSecond,\n\t\t\tIssuer: middleware.SigningKeyString,\n\t\t},\n\t\tIsRefreshToken: true,\n\t\tRefreshTarget: &c,\n\t})\n\tif err != nil {\n\t\treturn \"\", \"\", nil\n\t}\n\treturn cs, rs, nil\n}", "func GenToken(\n\tctx context.Context, payload *Payload, group string, expires int64,\n) (tokenStr string, err error) {\n\t// Handling any panic is good trust me!\n\tdefer func() {\n\t\tif err2 := recover(); err2 != nil {\n\t\t\terr = fmt.Errorf(\"%v\", err2)\n\t\t}\n\t}()\n\n\tif payload == nil {\n\t\tpayload = &Payload{\n\t\t\tID: uuid.New().String(),\n\t\t\tFullName: randomdata.SillyName(),\n\t\t\tEmailAddress: randomdata.Email(),\n\t\t\tPhoneNumber: randomdata.PhoneNumber(),\n\t\t\tGroup: group,\n\t\t}\n\t}\n\n\ttoken := jwt.NewWithClaims(signingMethod, Claims{\n\t\tPayload: payload,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expires,\n\t\t\tIssuer: \"mcfp\",\n\t\t},\n\t})\n\n\t// Generate the token\n\treturn token.SignedString(signingKey)\n}", "func (s *Setup) GenerateToken(info *model.Auth) (string, error) {\n\tcfg, err := config.Load()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsecret := []byte(cfg.JWTSecret)\n\n\tvar claims model.AuthClaims\n\n\tclaims.ID = info.ID\n\tclaims.Name = info.Name\n\tclaims.Email = info.Email\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\tExpiresAt: time.Now().Add(time.Hour * 2).Unix(),\n\t\tIssuer: cfg.AppName,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\tsignedString, err := token.SignedString(secret)\n\tif err != nil {\n\t\treturn \"\", 
errors.New(errGeneratingToken)\n\t}\n\n\treturn signedString, nil\n}", "func (f JwtFactory) NewIDToken(username, host, clientID, nonce string) (string, error) {\r\n\tt := jwt.New(jwt.GetSigningMethod(\"RS256\"))\r\n\r\n\tt.Claims = &struct {\r\n\t\tUsername string `json:\"username\"`\r\n\t\tNonce string `json:\"nonce,omitempty\"`\r\n\r\n\t\t// azp is the authorized party - the party to which the ID Token was\r\n\t\t// issued. Same as Audience.\r\n\t\tAzp string `json:\"azp\"`\r\n\r\n\t\t// Purpose defines what this JWT is for, either access_token or\r\n\t\t// id_token.\r\n\t\tPurpose string `json:\"purpose\"`\r\n\r\n\t\tjwt.StandardClaims\r\n\t}{\r\n\t\tusername,\r\n\t\tnonce,\r\n\t\tclientID,\r\n\t\t\"id_token\",\r\n\t\tgetStandardClaims(host, username, clientID),\r\n\t}\r\n\r\n\treturn f.sign(t)\r\n}", "func GenerateJWTToken(info *TokenInfo, expiresAt int64) (string, error) {\n\tinfo.ExpiresAt = expiresAt\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, info)\n\tencryptedToken, err := token.SignedString([]byte(secretKey))\n\tif err != nil {\n\t\treturn \"\", errors.Customize(500, \"failed to sign on token\", err)\n\t}\n\treturn encryptedToken, nil\n}", "func JWTCreate(userID int, expiredAt int64) string {\n\tclaims := UserClaims{\n\t\tuserID,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expiredAt,\n\t\t\tIssuer: \"proton\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, _ := token.SignedString(mySigningKey)\n\treturn signedToken\n}", "func (c *MockClient) AuthenticateToken(ctx context.Context, customToken string) (string, error) {\n\treturn \"ehrid\", nil\n}", "func (s *TokenService) Encode(user *pb.User) (string, error) {\n\n\t// get expire time\n\texpires := time.Now().Add(time.Hour * 72).Unix()\n\n\t// Create claims\n\tclaims := CustomClaims{\n\t\tuser,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expires,\n\t\t\tIssuer: \"go.micro.srv.user\",\n\t\t},\n\t}\n\n\t// create token\n\ttoken := 
jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// sign token and return\n\treturn token.SignedString(key)\n}", "func CreateToken(user models.User) (string, error) {\n\ttk := Token{\n\t\tUserID: user.Login,\n\t\tDatabaseID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuer: \"gopds-api\",\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, err := token.SignedString([]byte(config.AppConfig.GetString(\"sessions.key\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}", "func GenerateJWT(user interface{}) (string, error) {\n\t// 4380 hours = 6 months\n\texpireToken := time.Now().Add(time.Hour * 4380).Unix()\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, &model.User{\n\t\tId: user.(model.User).Id,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t},\n\t})\n\tsignedToken, err := token.SignedString(server.JwtSecret)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn signedToken, nil\n}", "func (j *Service) Token(claims Claims) (string, error) {\n\n\t// make token for allowed aud values only, rejects others\n\n\t// update claims with ClaimsUpdFunc defined by consumer\n\tif j.ClaimsUpd != nil {\n\t\tclaims = j.ClaimsUpd.Update(claims)\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\tif j.SecretReader == nil {\n\t\treturn \"\", fmt.Errorf(\"secret reader not defined\")\n\t}\n\n\tif err := j.checkAuds(&claims, j.AudienceReader); err != nil {\n\t\treturn \"\", fmt.Errorf(\"aud rejected: %w\", err)\n\t}\n\n\tsecret, err := j.SecretReader.Get(claims.Audience) // get secret via consumer defined SecretReader\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"can't get secret: %w\", err)\n\t}\n\n\ttokenString, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"can't sign token: %w\", err)\n\t}\n\treturn tokenString, nil\n}", "func 
CreateToken(userId uint64, secret_name string) (string, error) {\n\n //Retrieve secret value from secrets manager\n\tsecret, err := getSecretValue(secret_name);\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n atClaims := jwt.MapClaims{}\n atClaims[\"authorized\"] = true\n atClaims[\"user_id\"] = userId\n atClaims[\"exp\"] = time.Now().Add(time.Minute * 15).Unix()\n at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(secret))\n if err != nil {\n return \"\", err\n }\n\tlog.Println(\"Token is successfully created\")\n return token, nil\n}", "func CustomAuth(cfg *types.Config) gin.HandlerFunc {\n\tbasicAuthHandler := gin.BasicAuth(gin.Accounts{\n\t\t// Use the config's username and password for basic auth\n\t\tcfg.Username: cfg.Password,\n\t})\n\n\toidcHandler := getOIDCMiddleware(cfg.OIDCIssuer, cfg.OIDCSubject, cfg.OIDCGroups)\n\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.GetHeader(\"Authorization\")\n\t\tif strings.HasPrefix(authHeader, \"Bearer \") {\n\t\t\toidcHandler(c)\n\t\t} else {\n\t\t\tbasicAuthHandler(c)\n\t\t}\n\t}\n}", "func (op *AuthOperations) HandleJWTGenerate(w http.ResponseWriter, r *http.Request) {\n\tvar input jwt.General\n\t//fid := r.Header.Get(\"x-fid\")\n\tiid := r.Header.Get(\"x-iid\")\n\terr := json.NewDecoder(r.Body).Decode(&input)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"Error while validating token body : %v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"%s, %s\", iid, input.JTI)\n\n\tvar token jwt.Info\n\tinfoCollection, ctx := op.session.GetSpecificCollection(AuthDBName, JWTInfoCollection)\n\terr = infoCollection.FindOne(ctx,\n\t\tbson.M{\n\t\t\t\"institution\": iid,\n\t\t\t\"jti\": input.JTI,\n\t\t}).Decode(&token)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Error getting JWT info from query: %s\", err.Error())\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tLOGGER.Debugf(\"%+v\", 
token)\n\n\t// if token exists\n\tif &token == nil {\n\t\tLOGGER.Errorf(\"Token info not found\")\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, errors.New(\"token info not found\"))\n\t\treturn\n\t}\n\n\t// only generate if stage is currently approved\n\tif token.Stage != jwt.Approved {\n\t\tLOGGER.Errorf(\"Token is not currently approved\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"token is not currently approved\"))\n\t\treturn\n\t}\n\n\temail := r.Header.Get(\"email\")\n\t// check to make sure the authenticated user is the same user who requested the token\n\tif email == \"\" || email != token.CreatedBy {\n\t\tLOGGER.Errorf(\"User who requested the token must be the same user to generate the token\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"user who requested the token must be the same user to generate the token\"))\n\t\treturn\n\t}\n\n\t// ensure that the approved request includes a jti\n\tif token.JTI != input.JTI {\n\t\tLOGGER.Errorf(\"Unknown token id\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"unknown token id\"))\n\t\treturn\n\t}\n\n\t// update token info\n\ttoken.Stage = jwt.Ready\n\n\t// set default expiration time\n\t//initExp := \"15m\" //os.Getenv(\"initial_mins\") + \"m\"\n\t//if initExp == \"\" {\n\t//\tinitExp = \"1h\"\n\t//}\n\n\t// generate the token with payload and claims\n\t// initialize to expire in n1 hrs and not before n2 seconds from now\n\t//encodedToken := jwt.GenerateToken(payload, initExp, \"0s\")\n\ttokenSecret := stringutil.RandStringRunes(64, false)\n\n\tkeyID := primitive.NewObjectIDFromTimestamp(time.Now())\n\tjwtSecure := jwt.IJWTSecure{\n\t\tID: keyID,\n\t\tSecret: tokenSecret,\n\t\tJTI: input.JTI,\n\t\tNumber: 0,\n\t}\n\n\tsecureCollection, secureCtx := op.session.GetSpecificCollection(AuthDBName, JWTSecureCollection)\n\t_, err = secureCollection.InsertOne(secureCtx, jwtSecure)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Insert JWT secure failed: %+v\", 
err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// convert the interface type ID to string\n\tLOGGER.Debugf(\"New generate ID: %s\" , keyID.Hex())\n\n\tcount := 0\n\t// define payload\n\tpayload := jwt.CreateClaims(token, count, iid, keyID.Hex())\n\tpayload.ExpiresAt = time.Now().Add(time.Minute * 60).Unix()\n\tpayload.NotBefore = time.Now().Unix()\n\n\tencodedToken, _ := jwt.CreateAndSign(payload, tokenSecret, keyID.Hex())\n\n\t// save updated token info\n\tupdateResult, updateInfoErr := infoCollection.UpdateOne(ctx, bson.M{\"institution\": iid, \"jti\": input.JTI}, bson.M{\"$set\": &token})\n\tif updateInfoErr != nil || updateResult.MatchedCount < 1{\n\t\tLOGGER.Errorf(\"Error update token info: %+v\", updateInfoErr)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"Successfully generate JWT token\")\n\tjwt.ResponseSuccess(w, encodedToken)\n\treturn\n}", "func (c *UsersController) GenerateToken(r *http.Request, args map[string]string, body interface{}) *ApiResponse {\n\tctx := r.Context()\n\tr.ParseForm()\n\n\t//TODO: fix validation on oauthStateString\n\t// - using the current validation, two user can authorize at the same time and failed on generating tokens\n\t//state := r.Form.Get(\"state\")\n\t//if state != oauthStateString {\n\t//\treturn Error(http.StatusInternalServerError, \"Invalid Oauth State\" + state + oauthStateString)\n\t//}\n\n\tcode := r.Form.Get(\"code\")\n\tif code == \"\" {\n\t\treturn Error(http.StatusBadRequest, \"Code not found\")\n\t}\n\n\ttoken, err := c.GitlabService.GenerateToken(ctx, code)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Error(http.StatusInternalServerError, \"Code exchange failed\")\n\t}\n\n\t//Store generated token here\n\tuser, err := c.GitlabService.GetUserInfo(token.AccessToken)\n\tsavedUser, err := c.UsersService.Save(user)\n\tif savedUser == nil {\n\t\treturn Error(http.StatusInternalServerError, \"User is already 
present in the database\")\n\t}\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\t//Build the user account\n\tuserAccount := &models.Account{\n\t\tUserId: savedUser.Id,\n\t\tAccessToken: token.AccessToken,\n\t\tAccountType: models.AccountTypes.Gitlab,\n\t\tTokenType: token.TokenType,\n\t\tRefreshToken: token.RefreshToken,\n\t}\n\n\t_, err = c.AccountService.Save(userAccount)\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn Ok(\"Authorized\")\n}" ]
[ "0.7570455", "0.73774123", "0.71082765", "0.7080766", "0.66277677", "0.6565818", "0.64657426", "0.6246285", "0.62455434", "0.62095946", "0.62089545", "0.6207226", "0.61426294", "0.6141862", "0.60827297", "0.60660726", "0.6048415", "0.60071194", "0.59799546", "0.5954599", "0.59450513", "0.593462", "0.5924595", "0.5921418", "0.59066963", "0.5901533", "0.5887963", "0.5882675", "0.58331305", "0.5811655", "0.5794316", "0.57892984", "0.5785765", "0.57750356", "0.5755966", "0.5738277", "0.57366234", "0.57150555", "0.5702876", "0.5692176", "0.5691804", "0.5684191", "0.56762177", "0.5676176", "0.56683975", "0.56531274", "0.56505716", "0.564778", "0.56182545", "0.5614645", "0.5596384", "0.55898017", "0.55808777", "0.55658174", "0.5562192", "0.5560738", "0.5557907", "0.5555846", "0.5545729", "0.55208355", "0.55150104", "0.55114067", "0.5507236", "0.54865116", "0.5484559", "0.5483543", "0.54828805", "0.5459596", "0.54413956", "0.5422486", "0.5421781", "0.5391782", "0.53911126", "0.538733", "0.53838104", "0.53671116", "0.5337839", "0.5335665", "0.5332049", "0.53192735", "0.53191614", "0.5318119", "0.5316455", "0.5314219", "0.53106683", "0.5305851", "0.52990454", "0.5295922", "0.5286132", "0.5262039", "0.52489567", "0.5246268", "0.5239746", "0.52335274", "0.5230126", "0.5227847", "0.5225559", "0.5212399", "0.52106404", "0.52013177" ]
0.77481353
0
AuthenticateToken Verifies provided token and if valid, extracts eHRID from it.
func (c *Client) AuthenticateToken(ctx context.Context, customToken string) (string, error) { client := firebase.FirebaseAuth token, err := client.VerifyIDToken(ctx, customToken) if err != nil { return "", err } return token.UID, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *MockClient) AuthenticateToken(ctx context.Context, customToken string) (string, error) {\n\treturn \"ehrid\", nil\n}", "func (o *OIDCAuthenticator) AuthenticateToken(ctx context.Context, rawtoken string) (*Claims, error) {\n\tidToken, err := o.verifier.Verify(ctx, rawtoken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Token failed validation: %v\", err)\n\t}\n\n\t// Check for required claims\n\tvar claims map[string]interface{}\n\tif err := idToken.Claims(&claims); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get claim map from token: %v\", err)\n\t}\n\tfor _, requiredClaim := range requiredClaims {\n\t\tif _, ok := claims[requiredClaim]; !ok {\n\t\t\t// Claim missing\n\t\t\treturn nil, fmt.Errorf(\"Required claim %v missing from token\", requiredClaim)\n\t\t}\n\t}\n\n\treturn o.parseClaims(claims)\n}", "func (a *authSvc) ValidateToken(authHeader interface{}) (interface{}, error) {\n\t// validate an Authorization header token is present in the request\n\tif authHeader == nil {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\theader := authHeader.(string)\n\tif header == \"\" {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\t// validate that it is a Bearer token\n\tif !strings.HasPrefix(header, bearerTokenKey) {\n\t\treturn nil, errors.New(\"authorization token is not valid Bearer token\")\n\t}\n\tt := strings.Replace(header, bearerTokenKey, \"\", -1)\n\t// parse the header token\n\ttoken, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"there was an parsing the given token. 
please validate the token is for this service\")\n\t\t}\n\t\treturn a.authSecret, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// validate token and get claims\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\tvar decodedToken map[string]string\n\t\terr = mapstructure.Decode(claims, &decodedToken)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn decodedToken[\"email\"], nil\n\t}\n\treturn nil, errors.New(\"invalid authorization token\") // token is not valid, return error\n}", "func (j *JwtAuthenticator) AuthenticateToken(ctx context.Context, rawtoken string) (*Claims, error) {\n\n\t// Parse token\n\ttoken, err := jwt.Parse(rawtoken, func(token *jwt.Token) (interface{}, error) {\n\n\t\t// Verify Method\n\t\tif strings.HasPrefix(token.Method.Alg(), \"RS\") {\n\t\t\t// RS256, RS384, or RS512\n\t\t\treturn j.rsaKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"ES\") {\n\t\t\t// ES256, ES384, or ES512\n\t\t\treturn j.ecdsKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"HS\") {\n\t\t\t// HS256, HS384, or HS512\n\t\t\treturn j.sharedSecretKey, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Unknown token algorithm: %s\", token.Method.Alg())\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Token failed validation\")\n\t}\n\n\t// Get claims\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif claims == nil || !ok {\n\t\treturn nil, fmt.Errorf(\"No claims found in token\")\n\t}\n\n\t// Check for required claims\n\tfor _, requiredClaim := range requiredClaims {\n\t\tif _, ok := claims[requiredClaim]; !ok {\n\t\t\t// Claim missing\n\t\t\treturn nil, fmt.Errorf(\"Required claim %v missing from token\", requiredClaim)\n\t\t}\n\t}\n\n\t// Token now has been verified.\n\t// Claims holds all the authorization information.\n\t// Here we need to first decode it then unmarshal it from JSON\n\tparts := strings.Split(token.Raw, \".\")\n\tclaimBytes, err := 
jwt.DecodeSegment(parts[1])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decode claims: %v\", err)\n\t}\n\tvar sdkClaims Claims\n\terr = json.Unmarshal(claimBytes, &sdkClaims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get sdkclaims: %v\", err)\n\t}\n\n\tif err := validateUsername(j.usernameClaim, &sdkClaims); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sdkClaims, nil\n}", "func Authenticate(tokenStr string, pub *rsa.PublicKey) (interface{}, error) {\n\tclaims, err := token.Verify(tokenStr, func(claims interface{}, method token.SigningMethod) (interface{}, error) {\n\t\treturn pub, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn claims, nil\n}", "func parseToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tevent := ssas.Event{Op: \"ParseToken\"}\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\tevent.Help = \"no authorization header found\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tauthRegexp := regexp.MustCompile(`^Bearer (\\S+)$`)\n\t\tauthSubmatches := authRegexp.FindStringSubmatch(authHeader)\n\t\tif len(authSubmatches) < 2 {\n\t\t\tevent.Help = \"invalid Authorization header value\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttokenString := authSubmatches[1]\n\t\ttoken, err := server.VerifyToken(tokenString)\n\t\tif err != nil {\n\t\t\tevent.Help = fmt.Sprintf(\"unable to decode authorization header value; %s\", err)\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvar rd ssas.AuthRegData\n\t\tif rd, err = readRegData(r); err != nil {\n\t\t\trd = ssas.AuthRegData{}\n\t\t}\n\n\t\tif claims, ok := token.Claims.(*service.CommonClaims); ok && token.Valid {\n\t\t\trd.AllowedGroupIDs = claims.GroupIDs\n\t\t\trd.OktaID = claims.OktaID\n\t\t}\n\t\tctx := 
context.WithValue(r.Context(), \"ts\", tokenString)\n\t\tctx = context.WithValue(ctx, \"rd\", rd)\n\t\tservice.LogEntrySetField(r, \"rd\", rd)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (app *application) authenticateToken(r *http.Request) (*models.User, error) {\n\t// retrieve a token from the client's http request header\n\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\tif authorizationHeader == \"\" {\n\t\treturn nil, errors.New(\"no authorization header received\")\n\t}\n\n\theaderParts := strings.Split(authorizationHeader, \" \")\n\tif len(headerParts) != 2 || headerParts[0] != \"Bearer\" {\n\t\treturn nil, errors.New(\"no authorization header received\")\n\t}\n\n\ttoken := headerParts[1]\n\tif len(token) != 26 {\n\t\treturn nil, errors.New(\"authentication token wrong size\")\n\t}\n\n\t// once all passed, get the user from the tokens table in the database\n\tuser, err := app.DB.GetUserForToken(token)\n\tif err != nil {\n\t\treturn nil, errors.New(\"no matching user found\")\n\t}\n\n\treturn user, nil\n}", "func VerifyToken(tokData []byte, keyFile, keyType string) (iat string, err error) {\n\n\t// trim possible whitespace from token\n\ttokData = regexp.MustCompile(`\\s*$`).ReplaceAll(tokData, []byte{})\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token len: %v bytes\\n\", len(tokData))\n\t}\n\n\t// Parse the token. 
Load the key from command line option\n\ttoken, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) {\n\t\tdata, err := loadData(keyFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif isEs(keyType) {\n\t\t\treturn jwt.ParseECPublicKeyFromPEM(data)\n\t\t} else if isRs(keyType) {\n\t\t\treturn jwt.ParseRSAPublicKeyFromPEM(data)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Error signing token - confg error: keyType=[%s]\", keyType)\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn data, nil\n\t})\n\n\t// Print some debug data\n\tif db100 && token != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Header:\\n%v\\n\", token.Header)\n\t\tfmt.Fprintf(os.Stderr, \"Claims:\\n%v\\n\", token.Claims)\n\t}\n\n\t// Print an error if we can't parse for some reason\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Couldn't parse token: %v\", err)\n\t}\n\n\t// Is token invalid?\n\tif !token.Valid {\n\t\treturn \"\", fmt.Errorf(\"Token is invalid\")\n\t}\n\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token Claims: %s\\n\", godebug.SVarI(token.Claims))\n\t}\n\n\t// {\"auth_token\":\"f5d8f6ae-e2e5-42c9-83a9-dfd07825b0fc\"}\n\ttype GetAuthToken struct {\n\t\tAuthToken string `json:\"auth_token\"`\n\t}\n\tvar gt GetAuthToken\n\tcl := godebug.SVar(token.Claims)\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Claims just before -->>%s<<--\\n\", cl)\n\t}\n\terr = json.Unmarshal([]byte(cl), &gt)\n\tif err == nil {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Success: %s -- token [%s] \\n\", err, gt.AuthToken)\n\t\t}\n\t\treturn gt.AuthToken, nil\n\t} else {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: %s -- Unable to unmarsal -->>%s<<--\\n\", err, cl)\n\t\t}\n\t\treturn \"\", err\n\t}\n\n}", "func (a *Auth) authToken(ctx *bm.Context) (int64, error) {\n\treq := ctx.Request\n\tkey := req.Form.Get(\"access_token\")\n\tif key == \"\" {\n\t\treturn 0, ecode.Unauthorized\n\t}\n\t// NOTE: 请求登录鉴权服务接口,拿到对应的用户id\n\tvar mid int64\n\t// TODO: get mid from some code\n\treturn mid, 
nil\n}", "func (t *TokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) {\n\ttokenID, tokenSecret, err := bootstraptokenutil.ParseToken(token)\n\tif err != nil {\n\t\t// Token isn't of the correct form, ignore it.\n\t\treturn nil, false, nil\n\t}\n\n\tsecretName := bootstrapapi.BootstrapTokenSecretPrefix + tokenID\n\tsecret, err := t.lister.Get(secretName)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(3).Infof(\"No secret of name %s to match bootstrap bearer token\", secretName)\n\t\t\treturn nil, false, nil\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\tif secret.DeletionTimestamp != nil {\n\t\ttokenErrorf(secret, \"is deleted and awaiting removal\")\n\t\treturn nil, false, nil\n\t}\n\n\tif string(secret.Type) != string(bootstrapapi.SecretTypeBootstrapToken) || secret.Data == nil {\n\t\ttokenErrorf(secret, \"has invalid type, expected %s.\", bootstrapapi.SecretTypeBootstrapToken)\n\t\treturn nil, false, nil\n\t}\n\n\tts := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenSecretKey)\n\tif subtle.ConstantTimeCompare([]byte(ts), []byte(tokenSecret)) != 1 {\n\t\ttokenErrorf(secret, \"has invalid value for key %s.\", bootstrapapi.BootstrapTokenSecretKey)\n\t\treturn nil, false, nil\n\t}\n\n\tid := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenIDKey)\n\tif id != tokenID {\n\t\ttokenErrorf(secret, \"has invalid value for key %s.\", bootstrapapi.BootstrapTokenIDKey)\n\t\treturn nil, false, nil\n\t}\n\n\tif bootstrapsecretutil.HasExpired(secret, time.Now()) {\n\t\t// logging done in isSecretExpired method.\n\t\treturn nil, false, nil\n\t}\n\n\tif bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenUsageAuthentication) != \"true\" {\n\t\ttokenErrorf(secret, \"not marked %s=true.\", bootstrapapi.BootstrapTokenUsageAuthentication)\n\t\treturn nil, false, nil\n\t}\n\n\tgroups, err := bootstrapsecretutil.GetGroups(secret)\n\tif err != nil 
{\n\t\ttokenErrorf(secret, \"has invalid value for key %s: %v.\", bootstrapapi.BootstrapTokenExtraGroupsKey, err)\n\t\treturn nil, false, nil\n\t}\n\n\treturn &authenticator.Response{\n\t\tUser: &user.DefaultInfo{\n\t\t\tName: bootstrapapi.BootstrapUserPrefix + string(id),\n\t\t\tGroups: groups,\n\t\t},\n\t}, true, nil\n}", "func (e *oidcExtension) authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) {\n\tmetadata := client.NewMetadata(headers)\n\tauthHeaders := metadata.Get(e.cfg.Attribute)\n\tif len(authHeaders) == 0 {\n\t\treturn ctx, errNotAuthenticated\n\t}\n\n\t// we only use the first header, if multiple values exist\n\tparts := strings.Split(authHeaders[0], \" \")\n\tif len(parts) != 2 {\n\t\treturn ctx, errInvalidAuthenticationHeaderFormat\n\t}\n\n\traw := parts[1]\n\tidToken, err := e.verifier.Verify(ctx, raw)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to verify token: %w\", err)\n\t}\n\n\tclaims := map[string]interface{}{}\n\tif err = idToken.Claims(&claims); err != nil {\n\t\t// currently, this isn't a valid condition, the Verify call a few lines above\n\t\t// will already attempt to parse the payload as a json and set it as the claims\n\t\t// for the token. As we are using a map to hold the claims, there's no way to fail\n\t\t// to read the claims. It could fail if we were using a custom struct. 
Instead of\n\t\t// swalling the error, it's better to make this future-proof, in case the underlying\n\t\t// code changes\n\t\treturn ctx, errFailedToObtainClaimsFromToken\n\t}\n\n\tsubject, err := getSubjectFromClaims(claims, e.cfg.UsernameClaim, idToken.Subject)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to get subject from claims in the token: %w\", err)\n\t}\n\tmembership, err := getGroupsFromClaims(claims, e.cfg.GroupsClaim)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to get groups from claims in the token: %w\", err)\n\t}\n\n\tcl := client.FromContext(ctx)\n\tcl.Auth = &authData{\n\t\traw: raw,\n\t\tsubject: subject,\n\t\tmembership: membership,\n\t}\n\treturn client.NewContext(ctx, cl), nil\n}", "func Authenticate(nextFunc http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif err := auth.ValidarToken(r); err != nil {\n\t\t\tresponse.Error(w, http.StatusUnauthorized, err)\n\t\t\treturn\n\t\t}\n\n\t\tnextFunc(w, r)\n\t}\n}", "func ProcessToken(tk string) (*models.Claim, bool, string, error) {\n\t//Para poder decodificar el token\n\tmyPwd := []byte(\"MiClaveUltraSECRETA\")\n\n\t//jwt exige que sea un puntero\n\tclaims := &models.Claim{}\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, string(\"\"), errors.New(\"token format invalid\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\t//Sintaxis para verificar si el token es valido y mapear el token dentro de claims\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn myPwd, nil\n\t})\n\tif err == nil {\n\t\t//Si el token es válido lo primero que validamos es si el email existe en la BD\n\t\t_, userFound, _ := bd.UserExists(claims.Email)\n\t\tif userFound {\n\t\t\tEmail = claims.Email\n\t\t\tUserID = claims.ID.Hex()\n\t\t}\n\t\treturn claims, userFound, UserID, nil\n\t}\n\tif !tkn.Valid {\n\t\treturn claims, false, string(\"\"), 
errors.New(\"invalid token\")\n\t}\n\n\treturn claims, false, string(\"\"), err\n}", "func Authenticate(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(responseWriter http.ResponseWriter, request *http.Request) {\n\t\ttoken := strings.TrimPrefix(request.Header.Get(\"Authorization\"), \"Bearer \")\n\t\terr := crypto.ValidateToken([]byte(token))\n\n\t\tif err == nil {\n\t\t\tnext(responseWriter, request)\n\t\t} else {\n\t\t\thelpers.Response(responseWriter, http.StatusUnauthorized, constants.ERR_INVALID_TOKEN, err, nil)\n\t\t}\n\t}\n}", "func (p *AzureProvider) verifyTokenAndExtractEmail(ctx context.Context, token string) (string, error) {\n\temail := \"\"\n\n\tif token != \"\" && p.Verifier != nil {\n\t\ttoken, err := p.Verifier.Verify(ctx, token)\n\t\t// due to issues mentioned above, id_token may not be signed by AAD\n\t\tif err == nil {\n\t\t\tclaims, err := p.getClaims(token)\n\t\t\tif err == nil {\n\t\t\t\temail = claims.Email\n\t\t\t} else {\n\t\t\t\tlogger.Printf(\"unable to get claims from token: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Printf(\"unable to verify token: %v\", err)\n\t\t}\n\t}\n\n\treturn email, nil\n}", "func (g *grpcWrapper) validateToken(logger *zap.Logger, token string) (string, error) {\n\tlogger.Debug(\"validateToken called\")\n\tif g.skipAuth {\n\t\tlogger.Debug(\"validateToken short-circuited due to SKIP AUTH\")\n\t\treturn \"11\", nil\n\t}\n\tserverAuthToken, err := serverAuth(logger, g.authURL, g.authUser, g.authPassword)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from serverAuth\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq, err := http.NewRequest(\"GET\", g.authURL+\"v3/auth/tokens\", nil)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from NewRequest GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"X-Auth-Token\", serverAuthToken)\n\treq.Header.Set(\"X-Subject-Token\", token)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil 
{\n\t\tlogger.Debug(\"validateToken error from DefaultClient.Do GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tlogger.Debug(\"validateToken error from GET return status\", zap.Int(\"status\", resp.StatusCode))\n\t\treturn \"\", fmt.Errorf(\"token validation gave status %d\", resp.StatusCode)\n\t}\n\tvar validateResp validateTokenResponse\n\tr, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from GET ReadAll body\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tif err = json.Unmarshal(r, &validateResp); err != nil {\n\t\tlogger.Debug(\"validateToken error from GET json.Unmarshal\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tlogger.Debug(\"validateToken succeeded\", zap.String(\"Project.ID\", validateResp.Token.Project.ID))\n\treturn validateResp.Token.Project.ID, nil\n}", "func ValidateAuth(verifyKey *rsa.PublicKey, h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\t\tif authorizationHeader != \"\" {\n\t\t\tbearerToken := strings.TrimPrefix(authorizationHeader, \"Bearer\")\n\t\t\tbearerToken = strings.TrimSpace(bearerToken)\n\t\t\t// tokenPart := bearerToken[1] //Grab the token part, what we are truly interested in\n\t\t\tcustomClaims := &claims.Claims{}\n\n\t\t\ttoken, err := jwt.ParseWithClaims(bearerToken, customClaims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\treturn verifyKey, nil\n\t\t\t})\n\n\t\t\tif err != nil { // Malformed token, returns with http code 403 as usual\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !token.Valid {\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx := r.Context()\n\t\t\t// Everything went well, proceed with the request and set the caller to the user retrieved from the parsed token\n\t\t\tr = r.WithContext(AddClaims(ctx, 
customClaims))\n\t\t\th.ServeHTTP(w, r) // proceed in the middleware chain!\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(\"Invalid Authorization Token\")\n\t\t}\n\t})\n}", "func authenticateApiToken(server string, token string) (string, error) {\n\tlog.Debug(\"Attempting to authenticate the API Refresh Token\")\n\tclient := resty.New()\n\tqueryResponse, err := client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: ignoreCert}).R().\n\t\tSetBody(ApiAuthentication{token}).\n\t\tSetResult(&ApiAuthenticationResponse{}).\n\t\tSetError(&ApiAuthenticationError{}).\n\t\tPost(\"https://\" + server + \"/iaas/api/login\")\n\tif queryResponse.IsError() {\n\t\tlog.Debug(\"Refresh Token failed\")\n\t\treturn \"\", errors.New(queryResponse.Error().(*ApiAuthenticationError).Message)\n\t}\n\tlog.Debug(\"Refresh Token succeeded\")\n\treturn queryResponse.Result().(*ApiAuthenticationResponse).Token, err\n}", "func (s *Server) authenticate(w http.ResponseWriter, req *http.Request) (string, error) {\n\tif s.opts.UseJWT {\n\t\t// Check Authorization header.\n\t\tfor _, val := range req.Header[\"Authorization\"] {\n\t\t\tif strings.HasPrefix(val, \"Bearer \") {\n\t\t\t\ttoken := val[len(\"Bearer \"):]\n\t\t\t\tobject, err := jose.ParseSigned(token)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error parsing JWT token: %s\", err)\n\t\t\t\t}\n\t\t\t\tpb, err := object.Verify(&s.opts.PrivateKey.PublicKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error verifying JWT token: %s\", err)\n\t\t\t\t}\n\t\t\t\tpayload := make(map[string]string)\n\t\t\t\terr = json.Unmarshal(pb, &payload)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error parsing JWT payload: %s\", err)\n\t\t\t\t}\n\t\t\t\temail, ok := payload[\"sub\"]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"JWT token does not have sub: %s\", string(pb))\n\t\t\t\t}\n\t\t\t\treturn s.hashId(email), nil\n\t\t\t}\n\t\t}\n\t}\n\tsession, err := s.cookieStore.Get(req, 
UserSessionName)\n\tif err != nil {\n\t\tsession.Options.MaxAge = -1\n\t\treturn \"\", fmt.Errorf(\"cookieStore.Get returned error %s\", err)\n\t}\n\thash, ok := session.Values[\"hash\"].(string)\n\tglog.V(3).Infof(\"authenticate %s: hash=%s\", req.URL, session.Values[\"hash\"])\n\tif !ok || hash == \"\" {\n\t\treturn \"\", httpError(http.StatusUnauthorized)\n\t}\n\treturn hash, nil\n}", "func (*Service) Authenticate(code string, configuration *portainer.OAuthSettings) (string, error) {\n\ttoken, err := getOAuthToken(code, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving oauth token\")\n\n\t\treturn \"\", err\n\t}\n\n\tidToken, err := getIdToken(token)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed parsing id_token\")\n\t}\n\n\tresource, err := getResource(token.AccessToken, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving resource\")\n\n\t\treturn \"\", err\n\t}\n\n\tresource = mergeSecondIntoFirst(idToken, resource)\n\n\tusername, err := getUsername(resource, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving username\")\n\n\t\treturn \"\", err\n\t}\n\n\treturn username, nil\n}", "func ExtractIDFromToken(token string, p packer.Packer, n notary.Notary, secretKey []byte) (id string, err error) {\n\tsid, signature, err := p.Unpack([]byte(token))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to decode token\")\n\t}\n\n\tverified := n.Verify(sid, signature, secretKey)\n\tif !verified {\n\t\treturn \"\", fmt.Errorf(\"Failed to verify session ID\")\n\t}\n\n\treturn string(sid[:SessionIDLength]), nil\n}", "func (u *User) ValidateToken(ctx context.Context, inToken *pb.Token, outToken *pb.Token) error {\n\t_ = ctx\n\tts := TokenService{}\n\tclaims, err := ts.Decode(inToken.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif claims == nil {\n\t\treturn fmt.Errorf(glErr.AuthNilClaim(serviceName))\n\t}\n\tif claims.User.Id == 0 || claims.Issuer != 
ClaimIssuer {\n\t\t// fmt.Printf(\"claim User %v\", claims.User)\n\t\treturn fmt.Errorf(glErr.AuthInvalidClaim(serviceName))\n\t}\n\t// fmt.Printf(\"Claim User %v\", claims.User)\n\t// TODO: Check that userid is a valid user in db\n\n\toutToken.Token = inToken.Token\n\toutToken.Valid = true\n\toutToken.EUid = base64.StdEncoding.EncodeToString([]byte(strconv.FormatInt(claims.User.Id, 10)))\n\n\treturn nil\n\n}", "func (ctx *serverRequestContextImpl) TokenAuthentication() (string, error) {\n\tr := ctx.req\n\t// Get the authorization header\n\tauthHdr := r.Header.Get(\"authorization\")\n\tif authHdr == \"\" {\n\t\treturn \"\", caerrors.NewHTTPErr(401, caerrors.ErrNoAuthHdr, \"No authorization header\")\n\t}\n\t// Get the CA\n\tca, err := ctx.GetCA()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Get the request body\n\tbody, err := ctx.ReadBodyBytes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif idemix.IsToken(authHdr) {\n\t\treturn ctx.verifyIdemixToken(authHdr, r.Method, r.URL.RequestURI(), body)\n\t}\n\treturn ctx.verifyX509Token(ca, authHdr, r.Method, r.URL.RequestURI(), body)\n}", "func Authenticate(jwtToken string) (*Token, error) {\n\ttk := &Token{}\n\ttoken, err := jwt.ParseWithClaims(jwtToken, tk, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(jwtKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Token is not valid.\")\n\t}\n\n\tif time.Now().After(tk.Expiry) {\n\t\treturn nil, fmt.Errorf(\"Token has expired. 
Please login again.\")\n\t}\n\n\treturn tk, nil\n}", "func ValidateToken(authClient umAPI.UserManagementApiClient) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken := c.MustGet(\"encodedToken\").(string)\n\t\tparsedToken, err := authClient.ValidateJWT(context.Background(), &umAPI.JWTRequest{\n\t\t\tToken: token,\n\t\t})\n\t\tif err != nil {\n\t\t\tst := status.Convert(err)\n\t\t\tlogger.Error.Println(st.Message())\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"error during token validation\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Set(\"validatedToken\", parsedToken)\n\t\tc.Next()\n\t}\n}", "func validateIDToken(rawIDToken string) (string,error) {\n\t\n\t// Create verifier\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, \"https://accounts.google.com\")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\toidcConfig := &oidc.Config{\n\t\tClientID: clientID,\n\t}\n\tverifier := provider.Verifier(oidcConfig)\n\n\t// Verify id token\n\tidToken, err := verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Parse token to JSON\n\tparsed := new(json.RawMessage)\n\tif err := idToken.Claims(parsed); err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Render json as string\n\tdata, err := json.MarshalIndent(parsed, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\treturn string(data),nil\n}", "func (o *OIDC) authorizeToken(token string) (*openIDPayload, error) {\n\tjwt, err := jose.ParseSigned(token)\n\tif err != nil {\n\t\treturn nil, errs.Wrap(http.StatusUnauthorized, err,\n\t\t\t\"oidc.AuthorizeToken; error parsing oidc token\")\n\t}\n\n\t// Parse claims to get the kid\n\tvar claims openIDPayload\n\tif err := jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {\n\t\treturn nil, errs.Wrap(http.StatusUnauthorized, err,\n\t\t\t\"oidc.AuthorizeToken; error parsing oidc token claims\")\n\t}\n\n\tfound := false\n\tkid := jwt.Headers[0].KeyID\n\tkeys := o.keyStore.Get(kid)\n\tfor _, key 
:= range keys {\n\t\tif err := jwt.Claims(key, &claims); err == nil {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn nil, errs.Unauthorized(\"oidc.AuthorizeToken; cannot validate oidc token\")\n\t}\n\n\tif err := o.ValidatePayload(claims); err != nil {\n\t\treturn nil, errs.Wrap(http.StatusInternalServerError, err, \"oidc.AuthorizeToken\")\n\t}\n\n\treturn &claims, nil\n}", "func (r GetResult) ExtractTokenID() (string, error) {\n\treturn r.Header.Get(\"X-Subject-Token\"), r.Err\n}", "func (id *authIdentity) AuthToken() string {\n\treturn id.authToken\n}", "func (r CreateResult) ExtractTokenID() (string, error) {\n\treturn r.Header.Get(\"X-Subject-Token\"), r.Err\n}", "func VerifyToken(tokenStr string, secret_name string) (string, error) {\n\t var result = \"\"\n\t //Retrieve secret value from secrets manager\n\t secret, err := getSecretValue(secret_name);\n\t verifyToken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t return[]byte(secret), nil\n\t })\n\t if err == nil && verifyToken.Valid{\n\t\t result = \"Valid\"\n\t } else {\n\t\t result = \"Invalid\"\n\t }\n\t log.Println(\"VerifyToken result =\", result)\n\n\t return result, err\n}", "func (c *CSRFStore) verifyToken(headerToken string) error {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\t// check if token is initialized\n\tif c.token == nil || len(c.token.Value) == 0 {\n\t\treturn errors.New(\"token not initialized\")\n\t}\n\n\ta, err := base64.RawURLEncoding.DecodeString(headerToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if token values are same, using a constant time comparison\n\tif subtle.ConstantTimeCompare(a, c.token.Value) != 1 {\n\t\treturn errors.New(\"invalid token\")\n\t}\n\n\t// make sure token is still valid\n\tif c.expired() {\n\t\treturn errors.New(\"token has expired\")\n\t}\n\n\treturn nil\n}", "func ValidateToken(tokenString string, w http.ResponseWriter) (Claims, error) {\n\tclaims := Claims{}\n\tjwtKey := 
[]byte(config.Configuration.TokenPrivateKey)\n\n\t// The token string is parsed, decoded and stored into the given Claims struct\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn jwtKey, nil\n\t\t})\n\n\t// Check if the token has expired according to the expiry time fixed during the sign in\n\tif !token.Valid {\n\t\terr = ExpiredToken\n\t\tMakeErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\tlog.Println(err.Error())\n\t\treturn claims, err\n\t}\n\n\t// Check if the token has been signed with the private key of the api gateway\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t// If the token is expired or has not been signed according to the api gateway key, an Unauthorization code\n\t\t\t// is returned in both cases, but a different message is provided to the client.\n\t\t\tMakeErrorResponse(w, http.StatusUnauthorized, \"Wrong credentials\")\n\t\t\tlog.Println(\"Wrong credentials\")\n\t\t\treturn claims, err\n\t\t}\n\n\t\tMakeErrorResponse(w, http.StatusBadRequest, \"Malformed token\")\n\t\tlog.Println(\"Malformed token\")\n\t\treturn claims, err\n\t}\n\n\treturn claims, nil\n\n}", "func (a *Api) token(res http.ResponseWriter, req *http.Request) *token.TokenData {\n\ttd := a.auth.Authenticate(req)\n\n\tif td == nil {\n\t\tstatusErr := &status.StatusError{Status: status.NewStatus(http.StatusUnauthorized, STATUS_NO_TOKEN)}\n\t\ta.sendModelAsResWithStatus(res, statusErr, http.StatusUnauthorized)\n\t\treturn nil\n\t}\n\t//all good!\n\treturn td\n}", "func (ja *jwtGuard) CheckToken(tokenString string) (userID uint64, err error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &everestClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(ja.secret), nil\n\t})\n\tif err != nil {\n\t\tif ve, ok 
:= err.(*jwt.ValidationError); ok {\n\t\t\tif ve.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\t\terr = errors.New(\"not even a token\")\n\t\t\t} else if ve.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\terr = errors.New(\"token expired\")\n\n\t\t\t} else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 {\n\t\t\t\t// Token is either expired or not active yet\n\t\t\t\terr = errors.New(\"token not valid yet\")\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Couldn't handle this token: %e\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Couldn't handle this token: %e\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tif claims, ok := token.Claims.(*everestClaims); ok {\n\t\treturn claims.UserID, err\n\t}\n\treturn 0, errors.New(\"Couldn't handle this token\")\n}", "func ProcessToken(tk string) (*entities.Clain, bool, int64, error) {\n\tmiClave := []byte(\"apirestdekkdesarrollo\")\n\tclaims := &entities.Clain{}\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, 0, errors.New(\"Formato del token no válido\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn miClave, nil\n\t})\n\n\tif err != nil {\n\t\t_, encontrado, _ := utils.VerifyExistsUsername(claims.Username)\n\t\tif encontrado == true {\n\t\t\tUsername = claims.Username\n\t\t\tIDUsuario = claims.ID\n\t\t}\n\t\treturn claims, encontrado, IDUsuario, nil\n\t}\n\n\tif !tkn.Valid {\n\t\treturn claims, false, 0, errors.New(\"Token inválido\")\n\t}\n\n\treturn claims, false, 0, err\n}", "func validateAndGetUserID(key []byte, token string) (string, error) {\n\tt := strings.TrimSpace(strings.TrimPrefix(token, \"Bearer \"))\n\n\tvar jsonToken paseto.JSONToken\n\terr := paseto.Decrypt(t, key, &jsonToken, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"paseto json key decrypt\")\n\t}\n\n\treturn jsonToken.Subject, nil\n}", "func Authenticate(next 
http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\n\t\t// Expecting: bearer <token>\n\t\tbearer := r.Header.Get(\"Authorization\")\n\t\tif len(bearer) == 0 {\n\t\t\thttp.Error(w, errAuthHeaderMissing.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif len(bearer) < 6 || strings.ToLower(bearer[0:6]) != \"bearer\" {\n\t\t\thttp.Error(w, errAuthWrongHeaderFormat.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := bearer[7:]\n\t\taccessTokenClaims, err := entity.ParseAccessTokenClaims(accessToken)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Add claims to the context so we can retrieve them later\n\t\tctx = context.WithValue(ctx, ClaimsKey, accessTokenClaims)\n\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (f *vaultTokenAuthHandler) Validate(token string) (*api.Secret, error) {\n\tf.client.SetToken(token)\n\treturn f.client.Auth().Token().LookupSelf()\n}", "func (a *Service) ValidateJweToken(token string) (map[string]interface{}, *error_utils.ApiError) {\n\n\t// parse token string\n\tclaims, err := a.parseTokenString(token)\n\tif err != nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(err.Error())\n\t}\n\n\t// validate dates\n\tif claims[\"orig_iat\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat is missing\")\n\t}\n\n\t// try convert to float64\n\tif _, ok := claims[\"orig_iat\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat must be float64 format\")\n\t}\n\n\t// get value and validate\n\torigIat := int64(claims[\"orig_iat\"].(float64))\n\tif origIat < a.timeFunc().Add(-a.maxRefresh).Unix() {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\n\t// check if exp exists in map\n\tif claims[\"exp\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp is missing\")\n\t}\n\n\t// try 
convert to float 64\n\tif _, ok := claims[\"exp\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp must be float64 format\")\n\t}\n\n\t// get value and validate\n\texp := int64(claims[\"exp\"].(float64))\n\tif exp < a.timeFunc().Unix(){\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\t// validate dates\n\n\t// validate issuer\n\t// check if iss exists in map\n\tif claims[\"iss\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss is missing\")\n\t}\n\n\t// try convert to string\n\tif _, ok := claims[\"iss\"].(string); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss must be string format\")\n\t}\n\n\t// get value and validate\n\tissuer := claims[\"iss\"]\n\tif issuer != a.issuer{\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Invalid issuer\")\n\t}\n\t// validate issuer\n\n\treturn claims, nil\n}", "func (e *EdgeRequestContext) AuthToken() *AuthenticationToken {\n\te.tokenOnce.Do(func() {\n\t\tif token, err := e.impl.ValidateToken(e.raw.AuthToken); err != nil {\n\t\t\tlog.Errorw(\"token validation failed\", \"err\", err)\n\t\t\te.token = nil\n\t\t} else {\n\t\t\te.token = token\n\t\t}\n\t})\n\treturn e.token\n}", "func Authenticate(fbToken, fbID string, client URLGetter) error {\n\tvar fbp map[string]interface{}\n\t_, err := fetch(client, fbToken, []string{\"id\"}, &fbp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif fbp[\"id\"] != fbID {\n\t\treturn fmt.Errorf(\"facebook id mismatch, %v != %v\", fbp[\"id\"], fbID)\n\t}\n\n\treturn nil\n}", "func (tokenController TokenController) ValidateTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\ttoken, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn tokenController.mySigningKey, nil\n\t\t})\n\n\tif err == nil {\n\t\tif token.Valid {\n\t\t\tnext(w, r)\n\t\t} else 
{\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tfmt.Fprint(w, \"Token is not valid\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprint(w, \"Unauthorized access to this resource\")\n\t}\n}", "func GetAuthToken(address string, pkey string, API string) (string, error) {\n var data = new(StringRes)\n // 1: Get the auth data to sign\n // ----------------------------\n res_data, err := http.Get(API+\"/AuthDatum\")\n // Data will need to be hashed\n if err != nil { return \"\", fmt.Errorf(\"Could not get authentication data: (%s)\", err) }\n body, err1 := ioutil.ReadAll(res_data.Body)\n if err != nil { return \"\", fmt.Errorf(\"Could not parse authentication data: (%s)\", err1) }\n err2 := json.Unmarshal(body, &data)\n if err2 != nil { return \"\", fmt.Errorf(\"Could not unmarshal authentication data: (%s)\", err2) }\n\n // Hash the data. Keep the byte array\n data_hash := sig.Keccak256Hash([]byte(data.Result))\n // Sign the data with the private key\n privkey, err3 := crypto.HexToECDSA(pkey)\n if err3 != nil { return \"\", fmt.Errorf(\"Could not parse private key: (%s)\", err3) }\n // Sign the auth data\n _sig, err4 := sig.Ecsign(data_hash, privkey)\n if err4 != nil { return \"\", fmt.Errorf(\"Could not sign with private key: (%s)\", err4) }\n\n // 2: Send sigature, get token\n // ---------------------\n var authdata = new(StringRes)\n var jsonStr = []byte(`{\"owner\":\"`+address+`\",\"sig\":\"0x`+_sig+`\"}`)\n res, err5 := http.Post(API+\"/Authenticate\", \"application/json\", bytes.NewBuffer(jsonStr))\n if err5 != nil { return \"\", fmt.Errorf(\"Could not hit POST /Authenticate: (%s)\", err5) }\n if res.StatusCode != 200 { return \"\", fmt.Errorf(\"(%s): Error in POST /Authenticate\", res.StatusCode)}\n body, err6 := ioutil.ReadAll(res.Body)\n if err6 != nil { return \"\" , fmt.Errorf(\"Could not read /Authenticate body: (%s)\", err6)}\n err7 := json.Unmarshal(body, &authdata)\n if err7 != nil { return \"\", fmt.Errorf(\"Could not 
unmarshal /Authenticate body: (%s)\", err7) }\n\n // Return the JSON web token\n return string(authdata.Result), nil\n}", "func (tmdb *TMDb) GetAuthValidateToken(token, user, password string) (*AuthenticationToken, error) {\n\tvar validToken AuthenticationToken\n\turi := fmt.Sprintf(\"%s/authentication/token/validate_with_login?api_key=%s&request_token=%s&username=%s&password=%s\", baseURL, tmdb.apiKey, token, user, password)\n\tresult, err := getTmdb(uri, &validToken)\n\treturn result.(*AuthenticationToken), err\n}", "func extractToken(r *http.Request) (string, error) {\n\treqToken := r.Header.Get(\"Authorization\")\n\tsplitToken := strings.Split(reqToken, \"Bearer \")\n\n\tif len(splitToken) < 2 {\n\t\treturn \"\", errors.New(\"No token\")\n\t}\n\n\treturn splitToken[1], nil\n}", "func ValidateIdTokenWithNonce(aud string, idToken string, nonce string) (*SiwaIdToken, string) {\n\n\t//initialize the token object\n\tvar siwaIdToken *SiwaIdToken = &SiwaIdToken{Valid: false}\n\n\tif idToken == \"\" {\n\t\treturn siwaIdToken, \"empty_token\"\n\t}\n\n\t//split and decode token\n\tparts := strings.Split(idToken, \".\")\n\tif len(parts) != 3 {\n\t\treturn siwaIdToken, \"invalid_format_missing_parts\"\n\t}\n\tjsonHeaderB, err := base64UrlDecode(parts[0])\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_header_base64_decode_failed error:\" + err.Error()\n\t}\n\tvar jwtHeader JWTTokenHeader\n\terr = json.Unmarshal(jsonHeaderB, &jwtHeader)\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_header_json_decode_failed error:\" + err.Error()\n\t}\n\tjsonBodyB, err := base64UrlDecode(parts[1])\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_body_base64_decode_failed error:\" + err.Error()\n\t}\n\tvar jwtBody JWTTokenBody\n\terr = json.Unmarshal(jsonBodyB, &jwtBody)\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_body_json_decode_failed error:\" + err.Error()\n\t}\n\n\t//the basic validation tests pass. 
Now check if the contents of token are valid\n\tvar reason string\n\tvar valid bool = true\n\n\t//Verify the nonce for the authentication\n\t//if idtoken had nonce, the check will fail\n\tif jwtBody.Nonce != \"\" && jwtBody.Nonce != nonce {\n\t\treason = reason + \"nonce_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the iss field contains https://appleid.apple.com\n\tif jwtBody.Iss != \"https://appleid.apple.com\" {\n\t\treason = reason + \" iss_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the aud field is the developer’s client_id\n\tif jwtBody.Aud != aud {\n\t\treason = reason + \" aud_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the time is earlier than the exp value of the token\n\tif jwtBody.Exp < time.Now().Unix() {\n\t\treason = reason + \" expiry_in_past\"\n\t\tvalid = false\n\t}\n\n\t//Verify the JWS E256 signature using the server’s public key\n\tvar decodedSignature []byte\n\tdecodedSignature, err = base64UrlDecode(parts[2])\n\tif err != nil {\n\t\treason = reason + \" signature_base64_decode_failed error:\" + err.Error()\n\t\tvalid = false\n\t} else if !verifyAppleRSA256(parts[0]+\".\"+parts[1], decodedSignature, jwtHeader.Kid) {\n\t\treason = reason + \" signature_verification_failed\"\n\t\tvalid = false\n\t}\n\n\t//set the values of parsed token into the id token object\n\tsiwaIdToken.Header = &jwtHeader\n\tsiwaIdToken.Body = &jwtBody\n\tsiwaIdToken.Valid = valid\n\tsiwaIdToken.Signature = decodedSignature\n\n\treturn siwaIdToken, reason\n}", "func ValidateToken(token string) (string, error) {\n username, exists := Sessions[token];\n if (!exists) {\n return \"\", apierrors.TokenValidationError{apierrors.TOKEN_VALIDATION_NO_TOKEN};\n }\n\n return username, nil;\n}", "func ValidateToken(tokenString string, secretSignKey []byte) (string, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn secretSignKey, nil\n\t})\n\n\tif claims, ok := 
token.Claims.(*Claims); ok && token.Valid {\n\t\t// fmt.Printf(\"%v %v\", claims.Email, claims.StandardClaims.ExpiresAt)\n\t\treturn claims.Email, nil\n\t}\n\treturn \"\", err\n}", "func ValidateToken(token string) interface{} {\n\tfmt.Println(\"Starting token validation...\")\n\tclaims := jwt.MapClaims{}\n\t_, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(Secret), nil\n\t})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: \", err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tfmt.Println(claims)\n\n\tudatos := make(map[string]string)\n\tudatos[\"User\"] = claims[\"user\"].(string)\n\tudatos[\"Role\"] = claims[\"role\"].(string)\n\n\treturn udatos\n}", "func (svc *basicAuthNService) ValidateToken(tokenString, kid string) (dto.CustomClaim, error) {\n\tclaim := dto.CustomClaim{}\n\n\tkf := func(token *stdjwt.Token) (interface{}, error) {\n\t\tkeyID := token.Header[\"kid\"].(string)\n\t\tif keyID != kid {\n\t\t\treturn claim, stdjwt.ErrInvalidKeyType\n\t\t}\n\t\treturn []byte(svcconf.C.Auth.SecretKey), nil\n\t}\n\n\ttoken, err := stdjwt.ParseWithClaims(tokenString, &claim, kf)\n\n\t// check if signature is valid\n\tif err != nil {\n\t\treturn claim, err\n\t}\n\tif token.Valid {\n\t\treturn claim, nil\n\t}\n\treturn claim, kitjwt.ErrTokenInvalid\n}", "func Validate(r *http.Request, db *sql.DB) (UserID, error) {\n\ttokens, ok := r.Header[\"Authorization\"]\n\tif !ok {\n\t\treturn 0, ErrNoAuthHeader\n\t}\n\ttoken := strings.TrimPrefix(tokens[0], \"Bearer \")\n\tnow := time.Now().Unix()\n\tvar userID UserID\n\terr := db.QueryRow(`\n\t\tselect\n\t\t\toauth_sessions.owner_id\n\t\tfrom \n\t\t\toauth_access_tokens\n\t\t\tjoin oauth_sessions on oauth_access_tokens.session_id = oauth_sessions.id\n\t\twhere\n\t\t\toauth_access_tokens.id = $1\n\t\t\tand oauth_access_tokens.expire_time > 
$2\n\t\t`, token, now).Scan(&userID)\n\n\tif err == sql.ErrNoRows {\n\t\treturn 0, errors.Wrapf(ErrSessionInvalid, token)\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn userID, nil\n}", "func VerifyToken(c *fiber.Ctx) {\n\trawToken := strings.Replace(string(c.Fasthttp.Request.Header.Peek(\"Authorization\")), \"Bearer \", \"\", 1)\n\tif rawToken == \"\" {\n\t\tc.Status(401).JSON(\"Missing token\")\n\t\treturn\n\t}\n\n\tjwtKey := []byte(GetDotEnv(\"JWT_KEY\"))\n\thasToken, _ := jwt.Parse(rawToken, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Error in token verify\")\n\t\t}\n\t\treturn jwtKey, nil\n\t})\n\n\tif hasToken.Valid {\n\t\tc.Next()\n\t} else {\n\t\tc.Status(401).JSON(\"Invalid Token\")\n\t}\n}", "func validateToken(tokenObj token.StructToken) (string, bool) {\n\n\tvar errorfound string\n\t//validate token id ==100\n\t//if len(tokenObj.TokenID) != 100 {\n\t//\terrorfound = \"token ID must be 100 characters\"\n\t//\treturn errorfound, false\n\t//}\n\t//validate token name ==20\n\tif len(tokenObj.TokenName) < 4 || len(tokenObj.TokenName) > 20 {\n\t\terrorfound = \"token name must be more than 4 characters and less than or equal 20 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate token symbol <= 4\n\tif len(tokenObj.TokenSymbol) > 4 {\n\t\terrorfound = \"token symbol should less than or equal to 4 characters\"\n\t\treturn errorfound, false\n\t}\n\t// validate icon url if empty or ==100\n\t// if len(tokenObj.IconURL) == 0 || len(tokenObj.IconURL) <= 100 {\n\t// \terrorfound = \"\"\n\t// } else {\n\t// \terrorfound = \"Icon URL is optiaonal if enter it must be less or equal 100 characters\"\n\t// \treturn errorfound, false\n\t// }\n\t// validate description if empty or == 100\n\tif len(tokenObj.Description) == 0 || len(tokenObj.Description) <= 100 {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"Description is optiaonal if enter it must 
be less or equal 100 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if empty\n\tif tokenObj.InitiatorAddress == \"\" {\n\t\terrorfound = \"please enter initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if exist in account data\n\taccountobj := account.GetAccountByAccountPubicKey(tokenObj.InitiatorAddress)\n\tfmt.Println(\"------------------ \", accountobj)\n\tif accountobj.AccountPublicKey == \"\" {\n\t\terrorfound = \"please enter valid initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\tif accountobj.AccountPassword != tokenObj.Password {\n\t\terrorfound = \"The given password is incorrect.\"\n\t\treturn errorfound, false\n\t}\n\n\t//validate Tokens Total Supply less than or equal zero\n\tif tokenObj.TokensTotalSupply < 1 {\n\t\terrorfound = \"please enter Tokens Total Supply more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens value less than or equal zero\n\tif tokenObj.TokenValue <= 0.0 {\n\t\terrorfound = \"please enter Tokens value more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate token precision from 0 to 5\n\tif tokenObj.Precision < 0 || tokenObj.Precision > 5 {\n\t\terrorfound = \"please enter Precision range from 0 to 5 \"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens UsageType is mandatory security or utility\n\tif tokenObj.UsageType == \"security\" || tokenObj.UsageType == \"utility\" {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"please enter UsageType security or utility\"\n\t\treturn errorfound, false\n\t}\n\tif tokenObj.UsageType == \"security\" && tokenObj.Precision != 0 {\n\t\terrorfound = \"UsageType security and must precision equal zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens TokenType is mandatory public or private\n\tif tokenObj.TokenType == \"public\" || tokenObj.TokenType == \"private\" {\n\t\t// check type token is public, validating for enter contact ID\n\t\tif 
tokenObj.TokenType == \"public\" {\n\t\t\t// validate ContractID if empty or ==60\n\t\t\tif len(tokenObj.ContractID) < 4 || len(tokenObj.ContractID) > 60 {\n\t\t\t\terrorfound = \"enter ContractID must be more than 4 character and less than or equal 60 characters\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t\t// check type token is Private , validating for enter pentential PK ,\n\t\t// enter the potential users public keys which can use this token\n\t\taccountList := accountdb.GetAllAccounts()\n\t\tif tokenObj.TokenType == \"private\" {\n\t\t\t//enter pentential PK which can use this token\n\t\t\tif len(tokenObj.UserPublicKey) != 0 {\n\t\t\t\tfor _, pk := range tokenObj.UserPublicKey {\n\t\t\t\t\tif pk == tokenObj.InitiatorAddress {\n\t\t\t\t\t\terrorfound = \"user create token can't be in user public key \"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t\tif !containspk(accountList, pk) {\n\t\t\t\t\t\terrorfound = \"this public key is not associated with any account\"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrorfound = \"enter the potential users public keys which can use this token\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t} else {\n\t\terrorfound = \"please enter TokenType is public or private\"\n\t\treturn errorfound, false\n\t}\n\n\t// Dynamic price\tIf the price of token is dynamic it gets its value from bidding platform.\n\t// Bidding platform API URL.\n\t// based on ValueDynamic True or false\n\tif tokenObj.ValueDynamic == true {\n\t\t//for example value\n\t\tbiddingplatformValue := 5.5\n\t\ttokenObj.Dynamicprice = biddingplatformValue\n\t}\n\treturn \"\", true\n}", "func VerifyIDToken(ctx context.Context, token string, v IDTokenVerifier) (oidc.IDTokenClaims, error) {\n\tclaims := oidc.EmptyIDTokenClaims()\n\n\tdecrypted, err := oidc.DecryptToken(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpayload, err := oidc.ParseToken(decrypted, claims)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tif err := oidc.CheckSubject(claims); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckIssuer(claims, v.Issuer()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAudience(claims, v.ClientID()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthorizedParty(claims, v.ClientID()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckSignature(ctx, decrypted, payload, claims, v.SupportedSignAlgs(), v.KeySet()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckExpiration(claims, v.Offset()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckIssuedAt(claims, v.MaxAgeIAT(), v.Offset()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckNonce(claims, v.Nonce(ctx)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthorizationContextClassReference(claims, v.ACR()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthTime(claims, v.MaxAge()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn claims, nil\n}", "func (auth *DelegateAuthService) Verify(token string) (string, error) {\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\t\"token\": token,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tendpoint := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: auth.controllerOrigin,\n\t\tPath: \"/api/v1/user/verify\",\n\t}\n\tresp, err := http.Post(endpoint.String(), \"application/json\", bytes.NewReader(data))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tresBody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tvar resOK struct {\n\t\t\tUserID string `json:\"userId\"`\n\t\t}\n\t\terr = json.Unmarshal(resBody, &resOK)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn resOK.UserID, nil\n\t} else {\n\t\tvar resError struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t}\n\t\terr = 
json.Unmarshal(resBody, &resError)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.New(string(resBody))\n\t\t}\n\n\t\treturn \"\", errors.New(resError.Status)\n\t}\n}", "func Authenticate(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\t// do some work before calling the next handler\n\t\terr := checkJwt(c.Response(), c.Request())\n\t\tif err == nil {\n\t\t\terr := next(c)\n\t\t\t// do some work after calling the next handler\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n}", "func ValidateToken(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsession := GetSession(w, req, cookieName)\n\t\taccessToken, setbool := session.Values[\"access_token\"].(string)\n\t\tif setbool == true && accessToken == \"\" {\n\t\t\tRedirectLogin(w, req)\n\t\t\t//return\n\t\t} else if setbool == false {\n\t\t\tRedirectLogin(w, req)\n\t\t} else {\n\t\t\tvar p jwt.Parser\n\t\t\ttoken, _, _ := p.ParseUnverified(accessToken, &jwt.StandardClaims{})\n\t\t\tif err := token.Claims.Valid(); err != nil {\n\t\t\t\t//invalid\n\t\t\t\tRedirectLogin(w, req)\n\t\t\t\t//return\n\t\t\t} else {\n\t\t\t\t//valid\n\t\t\t\tnext(w, req)\n\t\t\t\t//return\n\t\t\t}\n\t\t}\n\t\t//RedirectLogin(w, r)\n\t\treturn\n\t})\n}", "func ValidateToken(tokenString string) (string, error) {\n\tsecret := []byte(\"kalle4ever\")\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn secret, nil\n\t})\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn claims[\"username\"].(string), nil\n\t}\n\treturn \"\", err\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, 
r)\n\t})\n}", "func ValidateIDToken(idToken, clientID, hostedDomain string) (*IDTokenClaims, error) {\n\ttoken, err := jwt.Parse(idToken, GoogleKeyFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\tmapClaims, ok := token.Claims.(jwt.MapClaims)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !mapClaims.VerifyIssuer(\"accounts.google.com\", true) {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !mapClaims.VerifyAudience(clientID, true) {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Check hosted domain\n\thd, ok := mapClaims[\"hd\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\thds, ok := hd.(string)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif hds != hostedDomain {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Check email verified\n\tev, ok := mapClaims[\"email_verified\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tevb, ok := ev.(bool)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !evb {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Email\n\temail, ok := mapClaims[\"email\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\temails, ok := email.(string)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Start setting up return value\n\trv := &IDTokenClaims{\n\t\tEmailAddress: emails,\n\t}\n\n\t// Try to get first name, it's OK if it fails\n\tfirstName, ok := mapClaims[\"given_name\"]\n\tif ok {\n\t\tnameAsString, ok := firstName.(string)\n\t\tif ok {\n\t\t\trv.FirstName = nameAsString\n\t\t}\n\t}\n\n\t// Try to get last name, it's OK if it fails\n\tlastName, ok := mapClaims[\"family_name\"]\n\tif ok {\n\t\tnameAsString, ok := lastName.(string)\n\t\tif ok {\n\t\t\trv.LastName = nameAsString\n\t\t}\n\t}\n\n\treturn rv, nil\n}", "func VerifyAuthToken(ctx context.Context, header string, verifier *oidc.IDTokenVerifier) (*oidc.IDToken, error) {\n\tparts := strings.Split(header, \" \")\n\tif len(parts) != 2 {\n\t\treturn nil, 
errors.New(\"invalid authorization header\")\n\t}\n\treturn verifier.Verify(ctx, parts[1])\n}", "func AuthorizeToken(token string) *AuthorizeTokenAttemptResponse {\n\tlookup := ReverseLookupItem(token)\n\tif !lookup.Exists {\n\t\treturn &AuthorizeTokenAttemptResponse{\"\", \"\", false}\n\t}\n\treturn &AuthorizeTokenAttemptResponse{token, lookup.ReverseLookup.ReverseValue, true}\n\n}", "func (ja *JWTAuth) Authenticate(rw http.ResponseWriter, r *http.Request) (User, bool, error) {\n\tvar (\n\t\tcandidates []string\n\t\tgotToken *Token\n\t\terr error\n\t)\n\n\tcandidates = append(candidates, getTokensFromQuery(r, ja.FromQuery)...)\n\tcandidates = append(candidates, getTokensFromHeader(r, ja.FromHeader)...)\n\tcandidates = append(candidates, getTokensFromCookies(r, ja.FromCookies)...)\n\n\tcandidates = append(candidates, getTokensFromHeader(r, []string{\"Authorization\"})...)\n\tchecked := make(map[string]struct{})\n\tparser := &jwt.Parser{\n\t\tUseJSONNumber: true, // parse number in JSON object to json.Number instead of float64\n\t}\n\n\tfor _, candidateToken := range candidates {\n\t\ttokenString := normToken(candidateToken)\n\t\tif _, ok := checked[tokenString]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tgotToken, err = parser.Parse(tokenString, func(*Token) (interface{}, error) {\n\t\t\tsignKeyString := \"-----BEGIN CERTIFICATE-----\\n\" + \"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArD2tI1RXi/guGpw4/uq/id2xG2mdrRgJ1U+fy3vOsT9YH5Y1pxIA1VVMxGixzdSlDzB6UMUTa2XMLetEzIHAz5cNc7aNF+r1wLIrLWS0wHTfjte8MKTDtUehcIF7+kQ3dq9TJ/lqYh4q3/vIOJNcBXRY18de0HeMMqQOw0n+QUrbSBAYGNyam976quRJOKTCaHy0c91FYiE9DmskanHPUyZpYE8EWWVFa9C08OGvybBIpfAfRnI/M9qmYS0putU8UfxTFa6XSSqsmECtmjECA+KXp24buBWyJYh/3HAPk5JdZZhjpLPKf/cyg5Cpk/udoBNZrcAJQzuKgRpgZsdBeQIDAQAB\" + \"\\n-----END CERTIFICATE-----\"\n\t\t\tif key, errr := jwt.ParseRSAPublicKeyFromPEM([]byte(signKeyString)); errr != nil {\n\t\t\t\treturn nil,nil\n\t\t\t} else {\n\t\t\t\treturn key, nil\n\t\t\t}\n\t\t})\n\t\tchecked[tokenString] = 
struct{}{}\n\n\t\tlogger := ja.logger.With(zap.String(\"token_string\", desensitizedTokenString(tokenString)))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\tvar gotClaims = gotToken.Claims.(MapClaims)\n\t\t// By default, the following claims will be verified:\n\t\t// - \"exp\"\n\t\t// - \"iat\"\n\t\t// - \"nbf\"\n\t\t// Here, if `aud_whitelist` or `iss_whitelist` were specified,\n\t\t// continue to verify \"aud\" and \"iss\" correspondingly.\n\t\tif len(ja.IssuerWhitelist) > 0 {\n\t\t\tisValidIssuer := false\n\t\t\tfor _, issuer := range ja.IssuerWhitelist {\n\t\t\t\tif gotClaims.VerifyIssuer(issuer, true) {\n\t\t\t\t\tisValidIssuer = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isValidIssuer {\n\t\t\t\terr = errors.New(\"invalid issuer\")\n\t\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif len(ja.AudienceWhitelist) > 0 {\n\t\t\tisValidAudience := false\n\t\t\tfor _, audience := range ja.AudienceWhitelist {\n\t\t\t\tif gotClaims.VerifyAudience(audience, true) {\n\t\t\t\t\tisValidAudience = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isValidAudience {\n\t\t\t\terr = errors.New(\"invalid audience\")\n\t\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// The token is valid. 
Continue to check the user claim.\n\t\tclaimName, gotUserID := getUserID(gotClaims, ja.UserClaims)\n\t\tif gotUserID == \"\" {\n\t\t\terr = errors.New(\"empty user claim\")\n\t\t\tlogger.Error(\"invalid token\", zap.Strings(\"user_claims\", ja.UserClaims), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Successfully authenticated!\n\t\tvar user = User{\n\t\t\tID: gotUserID,\n\t\t\tMetadata: getUserMetadata(gotClaims, ja.MetaClaims),\n\t\t}\n\t\tlogger.Info(\"user authenticated\", zap.String(\"user_claim\", claimName), zap.String(\"id\", gotUserID))\n\t\treturn user, true, nil\n\t}\n\n\treturn User{}, false, err\n}", "func (c *Conn) authenticate() error {\n\tif c.oAuth == nil || c.oAuth.Valid() {\n\t\t// Authentication is not required or already validated.\n\t\treturn nil\n\t}\n\tif !c.oAuth.IsSet() {\n\t\t// No client information to refresh the token.\n\t\treturn ErrBadToken\n\t}\n\td, err := c.downloadToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.retrieveToken(d)\n}", "func ProcessToken(tk string) (*models.Claim, bool, string, error) {\n\tmiClave := []byte(\"ClimediKey2020\")\n\tclaims := &models.Claim{}\n\tvar usuario models.Usuario\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, string(\"\"), errors.New(\"Formato de Token invalido\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn miClave, nil\n\t})\n\n\tif !tkn.Valid {\n\t\treturn claims, false, string(\"\"), errors.New(\"Token invalido\")\n\t}\n\n\tvar db string\n\tproperties, err := os.Open(\"\" + os.Getenv(\"CONFIG\") + \"/climedi/climedi.properties\")\n\tif err != nil {\n\t\tlog.Println(\"Error al leer archivo de configuraciones\")\n\t} else {\n\t\tscanner := bufio.NewScanner(properties)\n\t\tfor scanner.Scan() {\n\t\t\tlinea := scanner.Text()\n\t\t\tif strings.HasPrefix(linea, \"userdb\") {\n\t\t\t\tdb = 
linea[7:]\n\t\t\t}\n\t\t}\n\t}\n\n\tdbref := database.Connect(db)\n\tdefer database.Disconnect(dbref)\n\n\tif err == nil {\n\t\texists, _ := usuario.CheckUsuario(claims.ID, dbref)\n\n\t\tif exists {\n\t\t\tUserID = claims.ID\n\t\t\tDoctorID = claims.IDDoc\n\t\t\tUserChild = claims.Child\n\t\t\tUserMaster = claims.Master\n\t\t\tUserTipoPlan = claims.TipoPlan\n\t\t}\n\t\treturn claims, exists, UserID, nil\n\t}\n\n\treturn claims, false, string(\"\"), err\n}", "func ValidateToken(bearerHeader string) (User, error) {\n\n\t// format the token string\n\ttokenString := strings.Split(bearerHeader, \" \")[1]\n\n\tvar user User\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// Don't forget to validate the alg is what you expect:\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn []byte(\"secretkey\"), nil\n\t})\n\n\tif err != nil {\n\n\t\tfmt.Println(err)\n\t\treturn user, err\n\t}\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t// convert the interface to the map[string]interface{}\n\t\ts := claims[\"user\"].(map[string]interface{})\n\n\t\t// create a user of User type\n\t\t// convert the s[\"userID\"] interface to string\n\t\tuser := User{s[\"userID\"].(string), s[\"name\"].(string)}\n\n\t\treturn user, nil\n\n\t}\n\n\treturn user, errors.New(\"Something went wrong\")\n\n}", "func ValidateAuthToken(tokenStr string) (bool, *uuid.UUID, error) {\n\t//initialize the claims\n\tclaims := &AuthClaims{}\n\n\t//parse the JWT and load the claims\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claims, getTokenKey)\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\treturn false, nil, nil\n\t\t}\n\t\treturn false, nil, err\n\t}\n\n\t//verify the signing algorithm\n\tif token.Method.Alg() != JWTSigningAlgorithm {\n\t\treturn false, nil, fmt.Errorf(\"invalid signing algorthm: 
%s\", token.Method.Alg())\n\t}\n\n\t//check if the token is valid\n\tif !token.Valid {\n\t\treturn false, nil, nil\n\t}\n\n\t//extract the user id\n\tuserIDStr := claims.UserID\n\tuserID := uuid.FromStringOrNil(userIDStr)\n\tif userID == uuid.Nil {\n\t\treturn false, nil, nil\n\t}\n\treturn true, &userID, nil\n}", "func DecodeToken() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\ttokenStr := c.Request.Header.Get(\"Authorization\")\n\n\t\tuid, b := token.DecodeToken(tokenStr)\n\n\t\tif b {\n\t\t\tc.Set(common.TokenUid, uid)\n\t\t}\n\t\tc.Next()\n\t}\n}", "func ProcessToken(token string) (*models.Claim, bool, string, error) {\n\tprivateKey := []byte(\"EstaEsMiClavePrivadaDePrueba\")\n\tclaim := &models.Claim{}\n\n\t//Verificamos si el token que llego es valido, se utiliza la palabra Bearer como separador\n\tsplitToken := strings.Split(token, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claim, false, string(\"\"), errors.New(\"Formato del token invalido\")\n\t}\n\t//Limpiamos de espacion en blanco al token\n\ttoken = strings.TrimSpace(splitToken[1])\n\t//Mediantes una sintaxis porpia de jwt extraemos el payload del token a el modelo Claim\n\ttkn, err := jwt.ParseWithClaims(token, claim, func(_token *jwt.Token) (interface{}, error) {\n\t\treturn privateKey, nil\n\t})\n\t//Si no hay errores verificamos que el usuario del token exista\n\tif err == nil {\n\t\t_, existe, _ := db.ExisteUser(claim.Email)\n\t\tif existe == true {\n\t\t\t//En Variables publicas guardamos tanto el mail como el ID, para que puedan ser ocupadas en todo el proyecto\n\t\t\tEmail = claim.Email\n\t\t\tID = claim.ID.Hex()\n\t\t}\n\t\treturn claim, existe, ID, nil\n\t}\n\t//Si hubo un error 'err', verificamos si se produjo un fallo en obtener los datos con el ParseWithClaims\n\tif !tkn.Valid {\n\t\treturn claim, false, string(\"\"), errors.New(\"Token inválido\")\n\t}\n\t//Si el token fue valido pero aun asi produjo el error, devolvemos el error\n\treturn claim, false, string(\"\"), 
err\n}", "func authenticate(r *http.Request) *UserID {\n token, err := r.Cookie(\"token\")\n if err != nil {\n return nil\n }\n passHash, err := r.Cookie(\"passHash\")\n if err != nil {\n return nil\n }\n return authenticateUserPass(token.Value, passHash.Value)\n}", "func (o OAuth1) parseTokenResponse(resp *http.Response) *Token {\n\ttokenBytes, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\to.logger.StatusWrite(\"Error reading from response body %v\\n\", err)\n\t}\n\ttokenData := string(tokenBytes)\n\treturn o.parseTokenData(tokenData)\n}", "func validateAccessToken(token string, providedUsername string) bool {\n\tidpHost, idpPort := resolveIdpHostAndPort()\n\turl := \"https://\" + idpHost + \":\" + idpPort + \"/oauth2/introspect\"\n\tpayload := strings.NewReader(\"token=\" + token)\n\treq, err := http.NewRequest(\"POST\", url, payload)\n\tif err != nil {\n\t\tglog.Error(\"Error creating new request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\n\tusername, password := resolveCredentials()\n\treq.SetBasicAuth(username, password)\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tglog.Error(\"Error sending the request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tglog.Error(\"Error reading the response from introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tvar result map[string]interface{}\n\terr = json.Unmarshal([]byte(string(body)), &result)\n\tif err != nil {\n\t\tglog.Error(\"Error un marshalling the json: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisActive, ok := (result[\"active\"]).(bool)\n\tif !ok {\n\t\tglog.Error(\"Error casting active to boolean. 
This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisExpired := isExpired(result[\"exp\"])\n\tisValidUser := isValidUser(result[\"username\"], providedUsername)\n\treturn isExpired && isActive && isValidUser\n}", "func (p *AzureProvider) Authenticate(ctx context.Context, code string) (*sessions.State, error) {\n\t// convert authorization code into a token\n\toauth2Token, err := p.oauth.Exchange(ctx, code)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: token exchange failed %v\", err)\n\t}\n\n\t// id_token contains claims about the authenticated user\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: response did not contain an id_token\")\n\t}\n\t// Parse and verify ID Token payload.\n\tsession, err := p.IDTokenToSession(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: could not verify id_token %v\", err)\n\t}\n\n\tsession.AccessToken = oauth2Token.AccessToken\n\tsession.RefreshToken = oauth2Token.RefreshToken\n\tsession.Groups, err = p.UserGroups(ctx, session.AccessToken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: could not retrieve groups %v\", err)\n\t}\n\treturn session, nil\n}", "func (o *oidcClient) authenticate(issuer string, clientID string, audience string) error {\n\t// Store the old transport and restore it in the end.\n\toldTransport := o.httpClient.Transport\n\to.oidcTransport.audience = audience\n\to.httpClient.Transport = o.oidcTransport\n\n\tdefer func() {\n\t\to.httpClient.Transport = oldTransport\n\t}()\n\n\tprovider, err := o.getProvider(issuer, clientID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.oidcTransport.deviceAuthorizationEndpoint = provider.GetDeviceAuthorizationEndpoint()\n\n\tresp, err := rp.DeviceAuthorization(oidcScopes, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Code: %s\\n\\n\", resp.UserCode)\n\n\tu, _ := 
url.Parse(resp.VerificationURIComplete)\n\n\terr = openBrowser(u.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT)\n\tdefer stop()\n\n\ttoken, err := rp.DeviceAccessToken(ctx, resp.DeviceCode, time.Duration(resp.Interval)*time.Second, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif o.tokens.Token == nil {\n\t\to.tokens.Token = &oauth2.Token{}\n\t}\n\n\to.tokens.Expiry = time.Now().Add(time.Duration(token.ExpiresIn))\n\to.tokens.IDToken = token.IDToken\n\to.tokens.Token.AccessToken = token.AccessToken\n\to.tokens.TokenType = token.TokenType\n\n\tif token.RefreshToken != \"\" {\n\t\to.tokens.Token.RefreshToken = token.RefreshToken\n\t}\n\n\treturn nil\n}", "func getAuthTokenFromHeader(ctx echo.Context) (string, error) {\n\theaderContent := ctx.Request().Header.Get(echo.HeaderAuthorization)\n\theaderContent = strings.TrimSpace(headerContent)\n\tprefix := \"Bearer:\"\n\tif strings.HasPrefix(headerContent, prefix) {\n\t\trunes := []rune(headerContent)\n\t\treturn strings.TrimSpace(string(runes[len(prefix):])), nil\n\t}\n\treturn \"\", fmt.Errorf(\"auth header not found\")\n}", "func VerifyAuthToken(w http.ResponseWriter, r *http.Request) {\n\t//Endpoint to verify user JWT\n\t//Useful for other services\n\ttoken := r.Header.Get(\"Authorization\")\n\tresult, _ := utils.VerifyJWT(token)\n\tverifyAuthTokenResp := response.JsonResponse(\"Token is valid\", 200)\n\tinvalidAuthTokenResp := response.JsonResponse(\"Token is invalid or expired\", 500)\n\tif result {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(verifyAuthTokenResp)\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(invalidAuthTokenResp)\n\t}\n}", "func (r commonResult) ExtractToken() (*Token, error) {\n\tvar s Token\n\terr := 
r.ExtractInto(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse the token itself from the stored headers.\n\ts.ID = r.Header.Get(\"X-Subject-Token\")\n\n\treturn &s, err\n}", "func parseToken(secret []byte, token string) (*AccessToken, error) {\n\tt, err := jwtParse(secret, token)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !t.Valid {\n\t\treturn nil, nil\n\t}\n\n\treturn jwtToAccessToken(t)\n}", "func Base64ToAuthenticate(token string) (*Authenticate, error) {\n\tjs, err := b64.StdEncoding.DecodeString(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tins := new(Authenticate)\n\terr = json.Unmarshal(js, ins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ins, nil\n}", "func (c *resourcePrincipalFederationClient) exchangeToken(publicKeyBase64 string, tokenResponse resourcePrincipalTokenResponse) (sessionToken string, err error) {\n\trpServiceClient := c.ResourcePrincipalSessionTokenClient\n\n\t//Set the signer of this client to be the instance principal provider\n\trpServiceClient.Signer = common.DefaultRequestSigner(&c.instancePrincipalKeyProvider)\n\n\t// Call identity service to get resource principal session token\n\tsessionTokenReq := resourcePrincipalSessionTokenRequest{\n\t\tresourcePrincipalSessionTokenRequestBody{\n\t\t\tServicePrincipalSessionToken: tokenResponse.Body.ServicePrincipalSessionToken,\n\t\t\tResourcePrincipalToken: tokenResponse.Body.ResourcePrincipalToken,\n\t\t\tSessionPublicKey: publicKeyBase64,\n\t\t},\n\t}\n\n\tsessionTokenHTTPReq, err := common.MakeDefaultHTTPRequestWithTaggedStruct(http.MethodPost,\n\t\t\"\", sessionTokenReq)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsessionTokenHTTPRes, err := rpServiceClient.Call(context.Background(), &sessionTokenHTTPReq)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer common.CloseBodyIfValid(sessionTokenHTTPRes)\n\n\tsessionTokenRes := x509FederationResponse{}\n\terr = common.UnmarshalResponse(sessionTokenHTTPRes, &sessionTokenRes)\n\tif err != nil 
{\n\t\treturn\n\t}\n\n\tsessionToken = sessionTokenRes.Token.Token\n\treturn\n}", "func (p Plugin) ExchangeToken(ctx context.Context, trustDomain, k8sSAjwt string) (\n\tstring /*access token*/, time.Time /*expireTime*/, int /*httpRespCode*/, error) {\n\taud := constructAudience(trustDomain)\n\tvar jsonStr = constructFederatedTokenRequest(aud, k8sSAjwt)\n\treq, _ := http.NewRequest(\"POST\", SecureTokenEndpoint, bytes.NewBuffer(jsonStr))\n\treq.Header.Set(\"Content-Type\", contentType)\n\n\tresp, err := p.hTTPClient.Do(req)\n\terrMsg := \"failed to call token exchange service. \"\n\tif err != nil || resp == nil {\n\t\tstatusCode := http.StatusServiceUnavailable\n\t\t// If resp is not null, return the actually status code returned from the token service.\n\t\t// If resp is null, return a service unavailable status and try again.\n\t\tif resp != nil {\n\t\t\tstatusCode = resp.StatusCode\n\t\t\terrMsg += fmt.Sprintf(\"HTTP status: %s. Error: %v\", resp.Status, err)\n\t\t} else {\n\t\t\terrMsg += fmt.Sprintf(\"HTTP response empty. Error: %v\", err)\n\t\t}\n\t\treturn \"\", time.Now(), statusCode, errors.New(errMsg)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\trespData := &federatedTokenResponse{}\n\tif err := json.Unmarshal(body, respData); err != nil {\n\t\treturn \"\", time.Now(), resp.StatusCode, fmt.Errorf(\n\t\t\t\"failed to unmarshal response data. HTTP status: %s. Error: %v. Body size: %d\", resp.Status, err, len(body))\n\t}\n\n\tif respData.AccessToken == \"\" {\n\t\treturn \"\", time.Now(), resp.StatusCode, fmt.Errorf(\n\t\t\t\"exchanged empty token. HTTP status: %s. 
Response: %v\", resp.Status, string(body))\n\t}\n\n\treturn respData.AccessToken, time.Now().Add(time.Second * time.Duration(respData.ExpiresIn)), resp.StatusCode, nil\n}", "func getIdToken(token *oauth2.Token) (map[string]interface{}, error) {\n\ttokenData := make(map[string]interface{})\n\n\tidToken := token.Extra(\"id_token\")\n\tif idToken == nil {\n\t\treturn tokenData, nil\n\t}\n\n\tjwtParser := jwt.Parser{\n\t\tSkipClaimsValidation: true,\n\t}\n\n\tt, _, err := jwtParser.ParseUnverified(idToken.(string), jwt.MapClaims{})\n\tif err != nil {\n\t\treturn tokenData, errors.Wrap(err, \"failed to parse id_token\")\n\t}\n\n\tif claims, ok := t.Claims.(jwt.MapClaims); ok {\n\t\tfor k, v := range claims {\n\t\t\ttokenData[k] = v\n\t\t}\n\t}\n\n\treturn tokenData, nil\n}", "func (l *RemoteProvider) ExtractToken(w http.ResponseWriter, r *http.Request) {\n\tl.TokenStoreMut.Lock()\n\tdefer l.TokenStoreMut.Unlock()\n\n\ttokenString, err := l.GetToken(r)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Token not found: %s\", err.Error())\n\t\treturn\n\t}\n\tnewts := l.TokenStore[tokenString]\n\tif newts != \"\" {\n\t\ttokenString = newts\n\t}\n\n\tresp := map[string]interface{}{\n\t\t\"meshery-provider\": l.Name(),\n\t\ttokenName: tokenString,\n\t}\n\tlogrus.Debugf(\"encoded response : %v\", resp)\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\terr = ErrEncoding(err, \"Auth Details\")\n\t\tlogrus.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func verifyToken(tokenString string) (*jwt.Token, error) {\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(os.Getenv(\"JWT_SECRET\")), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn token, nil\n}", "func (a AuthConfigToken) Authenticate(metric *string) (bool, 
error) {\n\tauthSplit := strings.SplitN(*metric, \".\", 2)\n\tif len(authSplit) == 2 {\n\t\ttoken, exists := a.Tokens[authSplit[0]]\n\t\tif !exists || !token {\n\t\t\tauthError := errors.New(\"Invalid authentication token\")\n\t\t\treturn false, authError\n\t\t}\n\t} else {\n\t\tauthError := errors.New(\"Missing authentication token\")\n\t\treturn false, authError\n\t}\n\t*metric = authSplit[1]\n\treturn true, nil\n}", "func Validate(redisdb *redis.Client, auth string) (string, error) {\n\n\t// Extract the JWT token from the Authorization header\n\ttokenStr, err := parseAuthHeader(auth)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while parsing authorization header: %s\", err.Error())\n\t}\n\n\t// Validate token and extract the subject\n\tsub, err := validateToken(tokenStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while validating JWT token: %s\", err.Error())\n\t}\n\n\t// Lookup the session in Redis\n\tuser, err := Get(redisdb, sub, \"remote\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while retrieving user ID from session: %s\", err.Error())\n\t}\n\n\treturn user, nil\n\n}", "func Authenticate(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\t// do some work before calling the next handler\n\t\tclient, err := FirebaseApp.Auth(context.Background())\n\n\t\tidToken := c.Request().Header.Get(\"Authorization\")\n\t\tidToken = strings.Replace(idToken, `bearer `, \"\", 1)\n\t\tif ENV == \"development\" || ENV == \"test\" {\n\t\t\tfmt.Println(\"Authorization\", idToken)\n\t\t}\n\t\ttoken, err := client.VerifyIDToken(idToken)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error verifying ID token: %v\\n\", err)\n\t\t\tresponse := Response{\n\t\t\t\tMessage: \"Missing or invalid token.\",\n\t\t\t}\n\t\t\tc.Response().WriteHeader(http.StatusUnauthorized)\n\t\t\tjson.NewEncoder(c.Response()).Encode(response)\n\t\t\treturn err\n\t\t}\n\n\t\tif err := setCurrentUser(token.UID, c); err != nil {\n\t\t\treturn 
errors.WithStack(err)\n\t\t}\n\t\terr = next(c)\n\t\treturn err\n\t}\n}", "func (a *Authenticator) ValidateToken(jwt string) (string, error) {\n\tvalidatedToken, err := jwkkeys.ValidateGoogleClaims(a.cachedKeys, jwt, a.audience, jwkkeys.GoogleIssuers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn validatedToken.GoogleClaims.Email, nil\n}", "func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {\n\tfound, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)\n\tif found || err != nil {\n\t\treturn err\n\t}\n\n\treturn resolveBearerAuthTokenProviderChain(ctx, cfg, configs)\n}", "func TokenExtractor(encodedToken string) string {\n\tif encodedToken != \"\" {\n\t\tencodedPayload := strings.Split(encodedToken, \".\")\n\t\tdecodedPayload, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(encodedPayload[1])\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tpayload := make(map[string]string)\n\t\terr = json.Unmarshal(decodedPayload, &payload)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn payload[\"userId\"]\n\t}\n\treturn \"\"\n}", "func (v *verifierPrivate) ValidateTokenAndGetClaims(tokenString string, customClaims interface{}) (*Token, error) {\n\n\t// let us check if the verifier is already expired. 
If it is just return verifier expired error\n\t// The caller has to re-initialize the verifier.\n\ttoken := Token{}\n\ttoken.standardClaims = &jwt.StandardClaims{}\n\tparsedToken, err := jwt.ParseWithClaims(tokenString, token.standardClaims, func(token *jwt.Token) (interface{}, error) {\n\n\t\tif keyIDValue, keyIDExists := token.Header[\"kid\"]; keyIDExists {\n\n\t\t\tkeyIDString, ok := keyIDValue.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kid (key id) in jwt header is not a string : %v\", keyIDValue)\n\t\t\t}\n\n\t\t\tif matchPubKey, found := v.pubKeyMap[keyIDString]; !found {\n\t\t\t\treturn nil, &MatchingCertNotFoundError{keyIDString}\n\t\t\t} else {\n\t\t\t\t// if the certificate just expired.. we need to return appropriate error\n\t\t\t\t// so that the caller can deal with it appropriately\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.After(matchPubKey.expTime) {\n\t\t\t\t\treturn nil, &MatchingCertJustExpired{keyIDString}\n\t\t\t\t}\n\t\t\t\t// if the verifier expired, we need to use a new instance of the verifier\n\t\t\t\tif time.Now().After(v.expiration) {\n\t\t\t\t\treturn nil, &VerifierExpiredError{v.expiration}\n\t\t\t\t}\n\t\t\t\treturn matchPubKey.pubKey, nil\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"kid (key id) field missing in token. field is mandatory\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tif jwtErr, ok := err.(*jwt.ValidationError); ok {\n\t\t\tswitch e := jwtErr.Inner.(type) {\n\t\t\tcase *MatchingCertNotFoundError, *VerifierExpiredError, *MatchingCertJustExpired:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t\treturn nil, jwtErr\n\t\t}\n\t\treturn nil, err\n\t}\n\ttoken.jwtToken = parsedToken\n\t// so far we have only got the standardClaims parsed. We need to now fill the customClaims\n\n\tparts := strings.Split(tokenString, \".\")\n\t// no need check for the number of segments since the previous ParseWithClaims has already done this check.\n\t// therefor the following is redundant. 
If we change the implementation, will need to revisit\n\t//if len(parts) != 3 {\n\t//\treturn nil, \"jwt token to be parsed seems to be in \"\n\t//}\n\n\t// parse Claims\n\tvar claimBytes []byte\n\n\tif claimBytes, err = jwt.DecodeSegment(parts[1]); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode claims part of the jwt token\")\n\t}\n\tdec := json.NewDecoder(bytes.NewBuffer(claimBytes))\n\terr = dec.Decode(customClaims)\n\ttoken.customClaims = customClaims\n\n\treturn &token, nil\n}", "func (auth *EdgeSampleAuth) Authenticate(request *http.Request) (int, string, string) {\n\treturn security.AuthFailed, \"\", \"\"\n}", "func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*SecurityInfo, error) {\n\n\ttoken := fmt.Sprintf(\"%s%s\", authPrefix, k.Token())\n\n\treq := esapi.SecurityAuthenticateRequest{\n\t\tHeader: map[string][]string{AuthKey: []string{token}},\n\t}\n\n\tres, err := req.Do(ctx, es)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"apikey auth request %s: %w\", k.Id, err)\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tif res.IsError() {\n\t\treturn nil, fmt.Errorf(\"apikey auth response %s: %s\", k.Id, res.String())\n\t}\n\n\tvar info SecurityInfo\n\tdecoder := json.NewDecoder(res.Body)\n\tif err := decoder.Decode(&info); err != nil {\n\t\treturn nil, fmt.Errorf(\"apikey auth parse %s: %w\", k.Id, err)\n\t}\n\n\treturn &info, nil\n}", "func (a *HyperflexApiService) GetHyperflexServiceAuthTokenByMoidExecute(r ApiGetHyperflexServiceAuthTokenByMoidRequest) (*HyperflexServiceAuthToken, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexServiceAuthToken\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.GetHyperflexServiceAuthTokenByMoid\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: 
err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/ServiceAuthTokens/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/csv\", \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = 
a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := 
&GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (p *provider) checkToken(spec *spec.Spec, req *http.Request, token string) (clientToken, error) {\n\t// 1. uc token\n\tucToken, err := auth.VerifyUCClientToken(token)\n\tif err == nil {\n\t\treturn clientToken{\n\t\t\tClientID: ucToken.ClientID,\n\t\t\tClientName: ucToken.ClientName,\n\t\t}, nil\n\t}\n\t// 2. openapi oauth2 token\n\toauthToken, err := auth.VerifyOpenapiOAuth2Token(p.oauth2server, nil, req)\n\tif err != nil {\n\t\treturn clientToken{}, err\n\t}\n\treturn clientToken{\n\t\tClientID: oauthToken.ClientID,\n\t\tClientName: oauthToken.ClientName,\n\t}, nil\n}", "func Authenticate(role string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken, err := GetTokenFromHeader(c)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tif len(role) > 0 && !token.Role.Check(role) {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Unauthorized\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}", "func ExtractToken(r *http.Request) string {\r\n\tauthorization := r.Header.Get(\"Authorization\")\r\n\tregex := regexp.MustCompile(\"(Bearer\\\\s)(.*)\")\r\n\tmatch := regex.FindStringSubmatch(authorization)\r\n\r\n\tif len(match) > 0 {\r\n\t\treturn match[2]\r\n\t}\r\n\r\n\treturn \"\"\r\n}", "func (m *middlewareStruct) CheckJWTToken(c *gin.Context) {\n\tbearToken := c.GetHeader(\"Authorization\")\n\n\tstrArr := strings.Split(bearToken, \" \")\n\tif len(strArr) == 2 {\n\n\t\ttoken, err := m.service.VerifyToken(strArr[1], os.Getenv(\"ACCESS_SECRET\"))\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\tclaims, _ := 
token.Claims.(jwt.MapClaims)\n\n\t\tc.Set(\"user_id\", claims[\"user_id\"])\n\n\t\treturn\n\n\t}\n\n\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": \"Token inválido\"})\n\treturn\n}", "func ValidateToken(pathHandler server.HandlerType) server.HandlerType {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"ValidateToken Received request: %v\", req)\n\t\tprovidedToken := req.Header.Get(tokenRequestHeader)\n\t\tif providedToken == \"\" {\n\t\t\tlog.Println(\"Token required; No token provided.\")\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\n\t\tif actualToken, ok := generatedTokens[providedToken]; ok {\n\t\t\taccessTime := time.Now()\n\t\t\tduration := accessTime.Sub(actualToken.CreatedAt)\n\t\t\tif int(duration.Seconds()) >= actualToken.TTL {\n\t\t\t\tlog.Println(\"Token has expired\")\n\t\t\t\tdelete(generatedTokens, providedToken)\n\t\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"Token validated!\")\n\t\t\tpathHandler(res, req)\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid token provided: %v\", providedToken)\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.67421335", "0.6439286", "0.6411624", "0.6336948", "0.62938714", "0.6270822", "0.6206244", "0.61743766", "0.61042845", "0.6099566", "0.5945026", "0.59336144", "0.59284616", "0.5907546", "0.5892094", "0.583817", "0.5832258", "0.58032966", "0.5799427", "0.57990456", "0.57935154", "0.5722378", "0.57120204", "0.5710158", "0.57048225", "0.5684839", "0.56830543", "0.56803864", "0.5668737", "0.5663812", "0.5646012", "0.5628401", "0.56108093", "0.56000215", "0.5596119", "0.5593919", "0.5591319", "0.5588357", "0.5571116", "0.55650485", "0.5534812", "0.5520806", "0.55039746", "0.5475244", "0.5470196", "0.5466194", "0.5457347", "0.54563165", "0.54554915", "0.5442555", "0.54294485", "0.54238474", "0.54043955", "0.5393152", "0.53866583", "0.53848594", "0.53682643", "0.5366451", "0.53628117", "0.5360339", "0.5350156", "0.5348477", "0.53440344", "0.5341315", "0.53395116", "0.5335187", "0.5327193", "0.532597", "0.53202397", "0.5319979", "0.5317721", "0.53165686", "0.5315438", "0.53038394", "0.53009456", "0.52957463", "0.5293701", "0.5289592", "0.52889276", "0.5283868", "0.5283448", "0.5283071", "0.52801687", "0.52770627", "0.527454", "0.5268863", "0.5268487", "0.5264046", "0.5263508", "0.5261182", "0.526094", "0.52608263", "0.5260519", "0.5252321", "0.52490205", "0.5248725", "0.52428854", "0.5241085", "0.5233157", "0.52324915" ]
0.6345341
3
CustomToken creates a signed custom authentication token with the specified user ID.
func (c *MockClient) CustomToken(uid string) (string, error) { return "abc", nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateCustomToken(ID string) (string, error) {\n\tclient, err := fbApp.Auth(context.Background())\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error getting Auth client\")\n\t}\n\n\ttoken, err := client.CustomToken(ID)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error minting custom token\")\n\t}\n\n\treturn token, nil\n}", "func (c *Client) GenerateCustomToken(uid string) (string, error) {\n\ttoken, err := c.AuthClient.CustomToken(context.Background(), uid)\n\tif err != nil {\n\t\tlog.Fatalf(\"error minting custom token: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tlog.Printf(\"Got custom token: %v\\n\", token)\n\treturn token, err\n}", "func (c *Client) CustomToken(ctx context.Context, uid string) (string, error) {\n\tclient := firebase.FirebaseAuth\n\treturn client.CustomToken(ctx, uid)\n}", "func (a *Auth) SignInWithCustomToken(token string) (*User, error) {\n\tclient := &http.Client{}\n\n\turl := a.App.Prefix + \"https://identitytoolkit.googleapis.com/v1/accounts:signInWithCustomToken?key=\" + a.App.APIKey\n\n\tdata := map[string]interface{}{\"token\": token, \"returnSecureToken\": true}\n\treqdata, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(string(reqdata)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar respdata map[string]interface{}\n\terr = json.Unmarshal(body, &respdata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, exists := respdata[\"error\"]\n\tif exists {\n\t\treturn nil, errors.New(respdata[\"error\"].(map[string]interface{})[\"message\"].(string))\n\t}\n\n\ttimeLength, err := strconv.Atoi(respdata[\"expiresIn\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &User{\n\t\tIDToken: respdata[\"idToken\"].(string),\n\t\tRefreshToken: 
respdata[\"refreshToken\"].(string),\n\t\tExpiresIn: time.Duration(timeLength),\n\t\tOtherData: OtherData{},\n\t}, nil\n}", "func generateUserToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tiat := time.Now().Unix()\n\tclaims[\"exp\"] = 0\n\tclaims[\"iat\"] = iat\n\tclaims[\"typ\"] = \"Bearer\"\n\tclaims[\"preferred_username\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"email\"] = identity.Email\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}", "func GenerateCustomToken(payload map[string]interface{}, secret string, expire time.Duration) (string, error) {\n\tif len(payload) == 0 {\n\t\treturn \"\", ErrPayloadEmpty\n\t}\n\n\tif secret == \"\" {\n\t\tsecret = defaultSecret\n\t}\n\n\tif expire == 0 {\n\t\texpire = defaultExpireTime\n\t}\n\n\t// Set claims\n\tclaims := jwt.MapClaims{}\n\tfor k, v := range payload {\n\t\tclaims[k] = v\n\t}\n\tclaims[\"exp\"] = time.Now().Add(expire).Unix()\n\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response\n\treturn token.SignedString([]byte(secret))\n}", "func CreateToken(userId primitive.ObjectID) (tokenString string, err error) {\n\n\t// Get config file\n\tconfig, err := ConfigHelper.GetConfig()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\ttype MyCustomClaims struct {\n\t\tUserId primitive.ObjectID `json:\"userId\"`\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, MyCustomClaims{\n\t\tuserId,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + (config.JwtExpHours * 3600),\n\t\t},\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err = token.SignedString([]byte(config.JwtSecret))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\treturn\n}", "func 
CreateUserIDToken(key []byte, userID string) (string, error) {\n\tnow := time.Now()\n\texp := now.Add(90 * 24 * time.Hour)\n\n\tjsonToken := paseto.JSONToken{\n\t\tAudience: \"recruitment.empirica.app\",\n\t\tIssuer: \"recruitment.empirica.app\",\n\t\tJti: xid.New().String(),\n\t\tSubject: userID,\n\t\tIssuedAt: now,\n\t\tExpiration: exp,\n\t\tNotBefore: now,\n\t}\n\n\ttoken, err := paseto.Encrypt(key, jsonToken, \"\")\n\t// token = \"v2.local.E42A2iMY9SaZVzt-WkCi45_aebky4vbSUJsfG45OcanamwXwieieMjSjUkgsyZzlbYt82miN1xD-X0zEIhLK_RhWUPLZc9nC0shmkkkHS5Exj2zTpdNWhrC5KJRyUrI0cupc5qrctuREFLAvdCgwZBjh1QSgBX74V631fzl1IErGBgnt2LV1aij5W3hw9cXv4gtm_jSwsfee9HZcCE0sgUgAvklJCDO__8v_fTY7i_Regp5ZPa7h0X0m3yf0n4OXY9PRplunUpD9uEsXJ_MTF5gSFR3qE29eCHbJtRt0FFl81x-GCsQ9H9701TzEjGehCC6Bhw.c29tZSBmb290ZXI\"\n\n\treturn token, err\n}", "func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed 
to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}", "func verifyCustomToken(t *testing.T, ct, uid string) *auth.Token {\n\tidt, err := signInWithCustomToken(ct)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer deleteUser(uid)\n\n\tvt, err := client.VerifyIDToken(context.Background(), idt)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif vt.UID != uid {\n\t\tt.Errorf(\"UID = %q; want UID = %q\", vt.UID, uid)\n\t}\n\tif vt.Firebase.Tenant != \"\" {\n\t\tt.Errorf(\"Tenant = %q; want = %q\", vt.Firebase.Tenant, \"\")\n\t}\n\treturn vt\n}", "func CreateToken(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {\n\n\t// Get user from context\n\tuser := ctx.GetUser()\n\tif user == nil {\n\t\tctx.Unauthorized(\"missing user, please login first\")\n\t\treturn\n\t}\n\n\t// Read request body\n\tdefer func() { _ = req.Body.Close() }()\n\n\treq.Body = http.MaxBytesReader(resp, req.Body, 1048576)\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tctx.BadRequest(fmt.Sprintf(\"unable to read request body : %s\", err))\n\t\treturn\n\t}\n\n\t// Create token\n\ttoken := common.NewToken()\n\n\t// Deserialize json body\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, token)\n\t\tif err != nil {\n\t\t\tctx.BadRequest(fmt.Sprintf(\"unable to deserialize request body : %s\", err))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Generate token uuid and set creation date\n\ttoken.Initialize()\n\ttoken.UserID = user.ID\n\n\t// Save token\n\terr = ctx.GetMetadataBackend().CreateToken(token)\n\tif err != nil {\n\t\tctx.InternalServerError(\"unable to create token : %s\", err)\n\t\treturn\n\t}\n\n\t// Print token in the json response.\n\tvar bytes []byte\n\tif bytes, err = utils.ToJson(token); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to serialize json response : %s\", err))\n\t}\n\n\t_, _ = resp.Write(bytes)\n}", "func (engine 
ssoEngineImpl) generateJWTToken(authenticatedUser *authenticatedUser) (*common.CustomClaims, string, error) {\n\n\t// Build the claims\n\tclaims := &common.CustomClaims{\n\t\tUser: authenticatedUser.UserName,\n\t\tRoles: authenticatedUser.Roles,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + engine.tokenSecondsToLive,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"EasySSO Server\",\n\t\t},\n\t}\n\t// Build the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)\n\n\t// Convert the token to a string\n\ttokenString, err := token.SignedString(engine.privateKey)\n\tif err != nil {\n\t\tlog.Error(\"Unable to sign generated token\", err)\n\t\treturn nil, \"\", err\n\t}\n\treturn claims, tokenString, nil\n}", "func CreateToken(user *models.User, ExpiresAt int64) (string, error) {\n\n\tclaims := &models.Claims{\n\t\tID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: ExpiresAt,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(\"pingouin123\"))\n}", "func GenerateToken(m *models.User) (*AuthToken, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(24 * time.Hour)\n\n\tclaims := userStdClaims{\n\t\tUser: m,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"gin-server-api\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\tauthToken := &AuthToken{Token: token, ExpiresAt: expireTime.Format(\"2006-01-02 15:04:05\")}\n\treturn authToken, err\n}", "func NewChatkitUserToken(appID string, keyID string, keySecret string, userID string, expiryDuration time.Duration) (tokenString string, expiry time.Time, err error) {\n\tjwtClaims, tokenExpiry := getGenericTokenClaims(appID, keyID, expiryDuration)\n\n\tjwtClaims[\"sub\"] = userID\n\n\ttokenString, err = 
signToken(keySecret, jwtClaims)\n\treturn tokenString, tokenExpiry, err\n}", "func (t *Jwt) GenerateToken(userID uint, expiredAt time.Duration) (accessToken string, err error) {\n\texp := time.Now().Add(expiredAt)\n\t// jwt token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\"exp\": exp.Unix(), \"userID\": userID})\n\t// sign the jwt token\n\taccessToken, err = token.SignedString(t.PrivateKey)\n\tif err != nil {\n\t\t// todo: log error\n\t}\n\treturn\n}", "func NewMytoken(\n\toidcSub, oidcIss, name string, r restrictions.Restrictions, c api.Capabilities, rot *api.Rotation,\n\tauthTime unixtime.UnixTime,\n) (*Mytoken, error) {\n\tnow := unixtime.Now()\n\tid, err := mtid.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmt := &Mytoken{\n\t\tMytoken: api.Mytoken{\n\t\t\tVersion: api.TokenVer,\n\t\t\tTokenType: api.TokenType,\n\t\t\tSeqNo: 1,\n\t\t\tName: name,\n\t\t\tIssuer: config.Get().IssuerURL,\n\t\t\tSubject: utils.CreateMytokenSubject(oidcSub, oidcIss),\n\t\t\tAudience: config.Get().IssuerURL,\n\t\t\tOIDCIssuer: oidcIss,\n\t\t\tOIDCSubject: oidcSub,\n\t\t\tCapabilities: c,\n\t\t},\n\t\tID: id,\n\t\tIssuedAt: now,\n\t\tNotBefore: now,\n\t\tAuthTime: authTime,\n\t\tRotation: rot,\n\t}\n\tr.EnforceMaxLifetime(oidcIss)\n\tif len(r) > 0 {\n\t\tmt.Restrictions = r\n\t\texp := r.GetExpires()\n\t\tif exp != 0 {\n\t\t\tmt.ExpiresAt = exp\n\t\t}\n\t\tnbf := r.GetNotBefore()\n\t\tif nbf != 0 && nbf > now {\n\t\t\tmt.NotBefore = nbf\n\t\t}\n\t}\n\treturn mt, nil\n}", "func Token(user *User, secretSignKey []byte) (string, error) {\n\t// \ttoken := jwt.New(jwt.SigningMethodHS256)\n\t// FooFoo\n\n\t// Create the Claims\n\tclaims := &Claims{\n\t\tuser.ID,\n\t\tuser.Email,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(TokenDuration).Unix(),\n\t\t\tIssuer: \"gorth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(secretSignKey)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\n\treturn tokenString, nil\n}", "func NewCustomTokenFactory(token string) TokenFactory {\n\treturn &customTokenfactory{tokenString: token}\n}", "func GenerateSignedUserToken(identity *Identity) (string, *jwt.Token) {\n\ttoken := generateUserToken(identity)\n\ttokenStr := signToken(token)\n\n\treturn tokenStr, token\n}", "func (m *manager) GenerateToken(userID string, username string, roles []string) (string, error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(m.expireTime * time.Second)\n\n claims := Token{\n UserID: userID,\n Name: m.hashService.Make(username),\n Roles: roles,\n StandardClaims: &jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n Issuer: m.issuer,\n Audience: m.audience,\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err := tokenClaims.SignedString(m.jwtSecret)\n\n return token, err\n}", "func (c *Client) CreateToken(ctx context.Context, user *models.User) (*auth.APIToken, *models.Token, error) {\n\t// If the user provides no user, we will make a token for the current session user\n\tif user == nil {\n\t\ti, err := c.Me(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tuser = i\n\t}\n\n\tvariables := make(map[string]interface{})\n\tvariables[\"user_id\"] = user.ID\n\n\tresp := &CreateTokenResponse{}\n\terr := c.transport.Raw(ctx, `\n\t\tmutation CreateToken($user_id: String!) 
{\n\t\t\tcreateToken(input: { user_id: $user_id }) {\n\t\t\t\tsecret\n\t\t\t\ttoken {\n\t\t\t\t\tid\n\t\t\t\t}\n\t\t\t}\n }\n `, variables, resp)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsecret, err := auth.FromPassword(resp.Response.Secret)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttoken, err := auth.NewAPIToken(secret, resp.Response.Token.ID)\n\treturn token, resp.Response.Token, err\n}", "func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func (j *JWTUtil) CreateToken(userID uint) (string, error) {\n\n\tclaims := jwt.MapClaims{}\n\n\tvar duration time.Duration\n\tdurationStr := os.Getenv(\"JWT_LIFESPAN_MINUTES\")\n\tif durationStr == \"\" {\n\t\tduration = DefaultTokenLifeSpan\n\t} else {\n\t\td, _ := strconv.ParseInt(durationStr, 10, 64)\n\t\tduration = time.Duration(d) * time.Minute\n\t}\n\n\tclaims[USER_ID] = userID\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(duration).Unix()\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsecret := os.Getenv(\"JWT_SECRET\")\n\tif secret == \"\" {\n\t\treturn \"\", errors.New(\"missing jwt token 
secret\")\n\t}\n\ttoken, err := jwtToken.SignedString([]byte(secret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func createJwtToken(u user.User) (string, error) {\n\t// Set custom claims\n\tclaims := &middleware.LoginCustomClaims{\n\t\tu.Username,\n\t\tfalse,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 72).Unix(),\n\t\t},\n\t}\n\n\t// Create token with claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response.\n\tkey := viper.GetString(\"auth.signkey\")\n\tt, err := token.SignedString([]byte(key))\n\treturn t, err\n\n}", "func GenerateToken(c *gin.Context, user *models.UserResource) string {\n\tclaims := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.StandardClaims{\n\t\tIssuer: user.ID,\n\t\tExpiresAt: jwt.NewTime(float64(time.Now().Add(24 * time.Hour).UnixNano())),\n\t})\n\n\ttoken, err := claims.SignedString([]byte(SecretKey))\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Unable to authonticate\"})\n\t\treturn \"\"\n\t}\n\tc.SetCookie(\n\t\t\"jwt\", token, int(time.Now().Add(24*time.Hour).UnixNano()), \"/\", \"localhost\", false, true,\n\t)\n\treturn token\n}", "func prepareToken(user *interfaces.User) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"expiry\": time.Now().Add(time.Minute ^ 60).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\thelpers.HandleErr(err)\n\n\treturn token\n\n}", "func NewMytoken(oidcSub, oidcIss string, r restrictions.Restrictions, c, sc api.Capabilities, rot *api.Rotation) *Mytoken {\n\tnow := unixtime.Now()\n\tmt := &Mytoken{\n\t\tVersion: api.TokenVer,\n\t\tTokenType: api.TokenType,\n\t\tID: mtid.New(),\n\t\tSeqNo: 1,\n\t\tIssuedAt: now,\n\t\tNotBefore: now,\n\t\tIssuer: config.Get().IssuerURL,\n\t\tSubject: 
utils.CreateMytokenSubject(oidcSub, oidcIss),\n\t\tAudience: config.Get().IssuerURL,\n\t\tOIDCIssuer: oidcIss,\n\t\tOIDCSubject: oidcSub,\n\t\tCapabilities: c,\n\t\tSubtokenCapabilities: sc,\n\t\tRotation: rot,\n\t}\n\tr.EnforceMaxLifetime(oidcIss)\n\tif len(r) > 0 {\n\t\tmt.Restrictions = r\n\t\texp := r.GetExpires()\n\t\tif exp != 0 {\n\t\t\tmt.ExpiresAt = exp\n\t\t}\n\t\tnbf := r.GetNotBefore()\n\t\tif nbf != 0 && nbf > now {\n\t\t\tmt.NotBefore = nbf\n\t\t}\n\t}\n\treturn mt\n}", "func createToken(user *models.User) string {\n\tvar store models.Store\n\tvar storeID uint\n\n\tif user.HaveStore == true {\n\t\tif config.DB.First(&store, \"user_id = ?\", user.ID).RecordNotFound() {\n\t\t\tstoreID = 0\n\t\t}\n\t\tstoreID = store.ID\n\t} else {\n\t\tstoreID = 0\n\t}\n\t// to send time expire, issue at (iat)\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"user_role\": user.Role,\n\t\t\"user_store\": user.HaveStore,\n\t\t\"store_id\": storeID,\n\t\t\"exp\": time.Now().AddDate(0, 0, 7).Unix(),\n\t\t\"iat\": time.Now().Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := jwtToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn tokenString\n}", "func setToken(u *models.User) (string, error) {\n\tt := time.Now()\n\tclaims := customClaims{\n\t\tRole: u.Role,\n\t\tActive: u.Active,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tSubject: strconv.Itoa(u.ID),\n\t\t\tExpiresAt: t.Add(expireDelay).Unix(),\n\t\t\tIssuedAt: t.Unix(),\n\t\t\tIssuer: iss}}\n\treturn getTokenString(&claims)\n}", "func (c *APIClient) CreateUserTempMFAToken(ctx _context.Context, id int32) apiCreateUserTempMFATokenRequest {\n\treturn apiCreateUserTempMFATokenRequest{\n\t\tclient: c,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func GenToken(id uint) string {\n\tjwt_token := jwt.New(jwt.GetSigningMethod(\"HS256\"))\n\t// Set some 
claims\n\tjwt_token.Claims = jwt.MapClaims{\n\t\t\"id\": id,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\t// Sign and get the complete encoded token as a string\n\ttoken, _ := jwt_token.SignedString([]byte(NBSecretPassword))\n\treturn token\n}", "func GenerateToken(username string, isAdmin bool, expires int, signingKey []byte) (string, error) {\n\tiat := time.Now()\n\texpirationTime := iat.Add(time.Duration(expires) * time.Second)\n\t// Create the JWT claims, which includes the username and expiry time\n\tclaims := &CustomClaims{\n\t\tUsername: username,\n\t\tIsAdmin: isAdmin,\n\t\tIssuedAt: iat.Unix(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\t// Declare the token with the algorithm used for signing, and the claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t// Create the JWT string.\n\treturn token.SignedString(signingKey)\n}", "func GenerateToken(user string) (string, error) {\n\tvar err error\n\tsecret := \"secret\"\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": user,\n\t\t\"iss\": strconv.FormatInt(GetCurrentTimeMillis(), 10),\n\t})\n\ttokenString, err := token.SignedString([]byte(secret))\n\n\treturn tokenString, err\n}", "func (t *jwtMgr) createJWTToken(user *auth.User, privateClaims map[string]interface{}) (string, time.Time, error) {\n\tcurrTime := time.Now()\n\texp := currTime.Add(t.expiration)\n\tif user == nil || user.Name == \"\" {\n\t\tt.logger.Errorf(\"User information is required to create a JWT token\")\n\t\treturn \"\", exp, ErrMissingUserInfo\n\t}\n\t// standard jwt claims like sub, iss, exp\n\tclaims := jwt.Claims{\n\t\tSubject: user.Name,\n\t\tIssuer: issuerClaimValue,\n\t\tExpiry: jwt.NewNumericDate(exp),\n\t\tIssuedAt: jwt.NewNumericDate(currTime),\n\t}\n\t// venice custom claims\n\tif privateClaims == nil {\n\t\tprivateClaims = 
make(map[string]interface{})\n\t}\n\tprivateClaims[TenantClaim] = user.GetTenant()\n\tprivateClaims[RolesClaim] = user.Status.GetRoles()\n\t// create signed JWT\n\ttoken, err := jwt.Signed(t.signer).Claims(claims).Claims(privateClaims).CompactSerialize()\n\tif err != nil {\n\t\tt.logger.Errorf(\"Unable to create JWT token: Err: %v\", err)\n\t\treturn \"\", exp, err\n\t}\n\treturn token, exp, err\n}", "func createNewAuthToken(w http.ResponseWriter, r *http.Request, u *chatable.User) (*chatable.PublicToken, chatable.CompoundError) {\n\t// create a new token for the user\n\t// client_id is on the header\n\tclientID := r.Header.Get(\"ClientID\")\n\tcid, err := strconv.Atoi(clientID)\n\tif err != nil {\n\t\tcid = -1\n\t}\n\tat := chatable.NewAuthToken(u.ID, cid, chatable.StringSlice{\"all\"})\n\tif err = store.AuthTokenStore.Create(at); err != nil {\n\t\treturn nil, chatable.NewServerError(err.Error())\n\t}\n\treturn at.ToPublicToken(), nil\n}", "func (j *JWT) GenerateToken(user models.User) (string, error) {\n\texpirationTime := time.Now().Add(7 * 24 * time.Hour)\n\tclaims := &requset.CustomClaims{\n\t\tTelephone: user.Telephone,\n\t\tUserName: user.Username,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"y\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(j.JwtSecret)\n}", "func (middleware *Middleware) GenerateToken(field interface{}) (string, error) {\n\treturn middleware.CreateToken(CustomClaims{\n\t\tCustomField: field,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tNotBefore: time.Now().Unix() - 10,\n\t\t\tExpiresAt: time.Now().Unix() + middleware.ExpireSecond,\n\t\t\tIssuer: middleware.SigningKeyString,\n\t\t},\n\t})\n}", "func CreateToken(user model.User, jwtKey string) (string, error) {\n\n\texpireToken := time.Now().Add(time.Hour * 48).Unix()\n\n\t// Set-up claims\n\tclaims := model.TokenClaims{\n\t\tID: 
user.ID,\n\t\tUsername: user.Username,\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t\tIssuer: \"smartdashboard-backend-auth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(jwtKey))\n\n\treturn tokenString, err\n}", "func GenerateAuthToken(userID *uuid.UUID) (string, error) {\n\t//compute the expiration\n\texpiration := time.Now().Unix() + JWTExpirationSec\n\n\t//create the claims\n\tclaims := &AuthClaims{\n\t\tUserID: userID.String(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expiration,\n\t\t},\n\t}\n\n\t//create the token\n\talgorithm := jwt.GetSigningMethod(JWTSigningAlgorithm)\n\ttoken := jwt.NewWithClaims(algorithm, claims)\n\n\t//create the signed string\n\ttokenStr, err := token.SignedString([]byte(GetJWTKey()))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"sign auth token\")\n\t}\n\treturn tokenStr, nil\n}", "func EmbedUserTokenInContext(ctx context.Context, identity *Identity) context.Context {\n\tif identity == nil {\n\t\tidentity = NewIdentity()\n\t}\n\t_, token := GenerateSignedUserToken(identity)\n\treturn embedTokenInContext(ctx, token)\n}", "func CreateToken(user model.User) (string, error) {\n\t//Creating token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t//Adding claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"name\"] = user.Name\n\tclaims[\"mobile\"] = user.Mobile\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\ttoken.Claims = claims\n\n\t//Signing the token\n\tsignedToken, ok := token.SignedString([]byte(signingkey))\n\n\treturn signedToken, ok\n}", "func AuthCreateToken(c map[string]interface{}) (string, error) {\n\t// Create token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tfor k, v := range c {\n\t\tclaims[k] = v\n\t}\n\tclaims[\"exp\"] = 
time.Now().Add(viper.GetDuration(\"http.token.lifetime\")).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(viper.GetString(\"http.token.key\")))\n\treturn t, err\n}", "func (client IdentityClient) createAuthToken(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/users/{userId}/authTokens\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateAuthTokenResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func New(user *user.Model) *jwt.Token {\n\ttoken := jwt.New(jwt.GetSigningMethod(\"RS256\"))\n\ttoken.Claims[\"uid\"] = user.Id.Int64\n\ttoken.Claims[\"user\"] = user\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Minute * tokenExpTime).Unix()\n\treturn token\n}", "func GenerateToken(userID uint) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userID\": userID,\n\t})\n\n\ttokenStr, err := token.SignedString([]byte(secret))\n\n\treturn tokenStr, err\n}", "func (a *Auth) GenerateToken(userID string) (TokenInfo, error) {\n\taccessToken, err := a.generateAccess(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trefreshToken, err := a.generateRefresh(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenInfo := &tokenInfo{\n\t\tTokenType: a.opts.tokenType,\n\t\tAccessToken: accessToken,\n\t\tRefreshToken: refreshToken,\n\t}\n\treturn tokenInfo, nil\n}", "func (a *AuthService) CreateToken(userDto *dtos.UserDto) (*models.Token, error) {\n\ttoken := &models.Token{}\n\n\ttoken.AtExp = 
time.Now().Add(time.Minute * 10).Unix()\n\ttoken.AccessUUID = uuid.NewV4().String()\n\n\ttoken.RtExp = time.Now().Add(time.Hour * 24 * 7).Unix()\n\ttoken.RefreshUUID = uuid.NewV4().String()\n\n\t//Generate Access Token\n\tatclaims := jwt.MapClaims{}\n\n\tatclaims[\"authorized\"] = true\n\tatclaims[\"access_uuid\"] = token.AccessUUID\n\tatclaims[\"id\"] = userDto.ID\n\tatclaims[\"name\"] = userDto.Name\n\tatclaims[\"email\"] = userDto.Email\n\tatclaims[\"exp\"] = token.AtExp\n\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atclaims)\n\n\tif accessToken, err := at.SignedString([]byte(config.AccessSecret)); err != nil {\n\t\ttoken.AccessToken = accessToken\n\t} else {\n\t\treturn nil, err\n\t}\n\n\t//Generate Refresh Token\n\n\trtClaims := jwt.MapClaims{}\n\n\trtClaims[\"authorized\"] = true\n\trtClaims[\"refresh_uuid\"] = token.RefreshUUID\n\trtClaims[\"id\"] = userDto.ID\n\trtClaims[\"name\"] = userDto.Name\n\trtClaims[\"email\"] = userDto.Email\n\trtClaims[\"exp\"] = token.RtExp\n\n\trt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)\n\n\tif refreshToken, err := rt.SignedString([]byte(config.RefreshSecret)); err != nil {\n\t\ttoken.RefreshToken = refreshToken\n\t} else {\n\t\treturn nil, err\n\t}\n\n\treturn token, nil\n}", "func (user *User) GenerateToken() {\n\n\tvalue, _ := strconv.Atoi(os.Getenv(\"token_exp\"))\n\n\t//Create new JWT token for the newly registered account\n\ttk := &Token{UserID: uint(user.ID), ExpirationTime: time.Now().Add(time.Duration(value) * time.Second).Unix()}\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"token_password\")))\n\tuser.Token = tokenString\n\n}", "func (c *Config) SignedToken(u *auth.User) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS512, jwtClaims{\n\t\tauth.Claims{u.ID, u.Name},\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: c.clock.Now().Add(12 * time.Hour).Unix(),\n\t\t\tIssuer: 
\"bissy-api\",\n\t\t},\n\t})\n\n\treturn token.SignedString(c.signingKey)\n}", "func (a *authSvc) BuildToken(user User) (*string, *int64, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t})\n\tsignedToken, err := token.SignedString(a.authSecret) // sign the token\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tnow := time.Now() // get current time\n\tnowPlusExpiry := now.Add(tokenExpiryMin * time.Minute) // add 60 minutes to current time to get token expiry\n\tnowPlusExpiryTimestamp := nowPlusExpiry.UnixNano() // get the expiry timestamp\n\treturn &signedToken, &nowPlusExpiryTimestamp, nil\n}", "func (aw *AuthWriter) CreateToken(userID uint64) (*TokenDetails, error) {\n\ttd := &TokenDetails{userID: userID}\n\n\tvar err error\n\n\t// Creating Access Token\n\ttd.AccessExpires = time.Now().Add(aw.ATExpiry).Unix()\n\ttd.AccessUUID = uuid.NewV4().String()\n\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"access_uuid\"] = td.AccessUUID\n\tatClaims[\"user_id\"] = userID\n\tatClaims[\"exp\"] = td.AccessExpires\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttd.AccessToken, err = at.SignedString([]byte(aw.ATSecret))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creating Refresh Token\n\ttd.RefreshExpires = time.Now().Add(aw.RTExpiry).Unix()\n\ttd.RefreshUUID = uuid.NewV4().String()\n\n\trtClaims := jwt.MapClaims{}\n\trtClaims[\"refresh_uuid\"] = td.RefreshUUID\n\trtClaims[\"user_id\"] = userID\n\trtClaims[\"exp\"] = td.RefreshExpires\n\trt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)\n\ttd.RefreshToken, err = rt.SignedString([]byte(aw.RTSecret))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn td, nil\n}", "func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: 
jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}", "func (c *RESTClient) ExchangeCustomTokenForIDAndRefreshToken(firebaseAPIKey, token string) (*TokenAndRefreshToken, error) {\n\t// build the URL including Query params\n\tv := url.Values{}\n\tv.Set(\"key\", firebaseAPIKey)\n\turi := url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"www.googleapis.com\",\n\t\tPath: \"identitytoolkit/v3/relyingparty/verifyCustomToken\",\n\t\tForceQuery: false,\n\t\tRawQuery: v.Encode(),\n\t}\n\n\t// build and execute the request\n\treqBody := verifyCustomTokenRequest{\n\t\tToken: token,\n\t\tReturnSecureToken: true,\n\t}\n\tbuf := new(bytes.Buffer)\n\tjson.NewEncoder(buf).Encode(reqBody)\n\treq, err := http.NewRequest(\"POST\", uri.String(), buf)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating new POST request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode == 400 {\n\t\tvar badReqRes badRequestResponse\n\t\terr = json.NewDecoder(res.Body).Decode(&badReqRes)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"decode failed: %w\", err)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%d %s\", badReqRes.Error.Code, badReqRes.Error.Message)\n\t} else if res.StatusCode > 400 {\n\t\treturn nil, fmt.Errorf(\"%s\", res.Status)\n\t}\n\n\ttokenResponse := verifyCustomTokenResponse{}\n\terr = json.NewDecoder(res.Body).Decode(&tokenResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"json decode failed: %w\", err)\n\t}\n\treturn &TokenAndRefreshToken{\n\t\tIDToken: tokenResponse.IDToken,\n\t\tRefreshToken: tokenResponse.RefreshToken,\n\t}, 
nil\n}", "func (ti *TokenIssuer) Create(user *models.User) (*oauth2.Token, error) {\n\tnow := ti.Clock.Now()\n\n\tuserID := strconv.Itoa(user.ID)\n\n\t// Create the new access token\n\tnewAccessClaims := jwt.StandardClaims{\n\t\tSubject: userID,\n\t\tExpiresAt: now.Add(defaultTokenExpiry).Unix(),\n\t\tIssuedAt: now.Unix(),\n\t}\n\taccessToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, newAccessClaims).SignedString([]byte(ti.GlobalTokenKey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the new refresh token\n\tnewRefreshClaims := jwt.StandardClaims{\n\t\tSubject: userID,\n\t\tExpiresAt: now.Add(defaultRefreshTokenExpiry).Unix(),\n\t\tIssuedAt: now.Unix(),\n\t}\n\trefreshToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, newRefreshClaims).SignedString([]byte(ti.GlobalTokenKey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &oauth2.Token{\n\t\tTokenType: \"Bearer\",\n\t\tAccessToken: accessToken,\n\t\tExpiry: time.Unix(newAccessClaims.ExpiresAt, 0),\n\t\tRefreshToken: refreshToken,\n\t}, nil\n}", "func (asap *ASAP) SignCustomClaims(audience string, customClaims jws.Claims, privateKey cr.PrivateKey) (token []byte, err error) {\n\tvar signingMethod crypto.SigningMethod\n\n\tswitch privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tsigningMethod = crypto.SigningMethodRS256\n\tcase *ecdsa.PrivateKey:\n\t\tsigningMethod = crypto.SigningMethodES256\n\tdefault:\n\t\treturn nil, errors.New(\"bad private key\")\n\t}\n\n\tasap.setAsapClaims(customClaims, audience)\n\treturn asap.signClaims(customClaims, privateKey, signingMethod)\n}", "func (m *JWTManager) CreateToken(u model.User, permissions *[]string) (string, error) {\n\n\tb, _ := json.Marshal(tokenFormat{\n\t\tUserID: u.ID,\n\t\tPermissions: permissions,\n\t})\n\n\tnow := time.Now()\n\t// set claims\n\tclaims := jwt.StandardClaims{\n\t\tIssuedAt: now.Unix(),\n\t\tExpiresAt: now.Add(m.OP.Expiration).Unix(),\n\t\tSubject: string(b),\n\t\tId: string(generateRandomKey(32)),\n\t}\n\tt := 
jwt.NewWithClaims(jwt.GetSigningMethod(m.OP.SigningMethod), claims)\n\n\treturn t.SignedString(m.OP.PrivateKey)\n}", "func (u *User) Token(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.Token\")\n\tdefer span.End()\n\n\tv, ok := ctx.Value(web.KeyValues).(*web.Values)\n\tif !ok {\n\t\treturn web.NewShutdownError(\"web value missing from context\")\n\t}\n\n\topt := option.WithCredentialsFile(u.authenticator.GoogleKeyFile)\n\t// Initialize default app\n\tapp, err := firebase.NewApp(context.Background(), nil, opt)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\t// Access auth service from the default app\n\tclient, err := app.Auth(context.Background())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\ttoken, err := client.VerifyIDToken(ctx, params[\"id\"])\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"verifying token with firebase\")\n\t}\n\n\tuserRecord, err := client.GetUser(ctx, token.UID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fetching user from token UID\")\n\t}\n\n\tclaims, err := user.Authenticate(ctx, u.db, v.Now, userRecord.Email, token.UID)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase user.ErrAuthenticationFailure:\n\t\t\treturn web.NewRequestError(err, http.StatusUnauthorized)\n\t\tdefault:\n\t\t\treturn errors.Wrap(err, \"authenticating\")\n\t\t}\n\t}\n\n\tvar tkn struct {\n\t\tToken string `json:\"token\"`\n\t}\n\ttkn.Token, err = u.authenticator.GenerateToken(claims)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"generating token\")\n\t}\n\n\t//dbuser, err := model.CreateNewUserIfNotExists(name, email, phone, avatar, provider, uid, token.Expires, token.IssuedAt, emailVerified)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"\")\n\t}\n\n\treturn web.Respond(ctx, w, tkn, http.StatusOK)\n}", "func NewToken(uid int32) (string, string, error) {\n\ttoken := jwtgo.New(jwtgo.SigningMethodES256)\n\n\tclaims := 
token.Claims.(jwtgo.MapClaims)\n\tclaims[claimUID] = uid\n\tclaims[claimExpire] = time.Now().Add(time.Hour * tokenExpireInHour).Unix()\n\n\tt, err := token.SignedString([]byte(TokenHMACKey))\n\treturn respTokenKey, t, err\n}", "func (this *Token) CreateJWTToken(typeUser string, user interface{}) string {\n\n\t// Create new JWT token for the newly registered account\n\tvar id uint64\n\tswitch typeUser {\n\tcase \"user_buyers\":\n\t\tid = user.(*UserBuyers).ID\n\t}\n\n\ttk := &Token{UserId: id, UserType: typeUser, UserDetail: user}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"TOKEN_PASSWORD\")))\n\n\treturn tokenString\n}", "func generateServiceAccountToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"service_accountname\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tclaims[\"iat\"] = time.Now().Unix()\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}", "func NewToken(claims map[string]interface{}, privatekeyFilename string) (*jwt.Token, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, jwt.MapClaims(claims))\n\t// use the test private key to sign the token\n\tkey, err := PrivateKey(privatekeyFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsigned, err := token.SignedString(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttoken.Raw = signed\n\tlog.Debug(nil, map[string]interface{}{\"signed_token\": signed, \"claims\": claims}, \"generated test token with custom sub\")\n\treturn token, nil\n}", "func generateAuthToken(u *db.UserModel) (*types.AuthorizedUser, error) {\n\tc := make(chan *types.TokenOutput)\n\n\te := time.Now().Add(time.Hour * 72).Unix()\n\n\tclaims := &types.JwtUserClaims{\n\t\tCurrentUser: types.CurrentUser{Name: u.Username, Email: u.Email, Id: u.ID},\n\t\tStandardClaims: 
jwt.StandardClaims{\n\t\t\tExpiresAt: e,\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ts, err := t.SignedString([]byte(config.JWT_SECRET))\n\n\tif err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\tgo tokenModel.Create(\n\t\t&types.Token{UserId: u.ID, Token: s, Expiration: e},\n\t\tc,\n\t)\n\n\tif r := <-c; r.Err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\treturn &types.AuthorizedUser{Token: s}, nil\n}", "func (c *UsersController) GenerateToken(r *http.Request, args map[string]string, body interface{}) *ApiResponse {\n\tctx := r.Context()\n\tr.ParseForm()\n\n\t//TODO: fix validation on oauthStateString\n\t// - using the current validation, two user can authorize at the same time and failed on generating tokens\n\t//state := r.Form.Get(\"state\")\n\t//if state != oauthStateString {\n\t//\treturn Error(http.StatusInternalServerError, \"Invalid Oauth State\" + state + oauthStateString)\n\t//}\n\n\tcode := r.Form.Get(\"code\")\n\tif code == \"\" {\n\t\treturn Error(http.StatusBadRequest, \"Code not found\")\n\t}\n\n\ttoken, err := c.GitlabService.GenerateToken(ctx, code)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Error(http.StatusInternalServerError, \"Code exchange failed\")\n\t}\n\n\t//Store generated token here\n\tuser, err := c.GitlabService.GetUserInfo(token.AccessToken)\n\tsavedUser, err := c.UsersService.Save(user)\n\tif savedUser == nil {\n\t\treturn Error(http.StatusInternalServerError, \"User is already present in the database\")\n\t}\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\t//Build the user account\n\tuserAccount := &models.Account{\n\t\tUserId: savedUser.Id,\n\t\tAccessToken: token.AccessToken,\n\t\tAccountType: models.AccountTypes.Gitlab,\n\t\tTokenType: token.TokenType,\n\t\tRefreshToken: token.RefreshToken,\n\t}\n\n\t_, err = c.AccountService.Save(userAccount)\n\tif err != nil {\n\t\treturn 
Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn Ok(\"Authorized\")\n}", "func (a *Service) GenerateJweToken(customClaims map[string]interface{}) (string, *time.Time, *error_utils.ApiError) {\n\n\tenc, err := jose.NewEncrypter(\n\t\tjose.ContentEncryption(a.encryptionAlgorithm),\n\t\tjose.Recipient{Algorithm: jose.DIRECT, Key: a.encryptionKey},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\"),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\texpire := a.timeFunc().UTC().Add(a.timeout)\n\n\tclaims := map[string]interface{} { }\n\tclaims[\"exp\"] = expire.Unix()\n\tclaims[\"orig_iat\"] = a.timeFunc().Unix()\n\tclaims[\"iss\"] = a.issuer\n\n\tif customClaims != nil {\n\t\tfor key, value := range customClaims {\n\t\t\tclaims[key] = value\n\t\t}\n\t}\n\n\ttoken, err := jwt.Encrypted(enc).Claims(claims).CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\treturn token, &expire, nil\n}", "func (client *RedisClient) GenerateToken(userID string) (string, error) {\n\tid := uuid.NewV4()\n\texp := time.Duration(600 * time.Second) // 10 minutes\n\n\terr := client.redisdb.Set(id.String(), userID, exp).Err()\n\treturn id.String(), err\n}", "func GenerateToken(username, dept_id string) (string, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(330 * 24 * time.Hour)\n\n\tclaims := CustomClaims{\n\t\tusername,\n\t\tdept_id,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuer: \"dingtalk\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func GenerateToken(payload map[string]interface{}) (string, error) {\n\treturn GenerateCustomToken(payload, defaultSecret, defaultExpireTime)\n}", "func CreateToken(userId uint64, secret_name string) (string, error) {\n\n //Retrieve secret value from secrets 
manager\n\tsecret, err := getSecretValue(secret_name);\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n atClaims := jwt.MapClaims{}\n atClaims[\"authorized\"] = true\n atClaims[\"user_id\"] = userId\n atClaims[\"exp\"] = time.Now().Add(time.Minute * 15).Unix()\n at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(secret))\n if err != nil {\n return \"\", err\n }\n\tlog.Println(\"Token is successfully created\")\n return token, nil\n}", "func UserToken(w http.ResponseWriter, r *http.Request, c router.Context) error {\n\tdb, ok := c.Meta[\"db\"].(*sqlx.DB)\n\tif !ok {\n\t\treturn errors.New(\"db not set in context\")\n\t}\n\ttokenSecret, ok := c.Meta[\"tokenSecret\"].([]byte)\n\tif !ok {\n\t\treturn errors.New(\"token secret not set in context\")\n\t}\n\n\tif r.FormValue(\"grant_type\") != \"password\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"unsupported_grant_type\", \"supports only password grant type\"})\n\t}\n\n\tlogin := r.FormValue(\"login\")\n\tif login == \"\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_request\", \"login required\"})\n\t}\n\n\tpassword := r.FormValue(\"password\")\n\tif password == \"\" {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_request\", \"password required\"})\n\t}\n\n\tu := data.User{}\n\tif err := u.GetByLogin(db, login); err != nil {\n\t\tif e, ok := err.(*data.Error); ok {\n\t\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_grant\", e.Desc})\n\t\t}\n\t\treturn err\n\t}\n\n\tif !u.VerifyPassword(password) {\n\t\treturn res.BadRequest(w, res.ErrorMsg{\"invalid_grant\", \"failed to authenticate user\"})\n\t}\n\n\t// Since all is well, generate token and add to database\n\tt := data.Token{\n\t\tUserID: u.ID,\n\t\tExpiresIn: (30 * 24 * time.Hour).Nanoseconds(), // 30 days\n\t}\n\tif err := t.Insert(db); err != nil {\n\t\treturn err\n\t}\n\n\t// get the encoded JSON Web token\n\tjwt, err := t.EncodeJWT(tokenSecret)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// 
prepare oAuth2 access token payload\n\tpayload := struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tTokenType string `json:\"token_type\"`\n\t\tExpiresIn string `json:\"expires_in\"`\n\t}{\n\t\tjwt,\n\t\t\"bearer\",\n\t\ttime.Duration(t.ExpiresIn).String(),\n\t}\n\n\treturn res.OK(w, payload)\n}", "func CreateToken(userKey string) (*TokenDetails, error) {\n\ttd := &TokenDetails{}\n\ttd.AtExpires = time.Now().Add(time.Minute * 15).Unix()\n\ttd.AccessUUID = uuid.NewV4().String()\n\n\ttd.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()\n\ttd.RefreshUUID = uuid.NewV4().String()\n\n\tvar err error\n\t//Creating Access Token\n\tos.Setenv(\"ACCESS_SECRET\", os.Getenv(\"ACCESS_SECRET\"))\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"authorized\"] = true\n\tatClaims[\"access_uuid\"] = td.AccessUUID\n\tatClaims[\"user_id\"] = userKey\n\tatClaims[\"exp\"] = td.AtExpires\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttd.AccessToken, err = at.SignedString([]byte(os.Getenv(\"ACCESS_SECRET\")))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//Creating Refresh Token\n\tos.Setenv(\"REFRESH_SECRET\", \"mcmvmkmsdnfsdmfdsjf\") //this should be in an env file\n\trtClaims := jwt.MapClaims{}\n\trtClaims[\"refresh_uuid\"] = td.RefreshUUID\n\trtClaims[\"user_id\"] = userKey\n\trtClaims[\"exp\"] = td.RtExpires\n\trt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)\n\ttd.RefreshToken, err = rt.SignedString([]byte(os.Getenv(\"REFRESH_SECRET\")))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn td, nil\n}", "func CreateToken(user models.User) (string, error) {\n\ttk := Token{\n\t\tUserID: user.Login,\n\t\tDatabaseID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuer: \"gopds-api\",\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, err := token.SignedString([]byte(config.AppConfig.GetString(\"sessions.key\")))\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\treturn tokenString, nil\n}", "func (f JwtFactory) NewIDToken(username, host, clientID, nonce string) (string, error) {\r\n\tt := jwt.New(jwt.GetSigningMethod(\"RS256\"))\r\n\r\n\tt.Claims = &struct {\r\n\t\tUsername string `json:\"username\"`\r\n\t\tNonce string `json:\"nonce,omitempty\"`\r\n\r\n\t\t// azp is the authorized party - the party to which the ID Token was\r\n\t\t// issued. Same as Audience.\r\n\t\tAzp string `json:\"azp\"`\r\n\r\n\t\t// Purpose defines what this JWT is for, either access_token or\r\n\t\t// id_token.\r\n\t\tPurpose string `json:\"purpose\"`\r\n\r\n\t\tjwt.StandardClaims\r\n\t}{\r\n\t\tusername,\r\n\t\tnonce,\r\n\t\tclientID,\r\n\t\t\"id_token\",\r\n\t\tgetStandardClaims(host, username, clientID),\r\n\t}\r\n\r\n\treturn f.sign(t)\r\n}", "func (ts *TokenService) Encode(user *pb.User) (string, error) {\n\n\t// Build Claim\n\tcurrentTime := time.Now()\n\texpireTime := currentTime.Add(TokenValidityPeriod)\n\n\tclaims := MyCustomClaims{\n\t\tUser: user,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: currentTime.Unix(),\n\t\t\tIssuer: ClaimIssuer,\n\t\t},\n\t}\n\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Build signed string with our key\n\tkey, err := getKeyFromVault()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tss, err := token.SignedString(key)\n\n\treturn ss, nil\n}", "func CreateToken(id, username string) (string, error) {\n\tvar err error\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"authorized\"] = true\n\tatClaims[\"ID\"] = id\n\tatClaims[\"username\"] = username\n\tatClaims[\"exp\"] = time.Now().Add(time.Hour * 23).Unix()\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(os.Getenv(\"jwtsecret\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn token, nil\n}", "func createAccessToken(user model.User) (string, error) {\n\tvar newUser = model.User{}\n\tnewUser.ID = 
user.ID\n\texpiresAt := time.Now().Add(time.Duration(config.Conf.JwtTokenExpiration) * time.Millisecond)\n\tclaims := UserClaim{\n\t\tnewUser,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expiresAt.Unix(),\n\t\t},\n\t}\n\n\t// Generates access accessToken and refresh accessToken\n\tunSignedToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn unSignedToken.SignedString([]byte(config.Conf.JwtSecret))\n}", "func GenerateToken(payload PayLoad, expireTime int64) (string, error) {\n\n\tclaims := Claims{\n\t\tpayload.ID,\n\t\tpayload.Account,\n\t\tEncodeMD5(payload.Password),\n\t\tpayload.Scope,\n\t\tpayload.IsSuper,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime,\n\t\t\tIssuer: \"liaoliao\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func AuthtokenTokenByUserID(db XODB, userID int) (*AuthtokenToken, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`key, created, user_id ` +\n\t\t`FROM public.authtoken_token ` +\n\t\t`WHERE user_id = $1`\n\n\t// run query\n\tXOLog(sqlstr, userID)\n\tat := AuthtokenToken{\n\t\t_exists: true,\n\t}\n\n\terr = db.QueryRow(sqlstr, userID).Scan(&at.Key, &at.Created, &at.UserID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &at, nil\n}", "func GenAuthTokenHandler(c *gin.Context) {\r\n\t// Create a new token object, specifying signing method and the claims\r\n\t// you would like it to contain.\r\n\r\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\r\n\t\t\"foo\": \"bar\",\r\n\t\t\"expire\": func() int64 {\r\n\t\t\tnow := time.Now()\r\n\t\t\tduration, _ := time.ParseDuration(\"14d\")\r\n\t\t\tm1 := now.Add(duration)\r\n\t\t\treturn m1.Unix()\r\n\t\t}(),\r\n\t})\r\n\r\n\t// Sign and get the complete encoded token as a string using the secret\r\n\ttokenString, err := 
token.SignedString([]byte(utils.AppConfig.Server.SecretKey))\r\n\r\n\tfmt.Println(tokenString, err)\r\n\tc.String(http.StatusOK, tokenString)\r\n}", "func (defaultActorTokensProvider) GenerateIDToken(ctx context.Context, serviceAccount, audience string) (tok string, err error) {\n\terr = withCredentialsClient(ctx, func(client *iam.CredentialsClient) (err error) {\n\t\ttok, err = client.GenerateIDToken(ctx, serviceAccount, audience, true, nil)\n\t\treturn\n\t})\n\treturn\n}", "func (e *env) NewUserToken(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tw.Write([]byte(\"User token:\" + e.authState.GenerateRegisterToken(\"user\")))\n\t\treturn\n\tdefault:\n\t}\n}", "func GenerateToken(key []byte, userID int64, credential string) (string, error) {\n\n\t//new token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"user_id\"] = userID\n\tclaims[\"credential\"] = credential\n\tclaims[\"exp\"] = time.Now().Add(time.Hour*720).UnixNano() / int64(time.Millisecond)\n\ttoken.Claims = claims\n\n\t// Sign and get as a string\n\ttokenString, err := token.SignedString(key)\n\treturn tokenString, err\n}", "func CreateToken(user string) (string, error) {\n\treturn createToken(jwt.MapClaims{\"username\": user})\n}", "func createAccessToken(userID string, refreshUUID string, expires int64) (string, error) {\n\tclaims := CustomClaimsAcessToken{\n\t\tUser_id: userID,\n\t\tRefresh_uuid: refreshUUID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expires,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\tsignedToken, err := token.SignedString([]byte(os.Getenv(\"TOKEN_SECRET\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn signedToken, nil\n}", "func HandleCreateTokenWithTrustID(t *testing.T, options tokens.AuthOptionsBuilder, requestJSON string) {\n\ttesthelper.SetupHTTP()\n\tdefer testhelper.TeardownHTTP()\n\n\tclient := 
gophercloud.ServiceClient{\n\t\tProviderClient: &gophercloud.ProviderClient{},\n\t\tEndpoint: testhelper.Endpoint(),\n\t}\n\n\ttesthelper.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttesthelper.TestMethod(t, r, \"POST\")\n\t\ttesthelper.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\ttesthelper.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\ttesthelper.TestJSONRequest(t, r, requestJSON)\n\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tfmt.Fprintf(w, `{\n \"token\": {\n \"expires_at\": \"2013-02-27T18:30:59.999999Z\",\n \"issued_at\": \"2013-02-27T16:30:59.999999Z\",\n \"methods\": [\n \"password\"\n ],\n \"OS-TRUST:trust\": {\n \"id\": \"fe0aef\",\n \"impersonation\": false,\n\t\t\t\t\t\t\"redelegated_trust_id\": \"3ba234\",\n\t\t\t\t\t\t\"redelegation_count\": 2,\n \"links\": {\n \"self\": \"http://example.com/identity/v3/trusts/fe0aef\"\n },\n \"trustee_user\": {\n \"id\": \"0ca8f6\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n }\n },\n \"trustor_user\": {\n \"id\": \"bd263c\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/bd263c\"\n }\n }\n },\n \"user\": {\n \"domain\": {\n \"id\": \"1789d1\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/domains/1789d1\"\n },\n \"name\": \"example.com\"\n },\n \"email\": \"[email protected]\",\n \"id\": \"0ca8f6\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n },\n \"name\": \"Joe\"\n }\n }\n}`)\n\t})\n\n\tvar actual trusts.TokenExt\n\terr := tokens.Create(&client, options).ExtractInto(&actual)\n\tif err != nil {\n\t\tt.Errorf(\"Create returned an error: %v\", err)\n\t}\n\texpected := trusts.TokenExt{\n\t\tToken: trusts.Token{\n\t\t\tToken: tokens.Token{\n\t\t\t\tExpiresAt: gophercloud.JSONRFC3339Milli(time.Date(2013, 02, 27, 18, 30, 59, 999999000, time.UTC)),\n\t\t\t},\n\t\t\tTrust: trusts.Trust{\n\t\t\t\tID: \"fe0aef\",\n\t\t\t\tImpersonation: 
false,\n\t\t\t\tTrusteeUser: trusts.TrusteeUser{\n\t\t\t\t\tID: \"0ca8f6\",\n\t\t\t\t},\n\t\t\t\tTrustorUser: trusts.TrustorUser{\n\t\t\t\t\tID: \"bd263c\",\n\t\t\t\t},\n\t\t\t\tRedelegatedTrustID: \"3ba234\",\n\t\t\t\tRedelegationCount: 2,\n\t\t\t},\n\t\t},\n\t}\n\ttesthelper.AssertDeepEquals(t, expected, actual)\n}", "func newTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t// Read the bytes from the body\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Schema Validation:\n\tjsonErrors, err := validateRequestSchema(tokenRequestSchema, bodyBytes)\n\t// General validation error\n\tif err != nil {\n\t\tcode := http.StatusInternalServerError\n\t\tif err == errInvalidJSON {\n\t\t\tcode = http.StatusBadRequest\n\t\t}\n\t\tresultErrorJSON(w, code, err.Error())\n\t\treturn\n\t}\n\n\t// JSON Schema errors\n\tif jsonErrors != nil {\n\t\tresultSchemaErrorJSON(w, jsonErrors)\n\t\treturn\n\t}\n\n\tvar payload tokenPayload\n\terr = json.Unmarshal(bodyBytes, &payload)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusBadRequest, errInvalidPayload.Error())\n\t\treturn\n\t}\n\n\t// TODO: Use your own methods to log someone in and then return a new Token\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (cc *CloudComb) UserToken() (string, error) {\n\t// user token request params\n\ttype userTokenReq struct {\n\t\tAppKey string `json:\"app_key\"`\n\t\tAppSecret string `json:\"app_secret\"`\n\t}\n\treq := userTokenReq{\n\t\tAppKey: cc.appKey,\n\t\tAppSecret: cc.appSecret,\n\t}\n\n\t// generate json body\n\tbody := new(bytes.Buffer)\n\terr := json.NewEncoder(body).Encode(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// do rest request\n\tresult, _, err := cc.doRESTRequest(\"POST\", \"/api/v1/token\", \"\", 
nil, body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// user token response messages\n\ttype userTokenRes struct {\n\t\tToken string `json:\"token\"`\n\t\t//ExpiresIn uint `json:\"expires_in\"`\n\t}\n\tvar res userTokenRes\n\n\t// parse json\n\tif err := json.NewDecoder(strings.NewReader(result)).Decode(&res); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Token, nil\n}", "func NewToken(id int, role string) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"role\": role,\n\t\t\"userID\": id,\n\t\t\"nbf\": time.Now().Unix(),\n\t\t\"iat\": time.Now().Unix(),\n\t\t\"exp\": time.Now().Local().Add(time.Hour*time.Duration(JWT_EXP_HOUR) + time.Minute*time.Duration(JWT_EXP_MIN) + time.Second*time.Duration(JWT_EXP_SEC)).Unix(),\n\t})\n\t// Sign and get the complete encoded token as a string using the secret\n\tsToken, err := token.SignedString([]byte(JWT_SECRET))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn sToken, nil\n}", "func UserAuth(h http.Handler) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n token, err := request.ParseFromRequest(r, MyAuth2Extractor, func(token *jwt.Token) (interface{}, error) {\n b := GetKey()\n return b, nil\n }, request.WithClaims(&CustomClaims{}))\n\n if err != nil {\n http.Error(w, http.StatusText(401), 401)\n return\n }\n\n claims, err := Decode(token.Raw)\n if err != nil {\n fmt.Println(err)\n http.Error(w, http.StatusText(401), 401)\n return\n }\n\n user := claims.User\n\n ctx := context.WithValue(r.Context(), \"user\", user)\n h.ServeHTTP(w, r.WithContext(ctx))\n })\n}", "func (j *JWT) Create(ctx context.Context, user Principal) (Token, error) {\n\t// Create a new token object, specifying signing method and the claims\n\t// you would like it to contain.\n\tclaims := &Claims{\n\t\tStandardClaims: gojwt.StandardClaims{\n\t\t\tSubject: user.Subject,\n\t\t\tIssuer: user.Issuer,\n\t\t\tExpiresAt: 
user.ExpiresAt.Unix(),\n\t\t\tIssuedAt: user.IssuedAt.Unix(),\n\t\t\tNotBefore: user.IssuedAt.Unix(),\n\t\t},\n\t\tOrganization: user.Organization,\n\t\tGroup: user.Group,\n\t}\n\ttoken := gojwt.NewWithClaims(gojwt.SigningMethodHS256, claims)\n\t// Sign and get the complete encoded token as a string using the secret\n\tt, err := token.SignedString([]byte(j.Secret))\n\t// this will only fail if the JSON can't be encoded correctly\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Token(t), nil\n}", "func UserID(token *jwt.Token) string {\n\treturn oauth.UserID(token)\n}", "func createRefreshToken(userID, UUID string, expires int64) (string, error) {\n\tclaims := CustomClaimsRefreshToken{\n\t\tUser_id: userID,\n\t\tUUID: UUID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expires,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\tsignedToken, err := token.SignedString([]byte(os.Getenv(\"TOKEN_SECRET\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn signedToken, nil\n}", "func GenerateToken(payload interface{}) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"payload\": payload,\n\t\t\"exp\": time.Now().Add(time.Second * TokenExpiredTime).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\tif err != nil {\n\t\tlogger.Error(\"Failed to generate token: \", err)\n\t\treturn \"\"\n\t}\n\n\treturn token\n}", "func (p gceTokenProvider) mintIDToken(ctx context.Context) (*Token, error) {\n\tv := url.Values{\n\t\t\"audience\": []string{p.audience},\n\t\t\"format\": []string{\"full\"}, // include VM instance info into claims\n\t}\n\turlSuffix := fmt.Sprintf(\"instance/service-accounts/%s/identity?%s\", p.account, v.Encode())\n\ttoken, err := metadataClient.Get(urlSuffix)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"auth/gce: metadata server call failed\").Tag(transient.Tag).Err()\n\t}\n\n\tclaims, 
err := ParseIDTokenClaims(token)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"auth/gce: metadata server returned invalid ID token\").Err()\n\t}\n\n\treturn &Token{\n\t\tToken: oauth2.Token{\n\t\t\tTokenType: \"Bearer\",\n\t\t\tAccessToken: NoAccessToken,\n\t\t\tExpiry: time.Unix(claims.Exp, 0),\n\t\t},\n\t\tIDToken: token,\n\t\tEmail: p.Email(),\n\t}, nil\n}", "func GenerateUserToken(ctx context.Context, tokenEndpoint string, configuration LoginConfiguration, username string, userSecret string) (*app.AuthToken, error) {\n\tif !configuration.IsPostgresDeveloperModeEnabled() {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"method\": \"Generate\",\n\t\t}, \"Postgres developer mode not enabled\")\n\t\treturn nil, errors.NewInternalError(ctx, errs.New(\"postgres developer mode is not enabled\"))\n\t}\n\n\tvar scopes []account.Identity\n\tscopes = append(scopes, test.TestIdentity)\n\tscopes = append(scopes, test.TestObserverIdentity)\n\n\tclient := &http.Client{Timeout: 10 * time.Second}\n\n\tres, err := client.PostForm(tokenEndpoint, url.Values{\n\t\t\"client_id\": {configuration.GetKeycloakClientID()},\n\t\t\"client_secret\": {configuration.GetKeycloakSecret()},\n\t\t\"username\": {username},\n\t\t\"password\": {userSecret},\n\t\t\"grant_type\": {\"password\"},\n\t})\n\tif err != nil {\n\t\treturn nil, errors.NewInternalError(ctx, errs.Wrap(err, \"error when obtaining token\"))\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"response_status\": res.Status,\n\t\t\t\"response_body\": rest.ReadBody(res.Body),\n\t\t}, \"unable to obtain token\")\n\t\treturn nil, errors.NewInternalError(ctx, errs.Errorf(\"unable to obtain toke. Response status: %s. 
Responce body: %s\", res.Status, rest.ReadBody(res.Body)))\n\t}\n\tt, err := token.ReadTokenSet(ctx, res)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"token_endpoint\": res,\n\t\t\t\"err\": err,\n\t\t}, \"Error when unmarshal json with access token\")\n\t\treturn nil, errors.NewInternalError(ctx, errs.Wrap(err, \"error when unmarshal json with access token\"))\n\t}\n\n\treturn convertToken(*t), nil\n}", "func ParseCustomToken(tokenStr, secret string) (interface{}, error) {\n\ttoken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t// Validate the alg expect\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, ErrSignatureInvalid\n\t\t}\n\t\treturn []byte(secret), nil\n\t})\n\tif err != nil {\n\t\tswitch err.(*jwt.ValidationError).Errors {\n\t\tcase jwt.ValidationErrorExpired:\n\t\t\treturn nil, ErrExpiredToken\n\t\tcase jwt.ValidationErrorSignatureInvalid:\n\t\t\treturn nil, ErrSignatureInvalid\n\t\tdefault:\n\t\t\treturn nil, ErrUnknown\n\t\t}\n\t}\n\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif ok && token.Valid {\n\t\tresult := make(map[string]interface{}, len(claims))\n\t\tfor k, v := range claims {\n\t\t\tresult[k] = v\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn nil, ErrUnknown\n}", "func newToken(sub, role string) Token {\n\treturn Token{\n\t\tID: id.New(),\n\t\tSubject: sub,\n\t\tRole: role,\n\t\tCreatedAt: time.Now().UTC(),\n\t}\n}", "func GenerateNewAccessToken(u *domain.User) (string, error) {\n\t// Set secret key from .env file.\n\tsecret := os.Getenv(\"JWT_SECRET_KEY\")\n\n\t// Set expires minutes count for secret key from .env file.\n\tminutesCount, _ := strconv.Atoi(os.Getenv(\"JWT_SECRET_KEY_EXPIRE_MINUTES\"))\n\n\t// Create token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = u.ID\n\tclaims[\"email\"] = u.Email\n\tclaims[\"username\"] = u.Username\n\tclaims[\"full_name\"] = 
u.FullName\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(minutesCount)).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\t// Return error, it JWT token generation failed.\n\t\treturn \"\", err\n\t}\n\n\treturn t, nil\n}", "func GenerateAuthToken(claims *JWTClaims, expiry time.Duration, jwtKey []byte) (string, time.Time, error) {\n\tissuedTime := time.Now()\n\texpirationTime := issuedTime.Add(expiry)\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\tExpiresAt: expirationTime.Unix(),\n\t\t// Can be used to blacklist in the future. Needs to hold state\n\t\t// in that case :/\n\t\tId: uuid.NewV4().String(),\n\t\tIssuedAt: issuedTime.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tres, err := token.SignedString(jwtKey)\n\treturn res, expirationTime, err\n}", "func (c *Client) UserToken(userID string) (*UserTokenResponse, error) {\n\tp := userTokenParams{\n\t\tAppUserID: userID,\n\t}\n\tparamMap, err := toMap(p, map[string]string{\n\t\tAppIDKey: c.config.AppID,\n\t\tPasswordKey: c.config.Password,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := httpRequest(c, p.URI(), paramMap, nil, func() interface{} {\n\t\treturn &UserTokenResponse{}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp := ret.(*UserTokenResponse)\n\n\tif err = checkErr(rsp.Code, rsp.SubCode, rsp.Message); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp, nil\n}" ]
[ "0.71608907", "0.7160486", "0.7132221", "0.6660991", "0.6522209", "0.64628446", "0.64589286", "0.62632966", "0.6257606", "0.61727697", "0.6130676", "0.6115605", "0.6063814", "0.60552263", "0.5998025", "0.5960696", "0.5954293", "0.5941921", "0.59352916", "0.59310776", "0.5922322", "0.5896745", "0.58626354", "0.5853604", "0.5844319", "0.5829672", "0.58288264", "0.5824991", "0.5815045", "0.5812553", "0.5804836", "0.5793642", "0.57889575", "0.5782149", "0.5775991", "0.57703525", "0.57637924", "0.57496345", "0.57353896", "0.57134765", "0.5710516", "0.5702978", "0.56973284", "0.56857795", "0.5685051", "0.5672823", "0.5667917", "0.5656171", "0.560716", "0.55992323", "0.5595772", "0.5584992", "0.5558761", "0.5549139", "0.55469793", "0.55421656", "0.5538468", "0.5533947", "0.5529057", "0.5524662", "0.5507735", "0.5504549", "0.5499826", "0.54930264", "0.5487209", "0.5474245", "0.547337", "0.54610336", "0.54604626", "0.5460247", "0.545767", "0.54480934", "0.5442384", "0.543889", "0.54342866", "0.5422009", "0.5421098", "0.5415391", "0.5392049", "0.5390274", "0.5386184", "0.53826016", "0.5363195", "0.53616655", "0.5319488", "0.53016716", "0.5297974", "0.528533", "0.52786356", "0.52761376", "0.52756286", "0.52700466", "0.52607036", "0.52566314", "0.5255204", "0.5245126", "0.5244061", "0.52335864", "0.5225912", "0.52225775" ]
0.6981441
3
AuthenticateToken Verifies provided token and if valid, extracts eHRID from it.
func (c *MockClient) AuthenticateToken(ctx context.Context, customToken string) (string, error) { return "ehrid", nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *OIDCAuthenticator) AuthenticateToken(ctx context.Context, rawtoken string) (*Claims, error) {\n\tidToken, err := o.verifier.Verify(ctx, rawtoken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Token failed validation: %v\", err)\n\t}\n\n\t// Check for required claims\n\tvar claims map[string]interface{}\n\tif err := idToken.Claims(&claims); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get claim map from token: %v\", err)\n\t}\n\tfor _, requiredClaim := range requiredClaims {\n\t\tif _, ok := claims[requiredClaim]; !ok {\n\t\t\t// Claim missing\n\t\t\treturn nil, fmt.Errorf(\"Required claim %v missing from token\", requiredClaim)\n\t\t}\n\t}\n\n\treturn o.parseClaims(claims)\n}", "func (a *authSvc) ValidateToken(authHeader interface{}) (interface{}, error) {\n\t// validate an Authorization header token is present in the request\n\tif authHeader == nil {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\theader := authHeader.(string)\n\tif header == \"\" {\n\t\treturn nil, errors.New(\"no valid Authorization token in request\")\n\t}\n\t// validate that it is a Bearer token\n\tif !strings.HasPrefix(header, bearerTokenKey) {\n\t\treturn nil, errors.New(\"authorization token is not valid Bearer token\")\n\t}\n\tt := strings.Replace(header, bearerTokenKey, \"\", -1)\n\t// parse the header token\n\ttoken, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"there was an parsing the given token. 
please validate the token is for this service\")\n\t\t}\n\t\treturn a.authSecret, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// validate token and get claims\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\tvar decodedToken map[string]string\n\t\terr = mapstructure.Decode(claims, &decodedToken)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn decodedToken[\"email\"], nil\n\t}\n\treturn nil, errors.New(\"invalid authorization token\") // token is not valid, return error\n}", "func (c *Client) AuthenticateToken(ctx context.Context, customToken string) (string, error) {\n\tclient := firebase.FirebaseAuth\n\ttoken, err := client.VerifyIDToken(ctx, customToken)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token.UID, nil\n}", "func (j *JwtAuthenticator) AuthenticateToken(ctx context.Context, rawtoken string) (*Claims, error) {\n\n\t// Parse token\n\ttoken, err := jwt.Parse(rawtoken, func(token *jwt.Token) (interface{}, error) {\n\n\t\t// Verify Method\n\t\tif strings.HasPrefix(token.Method.Alg(), \"RS\") {\n\t\t\t// RS256, RS384, or RS512\n\t\t\treturn j.rsaKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"ES\") {\n\t\t\t// ES256, ES384, or ES512\n\t\t\treturn j.ecdsKey, nil\n\t\t} else if strings.HasPrefix(token.Method.Alg(), \"HS\") {\n\t\t\t// HS256, HS384, or HS512\n\t\t\treturn j.sharedSecretKey, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"Unknown token algorithm: %s\", token.Method.Alg())\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Token failed validation\")\n\t}\n\n\t// Get claims\n\tclaims, ok := token.Claims.(jwt.MapClaims)\n\tif claims == nil || !ok {\n\t\treturn nil, fmt.Errorf(\"No claims found in token\")\n\t}\n\n\t// Check for required claims\n\tfor _, requiredClaim := range requiredClaims {\n\t\tif _, ok := claims[requiredClaim]; !ok {\n\t\t\t// Claim missing\n\t\t\treturn nil, fmt.Errorf(\"Required claim %v missing 
from token\", requiredClaim)\n\t\t}\n\t}\n\n\t// Token now has been verified.\n\t// Claims holds all the authorization information.\n\t// Here we need to first decode it then unmarshal it from JSON\n\tparts := strings.Split(token.Raw, \".\")\n\tclaimBytes, err := jwt.DecodeSegment(parts[1])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decode claims: %v\", err)\n\t}\n\tvar sdkClaims Claims\n\terr = json.Unmarshal(claimBytes, &sdkClaims)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to get sdkclaims: %v\", err)\n\t}\n\n\tif err := validateUsername(j.usernameClaim, &sdkClaims); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sdkClaims, nil\n}", "func Authenticate(tokenStr string, pub *rsa.PublicKey) (interface{}, error) {\n\tclaims, err := token.Verify(tokenStr, func(claims interface{}, method token.SigningMethod) (interface{}, error) {\n\t\treturn pub, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn claims, nil\n}", "func parseToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tevent := ssas.Event{Op: \"ParseToken\"}\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\tevent.Help = \"no authorization header found\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tauthRegexp := regexp.MustCompile(`^Bearer (\\S+)$`)\n\t\tauthSubmatches := authRegexp.FindStringSubmatch(authHeader)\n\t\tif len(authSubmatches) < 2 {\n\t\t\tevent.Help = \"invalid Authorization header value\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttokenString := authSubmatches[1]\n\t\ttoken, err := server.VerifyToken(tokenString)\n\t\tif err != nil {\n\t\t\tevent.Help = fmt.Sprintf(\"unable to decode authorization header value; %s\", err)\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvar rd 
ssas.AuthRegData\n\t\tif rd, err = readRegData(r); err != nil {\n\t\t\trd = ssas.AuthRegData{}\n\t\t}\n\n\t\tif claims, ok := token.Claims.(*service.CommonClaims); ok && token.Valid {\n\t\t\trd.AllowedGroupIDs = claims.GroupIDs\n\t\t\trd.OktaID = claims.OktaID\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"ts\", tokenString)\n\t\tctx = context.WithValue(ctx, \"rd\", rd)\n\t\tservice.LogEntrySetField(r, \"rd\", rd)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (app *application) authenticateToken(r *http.Request) (*models.User, error) {\n\t// retrieve a token from the client's http request header\n\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\tif authorizationHeader == \"\" {\n\t\treturn nil, errors.New(\"no authorization header received\")\n\t}\n\n\theaderParts := strings.Split(authorizationHeader, \" \")\n\tif len(headerParts) != 2 || headerParts[0] != \"Bearer\" {\n\t\treturn nil, errors.New(\"no authorization header received\")\n\t}\n\n\ttoken := headerParts[1]\n\tif len(token) != 26 {\n\t\treturn nil, errors.New(\"authentication token wrong size\")\n\t}\n\n\t// once all passed, get the user from the tokens table in the database\n\tuser, err := app.DB.GetUserForToken(token)\n\tif err != nil {\n\t\treturn nil, errors.New(\"no matching user found\")\n\t}\n\n\treturn user, nil\n}", "func VerifyToken(tokData []byte, keyFile, keyType string) (iat string, err error) {\n\n\t// trim possible whitespace from token\n\ttokData = regexp.MustCompile(`\\s*$`).ReplaceAll(tokData, []byte{})\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token len: %v bytes\\n\", len(tokData))\n\t}\n\n\t// Parse the token. 
Load the key from command line option\n\ttoken, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) {\n\t\tdata, err := loadData(keyFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif isEs(keyType) {\n\t\t\treturn jwt.ParseECPublicKeyFromPEM(data)\n\t\t} else if isRs(keyType) {\n\t\t\treturn jwt.ParseRSAPublicKeyFromPEM(data)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Error signing token - confg error: keyType=[%s]\", keyType)\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn data, nil\n\t})\n\n\t// Print some debug data\n\tif db100 && token != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Header:\\n%v\\n\", token.Header)\n\t\tfmt.Fprintf(os.Stderr, \"Claims:\\n%v\\n\", token.Claims)\n\t}\n\n\t// Print an error if we can't parse for some reason\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Couldn't parse token: %v\", err)\n\t}\n\n\t// Is token invalid?\n\tif !token.Valid {\n\t\treturn \"\", fmt.Errorf(\"Token is invalid\")\n\t}\n\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Token Claims: %s\\n\", godebug.SVarI(token.Claims))\n\t}\n\n\t// {\"auth_token\":\"f5d8f6ae-e2e5-42c9-83a9-dfd07825b0fc\"}\n\ttype GetAuthToken struct {\n\t\tAuthToken string `json:\"auth_token\"`\n\t}\n\tvar gt GetAuthToken\n\tcl := godebug.SVar(token.Claims)\n\tif db100 {\n\t\tfmt.Fprintf(os.Stderr, \"Claims just before -->>%s<<--\\n\", cl)\n\t}\n\terr = json.Unmarshal([]byte(cl), &gt)\n\tif err == nil {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Success: %s -- token [%s] \\n\", err, gt.AuthToken)\n\t\t}\n\t\treturn gt.AuthToken, nil\n\t} else {\n\t\tif db100 {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: %s -- Unable to unmarsal -->>%s<<--\\n\", err, cl)\n\t\t}\n\t\treturn \"\", err\n\t}\n\n}", "func (a *Auth) authToken(ctx *bm.Context) (int64, error) {\n\treq := ctx.Request\n\tkey := req.Form.Get(\"access_token\")\n\tif key == \"\" {\n\t\treturn 0, ecode.Unauthorized\n\t}\n\t// NOTE: 请求登录鉴权服务接口,拿到对应的用户id\n\tvar mid int64\n\t// TODO: get mid from some code\n\treturn mid, 
nil\n}", "func (t *TokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) {\n\ttokenID, tokenSecret, err := bootstraptokenutil.ParseToken(token)\n\tif err != nil {\n\t\t// Token isn't of the correct form, ignore it.\n\t\treturn nil, false, nil\n\t}\n\n\tsecretName := bootstrapapi.BootstrapTokenSecretPrefix + tokenID\n\tsecret, err := t.lister.Get(secretName)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tklog.V(3).Infof(\"No secret of name %s to match bootstrap bearer token\", secretName)\n\t\t\treturn nil, false, nil\n\t\t}\n\t\treturn nil, false, err\n\t}\n\n\tif secret.DeletionTimestamp != nil {\n\t\ttokenErrorf(secret, \"is deleted and awaiting removal\")\n\t\treturn nil, false, nil\n\t}\n\n\tif string(secret.Type) != string(bootstrapapi.SecretTypeBootstrapToken) || secret.Data == nil {\n\t\ttokenErrorf(secret, \"has invalid type, expected %s.\", bootstrapapi.SecretTypeBootstrapToken)\n\t\treturn nil, false, nil\n\t}\n\n\tts := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenSecretKey)\n\tif subtle.ConstantTimeCompare([]byte(ts), []byte(tokenSecret)) != 1 {\n\t\ttokenErrorf(secret, \"has invalid value for key %s.\", bootstrapapi.BootstrapTokenSecretKey)\n\t\treturn nil, false, nil\n\t}\n\n\tid := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenIDKey)\n\tif id != tokenID {\n\t\ttokenErrorf(secret, \"has invalid value for key %s.\", bootstrapapi.BootstrapTokenIDKey)\n\t\treturn nil, false, nil\n\t}\n\n\tif bootstrapsecretutil.HasExpired(secret, time.Now()) {\n\t\t// logging done in isSecretExpired method.\n\t\treturn nil, false, nil\n\t}\n\n\tif bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenUsageAuthentication) != \"true\" {\n\t\ttokenErrorf(secret, \"not marked %s=true.\", bootstrapapi.BootstrapTokenUsageAuthentication)\n\t\treturn nil, false, nil\n\t}\n\n\tgroups, err := bootstrapsecretutil.GetGroups(secret)\n\tif err != nil 
{\n\t\ttokenErrorf(secret, \"has invalid value for key %s: %v.\", bootstrapapi.BootstrapTokenExtraGroupsKey, err)\n\t\treturn nil, false, nil\n\t}\n\n\treturn &authenticator.Response{\n\t\tUser: &user.DefaultInfo{\n\t\t\tName: bootstrapapi.BootstrapUserPrefix + string(id),\n\t\t\tGroups: groups,\n\t\t},\n\t}, true, nil\n}", "func (e *oidcExtension) authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) {\n\tmetadata := client.NewMetadata(headers)\n\tauthHeaders := metadata.Get(e.cfg.Attribute)\n\tif len(authHeaders) == 0 {\n\t\treturn ctx, errNotAuthenticated\n\t}\n\n\t// we only use the first header, if multiple values exist\n\tparts := strings.Split(authHeaders[0], \" \")\n\tif len(parts) != 2 {\n\t\treturn ctx, errInvalidAuthenticationHeaderFormat\n\t}\n\n\traw := parts[1]\n\tidToken, err := e.verifier.Verify(ctx, raw)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to verify token: %w\", err)\n\t}\n\n\tclaims := map[string]interface{}{}\n\tif err = idToken.Claims(&claims); err != nil {\n\t\t// currently, this isn't a valid condition, the Verify call a few lines above\n\t\t// will already attempt to parse the payload as a json and set it as the claims\n\t\t// for the token. As we are using a map to hold the claims, there's no way to fail\n\t\t// to read the claims. It could fail if we were using a custom struct. 
Instead of\n\t\t// swalling the error, it's better to make this future-proof, in case the underlying\n\t\t// code changes\n\t\treturn ctx, errFailedToObtainClaimsFromToken\n\t}\n\n\tsubject, err := getSubjectFromClaims(claims, e.cfg.UsernameClaim, idToken.Subject)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to get subject from claims in the token: %w\", err)\n\t}\n\tmembership, err := getGroupsFromClaims(claims, e.cfg.GroupsClaim)\n\tif err != nil {\n\t\treturn ctx, fmt.Errorf(\"failed to get groups from claims in the token: %w\", err)\n\t}\n\n\tcl := client.FromContext(ctx)\n\tcl.Auth = &authData{\n\t\traw: raw,\n\t\tsubject: subject,\n\t\tmembership: membership,\n\t}\n\treturn client.NewContext(ctx, cl), nil\n}", "func Authenticate(nextFunc http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif err := auth.ValidarToken(r); err != nil {\n\t\t\tresponse.Error(w, http.StatusUnauthorized, err)\n\t\t\treturn\n\t\t}\n\n\t\tnextFunc(w, r)\n\t}\n}", "func ProcessToken(tk string) (*models.Claim, bool, string, error) {\n\t//Para poder decodificar el token\n\tmyPwd := []byte(\"MiClaveUltraSECRETA\")\n\n\t//jwt exige que sea un puntero\n\tclaims := &models.Claim{}\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, string(\"\"), errors.New(\"token format invalid\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\t//Sintaxis para verificar si el token es valido y mapear el token dentro de claims\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn myPwd, nil\n\t})\n\tif err == nil {\n\t\t//Si el token es válido lo primero que validamos es si el email existe en la BD\n\t\t_, userFound, _ := bd.UserExists(claims.Email)\n\t\tif userFound {\n\t\t\tEmail = claims.Email\n\t\t\tUserID = claims.ID.Hex()\n\t\t}\n\t\treturn claims, userFound, UserID, nil\n\t}\n\tif !tkn.Valid {\n\t\treturn claims, false, string(\"\"), 
errors.New(\"invalid token\")\n\t}\n\n\treturn claims, false, string(\"\"), err\n}", "func Authenticate(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(responseWriter http.ResponseWriter, request *http.Request) {\n\t\ttoken := strings.TrimPrefix(request.Header.Get(\"Authorization\"), \"Bearer \")\n\t\terr := crypto.ValidateToken([]byte(token))\n\n\t\tif err == nil {\n\t\t\tnext(responseWriter, request)\n\t\t} else {\n\t\t\thelpers.Response(responseWriter, http.StatusUnauthorized, constants.ERR_INVALID_TOKEN, err, nil)\n\t\t}\n\t}\n}", "func (p *AzureProvider) verifyTokenAndExtractEmail(ctx context.Context, token string) (string, error) {\n\temail := \"\"\n\n\tif token != \"\" && p.Verifier != nil {\n\t\ttoken, err := p.Verifier.Verify(ctx, token)\n\t\t// due to issues mentioned above, id_token may not be signed by AAD\n\t\tif err == nil {\n\t\t\tclaims, err := p.getClaims(token)\n\t\t\tif err == nil {\n\t\t\t\temail = claims.Email\n\t\t\t} else {\n\t\t\t\tlogger.Printf(\"unable to get claims from token: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Printf(\"unable to verify token: %v\", err)\n\t\t}\n\t}\n\n\treturn email, nil\n}", "func (g *grpcWrapper) validateToken(logger *zap.Logger, token string) (string, error) {\n\tlogger.Debug(\"validateToken called\")\n\tif g.skipAuth {\n\t\tlogger.Debug(\"validateToken short-circuited due to SKIP AUTH\")\n\t\treturn \"11\", nil\n\t}\n\tserverAuthToken, err := serverAuth(logger, g.authURL, g.authUser, g.authPassword)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from serverAuth\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq, err := http.NewRequest(\"GET\", g.authURL+\"v3/auth/tokens\", nil)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from NewRequest GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"X-Auth-Token\", serverAuthToken)\n\treq.Header.Set(\"X-Subject-Token\", token)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil 
{\n\t\tlogger.Debug(\"validateToken error from DefaultClient.Do GET\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 200 {\n\t\tlogger.Debug(\"validateToken error from GET return status\", zap.Int(\"status\", resp.StatusCode))\n\t\treturn \"\", fmt.Errorf(\"token validation gave status %d\", resp.StatusCode)\n\t}\n\tvar validateResp validateTokenResponse\n\tr, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogger.Debug(\"validateToken error from GET ReadAll body\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tif err = json.Unmarshal(r, &validateResp); err != nil {\n\t\tlogger.Debug(\"validateToken error from GET json.Unmarshal\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\tlogger.Debug(\"validateToken succeeded\", zap.String(\"Project.ID\", validateResp.Token.Project.ID))\n\treturn validateResp.Token.Project.ID, nil\n}", "func ValidateAuth(verifyKey *rsa.PublicKey, h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\t\tif authorizationHeader != \"\" {\n\t\t\tbearerToken := strings.TrimPrefix(authorizationHeader, \"Bearer\")\n\t\t\tbearerToken = strings.TrimSpace(bearerToken)\n\t\t\t// tokenPart := bearerToken[1] //Grab the token part, what we are truly interested in\n\t\t\tcustomClaims := &claims.Claims{}\n\n\t\t\ttoken, err := jwt.ParseWithClaims(bearerToken, customClaims, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\treturn verifyKey, nil\n\t\t\t})\n\n\t\t\tif err != nil { // Malformed token, returns with http code 403 as usual\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !token.Valid {\n\t\t\t\tw.WriteHeader(http.StatusForbidden)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx := r.Context()\n\t\t\t// Everything went well, proceed with the request and set the caller to the user retrieved from the parsed token\n\t\t\tr = r.WithContext(AddClaims(ctx, 
customClaims))\n\t\t\th.ServeHTTP(w, r) // proceed in the middleware chain!\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(\"Invalid Authorization Token\")\n\t\t}\n\t})\n}", "func authenticateApiToken(server string, token string) (string, error) {\n\tlog.Debug(\"Attempting to authenticate the API Refresh Token\")\n\tclient := resty.New()\n\tqueryResponse, err := client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: ignoreCert}).R().\n\t\tSetBody(ApiAuthentication{token}).\n\t\tSetResult(&ApiAuthenticationResponse{}).\n\t\tSetError(&ApiAuthenticationError{}).\n\t\tPost(\"https://\" + server + \"/iaas/api/login\")\n\tif queryResponse.IsError() {\n\t\tlog.Debug(\"Refresh Token failed\")\n\t\treturn \"\", errors.New(queryResponse.Error().(*ApiAuthenticationError).Message)\n\t}\n\tlog.Debug(\"Refresh Token succeeded\")\n\treturn queryResponse.Result().(*ApiAuthenticationResponse).Token, err\n}", "func (*Service) Authenticate(code string, configuration *portainer.OAuthSettings) (string, error) {\n\ttoken, err := getOAuthToken(code, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving oauth token\")\n\n\t\treturn \"\", err\n\t}\n\n\tidToken, err := getIdToken(token)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed parsing id_token\")\n\t}\n\n\tresource, err := getResource(token.AccessToken, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving resource\")\n\n\t\treturn \"\", err\n\t}\n\n\tresource = mergeSecondIntoFirst(idToken, resource)\n\n\tusername, err := getUsername(resource, configuration)\n\tif err != nil {\n\t\tlog.Debug().Err(err).Msg(\"failed retrieving username\")\n\n\t\treturn \"\", err\n\t}\n\n\treturn username, nil\n}", "func (s *Server) authenticate(w http.ResponseWriter, req *http.Request) (string, error) {\n\tif s.opts.UseJWT {\n\t\t// Check Authorization header.\n\t\tfor _, val := range req.Header[\"Authorization\"] {\n\t\t\tif strings.HasPrefix(val, \"Bearer \") {\n\t\t\t\ttoken 
:= val[len(\"Bearer \"):]\n\t\t\t\tobject, err := jose.ParseSigned(token)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error parsing JWT token: %s\", err)\n\t\t\t\t}\n\t\t\t\tpb, err := object.Verify(&s.opts.PrivateKey.PublicKey)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error verifying JWT token: %s\", err)\n\t\t\t\t}\n\t\t\t\tpayload := make(map[string]string)\n\t\t\t\terr = json.Unmarshal(pb, &payload)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"error parsing JWT payload: %s\", err)\n\t\t\t\t}\n\t\t\t\temail, ok := payload[\"sub\"]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"JWT token does not have sub: %s\", string(pb))\n\t\t\t\t}\n\t\t\t\treturn s.hashId(email), nil\n\t\t\t}\n\t\t}\n\t}\n\tsession, err := s.cookieStore.Get(req, UserSessionName)\n\tif err != nil {\n\t\tsession.Options.MaxAge = -1\n\t\treturn \"\", fmt.Errorf(\"cookieStore.Get returned error %s\", err)\n\t}\n\thash, ok := session.Values[\"hash\"].(string)\n\tglog.V(3).Infof(\"authenticate %s: hash=%s\", req.URL, session.Values[\"hash\"])\n\tif !ok || hash == \"\" {\n\t\treturn \"\", httpError(http.StatusUnauthorized)\n\t}\n\treturn hash, nil\n}", "func ExtractIDFromToken(token string, p packer.Packer, n notary.Notary, secretKey []byte) (id string, err error) {\n\tsid, signature, err := p.Unpack([]byte(token))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to decode token\")\n\t}\n\n\tverified := n.Verify(sid, signature, secretKey)\n\tif !verified {\n\t\treturn \"\", fmt.Errorf(\"Failed to verify session ID\")\n\t}\n\n\treturn string(sid[:SessionIDLength]), nil\n}", "func (u *User) ValidateToken(ctx context.Context, inToken *pb.Token, outToken *pb.Token) error {\n\t_ = ctx\n\tts := TokenService{}\n\tclaims, err := ts.Decode(inToken.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif claims == nil {\n\t\treturn fmt.Errorf(glErr.AuthNilClaim(serviceName))\n\t}\n\tif claims.User.Id == 0 || claims.Issuer != ClaimIssuer 
{\n\t\t// fmt.Printf(\"claim User %v\", claims.User)\n\t\treturn fmt.Errorf(glErr.AuthInvalidClaim(serviceName))\n\t}\n\t// fmt.Printf(\"Claim User %v\", claims.User)\n\t// TODO: Check that userid is a valid user in db\n\n\toutToken.Token = inToken.Token\n\toutToken.Valid = true\n\toutToken.EUid = base64.StdEncoding.EncodeToString([]byte(strconv.FormatInt(claims.User.Id, 10)))\n\n\treturn nil\n\n}", "func Authenticate(jwtToken string) (*Token, error) {\n\ttk := &Token{}\n\ttoken, err := jwt.ParseWithClaims(jwtToken, tk, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(jwtKey), nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, fmt.Errorf(\"Token is not valid.\")\n\t}\n\n\tif time.Now().After(tk.Expiry) {\n\t\treturn nil, fmt.Errorf(\"Token has expired. Please login again.\")\n\t}\n\n\treturn tk, nil\n}", "func (ctx *serverRequestContextImpl) TokenAuthentication() (string, error) {\n\tr := ctx.req\n\t// Get the authorization header\n\tauthHdr := r.Header.Get(\"authorization\")\n\tif authHdr == \"\" {\n\t\treturn \"\", caerrors.NewHTTPErr(401, caerrors.ErrNoAuthHdr, \"No authorization header\")\n\t}\n\t// Get the CA\n\tca, err := ctx.GetCA()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Get the request body\n\tbody, err := ctx.ReadBodyBytes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif idemix.IsToken(authHdr) {\n\t\treturn ctx.verifyIdemixToken(authHdr, r.Method, r.URL.RequestURI(), body)\n\t}\n\treturn ctx.verifyX509Token(ca, authHdr, r.Method, r.URL.RequestURI(), body)\n}", "func ValidateToken(authClient umAPI.UserManagementApiClient) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken := c.MustGet(\"encodedToken\").(string)\n\t\tparsedToken, err := authClient.ValidateJWT(context.Background(), &umAPI.JWTRequest{\n\t\t\tToken: token,\n\t\t})\n\t\tif err != nil {\n\t\t\tst := status.Convert(err)\n\t\t\tlogger.Error.Println(st.Message())\n\t\t\tc.JSON(http.StatusUnauthorized, 
gin.H{\"error\": \"error during token validation\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Set(\"validatedToken\", parsedToken)\n\t\tc.Next()\n\t}\n}", "func (o *OIDC) authorizeToken(token string) (*openIDPayload, error) {\n\tjwt, err := jose.ParseSigned(token)\n\tif err != nil {\n\t\treturn nil, errs.Wrap(http.StatusUnauthorized, err,\n\t\t\t\"oidc.AuthorizeToken; error parsing oidc token\")\n\t}\n\n\t// Parse claims to get the kid\n\tvar claims openIDPayload\n\tif err := jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {\n\t\treturn nil, errs.Wrap(http.StatusUnauthorized, err,\n\t\t\t\"oidc.AuthorizeToken; error parsing oidc token claims\")\n\t}\n\n\tfound := false\n\tkid := jwt.Headers[0].KeyID\n\tkeys := o.keyStore.Get(kid)\n\tfor _, key := range keys {\n\t\tif err := jwt.Claims(key, &claims); err == nil {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn nil, errs.Unauthorized(\"oidc.AuthorizeToken; cannot validate oidc token\")\n\t}\n\n\tif err := o.ValidatePayload(claims); err != nil {\n\t\treturn nil, errs.Wrap(http.StatusInternalServerError, err, \"oidc.AuthorizeToken\")\n\t}\n\n\treturn &claims, nil\n}", "func validateIDToken(rawIDToken string) (string,error) {\n\t\n\t// Create verifier\n\tctx := context.Background()\n\tprovider, err := oidc.NewProvider(ctx, \"https://accounts.google.com\")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\toidcConfig := &oidc.Config{\n\t\tClientID: clientID,\n\t}\n\tverifier := provider.Verifier(oidcConfig)\n\n\t// Verify id token\n\tidToken, err := verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Parse token to JSON\n\tparsed := new(json.RawMessage)\n\tif err := idToken.Claims(parsed); err != nil {\n\t\treturn \"\",err\n\t}\n\n\t// Render json as string\n\tdata, err := json.MarshalIndent(parsed, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\",err\n\t}\n\treturn string(data),nil\n}", "func (r GetResult) ExtractTokenID() (string, error) {\n\treturn 
r.Header.Get(\"X-Subject-Token\"), r.Err\n}", "func (id *authIdentity) AuthToken() string {\n\treturn id.authToken\n}", "func (r CreateResult) ExtractTokenID() (string, error) {\n\treturn r.Header.Get(\"X-Subject-Token\"), r.Err\n}", "func VerifyToken(tokenStr string, secret_name string) (string, error) {\n\t var result = \"\"\n\t //Retrieve secret value from secrets manager\n\t secret, err := getSecretValue(secret_name);\n\t verifyToken, err := jwt.Parse(tokenStr, func(token *jwt.Token) (interface{}, error) {\n\t\t return[]byte(secret), nil\n\t })\n\t if err == nil && verifyToken.Valid{\n\t\t result = \"Valid\"\n\t } else {\n\t\t result = \"Invalid\"\n\t }\n\t log.Println(\"VerifyToken result =\", result)\n\n\t return result, err\n}", "func (c *CSRFStore) verifyToken(headerToken string) error {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\t// check if token is initialized\n\tif c.token == nil || len(c.token.Value) == 0 {\n\t\treturn errors.New(\"token not initialized\")\n\t}\n\n\ta, err := base64.RawURLEncoding.DecodeString(headerToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if token values are same, using a constant time comparison\n\tif subtle.ConstantTimeCompare(a, c.token.Value) != 1 {\n\t\treturn errors.New(\"invalid token\")\n\t}\n\n\t// make sure token is still valid\n\tif c.expired() {\n\t\treturn errors.New(\"token has expired\")\n\t}\n\n\treturn nil\n}", "func ValidateToken(tokenString string, w http.ResponseWriter) (Claims, error) {\n\tclaims := Claims{}\n\tjwtKey := []byte(config.Configuration.TokenPrivateKey)\n\n\t// The token string is parsed, decoded and stored into the given Claims struct\n\ttoken, err := jwt.ParseWithClaims(tokenString, &claims,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn jwtKey, nil\n\t\t})\n\n\t// Check if the token has expired according to the expiry time fixed during the sign in\n\tif !token.Valid {\n\t\terr = ExpiredToken\n\t\tMakeErrorResponse(w, http.StatusUnauthorized, 
err.Error())\n\t\tlog.Println(err.Error())\n\t\treturn claims, err\n\t}\n\n\t// Check if the token has been signed with the private key of the api gateway\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t// If the token is expired or has not been signed according to the api gateway key, an Unauthorization code\n\t\t\t// is returned in both cases, but a different message is provided to the client.\n\t\t\tMakeErrorResponse(w, http.StatusUnauthorized, \"Wrong credentials\")\n\t\t\tlog.Println(\"Wrong credentials\")\n\t\t\treturn claims, err\n\t\t}\n\n\t\tMakeErrorResponse(w, http.StatusBadRequest, \"Malformed token\")\n\t\tlog.Println(\"Malformed token\")\n\t\treturn claims, err\n\t}\n\n\treturn claims, nil\n\n}", "func (a *Api) token(res http.ResponseWriter, req *http.Request) *token.TokenData {\n\ttd := a.auth.Authenticate(req)\n\n\tif td == nil {\n\t\tstatusErr := &status.StatusError{Status: status.NewStatus(http.StatusUnauthorized, STATUS_NO_TOKEN)}\n\t\ta.sendModelAsResWithStatus(res, statusErr, http.StatusUnauthorized)\n\t\treturn nil\n\t}\n\t//all good!\n\treturn td\n}", "func ProcessToken(tk string) (*entities.Clain, bool, int64, error) {\n\tmiClave := []byte(\"apirestdekkdesarrollo\")\n\tclaims := &entities.Clain{}\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, 0, errors.New(\"Formato del token no válido\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn miClave, nil\n\t})\n\n\tif err != nil {\n\t\t_, encontrado, _ := utils.VerifyExistsUsername(claims.Username)\n\t\tif encontrado == true {\n\t\t\tUsername = claims.Username\n\t\t\tIDUsuario = claims.ID\n\t\t}\n\t\treturn claims, encontrado, IDUsuario, nil\n\t}\n\n\tif !tkn.Valid {\n\t\treturn claims, false, 0, errors.New(\"Token inválido\")\n\t}\n\n\treturn claims, false, 0, err\n}", "func (ja *jwtGuard) CheckToken(tokenString 
string) (userID uint64, err error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &everestClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(ja.secret), nil\n\t})\n\tif err != nil {\n\t\tif ve, ok := err.(*jwt.ValidationError); ok {\n\t\t\tif ve.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\t\terr = errors.New(\"not even a token\")\n\t\t\t} else if ve.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\terr = errors.New(\"token expired\")\n\n\t\t\t} else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 {\n\t\t\t\t// Token is either expired or not active yet\n\t\t\t\terr = errors.New(\"token not valid yet\")\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Couldn't handle this token: %e\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Couldn't handle this token: %e\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tif claims, ok := token.Claims.(*everestClaims); ok {\n\t\treturn claims.UserID, err\n\t}\n\treturn 0, errors.New(\"Couldn't handle this token\")\n}", "func Authenticate(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\n\t\t// Expecting: bearer <token>\n\t\tbearer := r.Header.Get(\"Authorization\")\n\t\tif len(bearer) == 0 {\n\t\t\thttp.Error(w, errAuthHeaderMissing.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif len(bearer) < 6 || strings.ToLower(bearer[0:6]) != \"bearer\" {\n\t\t\thttp.Error(w, errAuthWrongHeaderFormat.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := bearer[7:]\n\t\taccessTokenClaims, err := entity.ParseAccessTokenClaims(accessToken)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Add claims to the context so we can retrieve them later\n\t\tctx = context.WithValue(ctx, ClaimsKey, 
accessTokenClaims)\n\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func validateAndGetUserID(key []byte, token string) (string, error) {\n\tt := strings.TrimSpace(strings.TrimPrefix(token, \"Bearer \"))\n\n\tvar jsonToken paseto.JSONToken\n\terr := paseto.Decrypt(t, key, &jsonToken, nil)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"paseto json key decrypt\")\n\t}\n\n\treturn jsonToken.Subject, nil\n}", "func (f *vaultTokenAuthHandler) Validate(token string) (*api.Secret, error) {\n\tf.client.SetToken(token)\n\treturn f.client.Auth().Token().LookupSelf()\n}", "func (a *Service) ValidateJweToken(token string) (map[string]interface{}, *error_utils.ApiError) {\n\n\t// parse token string\n\tclaims, err := a.parseTokenString(token)\n\tif err != nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(err.Error())\n\t}\n\n\t// validate dates\n\tif claims[\"orig_iat\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat is missing\")\n\t}\n\n\t// try convert to float64\n\tif _, ok := claims[\"orig_iat\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Orig Iat must be float64 format\")\n\t}\n\n\t// get value and validate\n\torigIat := int64(claims[\"orig_iat\"].(float64))\n\tif origIat < a.timeFunc().Add(-a.maxRefresh).Unix() {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\n\t// check if exp exists in map\n\tif claims[\"exp\"] == nil {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp is missing\")\n\t}\n\n\t// try convert to float 64\n\tif _, ok := claims[\"exp\"].(float64); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Exp must be float64 format\")\n\t}\n\n\t// get value and validate\n\texp := int64(claims[\"exp\"].(float64))\n\tif exp < a.timeFunc().Unix(){\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Token is expired\")\n\t}\n\t// validate dates\n\n\t// validate issuer\n\t// check if iss exists in map\n\tif claims[\"iss\"] == nil {\n\t\treturn nil, 
error_utils.NewUnauthorizedError(\"Iss is missing\")\n\t}\n\n\t// try convert to string\n\tif _, ok := claims[\"iss\"].(string); !ok {\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Iss must be string format\")\n\t}\n\n\t// get value and validate\n\tissuer := claims[\"iss\"]\n\tif issuer != a.issuer{\n\t\treturn nil, error_utils.NewUnauthorizedError(\"Invalid issuer\")\n\t}\n\t// validate issuer\n\n\treturn claims, nil\n}", "func (e *EdgeRequestContext) AuthToken() *AuthenticationToken {\n\te.tokenOnce.Do(func() {\n\t\tif token, err := e.impl.ValidateToken(e.raw.AuthToken); err != nil {\n\t\t\tlog.Errorw(\"token validation failed\", \"err\", err)\n\t\t\te.token = nil\n\t\t} else {\n\t\t\te.token = token\n\t\t}\n\t})\n\treturn e.token\n}", "func Authenticate(fbToken, fbID string, client URLGetter) error {\n\tvar fbp map[string]interface{}\n\t_, err := fetch(client, fbToken, []string{\"id\"}, &fbp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif fbp[\"id\"] != fbID {\n\t\treturn fmt.Errorf(\"facebook id mismatch, %v != %v\", fbp[\"id\"], fbID)\n\t}\n\n\treturn nil\n}", "func (tokenController TokenController) ValidateTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\ttoken, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,\n\t\tfunc(token *jwt.Token) (interface{}, error) {\n\t\t\treturn tokenController.mySigningKey, nil\n\t\t})\n\n\tif err == nil {\n\t\tif token.Valid {\n\t\t\tnext(w, r)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tfmt.Fprint(w, \"Token is not valid\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprint(w, \"Unauthorized access to this resource\")\n\t}\n}", "func GetAuthToken(address string, pkey string, API string) (string, error) {\n var data = new(StringRes)\n // 1: Get the auth data to sign\n // ----------------------------\n res_data, err := http.Get(API+\"/AuthDatum\")\n // Data will need to be hashed\n if err != nil { return \"\", 
fmt.Errorf(\"Could not get authentication data: (%s)\", err) }\n body, err1 := ioutil.ReadAll(res_data.Body)\n if err != nil { return \"\", fmt.Errorf(\"Could not parse authentication data: (%s)\", err1) }\n err2 := json.Unmarshal(body, &data)\n if err2 != nil { return \"\", fmt.Errorf(\"Could not unmarshal authentication data: (%s)\", err2) }\n\n // Hash the data. Keep the byte array\n data_hash := sig.Keccak256Hash([]byte(data.Result))\n // Sign the data with the private key\n privkey, err3 := crypto.HexToECDSA(pkey)\n if err3 != nil { return \"\", fmt.Errorf(\"Could not parse private key: (%s)\", err3) }\n // Sign the auth data\n _sig, err4 := sig.Ecsign(data_hash, privkey)\n if err4 != nil { return \"\", fmt.Errorf(\"Could not sign with private key: (%s)\", err4) }\n\n // 2: Send sigature, get token\n // ---------------------\n var authdata = new(StringRes)\n var jsonStr = []byte(`{\"owner\":\"`+address+`\",\"sig\":\"0x`+_sig+`\"}`)\n res, err5 := http.Post(API+\"/Authenticate\", \"application/json\", bytes.NewBuffer(jsonStr))\n if err5 != nil { return \"\", fmt.Errorf(\"Could not hit POST /Authenticate: (%s)\", err5) }\n if res.StatusCode != 200 { return \"\", fmt.Errorf(\"(%s): Error in POST /Authenticate\", res.StatusCode)}\n body, err6 := ioutil.ReadAll(res.Body)\n if err6 != nil { return \"\" , fmt.Errorf(\"Could not read /Authenticate body: (%s)\", err6)}\n err7 := json.Unmarshal(body, &authdata)\n if err7 != nil { return \"\", fmt.Errorf(\"Could not unmarshal /Authenticate body: (%s)\", err7) }\n\n // Return the JSON web token\n return string(authdata.Result), nil\n}", "func (tmdb *TMDb) GetAuthValidateToken(token, user, password string) (*AuthenticationToken, error) {\n\tvar validToken AuthenticationToken\n\turi := fmt.Sprintf(\"%s/authentication/token/validate_with_login?api_key=%s&request_token=%s&username=%s&password=%s\", baseURL, tmdb.apiKey, token, user, password)\n\tresult, err := getTmdb(uri, &validToken)\n\treturn result.(*AuthenticationToken), 
err\n}", "func extractToken(r *http.Request) (string, error) {\n\treqToken := r.Header.Get(\"Authorization\")\n\tsplitToken := strings.Split(reqToken, \"Bearer \")\n\n\tif len(splitToken) < 2 {\n\t\treturn \"\", errors.New(\"No token\")\n\t}\n\n\treturn splitToken[1], nil\n}", "func ValidateIdTokenWithNonce(aud string, idToken string, nonce string) (*SiwaIdToken, string) {\n\n\t//initialize the token object\n\tvar siwaIdToken *SiwaIdToken = &SiwaIdToken{Valid: false}\n\n\tif idToken == \"\" {\n\t\treturn siwaIdToken, \"empty_token\"\n\t}\n\n\t//split and decode token\n\tparts := strings.Split(idToken, \".\")\n\tif len(parts) != 3 {\n\t\treturn siwaIdToken, \"invalid_format_missing_parts\"\n\t}\n\tjsonHeaderB, err := base64UrlDecode(parts[0])\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_header_base64_decode_failed error:\" + err.Error()\n\t}\n\tvar jwtHeader JWTTokenHeader\n\terr = json.Unmarshal(jsonHeaderB, &jwtHeader)\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_header_json_decode_failed error:\" + err.Error()\n\t}\n\tjsonBodyB, err := base64UrlDecode(parts[1])\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_body_base64_decode_failed error:\" + err.Error()\n\t}\n\tvar jwtBody JWTTokenBody\n\terr = json.Unmarshal(jsonBodyB, &jwtBody)\n\tif err != nil {\n\t\treturn siwaIdToken, \"invalid_format_body_json_decode_failed error:\" + err.Error()\n\t}\n\n\t//the basic validation tests pass. 
Now check if the contents of token are valid\n\tvar reason string\n\tvar valid bool = true\n\n\t//Verify the nonce for the authentication\n\t//if idtoken had nonce, the check will fail\n\tif jwtBody.Nonce != \"\" && jwtBody.Nonce != nonce {\n\t\treason = reason + \"nonce_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the iss field contains https://appleid.apple.com\n\tif jwtBody.Iss != \"https://appleid.apple.com\" {\n\t\treason = reason + \" iss_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the aud field is the developer’s client_id\n\tif jwtBody.Aud != aud {\n\t\treason = reason + \" aud_check_failed\"\n\t\tvalid = false\n\t}\n\n\t//Verify that the time is earlier than the exp value of the token\n\tif jwtBody.Exp < time.Now().Unix() {\n\t\treason = reason + \" expiry_in_past\"\n\t\tvalid = false\n\t}\n\n\t//Verify the JWS E256 signature using the server’s public key\n\tvar decodedSignature []byte\n\tdecodedSignature, err = base64UrlDecode(parts[2])\n\tif err != nil {\n\t\treason = reason + \" signature_base64_decode_failed error:\" + err.Error()\n\t\tvalid = false\n\t} else if !verifyAppleRSA256(parts[0]+\".\"+parts[1], decodedSignature, jwtHeader.Kid) {\n\t\treason = reason + \" signature_verification_failed\"\n\t\tvalid = false\n\t}\n\n\t//set the values of parsed token into the id token object\n\tsiwaIdToken.Header = &jwtHeader\n\tsiwaIdToken.Body = &jwtBody\n\tsiwaIdToken.Valid = valid\n\tsiwaIdToken.Signature = decodedSignature\n\n\treturn siwaIdToken, reason\n}", "func ValidateToken(token string) (string, error) {\n username, exists := Sessions[token];\n if (!exists) {\n return \"\", apierrors.TokenValidationError{apierrors.TOKEN_VALIDATION_NO_TOKEN};\n }\n\n return username, nil;\n}", "func ValidateToken(tokenString string, secretSignKey []byte) (string, error) {\n\ttoken, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\treturn secretSignKey, nil\n\t})\n\n\tif claims, ok := 
token.Claims.(*Claims); ok && token.Valid {\n\t\t// fmt.Printf(\"%v %v\", claims.Email, claims.StandardClaims.ExpiresAt)\n\t\treturn claims.Email, nil\n\t}\n\treturn \"\", err\n}", "func ValidateToken(token string) interface{} {\n\tfmt.Println(\"Starting token validation...\")\n\tclaims := jwt.MapClaims{}\n\t_, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(Secret), nil\n\t})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: \", err)\n\t\treturn map[string]string{\"User\": \"\", \"Role\": \"\"}\n\t}\n\n\tfmt.Println(claims)\n\n\tudatos := make(map[string]string)\n\tudatos[\"User\"] = claims[\"user\"].(string)\n\tudatos[\"Role\"] = claims[\"role\"].(string)\n\n\treturn udatos\n}", "func (svc *basicAuthNService) ValidateToken(tokenString, kid string) (dto.CustomClaim, error) {\n\tclaim := dto.CustomClaim{}\n\n\tkf := func(token *stdjwt.Token) (interface{}, error) {\n\t\tkeyID := token.Header[\"kid\"].(string)\n\t\tif keyID != kid {\n\t\t\treturn claim, stdjwt.ErrInvalidKeyType\n\t\t}\n\t\treturn []byte(svcconf.C.Auth.SecretKey), nil\n\t}\n\n\ttoken, err := stdjwt.ParseWithClaims(tokenString, &claim, kf)\n\n\t// check if signature is valid\n\tif err != nil {\n\t\treturn claim, err\n\t}\n\tif token.Valid {\n\t\treturn claim, nil\n\t}\n\treturn claim, kitjwt.ErrTokenInvalid\n}", "func Validate(r *http.Request, db *sql.DB) (UserID, error) {\n\ttokens, ok := r.Header[\"Authorization\"]\n\tif !ok {\n\t\treturn 0, ErrNoAuthHeader\n\t}\n\ttoken := strings.TrimPrefix(tokens[0], \"Bearer \")\n\tnow := time.Now().Unix()\n\tvar userID UserID\n\terr := db.QueryRow(`\n\t\tselect\n\t\t\toauth_sessions.owner_id\n\t\tfrom \n\t\t\toauth_access_tokens\n\t\t\tjoin oauth_sessions on oauth_access_tokens.session_id = oauth_sessions.id\n\t\twhere\n\t\t\toauth_access_tokens.id = $1\n\t\t\tand oauth_access_tokens.expire_time > 
$2\n\t\t`, token, now).Scan(&userID)\n\n\tif err == sql.ErrNoRows {\n\t\treturn 0, errors.Wrapf(ErrSessionInvalid, token)\n\t} else if err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn userID, nil\n}", "func VerifyToken(c *fiber.Ctx) {\n\trawToken := strings.Replace(string(c.Fasthttp.Request.Header.Peek(\"Authorization\")), \"Bearer \", \"\", 1)\n\tif rawToken == \"\" {\n\t\tc.Status(401).JSON(\"Missing token\")\n\t\treturn\n\t}\n\n\tjwtKey := []byte(GetDotEnv(\"JWT_KEY\"))\n\thasToken, _ := jwt.Parse(rawToken, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Error in token verify\")\n\t\t}\n\t\treturn jwtKey, nil\n\t})\n\n\tif hasToken.Valid {\n\t\tc.Next()\n\t} else {\n\t\tc.Status(401).JSON(\"Invalid Token\")\n\t}\n}", "func validateToken(tokenObj token.StructToken) (string, bool) {\n\n\tvar errorfound string\n\t//validate token id ==100\n\t//if len(tokenObj.TokenID) != 100 {\n\t//\terrorfound = \"token ID must be 100 characters\"\n\t//\treturn errorfound, false\n\t//}\n\t//validate token name ==20\n\tif len(tokenObj.TokenName) < 4 || len(tokenObj.TokenName) > 20 {\n\t\terrorfound = \"token name must be more than 4 characters and less than or equal 20 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate token symbol <= 4\n\tif len(tokenObj.TokenSymbol) > 4 {\n\t\terrorfound = \"token symbol should less than or equal to 4 characters\"\n\t\treturn errorfound, false\n\t}\n\t// validate icon url if empty or ==100\n\t// if len(tokenObj.IconURL) == 0 || len(tokenObj.IconURL) <= 100 {\n\t// \terrorfound = \"\"\n\t// } else {\n\t// \terrorfound = \"Icon URL is optiaonal if enter it must be less or equal 100 characters\"\n\t// \treturn errorfound, false\n\t// }\n\t// validate description if empty or == 100\n\tif len(tokenObj.Description) == 0 || len(tokenObj.Description) <= 100 {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"Description is optiaonal if enter it must 
be less or equal 100 characters\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if empty\n\tif tokenObj.InitiatorAddress == \"\" {\n\t\terrorfound = \"please enter initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\t//validate initiator address if exist in account data\n\taccountobj := account.GetAccountByAccountPubicKey(tokenObj.InitiatorAddress)\n\tfmt.Println(\"------------------ \", accountobj)\n\tif accountobj.AccountPublicKey == \"\" {\n\t\terrorfound = \"please enter valid initiator address (Public key)\"\n\t\treturn errorfound, false\n\t}\n\tif accountobj.AccountPassword != tokenObj.Password {\n\t\terrorfound = \"The given password is incorrect.\"\n\t\treturn errorfound, false\n\t}\n\n\t//validate Tokens Total Supply less than or equal zero\n\tif tokenObj.TokensTotalSupply < 1 {\n\t\terrorfound = \"please enter Tokens Total Supply more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens value less than or equal zero\n\tif tokenObj.TokenValue <= 0.0 {\n\t\terrorfound = \"please enter Tokens value more than zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate token precision from 0 to 5\n\tif tokenObj.Precision < 0 || tokenObj.Precision > 5 {\n\t\terrorfound = \"please enter Precision range from 0 to 5 \"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens UsageType is mandatory security or utility\n\tif tokenObj.UsageType == \"security\" || tokenObj.UsageType == \"utility\" {\n\t\terrorfound = \"\"\n\t} else {\n\t\terrorfound = \"please enter UsageType security or utility\"\n\t\treturn errorfound, false\n\t}\n\tif tokenObj.UsageType == \"security\" && tokenObj.Precision != 0 {\n\t\terrorfound = \"UsageType security and must precision equal zero\"\n\t\treturn errorfound, false\n\t}\n\t//validate Tokens TokenType is mandatory public or private\n\tif tokenObj.TokenType == \"public\" || tokenObj.TokenType == \"private\" {\n\t\t// check type token is public, validating for enter contact ID\n\t\tif 
tokenObj.TokenType == \"public\" {\n\t\t\t// validate ContractID if empty or ==60\n\t\t\tif len(tokenObj.ContractID) < 4 || len(tokenObj.ContractID) > 60 {\n\t\t\t\terrorfound = \"enter ContractID must be more than 4 character and less than or equal 60 characters\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t\t// check type token is Private , validating for enter pentential PK ,\n\t\t// enter the potential users public keys which can use this token\n\t\taccountList := accountdb.GetAllAccounts()\n\t\tif tokenObj.TokenType == \"private\" {\n\t\t\t//enter pentential PK which can use this token\n\t\t\tif len(tokenObj.UserPublicKey) != 0 {\n\t\t\t\tfor _, pk := range tokenObj.UserPublicKey {\n\t\t\t\t\tif pk == tokenObj.InitiatorAddress {\n\t\t\t\t\t\terrorfound = \"user create token can't be in user public key \"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t\tif !containspk(accountList, pk) {\n\t\t\t\t\t\terrorfound = \"this public key is not associated with any account\"\n\t\t\t\t\t\treturn errorfound, false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrorfound = \"enter the potential users public keys which can use this token\"\n\t\t\t\treturn errorfound, false\n\t\t\t}\n\t\t}\n\t} else {\n\t\terrorfound = \"please enter TokenType is public or private\"\n\t\treturn errorfound, false\n\t}\n\n\t// Dynamic price\tIf the price of token is dynamic it gets its value from bidding platform.\n\t// Bidding platform API URL.\n\t// based on ValueDynamic True or false\n\tif tokenObj.ValueDynamic == true {\n\t\t//for example value\n\t\tbiddingplatformValue := 5.5\n\t\ttokenObj.Dynamicprice = biddingplatformValue\n\t}\n\treturn \"\", true\n}", "func (auth *DelegateAuthService) Verify(token string) (string, error) {\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\t\"token\": token,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tendpoint := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: auth.controllerOrigin,\n\t\tPath: 
\"/api/v1/user/verify\",\n\t}\n\tresp, err := http.Post(endpoint.String(), \"application/json\", bytes.NewReader(data))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tresBody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tvar resOK struct {\n\t\t\tUserID string `json:\"userId\"`\n\t\t}\n\t\terr = json.Unmarshal(resBody, &resOK)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn resOK.UserID, nil\n\t} else {\n\t\tvar resError struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t}\n\t\terr = json.Unmarshal(resBody, &resError)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.New(string(resBody))\n\t\t}\n\n\t\treturn \"\", errors.New(resError.Status)\n\t}\n}", "func VerifyIDToken(ctx context.Context, token string, v IDTokenVerifier) (oidc.IDTokenClaims, error) {\n\tclaims := oidc.EmptyIDTokenClaims()\n\n\tdecrypted, err := oidc.DecryptToken(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpayload, err := oidc.ParseToken(decrypted, claims)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := oidc.CheckSubject(claims); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckIssuer(claims, v.Issuer()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAudience(claims, v.ClientID()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthorizedParty(claims, v.ClientID()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckSignature(ctx, decrypted, payload, claims, v.SupportedSignAlgs(), v.KeySet()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckExpiration(claims, v.Offset()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckIssuedAt(claims, v.MaxAgeIAT(), v.Offset()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckNonce(claims, v.Nonce(ctx)); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthorizationContextClassReference(claims, 
v.ACR()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = oidc.CheckAuthTime(claims, v.MaxAge()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn claims, nil\n}", "func Authenticate(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\t// do some work before calling the next handler\n\t\terr := checkJwt(c.Response(), c.Request())\n\t\tif err == nil {\n\t\t\terr := next(c)\n\t\t\t// do some work after calling the next handler\n\t\t\treturn err\n\t\t}\n\n\t\treturn err\n\t}\n}", "func ValidateToken(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tsession := GetSession(w, req, cookieName)\n\t\taccessToken, setbool := session.Values[\"access_token\"].(string)\n\t\tif setbool == true && accessToken == \"\" {\n\t\t\tRedirectLogin(w, req)\n\t\t\t//return\n\t\t} else if setbool == false {\n\t\t\tRedirectLogin(w, req)\n\t\t} else {\n\t\t\tvar p jwt.Parser\n\t\t\ttoken, _, _ := p.ParseUnverified(accessToken, &jwt.StandardClaims{})\n\t\t\tif err := token.Claims.Valid(); err != nil {\n\t\t\t\t//invalid\n\t\t\t\tRedirectLogin(w, req)\n\t\t\t\t//return\n\t\t\t} else {\n\t\t\t\t//valid\n\t\t\t\tnext(w, req)\n\t\t\t\t//return\n\t\t\t}\n\t\t}\n\t\t//RedirectLogin(w, r)\n\t\treturn\n\t})\n}", "func ValidateToken(tokenString string) (string, error) {\n\tsecret := []byte(\"kalle4ever\")\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn secret, nil\n\t})\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\treturn claims[\"username\"].(string), nil\n\t}\n\treturn \"\", err\n}", "func (a *Auth) Authenticate(handler http.Handler) http.Handler {\n\tif handler == nil {\n\t\tpanic(\"auth: nil handler\")\n\t}\n\n\treturn http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) {\n\t\tif a.cfg.Disable {\n\t\t\thandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttoken, err := a.getCookie(r)\n\t\tif token == nil && err == nil {\n\t\t\t// Cookie is missing, invalid. Fetch new token from OAuth2 provider.\n\t\t\t// Redirect user to the OAuth2 consent page to ask for permission for the scopes specified\n\t\t\t// above.\n\t\t\t// Set the scope to the current request URL, it will be used by the redirect handler to\n\t\t\t// redirect back to the url that requested the authentication.\n\t\t\turl := a.cfg.AuthCodeURL(r.RequestURI)\n\t\t\thttp.Redirect(w, r, url, http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Get cookie error: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// Source token, in case the token needs a renewal.\n\t\tnewOauth2Token, err := a.cfg.TokenSource(r.Context(), token.toOauth2()).Token()\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Internal error\", http.StatusInternalServerError)\n\t\t\ta.logf(\"Failed token source: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tnewToken := fromOauth2(newOauth2Token)\n\n\t\tif newToken.IDToken != token.IDToken {\n\t\t\ta.logf(\"Refreshed token\")\n\t\t\ttoken = newToken\n\t\t\ta.setCookie(w, token)\n\t\t}\n\n\t\t// Validate the id_token.\n\t\tpayload, err := a.validator.Validate(r.Context(), token.IDToken, a.cfg.ClientID)\n\t\tif err != nil {\n\t\t\ta.clearCookie(w)\n\t\t\thttp.Error(w, \"Invalid auth.\", http.StatusUnauthorized)\n\t\t\ta.logf(\"Invalid token, reset cookie: %s\", err)\n\t\t\treturn\n\t\t}\n\t\t// User is authenticated.\n\t\t// Store email and name in context, and call the inner handler.\n\t\tcreds := &Creds{\n\t\t\tEmail: payload.Claims[\"email\"].(string),\n\t\t\tName: payload.Claims[\"name\"].(string),\n\t\t}\n\t\tr = r.WithContext(context.WithValue(r.Context(), credsKey, creds))\n\t\thandler.ServeHTTP(w, 
r)\n\t})\n}", "func VerifyAuthToken(ctx context.Context, header string, verifier *oidc.IDTokenVerifier) (*oidc.IDToken, error) {\n\tparts := strings.Split(header, \" \")\n\tif len(parts) != 2 {\n\t\treturn nil, errors.New(\"invalid authorization header\")\n\t}\n\treturn verifier.Verify(ctx, parts[1])\n}", "func ValidateIDToken(idToken, clientID, hostedDomain string) (*IDTokenClaims, error) {\n\ttoken, err := jwt.Parse(idToken, GoogleKeyFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\tmapClaims, ok := token.Claims.(jwt.MapClaims)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !mapClaims.VerifyIssuer(\"accounts.google.com\", true) {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !mapClaims.VerifyAudience(clientID, true) {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Check hosted domain\n\thd, ok := mapClaims[\"hd\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\thds, ok := hd.(string)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif hds != hostedDomain {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Check email verified\n\tev, ok := mapClaims[\"email_verified\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tevb, ok := ev.(bool)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\tif !evb {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Email\n\temail, ok := mapClaims[\"email\"]\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\temails, ok := email.(string)\n\tif !ok {\n\t\treturn nil, ErrInvalidIDToken\n\t}\n\n\t// Start setting up return value\n\trv := &IDTokenClaims{\n\t\tEmailAddress: emails,\n\t}\n\n\t// Try to get first name, it's OK if it fails\n\tfirstName, ok := mapClaims[\"given_name\"]\n\tif ok {\n\t\tnameAsString, ok := firstName.(string)\n\t\tif ok {\n\t\t\trv.FirstName = nameAsString\n\t\t}\n\t}\n\n\t// Try to get last name, it's OK if it fails\n\tlastName, ok := mapClaims[\"family_name\"]\n\tif ok {\n\t\tnameAsString, ok := 
lastName.(string)\n\t\tif ok {\n\t\t\trv.LastName = nameAsString\n\t\t}\n\t}\n\n\treturn rv, nil\n}", "func AuthorizeToken(token string) *AuthorizeTokenAttemptResponse {\n\tlookup := ReverseLookupItem(token)\n\tif !lookup.Exists {\n\t\treturn &AuthorizeTokenAttemptResponse{\"\", \"\", false}\n\t}\n\treturn &AuthorizeTokenAttemptResponse{token, lookup.ReverseLookup.ReverseValue, true}\n\n}", "func (ja *JWTAuth) Authenticate(rw http.ResponseWriter, r *http.Request) (User, bool, error) {\n\tvar (\n\t\tcandidates []string\n\t\tgotToken *Token\n\t\terr error\n\t)\n\n\tcandidates = append(candidates, getTokensFromQuery(r, ja.FromQuery)...)\n\tcandidates = append(candidates, getTokensFromHeader(r, ja.FromHeader)...)\n\tcandidates = append(candidates, getTokensFromCookies(r, ja.FromCookies)...)\n\n\tcandidates = append(candidates, getTokensFromHeader(r, []string{\"Authorization\"})...)\n\tchecked := make(map[string]struct{})\n\tparser := &jwt.Parser{\n\t\tUseJSONNumber: true, // parse number in JSON object to json.Number instead of float64\n\t}\n\n\tfor _, candidateToken := range candidates {\n\t\ttokenString := normToken(candidateToken)\n\t\tif _, ok := checked[tokenString]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tgotToken, err = parser.Parse(tokenString, func(*Token) (interface{}, error) {\n\t\t\tsignKeyString := \"-----BEGIN CERTIFICATE-----\\n\" + \"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArD2tI1RXi/guGpw4/uq/id2xG2mdrRgJ1U+fy3vOsT9YH5Y1pxIA1VVMxGixzdSlDzB6UMUTa2XMLetEzIHAz5cNc7aNF+r1wLIrLWS0wHTfjte8MKTDtUehcIF7+kQ3dq9TJ/lqYh4q3/vIOJNcBXRY18de0HeMMqQOw0n+QUrbSBAYGNyam976quRJOKTCaHy0c91FYiE9DmskanHPUyZpYE8EWWVFa9C08OGvybBIpfAfRnI/M9qmYS0putU8UfxTFa6XSSqsmECtmjECA+KXp24buBWyJYh/3HAPk5JdZZhjpLPKf/cyg5Cpk/udoBNZrcAJQzuKgRpgZsdBeQIDAQAB\" + \"\\n-----END CERTIFICATE-----\"\n\t\t\tif key, errr := jwt.ParseRSAPublicKeyFromPEM([]byte(signKeyString)); errr != nil {\n\t\t\t\treturn nil,nil\n\t\t\t} else {\n\t\t\t\treturn key, nil\n\t\t\t}\n\t\t})\n\t\tchecked[tokenString] = 
struct{}{}\n\n\t\tlogger := ja.logger.With(zap.String(\"token_string\", desensitizedTokenString(tokenString)))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\tvar gotClaims = gotToken.Claims.(MapClaims)\n\t\t// By default, the following claims will be verified:\n\t\t// - \"exp\"\n\t\t// - \"iat\"\n\t\t// - \"nbf\"\n\t\t// Here, if `aud_whitelist` or `iss_whitelist` were specified,\n\t\t// continue to verify \"aud\" and \"iss\" correspondingly.\n\t\tif len(ja.IssuerWhitelist) > 0 {\n\t\t\tisValidIssuer := false\n\t\t\tfor _, issuer := range ja.IssuerWhitelist {\n\t\t\t\tif gotClaims.VerifyIssuer(issuer, true) {\n\t\t\t\t\tisValidIssuer = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isValidIssuer {\n\t\t\t\terr = errors.New(\"invalid issuer\")\n\t\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif len(ja.AudienceWhitelist) > 0 {\n\t\t\tisValidAudience := false\n\t\t\tfor _, audience := range ja.AudienceWhitelist {\n\t\t\t\tif gotClaims.VerifyAudience(audience, true) {\n\t\t\t\t\tisValidAudience = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isValidAudience {\n\t\t\t\terr = errors.New(\"invalid audience\")\n\t\t\t\tlogger.Error(\"invalid token\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// The token is valid. 
Continue to check the user claim.\n\t\tclaimName, gotUserID := getUserID(gotClaims, ja.UserClaims)\n\t\tif gotUserID == \"\" {\n\t\t\terr = errors.New(\"empty user claim\")\n\t\t\tlogger.Error(\"invalid token\", zap.Strings(\"user_claims\", ja.UserClaims), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Successfully authenticated!\n\t\tvar user = User{\n\t\t\tID: gotUserID,\n\t\t\tMetadata: getUserMetadata(gotClaims, ja.MetaClaims),\n\t\t}\n\t\tlogger.Info(\"user authenticated\", zap.String(\"user_claim\", claimName), zap.String(\"id\", gotUserID))\n\t\treturn user, true, nil\n\t}\n\n\treturn User{}, false, err\n}", "func (c *Conn) authenticate() error {\n\tif c.oAuth == nil || c.oAuth.Valid() {\n\t\t// Authentication is not required or already validated.\n\t\treturn nil\n\t}\n\tif !c.oAuth.IsSet() {\n\t\t// No client information to refresh the token.\n\t\treturn ErrBadToken\n\t}\n\td, err := c.downloadToken()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.retrieveToken(d)\n}", "func ProcessToken(tk string) (*models.Claim, bool, string, error) {\n\tmiClave := []byte(\"ClimediKey2020\")\n\tclaims := &models.Claim{}\n\tvar usuario models.Usuario\n\n\tsplitToken := strings.Split(tk, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claims, false, string(\"\"), errors.New(\"Formato de Token invalido\")\n\t}\n\n\ttk = strings.TrimSpace(splitToken[1])\n\n\ttkn, err := jwt.ParseWithClaims(tk, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn miClave, nil\n\t})\n\n\tif !tkn.Valid {\n\t\treturn claims, false, string(\"\"), errors.New(\"Token invalido\")\n\t}\n\n\tvar db string\n\tproperties, err := os.Open(\"\" + os.Getenv(\"CONFIG\") + \"/climedi/climedi.properties\")\n\tif err != nil {\n\t\tlog.Println(\"Error al leer archivo de configuraciones\")\n\t} else {\n\t\tscanner := bufio.NewScanner(properties)\n\t\tfor scanner.Scan() {\n\t\t\tlinea := scanner.Text()\n\t\t\tif strings.HasPrefix(linea, \"userdb\") {\n\t\t\t\tdb = 
linea[7:]\n\t\t\t}\n\t\t}\n\t}\n\n\tdbref := database.Connect(db)\n\tdefer database.Disconnect(dbref)\n\n\tif err == nil {\n\t\texists, _ := usuario.CheckUsuario(claims.ID, dbref)\n\n\t\tif exists {\n\t\t\tUserID = claims.ID\n\t\t\tDoctorID = claims.IDDoc\n\t\t\tUserChild = claims.Child\n\t\t\tUserMaster = claims.Master\n\t\t\tUserTipoPlan = claims.TipoPlan\n\t\t}\n\t\treturn claims, exists, UserID, nil\n\t}\n\n\treturn claims, false, string(\"\"), err\n}", "func ValidateToken(bearerHeader string) (User, error) {\n\n\t// format the token string\n\ttokenString := strings.Split(bearerHeader, \" \")[1]\n\n\tvar user User\n\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t// Don't forget to validate the alg is what you expect:\n\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\n\t\treturn []byte(\"secretkey\"), nil\n\t})\n\n\tif err != nil {\n\n\t\tfmt.Println(err)\n\t\treturn user, err\n\t}\n\n\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t// convert the interface to the map[string]interface{}\n\t\ts := claims[\"user\"].(map[string]interface{})\n\n\t\t// create a user of User type\n\t\t// convert the s[\"userID\"] interface to string\n\t\tuser := User{s[\"userID\"].(string), s[\"name\"].(string)}\n\n\t\treturn user, nil\n\n\t}\n\n\treturn user, errors.New(\"Something went wrong\")\n\n}", "func ValidateAuthToken(tokenStr string) (bool, *uuid.UUID, error) {\n\t//initialize the claims\n\tclaims := &AuthClaims{}\n\n\t//parse the JWT and load the claims\n\ttoken, err := jwt.ParseWithClaims(tokenStr, claims, getTokenKey)\n\tif err != nil {\n\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\treturn false, nil, nil\n\t\t}\n\t\treturn false, nil, err\n\t}\n\n\t//verify the signing algorithm\n\tif token.Method.Alg() != JWTSigningAlgorithm {\n\t\treturn false, nil, fmt.Errorf(\"invalid signing algorthm: 
%s\", token.Method.Alg())\n\t}\n\n\t//check if the token is valid\n\tif !token.Valid {\n\t\treturn false, nil, nil\n\t}\n\n\t//extract the user id\n\tuserIDStr := claims.UserID\n\tuserID := uuid.FromStringOrNil(userIDStr)\n\tif userID == uuid.Nil {\n\t\treturn false, nil, nil\n\t}\n\treturn true, &userID, nil\n}", "func DecodeToken() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\ttokenStr := c.Request.Header.Get(\"Authorization\")\n\n\t\tuid, b := token.DecodeToken(tokenStr)\n\n\t\tif b {\n\t\t\tc.Set(common.TokenUid, uid)\n\t\t}\n\t\tc.Next()\n\t}\n}", "func ProcessToken(token string) (*models.Claim, bool, string, error) {\n\tprivateKey := []byte(\"EstaEsMiClavePrivadaDePrueba\")\n\tclaim := &models.Claim{}\n\n\t//Verificamos si el token que llego es valido, se utiliza la palabra Bearer como separador\n\tsplitToken := strings.Split(token, \"Bearer\")\n\tif len(splitToken) != 2 {\n\t\treturn claim, false, string(\"\"), errors.New(\"Formato del token invalido\")\n\t}\n\t//Limpiamos de espacion en blanco al token\n\ttoken = strings.TrimSpace(splitToken[1])\n\t//Mediantes una sintaxis porpia de jwt extraemos el payload del token a el modelo Claim\n\ttkn, err := jwt.ParseWithClaims(token, claim, func(_token *jwt.Token) (interface{}, error) {\n\t\treturn privateKey, nil\n\t})\n\t//Si no hay errores verificamos que el usuario del token exista\n\tif err == nil {\n\t\t_, existe, _ := db.ExisteUser(claim.Email)\n\t\tif existe == true {\n\t\t\t//En Variables publicas guardamos tanto el mail como el ID, para que puedan ser ocupadas en todo el proyecto\n\t\t\tEmail = claim.Email\n\t\t\tID = claim.ID.Hex()\n\t\t}\n\t\treturn claim, existe, ID, nil\n\t}\n\t//Si hubo un error 'err', verificamos si se produjo un fallo en obtener los datos con el ParseWithClaims\n\tif !tkn.Valid {\n\t\treturn claim, false, string(\"\"), errors.New(\"Token inválido\")\n\t}\n\t//Si el token fue valido pero aun asi produjo el error, devolvemos el error\n\treturn claim, false, string(\"\"), 
err\n}", "func (o OAuth1) parseTokenResponse(resp *http.Response) *Token {\n\ttokenBytes, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\to.logger.StatusWrite(\"Error reading from response body %v\\n\", err)\n\t}\n\ttokenData := string(tokenBytes)\n\treturn o.parseTokenData(tokenData)\n}", "func authenticate(r *http.Request) *UserID {\n token, err := r.Cookie(\"token\")\n if err != nil {\n return nil\n }\n passHash, err := r.Cookie(\"passHash\")\n if err != nil {\n return nil\n }\n return authenticateUserPass(token.Value, passHash.Value)\n}", "func validateAccessToken(token string, providedUsername string) bool {\n\tidpHost, idpPort := resolveIdpHostAndPort()\n\turl := \"https://\" + idpHost + \":\" + idpPort + \"/oauth2/introspect\"\n\tpayload := strings.NewReader(\"token=\" + token)\n\treq, err := http.NewRequest(\"POST\", url, payload)\n\tif err != nil {\n\t\tglog.Error(\"Error creating new request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\n\tusername, password := resolveCredentials()\n\treq.SetBasicAuth(username, password)\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tglog.Error(\"Error sending the request to the introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tglog.Error(\"Error reading the response from introspection endpoint: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tvar result map[string]interface{}\n\terr = json.Unmarshal([]byte(string(body)), &result)\n\tif err != nil {\n\t\tglog.Error(\"Error un marshalling the json: \", err)\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisActive, ok := (result[\"active\"]).(bool)\n\tif !ok {\n\t\tglog.Error(\"Error casting active to boolean. 
This may be due to a invalid token\")\n\t\textension.Exit(extension.ErrorExitCode)\n\t}\n\tisExpired := isExpired(result[\"exp\"])\n\tisValidUser := isValidUser(result[\"username\"], providedUsername)\n\treturn isExpired && isActive && isValidUser\n}", "func (p *AzureProvider) Authenticate(ctx context.Context, code string) (*sessions.State, error) {\n\t// convert authorization code into a token\n\toauth2Token, err := p.oauth.Exchange(ctx, code)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: token exchange failed %v\", err)\n\t}\n\n\t// id_token contains claims about the authenticated user\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: response did not contain an id_token\")\n\t}\n\t// Parse and verify ID Token payload.\n\tsession, err := p.IDTokenToSession(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: could not verify id_token %v\", err)\n\t}\n\n\tsession.AccessToken = oauth2Token.AccessToken\n\tsession.RefreshToken = oauth2Token.RefreshToken\n\tsession.Groups, err = p.UserGroups(ctx, session.AccessToken)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"identity/microsoft: could not retrieve groups %v\", err)\n\t}\n\treturn session, nil\n}", "func (o *oidcClient) authenticate(issuer string, clientID string, audience string) error {\n\t// Store the old transport and restore it in the end.\n\toldTransport := o.httpClient.Transport\n\to.oidcTransport.audience = audience\n\to.httpClient.Transport = o.oidcTransport\n\n\tdefer func() {\n\t\to.httpClient.Transport = oldTransport\n\t}()\n\n\tprovider, err := o.getProvider(issuer, clientID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.oidcTransport.deviceAuthorizationEndpoint = provider.GetDeviceAuthorizationEndpoint()\n\n\tresp, err := rp.DeviceAuthorization(oidcScopes, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Code: %s\\n\\n\", resp.UserCode)\n\n\tu, _ := 
url.Parse(resp.VerificationURIComplete)\n\n\terr = openBrowser(u.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT)\n\tdefer stop()\n\n\ttoken, err := rp.DeviceAccessToken(ctx, resp.DeviceCode, time.Duration(resp.Interval)*time.Second, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif o.tokens.Token == nil {\n\t\to.tokens.Token = &oauth2.Token{}\n\t}\n\n\to.tokens.Expiry = time.Now().Add(time.Duration(token.ExpiresIn))\n\to.tokens.IDToken = token.IDToken\n\to.tokens.Token.AccessToken = token.AccessToken\n\to.tokens.TokenType = token.TokenType\n\n\tif token.RefreshToken != \"\" {\n\t\to.tokens.Token.RefreshToken = token.RefreshToken\n\t}\n\n\treturn nil\n}", "func getAuthTokenFromHeader(ctx echo.Context) (string, error) {\n\theaderContent := ctx.Request().Header.Get(echo.HeaderAuthorization)\n\theaderContent = strings.TrimSpace(headerContent)\n\tprefix := \"Bearer:\"\n\tif strings.HasPrefix(headerContent, prefix) {\n\t\trunes := []rune(headerContent)\n\t\treturn strings.TrimSpace(string(runes[len(prefix):])), nil\n\t}\n\treturn \"\", fmt.Errorf(\"auth header not found\")\n}", "func VerifyAuthToken(w http.ResponseWriter, r *http.Request) {\n\t//Endpoint to verify user JWT\n\t//Useful for other services\n\ttoken := r.Header.Get(\"Authorization\")\n\tresult, _ := utils.VerifyJWT(token)\n\tverifyAuthTokenResp := response.JsonResponse(\"Token is valid\", 200)\n\tinvalidAuthTokenResp := response.JsonResponse(\"Token is invalid or expired\", 500)\n\tif result {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(verifyAuthTokenResp)\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(invalidAuthTokenResp)\n\t}\n}", "func (r commonResult) ExtractToken() (*Token, error) {\n\tvar s Token\n\terr := 
r.ExtractInto(&s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse the token itself from the stored headers.\n\ts.ID = r.Header.Get(\"X-Subject-Token\")\n\n\treturn &s, err\n}", "func parseToken(secret []byte, token string) (*AccessToken, error) {\n\tt, err := jwtParse(secret, token)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !t.Valid {\n\t\treturn nil, nil\n\t}\n\n\treturn jwtToAccessToken(t)\n}", "func (p Plugin) ExchangeToken(ctx context.Context, trustDomain, k8sSAjwt string) (\n\tstring /*access token*/, time.Time /*expireTime*/, int /*httpRespCode*/, error) {\n\taud := constructAudience(trustDomain)\n\tvar jsonStr = constructFederatedTokenRequest(aud, k8sSAjwt)\n\treq, _ := http.NewRequest(\"POST\", SecureTokenEndpoint, bytes.NewBuffer(jsonStr))\n\treq.Header.Set(\"Content-Type\", contentType)\n\n\tresp, err := p.hTTPClient.Do(req)\n\terrMsg := \"failed to call token exchange service. \"\n\tif err != nil || resp == nil {\n\t\tstatusCode := http.StatusServiceUnavailable\n\t\t// If resp is not null, return the actually status code returned from the token service.\n\t\t// If resp is null, return a service unavailable status and try again.\n\t\tif resp != nil {\n\t\t\tstatusCode = resp.StatusCode\n\t\t\terrMsg += fmt.Sprintf(\"HTTP status: %s. Error: %v\", resp.Status, err)\n\t\t} else {\n\t\t\terrMsg += fmt.Sprintf(\"HTTP response empty. Error: %v\", err)\n\t\t}\n\t\treturn \"\", time.Now(), statusCode, errors.New(errMsg)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\trespData := &federatedTokenResponse{}\n\tif err := json.Unmarshal(body, respData); err != nil {\n\t\treturn \"\", time.Now(), resp.StatusCode, fmt.Errorf(\n\t\t\t\"failed to unmarshal response data. HTTP status: %s. Error: %v. Body size: %d\", resp.Status, err, len(body))\n\t}\n\n\tif respData.AccessToken == \"\" {\n\t\treturn \"\", time.Now(), resp.StatusCode, fmt.Errorf(\n\t\t\t\"exchanged empty token. HTTP status: %s. 
Response: %v\", resp.Status, string(body))\n\t}\n\n\treturn respData.AccessToken, time.Now().Add(time.Second * time.Duration(respData.ExpiresIn)), resp.StatusCode, nil\n}", "func (c *resourcePrincipalFederationClient) exchangeToken(publicKeyBase64 string, tokenResponse resourcePrincipalTokenResponse) (sessionToken string, err error) {\n\trpServiceClient := c.ResourcePrincipalSessionTokenClient\n\n\t//Set the signer of this client to be the instance principal provider\n\trpServiceClient.Signer = common.DefaultRequestSigner(&c.instancePrincipalKeyProvider)\n\n\t// Call identity service to get resource principal session token\n\tsessionTokenReq := resourcePrincipalSessionTokenRequest{\n\t\tresourcePrincipalSessionTokenRequestBody{\n\t\t\tServicePrincipalSessionToken: tokenResponse.Body.ServicePrincipalSessionToken,\n\t\t\tResourcePrincipalToken: tokenResponse.Body.ResourcePrincipalToken,\n\t\t\tSessionPublicKey: publicKeyBase64,\n\t\t},\n\t}\n\n\tsessionTokenHTTPReq, err := common.MakeDefaultHTTPRequestWithTaggedStruct(http.MethodPost,\n\t\t\"\", sessionTokenReq)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsessionTokenHTTPRes, err := rpServiceClient.Call(context.Background(), &sessionTokenHTTPReq)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer common.CloseBodyIfValid(sessionTokenHTTPRes)\n\n\tsessionTokenRes := x509FederationResponse{}\n\terr = common.UnmarshalResponse(sessionTokenHTTPRes, &sessionTokenRes)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tsessionToken = sessionTokenRes.Token.Token\n\treturn\n}", "func Base64ToAuthenticate(token string) (*Authenticate, error) {\n\tjs, err := b64.StdEncoding.DecodeString(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tins := new(Authenticate)\n\terr = json.Unmarshal(js, ins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ins, nil\n}", "func getIdToken(token *oauth2.Token) (map[string]interface{}, error) {\n\ttokenData := make(map[string]interface{})\n\n\tidToken := token.Extra(\"id_token\")\n\tif idToken == nil 
{\n\t\treturn tokenData, nil\n\t}\n\n\tjwtParser := jwt.Parser{\n\t\tSkipClaimsValidation: true,\n\t}\n\n\tt, _, err := jwtParser.ParseUnverified(idToken.(string), jwt.MapClaims{})\n\tif err != nil {\n\t\treturn tokenData, errors.Wrap(err, \"failed to parse id_token\")\n\t}\n\n\tif claims, ok := t.Claims.(jwt.MapClaims); ok {\n\t\tfor k, v := range claims {\n\t\t\ttokenData[k] = v\n\t\t}\n\t}\n\n\treturn tokenData, nil\n}", "func (l *RemoteProvider) ExtractToken(w http.ResponseWriter, r *http.Request) {\n\tl.TokenStoreMut.Lock()\n\tdefer l.TokenStoreMut.Unlock()\n\n\ttokenString, err := l.GetToken(r)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Token not found: %s\", err.Error())\n\t\treturn\n\t}\n\tnewts := l.TokenStore[tokenString]\n\tif newts != \"\" {\n\t\ttokenString = newts\n\t}\n\n\tresp := map[string]interface{}{\n\t\t\"meshery-provider\": l.Name(),\n\t\ttokenName: tokenString,\n\t}\n\tlogrus.Debugf(\"encoded response : %v\", resp)\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\terr = ErrEncoding(err, \"Auth Details\")\n\t\tlogrus.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func verifyToken(tokenString string) (*jwt.Token, error) {\n\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t}\n\t\treturn []byte(os.Getenv(\"JWT_SECRET\")), nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn token, nil\n}", "func (a AuthConfigToken) Authenticate(metric *string) (bool, error) {\n\tauthSplit := strings.SplitN(*metric, \".\", 2)\n\tif len(authSplit) == 2 {\n\t\ttoken, exists := a.Tokens[authSplit[0]]\n\t\tif !exists || !token {\n\t\t\tauthError := errors.New(\"Invalid authentication token\")\n\t\t\treturn false, authError\n\t\t}\n\t} else {\n\t\tauthError := errors.New(\"Missing authentication token\")\n\t\treturn 
false, authError\n\t}\n\t*metric = authSplit[1]\n\treturn true, nil\n}", "func Validate(redisdb *redis.Client, auth string) (string, error) {\n\n\t// Extract the JWT token from the Authorization header\n\ttokenStr, err := parseAuthHeader(auth)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while parsing authorization header: %s\", err.Error())\n\t}\n\n\t// Validate token and extract the subject\n\tsub, err := validateToken(tokenStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while validating JWT token: %s\", err.Error())\n\t}\n\n\t// Lookup the session in Redis\n\tuser, err := Get(redisdb, sub, \"remote\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"while retrieving user ID from session: %s\", err.Error())\n\t}\n\n\treturn user, nil\n\n}", "func Authenticate(next buffalo.Handler) buffalo.Handler {\n\treturn func(c buffalo.Context) error {\n\t\t// do some work before calling the next handler\n\t\tclient, err := FirebaseApp.Auth(context.Background())\n\n\t\tidToken := c.Request().Header.Get(\"Authorization\")\n\t\tidToken = strings.Replace(idToken, `bearer `, \"\", 1)\n\t\tif ENV == \"development\" || ENV == \"test\" {\n\t\t\tfmt.Println(\"Authorization\", idToken)\n\t\t}\n\t\ttoken, err := client.VerifyIDToken(idToken)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error verifying ID token: %v\\n\", err)\n\t\t\tresponse := Response{\n\t\t\t\tMessage: \"Missing or invalid token.\",\n\t\t\t}\n\t\t\tc.Response().WriteHeader(http.StatusUnauthorized)\n\t\t\tjson.NewEncoder(c.Response()).Encode(response)\n\t\t\treturn err\n\t\t}\n\n\t\tif err := setCurrentUser(token.UID, c); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\terr = next(c)\n\t\treturn err\n\t}\n}", "func (a *Authenticator) ValidateToken(jwt string) (string, error) {\n\tvalidatedToken, err := jwkkeys.ValidateGoogleClaims(a.cachedKeys, jwt, a.audience, jwkkeys.GoogleIssuers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn validatedToken.GoogleClaims.Email, nil\n}", "func (auth 
*EdgeSampleAuth) Authenticate(request *http.Request) (int, string, string) {\n\treturn security.AuthFailed, \"\", \"\"\n}", "func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {\n\tfound, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)\n\tif found || err != nil {\n\t\treturn err\n\t}\n\n\treturn resolveBearerAuthTokenProviderChain(ctx, cfg, configs)\n}", "func TokenExtractor(encodedToken string) string {\n\tif encodedToken != \"\" {\n\t\tencodedPayload := strings.Split(encodedToken, \".\")\n\t\tdecodedPayload, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(encodedPayload[1])\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tpayload := make(map[string]string)\n\t\terr = json.Unmarshal(decodedPayload, &payload)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn payload[\"userId\"]\n\t}\n\treturn \"\"\n}", "func (v *verifierPrivate) ValidateTokenAndGetClaims(tokenString string, customClaims interface{}) (*Token, error) {\n\n\t// let us check if the verifier is already expired. If it is just return verifier expired error\n\t// The caller has to re-initialize the verifier.\n\ttoken := Token{}\n\ttoken.standardClaims = &jwt.StandardClaims{}\n\tparsedToken, err := jwt.ParseWithClaims(tokenString, token.standardClaims, func(token *jwt.Token) (interface{}, error) {\n\n\t\tif keyIDValue, keyIDExists := token.Header[\"kid\"]; keyIDExists {\n\n\t\t\tkeyIDString, ok := keyIDValue.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"kid (key id) in jwt header is not a string : %v\", keyIDValue)\n\t\t\t}\n\n\t\t\tif matchPubKey, found := v.pubKeyMap[keyIDString]; !found {\n\t\t\t\treturn nil, &MatchingCertNotFoundError{keyIDString}\n\t\t\t} else {\n\t\t\t\t// if the certificate just expired.. 
we need to return appropriate error\n\t\t\t\t// so that the caller can deal with it appropriately\n\t\t\t\tnow := time.Now()\n\t\t\t\tif now.After(matchPubKey.expTime) {\n\t\t\t\t\treturn nil, &MatchingCertJustExpired{keyIDString}\n\t\t\t\t}\n\t\t\t\t// if the verifier expired, we need to use a new instance of the verifier\n\t\t\t\tif time.Now().After(v.expiration) {\n\t\t\t\t\treturn nil, &VerifierExpiredError{v.expiration}\n\t\t\t\t}\n\t\t\t\treturn matchPubKey.pubKey, nil\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"kid (key id) field missing in token. field is mandatory\")\n\t\t}\n\t})\n\n\tif err != nil {\n\t\tif jwtErr, ok := err.(*jwt.ValidationError); ok {\n\t\t\tswitch e := jwtErr.Inner.(type) {\n\t\t\tcase *MatchingCertNotFoundError, *VerifierExpiredError, *MatchingCertJustExpired:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t\treturn nil, jwtErr\n\t\t}\n\t\treturn nil, err\n\t}\n\ttoken.jwtToken = parsedToken\n\t// so far we have only got the standardClaims parsed. We need to now fill the customClaims\n\n\tparts := strings.Split(tokenString, \".\")\n\t// no need check for the number of segments since the previous ParseWithClaims has already done this check.\n\t// therefor the following is redundant. 
If we change the implementation, will need to revisit\n\t//if len(parts) != 3 {\n\t//\treturn nil, \"jwt token to be parsed seems to be in \"\n\t//}\n\n\t// parse Claims\n\tvar claimBytes []byte\n\n\tif claimBytes, err = jwt.DecodeSegment(parts[1]); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode claims part of the jwt token\")\n\t}\n\tdec := json.NewDecoder(bytes.NewBuffer(claimBytes))\n\terr = dec.Decode(customClaims)\n\ttoken.customClaims = customClaims\n\n\treturn &token, nil\n}", "func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*SecurityInfo, error) {\n\n\ttoken := fmt.Sprintf(\"%s%s\", authPrefix, k.Token())\n\n\treq := esapi.SecurityAuthenticateRequest{\n\t\tHeader: map[string][]string{AuthKey: []string{token}},\n\t}\n\n\tres, err := req.Do(ctx, es)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"apikey auth request %s: %w\", k.Id, err)\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tif res.IsError() {\n\t\treturn nil, fmt.Errorf(\"apikey auth response %s: %s\", k.Id, res.String())\n\t}\n\n\tvar info SecurityInfo\n\tdecoder := json.NewDecoder(res.Body)\n\tif err := decoder.Decode(&info); err != nil {\n\t\treturn nil, fmt.Errorf(\"apikey auth parse %s: %w\", k.Id, err)\n\t}\n\n\treturn &info, nil\n}", "func (a *HyperflexApiService) GetHyperflexServiceAuthTokenByMoidExecute(r ApiGetHyperflexServiceAuthTokenByMoidRequest) (*HyperflexServiceAuthToken, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexServiceAuthToken\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.GetHyperflexServiceAuthTokenByMoid\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/ServiceAuthTokens/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, 
\"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/csv\", \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, 
localVarHTTPResponse, nil\n}", "func (p *provider) checkToken(spec *spec.Spec, req *http.Request, token string) (clientToken, error) {\n\t// 1. uc token\n\tucToken, err := auth.VerifyUCClientToken(token)\n\tif err == nil {\n\t\treturn clientToken{\n\t\t\tClientID: ucToken.ClientID,\n\t\t\tClientName: ucToken.ClientName,\n\t\t}, nil\n\t}\n\t// 2. openapi oauth2 token\n\toauthToken, err := auth.VerifyOpenapiOAuth2Token(p.oauth2server, nil, req)\n\tif err != nil {\n\t\treturn clientToken{}, err\n\t}\n\treturn clientToken{\n\t\tClientID: oauthToken.ClientID,\n\t\tClientName: oauthToken.ClientName,\n\t}, nil\n}", "func Authenticate(role string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\ttoken, err := GetTokenFromHeader(c)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tif len(role) > 0 && !token.Role.Check(role) {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Unauthorized\"})\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t\tc.Next()\n\t}\n}", "func ExtractToken(r *http.Request) string {\r\n\tauthorization := r.Header.Get(\"Authorization\")\r\n\tregex := regexp.MustCompile(\"(Bearer\\\\s)(.*)\")\r\n\tmatch := regex.FindStringSubmatch(authorization)\r\n\r\n\tif len(match) > 0 {\r\n\t\treturn match[2]\r\n\t}\r\n\r\n\treturn \"\"\r\n}", "func ValidateToken(pathHandler server.HandlerType) server.HandlerType {\n\treturn func(res http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"ValidateToken Received request: %v\", req)\n\t\tprovidedToken := req.Header.Get(tokenRequestHeader)\n\t\tif providedToken == \"\" {\n\t\t\tlog.Println(\"Token required; No token provided.\")\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\n\t\tif actualToken, ok := generatedTokens[providedToken]; ok {\n\t\t\taccessTime := time.Now()\n\t\t\tduration := accessTime.Sub(actualToken.CreatedAt)\n\t\t\tif int(duration.Seconds()) >= actualToken.TTL 
{\n\t\t\t\tlog.Println(\"Token has expired\")\n\t\t\t\tdelete(generatedTokens, providedToken)\n\t\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Println(\"Token validated!\")\n\t\t\tpathHandler(res, req)\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid token provided: %v\", providedToken)\n\t\t\tserver.ReturnUnauthorizedResponse(res)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (m *middlewareStruct) CheckJWTToken(c *gin.Context) {\n\tbearToken := c.GetHeader(\"Authorization\")\n\n\tstrArr := strings.Split(bearToken, \" \")\n\tif len(strArr) == 2 {\n\n\t\ttoken, err := m.service.VerifyToken(strArr[1], os.Getenv(\"ACCESS_SECRET\"))\n\t\tif err != nil {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\tclaims, _ := token.Claims.(jwt.MapClaims)\n\n\t\tc.Set(\"user_id\", claims[\"user_id\"])\n\n\t\treturn\n\n\t}\n\n\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": \"Token inválido\"})\n\treturn\n}" ]
[ "0.64408565", "0.6411155", "0.6346037", "0.6338294", "0.62949747", "0.6271194", "0.62069285", "0.617363", "0.61038214", "0.61009336", "0.5945495", "0.59345144", "0.5928352", "0.5908977", "0.5891017", "0.5836715", "0.58319247", "0.58029294", "0.5801394", "0.5798947", "0.5791079", "0.5720952", "0.57120425", "0.571185", "0.57047284", "0.56835616", "0.56816036", "0.56775326", "0.5668186", "0.5661136", "0.5644543", "0.56277674", "0.5608936", "0.5601224", "0.5594888", "0.5594492", "0.5590164", "0.5588905", "0.5571133", "0.5563127", "0.55361176", "0.55210847", "0.5504044", "0.5474867", "0.5467931", "0.54651195", "0.54552245", "0.54548866", "0.54535735", "0.5441994", "0.5428419", "0.5422301", "0.54046744", "0.5391617", "0.53846633", "0.53841585", "0.5370079", "0.5365341", "0.53613734", "0.53604674", "0.53488415", "0.5347618", "0.534592", "0.534143", "0.534093", "0.53349227", "0.5325819", "0.5325016", "0.5319933", "0.5319769", "0.53166056", "0.5316311", "0.5314096", "0.5305999", "0.53026354", "0.52945775", "0.5293135", "0.52898353", "0.528889", "0.5283154", "0.528309", "0.5282649", "0.5277613", "0.52764374", "0.5272928", "0.5270515", "0.5266754", "0.5265322", "0.5263115", "0.5261059", "0.5260928", "0.52608794", "0.5259772", "0.5253574", "0.5249562", "0.52477986", "0.52436274", "0.52402973", "0.52317226", "0.5231189" ]
0.67438513
0
/ Executes arbitrary stuff to prepare for the actual work, for instance installing packages or containers.
func Prepare() error { // log.Println("Preparing work...") // commands := [][]string{ // []string{"yum", "update", "-y"}, // []string{"yum", "install", "-y", "docker"}, // []string{"service", "docker", "start"}, // []string{"docker", "pull", "tnolet/scraper:0.1.0"}, // } // for _, command := range commands { // out, err := exec.Command(command).Output() // if err != nil { // log.Printf("Prepare command unsuccessful: %v, %v", err.Error(), out) // return err // } // log.Printf("Succesfully executed preparation: %v", out) // } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Execute() {\n\t// Load config\n\tloadConfig()\n\n\t// Init Redis Client\n\tinitRedisClient()\n\n\t// Synchronize composer.phar\n\tgo composerPhar(\"composerPhar\", 1)\n\n\t// Synchronize packages.json\n\tgo packagesJsonFile(\"PackagesJson\", 1)\n\n\t// Update status\n\tgo status(\"Status\", 1)\n\n\tWg.Add(1)\n\n\tfor i := 0; i < 12; i++ {\n\t\tgo providers(\"Provider\", i)\n\t}\n\n\tfor i := 0; i < 30; i++ {\n\t\tgo packages(\"Packages\", i)\n\t}\n\n\tfor i := 0; i < 50; i++ {\n\t\tgo dists(\"Dists\", i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(403, i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(500, i)\n\t}\n\n\tfor i := 0; i < 1; i++ {\n\t\tgo distsRetry(502, i)\n\t}\n\n}", "func Setup(c *exec.Cmd) {}", "func main() {\n\tif os.Args[0] == shimPath {\n\t\tif _, found := internalEnv(\"_DAGGER_INTERNAL_COMMAND\"); found {\n\t\t\tos.Exit(internalCommand())\n\t\t\treturn\n\t\t}\n\n\t\t// If we're being executed as `/_shim`, then we're inside the container and should shim\n\t\t// the user command.\n\t\tos.Exit(shim())\n\t} else {\n\t\t// Otherwise, we're being invoked directly by buildkitd and should setup the bundle.\n\t\tos.Exit(setupBundle())\n\t}\n}", "func setupMain() {\n\tflag.Parse()\n\texecutionArgs = []string{\"-o\", *diskoffset, *imagepath, \"5\"}\n\texecutionArgsRecover = []string{\"-o\", *diskoffset, *imagepath, \"-d\"}\n\tif !executeFLS(executionArgs) { //operates on cache object directory\n\t\tlog.Println(\"failed to execute FLS\")\n\t}\n\tprepGlobals()\n}", "func main() {\n\n\tvar version string\n\n\t// Derive the commit message from -X main.commit=$YOUR_VALUE_HERE\n\t// if the build does not have the commit variable set externally,\n\t// fall back to unsupported custom build\n\tif commit != \"\" {\n\t\tversion = commit\n\t} else {\n\t\tversion = \"unsupported custom build\"\n\t}\n\n\t// let the user know that we are running within a docker container\n\tcheckRunningWithinDocker()\n\n\t// build the Command Line 
interface\n\t// https://github.com/urfave/cli/blob/master/docs/v2/manual.md\n\n\t// basic information\n\tapp := &cli.App{\n\t\tName: \"appimagetool\",\n\t\tAuthors: \t\t\t\t[]*cli.Author{{Name: \"AppImage Project\"}},\n\t\tVersion: version,\n\t\tUsage: \t\t\"An automatic tool to create AppImages\",\n\t\tEnableBashCompletion: false,\n\t\tHideHelp: false,\n\t\tHideVersion: false,\n\t\tCompiled: time.Time{},\n\t\tCopyright: \"MIT License\",\n\t\tAction: \t\t\t\tbootstrapAppImageBuild,\n\n\t}\n\n\t// define subcommands, like 'deploy', 'validate', ...\n\tapp.Commands = []*cli.Command{\n\t\t{\n\t\t\tName: \"deploy\",\n\t\t\tUsage: \"Turns PREFIX directory into AppDir by deploying dependencies and AppRun file\",\n\t\t\tAction: bootstrapAppImageDeploy,\n\t\t},\n\t\t{\n\t\t\tName: \"validate\",\n\t\t\tUsage: \"Calculate the sha256 digest and check whether the signature is valid\",\n\t\t\tAction: bootstrapValidateAppImage,\n\t\t},\n\t\t{\n\t\t\tName: \"setupsigning\",\n\t\t\tUsage: \"Prepare a git repository that is used with Travis CI for signing AppImages\",\n\t\t\tAction: bootstrapSetupSigning,\n\t\t},\n\t\t{\n\t\t\tName: \t\"sections\",\n\t\t\tUsage: \t\"\",\n\t\t\tAction:\tbootstrapAppImageSections,\n\t\t},\n\t}\n\n\t// define flags, such as --libapprun_hooks, --standalone here ...\n\tapp.Flags = []cli.Flag{\n\t\t&cli.BoolFlag{\n\t\t\tName: \"libapprun_hooks\",\n\t\t\tAliases: []string{\"l\"},\n\t\t\tUsage: \"Use libapprun_hooks\",\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"overwrite\",\n\t\t\tAliases: []string{\"o\"},\n\t\t\tUsage: \"Overwrite existing files\",\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"standalone\",\n\t\t\tAliases: []string{\"s\"},\n\t\t\tUsage: \"Make standalone self-contained bundle\",\n\t\t},\n\t}\n\n\t// TODO: move travis based Sections to travis.go in future\n\tif os.Getenv(\"TRAVIS_TEST_RESULT\") == \"1\" {\n\t\tlog.Fatal(\"$TRAVIS_TEST_RESULT is 1, exiting...\")\n\t}\n\n\terrRuntime := app.Run(os.Args)\n\tif errRuntime != nil 
{\n\t\tlog.Fatal(errRuntime)\n\t}\n\n}", "func Execute() {\n\tlog.Entry().Infof(\"Version %s\", GitCommit)\n\n\trootCmd.AddCommand(ArtifactPrepareVersionCommand())\n\trootCmd.AddCommand(ConfigCommand())\n\trootCmd.AddCommand(DefaultsCommand())\n\trootCmd.AddCommand(ContainerSaveImageCommand())\n\trootCmd.AddCommand(CommandLineCompletionCommand())\n\trootCmd.AddCommand(VersionCommand())\n\trootCmd.AddCommand(DetectExecuteScanCommand())\n\trootCmd.AddCommand(HadolintExecuteCommand())\n\trootCmd.AddCommand(KarmaExecuteTestsCommand())\n\trootCmd.AddCommand(UiVeri5ExecuteTestsCommand())\n\trootCmd.AddCommand(SonarExecuteScanCommand())\n\trootCmd.AddCommand(KubernetesDeployCommand())\n\trootCmd.AddCommand(HelmExecuteCommand())\n\trootCmd.AddCommand(XsDeployCommand())\n\trootCmd.AddCommand(GithubCheckBranchProtectionCommand())\n\trootCmd.AddCommand(GithubCommentIssueCommand())\n\trootCmd.AddCommand(GithubCreateIssueCommand())\n\trootCmd.AddCommand(GithubCreatePullRequestCommand())\n\trootCmd.AddCommand(GithubPublishReleaseCommand())\n\trootCmd.AddCommand(GithubSetCommitStatusCommand())\n\trootCmd.AddCommand(GitopsUpdateDeploymentCommand())\n\trootCmd.AddCommand(CloudFoundryDeleteServiceCommand())\n\trootCmd.AddCommand(AbapEnvironmentPullGitRepoCommand())\n\trootCmd.AddCommand(AbapEnvironmentCloneGitRepoCommand())\n\trootCmd.AddCommand(AbapEnvironmentCheckoutBranchCommand())\n\trootCmd.AddCommand(AbapEnvironmentCreateTagCommand())\n\trootCmd.AddCommand(AbapEnvironmentCreateSystemCommand())\n\trootCmd.AddCommand(CheckmarxExecuteScanCommand())\n\trootCmd.AddCommand(CheckmarxOneExecuteScanCommand())\n\trootCmd.AddCommand(FortifyExecuteScanCommand())\n\trootCmd.AddCommand(CodeqlExecuteScanCommand())\n\trootCmd.AddCommand(CredentialdiggerScanCommand())\n\trootCmd.AddCommand(MtaBuildCommand())\n\trootCmd.AddCommand(ProtecodeExecuteScanCommand())\n\trootCmd.AddCommand(MavenExecuteCommand())\n\trootCmd.AddCommand(CloudFoundryCreateServiceKeyCommand())\n\trootCmd.AddCommand(MavenBui
ldCommand())\n\trootCmd.AddCommand(MavenExecuteIntegrationCommand())\n\trootCmd.AddCommand(MavenExecuteStaticCodeChecksCommand())\n\trootCmd.AddCommand(NexusUploadCommand())\n\trootCmd.AddCommand(AbapEnvironmentPushATCSystemConfigCommand())\n\trootCmd.AddCommand(AbapEnvironmentRunATCCheckCommand())\n\trootCmd.AddCommand(NpmExecuteScriptsCommand())\n\trootCmd.AddCommand(NpmExecuteLintCommand())\n\trootCmd.AddCommand(GctsCreateRepositoryCommand())\n\trootCmd.AddCommand(GctsExecuteABAPQualityChecksCommand())\n\trootCmd.AddCommand(GctsExecuteABAPUnitTestsCommand())\n\trootCmd.AddCommand(GctsDeployCommand())\n\trootCmd.AddCommand(MalwareExecuteScanCommand())\n\trootCmd.AddCommand(CloudFoundryCreateServiceCommand())\n\trootCmd.AddCommand(CloudFoundryDeployCommand())\n\trootCmd.AddCommand(GctsRollbackCommand())\n\trootCmd.AddCommand(WhitesourceExecuteScanCommand())\n\trootCmd.AddCommand(GctsCloneRepositoryCommand())\n\trootCmd.AddCommand(JsonApplyPatchCommand())\n\trootCmd.AddCommand(KanikoExecuteCommand())\n\trootCmd.AddCommand(CnbBuildCommand())\n\trootCmd.AddCommand(AbapEnvironmentBuildCommand())\n\trootCmd.AddCommand(AbapEnvironmentAssemblePackagesCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitCheckCVsCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitCheckPVCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitCreateTargetVectorCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitPublishTargetVectorCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitRegisterPackagesCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitReleasePackagesCommand())\n\trootCmd.AddCommand(AbapAddonAssemblyKitReserveNextPackagesCommand())\n\trootCmd.AddCommand(CloudFoundryCreateSpaceCommand())\n\trootCmd.AddCommand(CloudFoundryDeleteSpaceCommand())\n\trootCmd.AddCommand(VaultRotateSecretIdCommand())\n\trootCmd.AddCommand(IsChangeInDevelopmentCommand())\n\trootCmd.AddCommand(TransportRequestUploadCTSCommand())\n\trootCmd.AddCommand(TransportRequestUploadRFCCommand())\n\trootCmd.AddComma
nd(NewmanExecuteCommand())\n\trootCmd.AddCommand(IntegrationArtifactDeployCommand())\n\trootCmd.AddCommand(TransportRequestUploadSOLMANCommand())\n\trootCmd.AddCommand(IntegrationArtifactUpdateConfigurationCommand())\n\trootCmd.AddCommand(IntegrationArtifactGetMplStatusCommand())\n\trootCmd.AddCommand(IntegrationArtifactGetServiceEndpointCommand())\n\trootCmd.AddCommand(IntegrationArtifactDownloadCommand())\n\trootCmd.AddCommand(AbapEnvironmentAssembleConfirmCommand())\n\trootCmd.AddCommand(IntegrationArtifactUploadCommand())\n\trootCmd.AddCommand(IntegrationArtifactTriggerIntegrationTestCommand())\n\trootCmd.AddCommand(IntegrationArtifactUnDeployCommand())\n\trootCmd.AddCommand(IntegrationArtifactResourceCommand())\n\trootCmd.AddCommand(TerraformExecuteCommand())\n\trootCmd.AddCommand(ContainerExecuteStructureTestsCommand())\n\trootCmd.AddCommand(GaugeExecuteTestsCommand())\n\trootCmd.AddCommand(BatsExecuteTestsCommand())\n\trootCmd.AddCommand(PipelineCreateScanSummaryCommand())\n\trootCmd.AddCommand(TransportRequestDocIDFromGitCommand())\n\trootCmd.AddCommand(TransportRequestReqIDFromGitCommand())\n\trootCmd.AddCommand(WritePipelineEnv())\n\trootCmd.AddCommand(ReadPipelineEnv())\n\trootCmd.AddCommand(InfluxWriteDataCommand())\n\trootCmd.AddCommand(AbapEnvironmentRunAUnitTestCommand())\n\trootCmd.AddCommand(CheckStepActiveCommand())\n\trootCmd.AddCommand(GolangBuildCommand())\n\trootCmd.AddCommand(ShellExecuteCommand())\n\trootCmd.AddCommand(ApiProxyDownloadCommand())\n\trootCmd.AddCommand(ApiKeyValueMapDownloadCommand())\n\trootCmd.AddCommand(ApiProviderDownloadCommand())\n\trootCmd.AddCommand(ApiProxyUploadCommand())\n\trootCmd.AddCommand(GradleExecuteBuildCommand())\n\trootCmd.AddCommand(ApiKeyValueMapUploadCommand())\n\trootCmd.AddCommand(PythonBuildCommand())\n\trootCmd.AddCommand(AzureBlobUploadCommand())\n\trootCmd.AddCommand(AwsS3UploadCommand())\n\trootCmd.AddCommand(ApiProxyListCommand())\n\trootCmd.AddCommand(AnsSendEventCommand())\n\trootCmd.AddCommand(
ApiProviderListCommand())\n\trootCmd.AddCommand(TmsUploadCommand())\n\trootCmd.AddCommand(TmsExportCommand())\n\trootCmd.AddCommand(IntegrationArtifactTransportCommand())\n\trootCmd.AddCommand(AscAppUploadCommand())\n\n\taddRootFlags(rootCmd)\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.SetErrorCategory(log.ErrorConfiguration)\n\t\tlog.Entry().WithError(err).Fatal(\"configuration error\")\n\t}\n}", "func (inst *Installer) Execute(ctx context.Context) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tinst.log.Info(\"Recovered from panic\", logging.F(\"err\", r))\n\t\t\tinst.err.Append(PanicRecoverError{fmt.Sprintf(\"%v\", r)})\n\t\t}\n\t\tinst.setDone()\n\t}()\n\n\t// Open the file transaction\n\tif err := inst.openTransaction(ctx); err != nil {\n\t\tinst.err.Append(NewTransactionOpenError(err))\n\t\tinst.aborted = true\n\t\treturn\n\t}\n\n\t// Ensure we close the transaction whatever happens\n\tdefer func() {\n\t\tinst.err.Append(inst.copyPkgManagerLog())\n\t\tinst.endTransaction(ctx)\n\t}()\n\n\t// Launch the install in the background\n\tgo func() {\n\t\tdefer inst.setDone()\n\t\tif err := inst.doInstall(ctx); err != nil {\n\t\t\tinst.err.Append(err)\n\t\t}\n\t}()\n\n\t// Wait for either the install or the context to be done\n\tselect {\n\tcase <-inst.Done():\n\t\t// we're done here, let's go home\n\tcase <-ctx.Done():\n\t\tinst.aborted = true\n\t\t<-inst.Done()\n\t}\n}", "func main(){\n subcmd := \"build\"\n if(len(os.Args)>=2){ subcmd = os.Args[1] }\n fmt.Println(subcmd+\"ing \"+pName+\"...\");\n //Now run the appropriate type of operation\n var err error\n err = nil\n switch(subcmd){\n case \"build\":\n err = doBuild()\n\n case \"clean\":\n doClean()\n\n case \"package\":\n err = doPackage()\n\n case \"install\":\n err = doInstall()\n\n default:\n fmt.Println(\"Unknown action: \", subcmd)\n\tfmt.Println(\"Available actions are:\")\n\tfmt.Println(\" - make build:\", \"Compile the tools for the current system OS/ARCH\")\n\tfmt.Println(\" - 
make clean:\", \"Cleanup all the build files\")\n\tfmt.Println(\" - make package:\", \"Create a sterile \\\"dist\\\" directory ready to be copied/installed someplace\")\n\tfmt.Println(\" - make install:\", \"Install the package output to the designated directory\")\n os.Exit(1)\n }\n if(err != nil){ \n fmt.Println(\"[Error]\", err)\n os.Exit(1) \n } else {\n fmt.Println(\"[Success]\")\n os.Exit(0)\n }\n}", "func Executor(s string) {\n\ts = strings.TrimSpace(s)\n\tcmdStrings := strings.Split(s, \" \")\n\tif s == \"\" {\n\t\treturn\n\t} else if s == \"quit\" || s == \"exit\" {\n\t\tfmt.Println(\"Bye!\")\n\t\tos.Exit(0)\n\t\treturn\n\t}\n\tswitch cmdStrings[0] {\n\tcase \"install-px\":\n\t\tinstallPX()\n\tcase \"deploy\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tdeploy(\"default\", cmdStrings[1])\n\tcase \"benchmark\":\n\t\tswitch cmdStrings[1] {\n\t\tcase \"postgres\":\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql -c 'create database pxdemo;'\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/pgbench -n -i -s 50 pxdemo;\")\n\t\t\tpodExec(\"default\", \"app=postgres\", \"/usr/bin/psql pxdemo -c 'select count(*) from pgbench_accounts;'\")\n\t\tdefault:\n\t\t\tfmt.Printf(\"%s benchmark not supported\\n\", cmdStrings[1])\n\t\t}\n\tcase \"px\":\n\t\tif len(cmdStrings) < 2 {\n\t\t\tfmt.Println(\"deploy requires an application name\")\n\t\t\treturn\n\t\t}\n\t\tswitch cmdStrings[1] {\n\t\tcase \"connect\":\n\t\t\tpxInit()\n\t\tcase \"snap\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px snap requires an application name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxSnap(cmdStrings[2])\n\t\tcase \"backup\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup requires an PVC name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackup(cmdStrings[2])\n\t\tcase \"backup-status\":\n\t\t\tif len(cmdStrings) < 3 {\n\t\t\t\tfmt.Println(\"px backup-status requires a PVC 
name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpxBackupStatus(cmdStrings[2])\n\t\tdefault:\n\t\t\tfmt.Printf(\"px %s is not a valid command\\n\", cmdStrings[1])\n\t\t}\n\tcase \"pre-flight-check\":\n\t\tpreflight()\n\tdefault:\n\t\tfmt.Printf(\"%s is not a supported option\", s)\n\t}\n\treturn\n}", "func main() {\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath == \"\" {\n\t\tgopath = build.Default.GOPATH\n\t}\n\tbase := \"cd \" + gopath + \"/src/github.com/cavapoo2/eventsBoard/\"\n\tfmt.Println(\"base is \", base)\n\tinstallPaths := []string{\n\n\t\tbase + \"lib/configuration && go install\",\n\t\tbase + \"lib/helper/amqp && go install\",\n\n\t\tbase + \"lib/helper/kafka && go install\",\n\t\tbase + \"lib/msgqueue/amqp && go install\",\n\t\tbase + \"lib/msgqueue/builder && go install\",\n\t\tbase + \"lib/msgqueue/kafka && go install\",\n\t\tbase + \"lib/msgqueue && go install\",\n\n\t\tbase + \"lib/persistence/mongolayer && go install\",\n\t\tbase + \"lib/persistence/dblayer && go install\",\n\t\tbase + \"lib/persistence && go install\",\n\n\t\tbase + \"bookingservice/listener && go install\",\n\t\tbase + \"bookingservice/rest && go install\",\n\t\tbase + \"contracts && go install\",\n\t\tbase + \"eventService/rest && go install\",\n\t\tbase + \"eventService/listner && go install\",\n\t}\n\n\tbuildPaths := []string{\n\t\tbase + \"bookingservice && go build main.go\",\n\t\tbase + \"eventService && go build main.go\",\n\t\tbase + \"eventService && go build -o eventservice main.go\",\n\t\tbase + \"bookingservice && go build -o bookingservice main.go\",\n\t}\n\tfor _, p := range installPaths {\n\t\texecCommand(p)\n\t}\n\tfor _, p := range buildPaths {\n\t\texecCommand(p)\n\t}\n\n}", "func (mj *Job) exec(cmd mendercmd.Commander, timeout time.Duration) error {\n\tswitch mj.Operation {\n\tcase \"mender_install\":\n\t\t// check if we are back after rebooting\n\t\tswitch mj.menderState.Step {\n\t\tcase \"rebooting\":\n\t\t\tmj.reportProgress(\"rebooted\")\n\t\t\t// This is a naive 
implementation. Before committing one would probably check the system is working fine\n\t\t\t// and then issue the Commit, otherwise Rollback.\n\t\t\t// For example, in case Greengrass was installed, one could check that Greengrass service is up\n\t\t\t// and running.\n\t\t\t// On the other hand, to come to this stage, we know that we have network, time and date and we can connect\n\t\t\t// to AWS.\n\t\t\terr := cmd.Commit() // commit\n\t\t\tif err != nil {\n\t\t\t\tjobErr := awsiotjobs.JobError{ErrCode: \"ERR_MENDER_COMMIT\", ErrMessage: \"error committing\"}\n\t\t\t\tmj.fail(jobErr)\n\t\t\t\treturn jobErr\n\t\t\t}\n\t\t\tmj.success(\"committed\")\n\t\tdefault:\n\t\t\t// If the step is \"installing\" it could be for different cases\n\t\t\t// 1- the system rebooted/lost connection and the installation was not completed.\n\t\t\t// 2- Installation was completed, system rebooted, but the state update was not performed\n\t\t\t// In case 1 we should restart the installation process\n\t\t\t// In case 2 we should either make sure this does not happen - ie make the reboot conditional to the\n\t\t\t// correct persistance of the \"rebooting\" state; or rely on some other mechanism to detect that the\n\t\t\t// firmware has been successfully updated and the system has rebooted and is working correctly\n\n\t\t\tch := make(chan string)\n\t\t\tdone := make(chan error)\n\t\t\tmj.progress(\"installing\")\n\t\t\tgo cmd.Install(mj.URL, done, ch)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase progress := <-ch:\n\t\t\t\t\tlog.Printf(\"%s\", progress)\n\t\t\t\t\tmj.reportProgress(progress) // report progress via MQTT\n\t\t\t\tcase err := <-done:\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tjobErr := awsiotjobs.JobError{ErrCode: \"ERR_MENDER_INSTALL_FAILED\", ErrMessage: err.Error()}\n\t\t\t\t\t\tmj.fail(jobErr)\n\t\t\t\t\t\treturn jobErr\n\t\t\t\t\t}\n\t\t\t\t\t// This should be changed - setting the rebooting state might fail\n\t\t\t\t\t// and when the system startsup will find a wrong state 
and will start installing the software again\n\t\t\t\t\t// Must find a way to make this deterministic - maybe relying on mender local state?\n\t\t\t\t\tmj.progress(\"rebooting\")\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tcmd := exec.Command(\"shutdown\", \"-r\", \"now\")\n\t\t\t\t\t\tcmd.Start()\n\t\t\t\t\t\terr := cmd.Wait()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tfmt.Println(\"Could not reboot the system\")\n\t\t\t\t\t\t\tmj.fail(awsiotjobs.JobError{ErrCode: \"ERROR_UNABLE_TO_REBOOT\", ErrMessage: err.Error()})\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfmt.Println(\"rebooting...\")\n\t\t\t\t\t\tmj.execution.Terminate() //Should be called by the agent code and not the library - based on signalling from the OS when shutting down\n\t\t\t\t\t}()\n\t\t\t\t\treturn nil\n\t\t\t\tcase <-time.After(timeout): // timeout value can be in doc\n\t\t\t\t\tfmt.Printf(\"install timeout\")\n\t\t\t\t\tjobErr := awsiotjobs.JobError{ErrCode: \"ERR_MENDER_INSTALL_TIMEOUT\", ErrMessage: \"mender timed out\"}\n\t\t\t\t\tmj.fail(jobErr)\n\t\t\t\t\treturn jobErr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\tcase \"mender_rollback\":\n\t\terr := cmd.Rollback()\n\t\tif err != nil {\n\t\t\tmj.fail(awsiotjobs.JobError{ErrCode: \"ERR_MENDER_ROLLBACK_FAIL\", ErrMessage: \"unable to run rollback\"})\n\t\t\treturn err\n\t\t}\n\t\tmj.success(\"rolled_back\")\n\t}\n\treturn nil\n}", "func (d *Docker) Run(p *packages.Package, e *Execution) error {\n\tif p == nil {\n\t\treturn fmt.Errorf(\"No package provided\")\n\t}\n\tif p.Image == \"\" {\n\t\treturn fmt.Errorf(\"Provided package does not contain any image\")\n\t}\n\tif e == nil {\n\t\treturn fmt.Errorf(\"No execution provided\")\n\t}\n\tdockerArgs := []string{\n\t\td.Path,\n\t\t\"run\",\n\t\t\"--interactive\",\n\t\t\"--rm\",\n\t\t\"--workdir\", e.WorkingDir,\n\t\t\"--init\",\n\t}\n\targs := e.Args\n\tif p.Entrypoint != nil {\n\t\tif len(p.Entrypoint) > 0 {\n\t\t\tdockerArgs = append(dockerArgs, \"--entrypoint\", p.Entrypoint[0])\n\t\t\tif 
len(p.Entrypoint) > 1 {\n\t\t\t\targs = append(p.Entrypoint[1:], args...)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, portmap := range p.Ports {\n\t\tdockerArgs = append(dockerArgs, \"-p\")\n\t\tdockerArgs = append(dockerArgs, portmap)\n\t}\n\n\tfor _, network := range p.Networks {\n\t\tdockerArgs = append(dockerArgs, \"--net\")\n\t\tdockerArgs = append(dockerArgs, network)\n\t}\n\tif e.IsTTYOpened {\n\t\tdockerArgs = append(dockerArgs, \"--tty\")\n\t}\n\tfor _, envvar := range e.Environment {\n\t\tdockerArgs = append(dockerArgs, \"-e\")\n\t\tdockerArgs = append(dockerArgs, envvar)\n\t}\n\tfor _, volume := range e.Volumes {\n\t\tdockerArgs = append(dockerArgs, \"-v\")\n\t\tdockerArgs = append(dockerArgs, volume)\n\t}\n\tif !p.KeepContainerUser {\n\t\tif e.User != nil {\n\t\t\tdockerArgs = append(dockerArgs, \"-u\")\n\t\t\tdockerArgs = append(dockerArgs, e.User.Uid+\":\"+e.User.Gid)\n\t\t}\n\t}\n\tdockerArgs = append(dockerArgs, p.Image)\n\tdockerArgs = append(dockerArgs, args...)\n\tif d.Exec == nil {\n\t\treturn fmt.Errorf(\"No docker executable provided\")\n\t}\n\treturn d.Exec(d.Path, dockerArgs, os.Environ())\n}", "func main() {\n\n\tdir, _ := os.Getwd()\n\n\t// Build Runtime\n\tfmt.Println(\"**** Building Runtime ****\")\n\truntimeDir, _ := filepath.Abs(filepath.Join(dir, \"..\", \"runtime\", \"js\"))\n\terr := os.Chdir(runtimeDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trunCommand(\"npm\", \"install\")\n\trunCommand(\"npm\", \"run\", \"build\")\n\n\t// Install Wails\n\tfmt.Println(\"**** Installing Wails locally ****\")\n\texecDir, _ := filepath.Abs(filepath.Join(dir, \"..\", \"cmd\", \"wails\"))\n\terr = os.Chdir(execDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trunCommand(\"go\", \"install\")\n\n\tbaseDir, _ := filepath.Abs(filepath.Join(dir, \"..\"))\n\terr = os.Chdir(baseDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trunCommand(\"go\", \"mod\", \"tidy\")\n}", "func doRuncInitCmd(cmd *cobra.Command, args []string) {\n runtime.GOMAXPROCS(1)\n 
runtime.LockOSThread()\n\n factory, _ := libcontainer.New(\"\")\n if err := factory.StartInitialization(); err != nil {\n log.Fatal(err)\n }\n\n panic(\"Could not initialise pid 0 for container\")\n}", "func RunPreExecutor(spec *opsv1alpha1.GitOps, dir string) error {\n\tif spec.Spec.Templating != nil {\n\t\tt := spec.Spec.Templating\n\t\tif t.Executor != nil {\n\n\t\t\t// Create a new context and add a timeout to it\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\t\t\tdefer cancel() // The cancel should be deferred so resources are cleaned up\n\t\t\tcmd := exec.CommandContext(ctx, t.Executor.Exec)\n\t\t\tcmd.Dir = GetGitRootDir(spec)\n\t\t\tif spec.Spec.Templating.SourceFolder != \"\" {\n\t\t\t\tcmd.Dir += \"/\" + spec.Spec.Templating.SourceFolder\n\t\t\t}\n\n\t\t\tif len(t.Executor.Args) >= 1 {\n\t\t\t\ta := []string{t.Executor.Exec}\n\t\t\t\tfor _, add := range t.Executor.Args {\n\t\t\t\t\ta = append(a, add)\n\t\t\t\t}\n\t\t\t\tcmd.Args = a\n\t\t\t}\n\n\t\t\tout, err := cmd.CombinedOutput()\n\t\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\t\tlog.Error(err, \"Command timed out\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"Command failed\", \"output\", string(out))\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (Golang) Prep(gopath string, meta Metadata, local bool) (err error) {\n\tlogrus.Debug(\"Running Prep Commands\")\n\tvar codepath string\n\tif local {\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed getting CWD\")\n\t\t\treturn err\n\t\t}\n\n\t\tcodepath = wd\n\n\t} else {\n\t\tcodepath = fmt.Sprintf(\"%s/src/%s\", gopath, meta.Package)\n\n\t\terr = os.Chdir(codepath)\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed to cwd to %s\", gopath)\n\t\t\treturn err\n\t\t}\n\n\t\t// set the gopath in the environment so that we can interpolate it below\n\t\t_ = os.Setenv(\"GOPATH\", gopath)\n\t}\n\n\tfor _, cmdString := 
range meta.BuildInfo.PrepCommands {\n\t\t// interpolate any environment variables into the command string\n\t\tcmdString, err = envsubst.String(cmdString)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"failed to substitute env vars\")\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := exec.Command(\"bash\", \"-c\", cmdString)\n\n\t\tlogrus.Debugf(\"Running %q with GOPATH=%s\", cmdString, gopath)\n\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\terr = cmd.Run()\n\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed running %q\", cmdString)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogrus.Debugf(\"Prep steps for %s complete\", meta.Package)\n\n\treturn err\n}", "func (d *db) Exec(c *cli.Context) error {\n\tlogrus.Debug(\"executing workload from provided configuration\")\n\n\t// check if either the all or alter tables action was provided\n\tif d.Actions.All || d.Actions.AlterTables {\n\t\t// alter required tables in the database\n\t\terr := d.Alter()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// check if either the all or encrypt users action was provided\n\tif d.Actions.All || d.Actions.EncryptUsers {\n\t\t// encrypt user fields in the database\n\t\terr := d.Encrypt()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// check if either the all or sync repo counter action was provided\n\tif d.Actions.All || d.Actions.SyncCounter {\n\t\t// sync all repo counter values in the database\n\t\terr := d.Sync()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *dockerExec) install(ctx context.Context) error {\n\tif e.Manifest.Result.Fileset.Map != nil || e.Manifest.Result.Fileset.List != nil {\n\t\treturn nil\n\t}\n\tif outputs := e.Config.OutputIsDir; outputs != nil {\n\t\te.Manifest.Result.Fileset.List = make([]reflow.Fileset, len(outputs))\n\t\tfor i := range outputs {\n\t\t\tvar err error\n\t\t\te.Manifest.Result.Fileset.List[i], err =\n\t\t\t\te.Executor.install(ctx, e.path(\"return\", strconv.Itoa(i)), true, 
&e.staging)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tvar err error\n\te.Manifest.Result.Fileset, err = e.Executor.install(ctx, e.path(\"return\", \"default\"), true, &e.staging)\n\treturn err\n}", "func main() {\n\tcmd.Execute(version, gitCommit, buildDate)\n}", "func doPrepare(env env.Project, options *PrepareOptions) (err error) {\n\t// Create the dep manager\n\tdepManager := dep.DepManager{Env: env}\n\tif !depManager.IsInitialized() {\n\t\t// This is an old app\n\t\terr = MigrateOldApp(env, depManager)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif options == nil {\n\t\toptions = &PrepareOptions{}\n\t}\n\n\t// Call external preprocessor\n\tif options.PreProcessor != nil {\n\t\terr = options.PreProcessor.PrepareForBuild(env)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//generate metadata\n\terr = generateGoMetadata(env)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//load descriptor\n\tappJson, err := fgutil.LoadLocalFile(filepath.Join(env.GetRootDir(), \"flogo.json\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdescriptor, err := ParseAppDescriptor(appJson)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremoveEmbeddedAppGoFile(env.GetAppDir())\n\tremoveShimGoFiles(env.GetAppDir())\n\n\tif options.Shim != \"\" {\n\n\t\tremoveMainGoFile(env.GetAppDir()) //todo maybe rename if it exists\n\t\tcreateShimSupportGoFile(env.GetAppDir(), appJson, options.EmbedConfig)\n\n\t\tfmt.Println(\"Shim:\", options.Shim)\n\n\t\tfor _, value := range descriptor.Triggers {\n\n\t\t\tfmt.Println(\"Id:\", value.Id)\n\t\t\tif value.Id == options.Shim {\n\t\t\t\ttriggerPath := filepath.Join(env.GetVendorSrcDir(), value.Ref, \"trigger.json\")\n\n\t\t\t\tmdJson, err := fgutil.LoadLocalFile(triggerPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmetadata, err := ParseTriggerMetadata(mdJson)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tfmt.Println(\"Shim Metadata:\", 
metadata.Shim)\n\n\t\t\t\tif metadata.Shim != \"\" {\n\n\t\t\t\t\t//todo blow up if shim file not found\n\t\t\t\t\tshimFilePath := filepath.Join(env.GetVendorSrcDir(), value.Ref, dirShim, fileShimGo)\n\t\t\t\t\tfmt.Println(\"Shim File:\", shimFilePath)\n\t\t\t\t\tfgutil.CopyFile(shimFilePath, filepath.Join(env.GetAppDir(), fileShimGo))\n\n\t\t\t\t\t// ensure deps after the shim.go has been copied to main.go...\n\t\t\t\t\tdepManager.Ensure()\n\n\t\t\t\t\t// This is a bit of a workaround, will resolve with a better solution in the future\n\t\t\t\t\t// generate metadata again... ensure will remove it\n\t\t\t\t\terr = generateGoMetadata(env)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check if this shim based trigger has a gobuild file. If the trigger has a gobuild\n\t\t\t\t\t// execute that file, otherwise check if there is a Makefile to execute\n\t\t\t\t\tgobuildFilePath := filepath.Join(env.GetVendorSrcDir(), value.Ref, dirShim, gobuildFile)\n\t\t\t\t\tif _, err := os.Stat(gobuildFilePath); err == nil {\n\t\t\t\t\t\tfmt.Println(\"This trigger makes use if a gobuild file...\")\n\t\t\t\t\t\tfmt.Println(\"Go build file:\", gobuildFilePath)\n\t\t\t\t\t\tfgutil.CopyFile(gobuildFilePath, filepath.Join(env.GetAppDir(), gobuildFile))\n\n\t\t\t\t\t\t// Execute go run gobuild.go\n\t\t\t\t\t\tcmd := exec.Command(\"go\", \"run\", gobuildFile, env.GetAppDir())\n\t\t\t\t\t\tcmd.Stdout = os.Stdout\n\t\t\t\t\t\tcmd.Stderr = os.Stderr\n\t\t\t\t\t\tcmd.Dir = env.GetAppDir()\n\t\t\t\t\t\tcmd.Env = append(os.Environ(),\n\t\t\t\t\t\t\tfmt.Sprintf(\"GOPATH=%s\", env.GetRootDir()),\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\terr = cmd.Run()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if metadata.Shim == \"plugin\" {\n\t\t\t\t\t\t//look for Makefile and execute it\n\t\t\t\t\t\tmakeFilePath := filepath.Join(env.GetVendorSrcDir(), value.Ref, dirShim, makeFile)\n\t\t\t\t\t\tfmt.Println(\"Make File:\", 
makeFilePath)\n\t\t\t\t\t\tfgutil.CopyFile(makeFilePath, filepath.Join(env.GetAppDir(), makeFile))\n\n\t\t\t\t\t\t// Execute make\n\t\t\t\t\t\tcmd := exec.Command(\"make\", \"-C\", env.GetAppDir())\n\t\t\t\t\t\tcmd.Stdout = os.Stdout\n\t\t\t\t\t\tcmd.Stderr = os.Stderr\n\t\t\t\t\t\tcmd.Env = append(os.Environ(),\n\t\t\t\t\t\t\tfmt.Sprintf(\"GOPATH=%s\", env.GetRootDir()),\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\terr = cmd.Run()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t} else if options.EmbedConfig {\n\t\tcreateEmbeddedAppGoFile(env.GetAppDir(), appJson)\n\t}\n\treturn\n}", "func (c *ContainerExecutor) Run(opts ifc.RunOptions) error {\n\tlog.Print(\"starting generic container\")\n\n\tif c.Options.ClusterName != \"\" {\n\t\tcleanup, err := c.SetKubeConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer cleanup()\n\t}\n\n\tinput, err := bundleReader(c.ExecutorBundle)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO this logic is redundant in executor package, move it to pkg/container\n\tvar output io.Writer\n\tif c.ResultsDir == \"\" {\n\t\t// set output only if the output if resulting directory is not defined\n\t\toutput = os.Stdout\n\t}\n\tif err = c.setConfig(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO check the executor type when dryrun is set\n\tif opts.DryRun {\n\t\tlog.Print(\"DryRun execution finished\")\n\t\treturn nil\n\t}\n\n\terr = c.ClientFunc(c.ResultsDir, input, output, c.Container, c.MountBasePath).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Print(\"execution of the generic container finished\")\n\treturn nil\n}", "func HelperInitRelatedExecCommand(cmd string, args []string) (string, error) {\n\tif cmd == \"kubeadm\" {\n\t\tif len(args) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"Test setup error - missing argument(s) for kubeadm command\")\n\t\t}\n\t\tswitch args[0] {\n\t\tcase \"version\":\n\t\t\treturn \"v1.11.0\", nil\n\t\tcase 
\"token\":\n\t\t\treturn \"zs6do0.rlyf5fbz9abknbc4\", nil\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"Test setup error - need mock for kubeadm %q command\", args[0])\n\t\t}\n\n\t}\n\tout, err := lazyjack.OsExecCommand(cmd, args)\n\treturn out, err\n}", "func NeedsExec(t testing.TB) {\n\ttryExecOnce.Do(func() {\n\t\ttryExecErr = tryExec()\n\t})\n\tif tryExecErr != nil {\n\t\tt.Helper()\n\t\tt.Skipf(\"skipping test: cannot exec subprocess on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, tryExecErr)\n\t}\n}", "func pkg(c *cobra.Command, _ []string) {\n\tvar err error\n\t// cwd, err := os.Getwd()\n\n\tif err != nil {\n\t\tfmt.Println(\"lambda-phage can't find your package!\")\n\t\treturn\n\t}\n\n\tbinName := \"lambda-phage-\" + cuid.New()\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", \"/tmp/\"+binName)\n\tcmd.Stderr = os.Stderr\n\n\t// copy the environment from parent proc\n\t// and add flags for\n\tpEnv := os.Environ()\n\tenv := make([]string, len(pEnv)+2)\n\tcopy(env, pEnv)\n\tenv[len(env)-2] = \"GOOS=linux\"\n\tenv[len(env)-1] = \"GOARCH=amd64\"\n\tcmd.Env = env\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tfmt.Printf(\"Error building go executable, %s\", err.Error())\n\t\treturn\n\t}\n\n\tzFile, err := newZipFile(binName + \".zip\")\n\tif err != nil {\n\t\tzipFileFail(err)\n\t\treturn\n\t}\n\n\t_, err = zFile.AddFile(\"/tmp/\" + binName)\n\n\tif err != nil {\n\t\tzipFileFail(err)\n\t\treturn\n\t}\n\n\t_, err = zFile.AddString(\"index.js\", jsloader)\n\n\tif err != nil {\n\t\tzipFileFail(err)\n\t\treturn\n\t}\n\n\terr = zFile.Close()\n\n}", "func (r *Runner) setup(application *config.Application, wg *sync.WaitGroup) error {\n\tdefer wg.Done()\n\n\tif err := r.checkApplicationExecutableEnvironment(application); err == nil {\n\t\treturn nil\n\t}\n\n\tif len(application.Setup) == 0 {\n\t\treturn nil\n\t}\n\n\thasSetup = true\n\n\tr.view.Writef(\"⚙️ Please wait while setup of application '%s'...\\n\", application.Name)\n\n\tstdoutStream := NewLogstreamer(StdOut, 
application.Name, r.view)\n\tstderrStream := NewLogstreamer(StdErr, application.Name, r.view)\n\n\tvar setup = strings.Join(application.Setup, \"; \")\n\n\tsetup = strings.Replace(setup, \"~\", \"$HOME\", -1)\n\tsetup = os.ExpandEnv(setup)\n\n\tcommands := strings.Join(application.Setup, \"\\n\")\n\tr.view.Writef(\"👉 Running commands:\\n%s\\n\\n\", commands)\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", setup)\n\tcmd.Stdout = stdoutStream\n\tcmd.Stderr = stderrStream\n\tcmd.Env = os.Environ()\n\n\tsetup = os.ExpandEnv(setup)\n\n\tcmd.Run()\n\n\treturn nil\n}", "func DoSetup() {\n\tSetLoggerVerbosity()\n\tgplog.Verbose(\"Backup Command: %s\", os.Args)\n\tgplog.Info(\"gpbackup version = %s\", GetVersion())\n\n\tutils.CheckGpexpandRunning(utils.BackupPreventedByGpexpandMessage)\n\ttimestamp := history.CurrentTimestamp()\n\tcreateBackupLockFile(timestamp)\n\tinitializeConnectionPool(timestamp)\n\tgplog.Info(\"Greenplum Database Version = %s\", connectionPool.Version.VersionString)\n\n\tgplog.Info(\"Starting backup of database %s\", MustGetFlagString(options.DBNAME))\n\topts, err := options.NewOptions(cmdFlags)\n\tgplog.FatalOnError(err)\n\n\tValidateAndProcessFilterLists(opts)\n\tincludeOids := GetOidsFromRelationList(IncludedRelationFqns)\n\terr = ExpandIncludesForPartitions(connectionPool, opts, includeOids, cmdFlags)\n\tgplog.FatalOnError(err)\n\n\tclusterConfigConn := dbconn.NewDBConnFromEnvironment(MustGetFlagString(options.DBNAME))\n\tclusterConfigConn.MustConnect(1)\n\n\tsegConfig := cluster.MustGetSegmentConfiguration(clusterConfigConn)\n\tglobalCluster = cluster.NewCluster(segConfig)\n\tclusterConfigConn.Close()\n\n\tglobalFPInfo = filepath.NewFilePathInfo(globalCluster, MustGetFlagString(options.BACKUP_DIR), timestamp, \"\")\n\tif MustGetFlagBool(options.METADATA_ONLY) {\n\t\t_, err = globalCluster.ExecuteLocalCommand(fmt.Sprintf(\"mkdir -p %s\", globalFPInfo.GetDirForContent(-1)))\n\t\tgplog.FatalOnError(err)\n\t} else 
{\n\t\tcreateBackupDirectoriesOnAllHosts()\n\t}\n\tglobalTOC = &toc.TOC{}\n\tglobalTOC.InitializeMetadataEntryMap()\n\tutils.InitializePipeThroughParameters(!MustGetFlagBool(options.NO_COMPRESSION), MustGetFlagString(options.COMPRESSION_TYPE), MustGetFlagInt(options.COMPRESSION_LEVEL))\n\tgetQuotedRoleNames(connectionPool)\n\n\tpluginConfigFlag := MustGetFlagString(options.PLUGIN_CONFIG)\n\n\tif pluginConfigFlag != \"\" {\n\t\tpluginConfig, err = utils.ReadPluginConfig(pluginConfigFlag)\n\t\tgplog.FatalOnError(err)\n\t\tconfigFilename := path.Base(pluginConfig.ConfigPath)\n\t\tconfigDirname := path.Dir(pluginConfig.ConfigPath)\n\t\tpluginConfig.ConfigPath = path.Join(configDirname, timestamp+\"_\"+configFilename)\n\t\t_ = cmdFlags.Set(options.PLUGIN_CONFIG, pluginConfig.ConfigPath)\n\t\tgplog.Debug(\"Plugin config path: %s\", pluginConfig.ConfigPath)\n\t}\n\n\tinitializeBackupReport(*opts)\n\n\tif pluginConfigFlag != \"\" {\n\t\tbackupReport.PluginVersion = pluginConfig.CheckPluginExistsOnAllHosts(globalCluster)\n\t\tpluginConfig.CopyPluginConfigToAllHosts(globalCluster)\n\t\tpluginConfig.SetupPluginForBackup(globalCluster, globalFPInfo)\n\t}\n}", "func installAll() {\n\t// conf file must be read in before anything else, to initialize permissions etc\n\tconf_file.Read()\n\tconf.Vals.ConfLock.RLock()\n\tif conf.Vals.Initialized == false {\n\t\tpanic(\"the conf.Vals global conf struct has not been initialized\")\n\t}\n\n\t// launch a background poller to keep conns to aws alive\n\tif conf.Vals.Network.DynamoDB.KeepAlive {\n\t\tlog.Printf(\"launching background keepalive\")\n\t\tgo keepalive.KeepAlive([]string{})\n\t}\n\n\t// deal with iam, or not\n\tif conf.Vals.UseIAM {\n\t\tiam_ready_chan := make(chan bool)\n\t\tgo conf_iam.GoIAM(iam_ready_chan)\n\t\t_ = <-iam_ready_chan\n\t}\n\tconf.Vals.ConfLock.RUnlock()\n\n\tvar get1 get_item.Request\n\tvar put1 put_item.Request\n\tvar up1 update_item.Request\n\tvar upt1 update_table.Request\n\tvar del1 
delete_item.Request\n\tvar delt1 delete_table.Request\n\tvar batchw1 batch_write_item.Request\n\tvar batchg1 batch_get_item.Request\n\tvar create1 create.Request\n\tvar query1 query.Request\n\tvar scan1 scan.Request\n\tvar desc1 describe_table.Request\n\tvar list1 list_tables.Request\n\tfmt.Printf(\"%v%v%v%v%v%v%v%v%v%v%v%v%v\", get1, put1, up1, upt1, del1, batchw1, batchg1, create1, delt1, query1, scan1, desc1, list1)\n}", "func bootstrapAppImageBuild(c *cli.Context) error {\n\n\t// check if the number of arguments are stictly 1, if not\n\t// return\n\tif c.NArg() != 1 {\n\t\tlog.Fatal(\"Please specify the path to the AppDir which you would like to aid.\")\n\n\t}\n\tfileToAppDir := c.Args().Get(0)\n\n\t// does the file exist? if not early-exit\n\tif ! helpers.CheckIfFileOrFolderExists(fileToAppDir) {\n\t\tlog.Fatal(\"The specified directory does not exist\")\n\t}\n\n\t// Add the location of the executable to the $PATH\n\thelpers.AddHereToPath()\n\n\n\t// Check for needed files on $PATH\n\ttools := []string{\"file\", \"mksquashfs\", \"desktop-file-validate\", \"uploadtool\", \"patchelf\", \"desktop-file-validate\", \"patchelf\"} // \"sh\", \"strings\", \"grep\" no longer needed?; \"curl\" is needed for uploading only, \"glib-compile-schemas\" is needed in some cases only\n\t// curl is needed by uploadtool; TODO: Replace uploadtool with native Go code\n\t// \"sh\", \"strings\", \"grep\" are needed by appdirtool to parse qt_prfxpath; TODO: Replace with native Go code\n\tfor _, t := range tools {\n\t\t_, err := exec.LookPath(t)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Required helper tool\", t, \"missing\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// Check whether we have a sufficient version of mksquashfs for -offset\n\tif helpers.CheckIfSquashfsVersionSufficient(\"mksquashfs\") == false {\n\t\tos.Exit(1)\n\t}\n\n\t// Check if is directory, then assume we want to convert an AppDir into an AppImage\n\tfileToAppDir, _ = filepath.EvalSymlinks(fileToAppDir)\n\tif info, err := 
os.Stat(fileToAppDir); err == nil && info.IsDir() {\n\t\tGenerateAppImage(fileToAppDir)\n\t} else {\n\t\t// TODO: If it is a file, then check if it is an AppImage and if yes, extract it\n\t\tlog.Fatal(\"Supplied argument is not a directory \\n\" +\n\t\t\t\"To extract an AppImage, run it with --appimage-extract \\n\")\n\n\t}\n\treturn nil\n}", "func Execute() {\n\tvar err error\n\tvar retryAttempts uint = 5\n\tvar retryDelay time.Duration = 100\n\n\tfs = filesystem.NewOsFs()\n\tciutils = install.DefaultInstall()\n\tfslock = &filesystem.OsLock{\n\t\tRetryAttempts: retryAttempts,\n\t\tRetryDelay: retryDelay * time.Millisecond,\n\t\tRetryDelayType: retry.BackOffDelay,\n\t}\n\n\tsecretsClient, err = secrets.NewClient()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err = rootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (m *manager) Install(ctx context.Context) error {\n\tsteps := map[api.InstallPhase][]steps.Step{\n\t\tapi.InstallPhaseBootstrap: m.bootstrap(),\n\t\tapi.InstallPhaseRemoveBootstrap: {\n\t\t\tsteps.Action(m.initializeKubernetesClients),\n\t\t\tsteps.Action(m.initializeOperatorDeployer), // depends on kube clients\n\t\t\tsteps.Action(m.removeBootstrap),\n\t\t\tsteps.Action(m.removeBootstrapIgnition),\n\t\t\tsteps.Action(m.configureAPIServerCertificate),\n\t\t\tsteps.Condition(m.apiServersReady, 30*time.Minute, true),\n\t\t\tsteps.Condition(m.minimumWorkerNodesReady, 30*time.Minute, true),\n\t\t\tsteps.Condition(m.operatorConsoleExists, 30*time.Minute, true),\n\t\t\tsteps.Action(m.updateConsoleBranding),\n\t\t\tsteps.Condition(m.operatorConsoleReady, 20*time.Minute, true),\n\t\t\tsteps.Action(m.disableSamples),\n\t\t\tsteps.Action(m.disableOperatorHubSources),\n\t\t\tsteps.Action(m.disableUpdates),\n\t\t\tsteps.Condition(m.clusterVersionReady, 30*time.Minute, true),\n\t\t\tsteps.Condition(m.aroDeploymentReady, 20*time.Minute, 
true),\n\t\t\tsteps.Action(m.updateClusterData),\n\t\t\tsteps.Action(m.configureIngressCertificate),\n\t\t\tsteps.Condition(m.ingressControllerReady, 30*time.Minute, true),\n\t\t\tsteps.Action(m.configureDefaultStorageClass),\n\t\t\tsteps.Action(m.finishInstallation),\n\t\t},\n\t}\n\n\terr := m.startInstallation(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif steps[m.doc.OpenShiftCluster.Properties.Install.Phase] == nil {\n\t\treturn fmt.Errorf(\"unrecognised phase %s\", m.doc.OpenShiftCluster.Properties.Install.Phase)\n\t}\n\tm.log.Printf(\"starting phase %s\", m.doc.OpenShiftCluster.Properties.Install.Phase)\n\treturn m.runSteps(ctx, steps[m.doc.OpenShiftCluster.Properties.Install.Phase], \"install\")\n}", "func AWSInstall() {\n\t// check if ansible is installed\n\tcommon.DependencyCheck(\"ansible\")\n\tSetClusterName()\n\t// Copy the configuraton files as indicated in the kubespray docs\n\tif _, err := os.Stat(\"./inventory/\" + common.Name + \"/installer\"); err == nil {\n\t\tfmt.Println(\"Configuration folder already exists\")\n\t} else {\n\t\tos.MkdirAll(\"./inventory/\"+common.Name+\"/installer\", 0755)\n\t\tmvHost := exec.Command(\"mv\", \"./inventory/hosts\", \"./inventory/\"+common.Name+\"/hosts\")\n\t\tmvHost.Run()\n\t\tmvHost.Wait()\n\t\tmvShhBastion := exec.Command(\"cp\", \"./kubespray/ssh-bastion.conf\", \"./inventory/\"+common.Name+\"/ssh-bastion.conf\")\n\t\tmvShhBastion.Run()\n\t\tmvShhBastion.Wait()\n\t\t//os.MkdirAll(\"./inventory/\"+common.Name+\"/installer/group_vars\", 0755)\n\t\tcpSample := exec.Command(\"cp\", \"-rfp\", \"./kubespray/inventory/sample/.\", \"./inventory/\"+common.Name+\"/installer/\")\n\t\tcpSample.Run()\n\t\tcpSample.Wait()\n\n\t\tcpKube := exec.Command(\"cp\", \"-rfp\", \"./kubespray/.\", \"./inventory/\"+common.Name+\"/installer/\")\n\t\tcpKube.Run()\n\t\tcpKube.Wait()\n\n\t\tmvInstallerHosts := exec.Command(\"cp\", \"./inventory/\"+common.Name+\"/hosts\", 
\"./inventory/\"+common.Name+\"/installer/hosts\")\n\t\tmvInstallerHosts.Run()\n\t\tmvInstallerHosts.Wait()\n\t\tmvProvisionerHosts := exec.Command(\"cp\", \"./inventory/\"+common.Name+\"/hosts\", \"./inventory/\"+common.Name+\"/installer/hosts\")\n\t\tmvProvisionerHosts.Run()\n\t\tmvProvisionerHosts.Wait()\n\n\t\t//Start Kubernetes Installation\n\t\t//Enable load balancer api access and copy the kubeconfig file locally\n\t\tloadBalancerName, err := exec.Command(\"sh\", \"-c\", \"grep apiserver_loadbalancer_domain_name= ./inventory/\"+common.Name+\"/installer/hosts | cut -d'=' -f2\").CombinedOutput()\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Problem getting the load balancer domain name\", err)\n\t\t} else {\n\t\t\tvar groupVars *os.File\n\t\t\t//Make a copy of kubeconfig on Ansible host\n\t\t\tif kubesprayVersion == \"develop\" || kubesprayVersion == \"version-0-7\" {\n\t\t\t\t// Set Kube Network Proxy\n\t\t\t\tSetNetworkPlugin(\"./inventory/\" + common.Name + \"/installer/group_vars/k8s-cluster\")\n\t\t\t\tprepareInventoryClusterFile(\"./inventory/\" + common.Name + \"/installer/group_vars/k8s-cluster/k8s-cluster.yml\")\n\t\t\t\tgroupVars = prepareInventoryGroupAllFile(\"./inventory/\" + common.Name + \"/installer/group_vars/all/all.yml\")\n\t\t\t} else {\n\t\t\t\t// Set Kube Network Proxy\n\t\t\t\tSetNetworkPlugin(\"./inventory/\" + common.Name + \"/installer/group_vars\")\n\t\t\t\tprepareInventoryClusterFile(\"./inventory/\" + common.Name + \"/installer/group_vars/k8s-cluster.yml\")\n\t\t\t\tgroupVars = prepareInventoryGroupAllFile(\"./inventory/\" + common.Name + \"/installer/group_vars/all.yml\")\n\t\t\t}\n\t\t\tdefer groupVars.Close()\n\t\t\t// Resolve Load Balancer Domain Name and pick the first IP\n\n\t\t\telbNameRaw, _ := exec.Command(\"sh\", \"-c\", \"grep apiserver_loadbalancer_domain_name= ./inventory/\"+common.Name+\"/installer/hosts | cut -d'=' -f2 | sed 's/\\\"//g'\").CombinedOutput()\n\n\t\t\t// Convert the Domain name to string, strip all spaces 
so that Lookup does not return errors\n\t\t\telbName := strings.TrimSpace(string(elbNameRaw))\n\t\t\tfmt.Println(elbName)\n\t\t\tnode, err := net.LookupHost(elbName)\n\t\t\tcommon.ErrorCheck(\"Error resolving ELB name: %v\", err)\n\t\t\telbIP := node[0]\n\t\t\tfmt.Println(node)\n\n\t\t\tDomainName := strings.TrimSpace(string(loadBalancerName))\n\t\t\tloadBalancerDomainName := \"apiserver_loadbalancer_domain_name: \" + DomainName\n\n\t\t\tfmt.Fprintf(groupVars, \"#Set cloud provider to AWS\\n\")\n\t\t\tfmt.Fprintf(groupVars, \"cloud_provider: 'aws'\\n\")\n\t\t\tfmt.Fprintf(groupVars, \"#Load Balancer Configuration\\n\")\n\t\t\tfmt.Fprintf(groupVars, \"loadbalancer_apiserver_localhost: false\\n\")\n\t\t\tfmt.Fprintf(groupVars, \"%s\\n\", loadBalancerDomainName)\n\t\t\tfmt.Fprintf(groupVars, \"loadbalancer_apiserver:\\n\")\n\t\t\tfmt.Fprintf(groupVars, \" address: %s\\n\", elbIP)\n\t\t\tfmt.Fprintf(groupVars, \" port: 6443\\n\")\n\t\t}\n\t}\n\n\tsshUser, osLabel := distSelect()\n\tinstaller.RunPlaybook(\"./inventory/\"+common.Name+\"/installer/\", \"cluster.yml\", sshUser, osLabel)\n\n\treturn\n}", "func doRequiredFiles() error {\n\tvar filePath, fileURL string\n\tabf, err := be.GetAppWorkingFolder()\n\tlf, _ := be.GetAppWorkingFolder()\n\tlf = lf + be.CAppLogfile\n\n\t//ex, err := os.Executable()\n\t//if err != nil {\n\t//\treturn fmt.Errorf(\"Unable to retrieve running binary: %v \", err)\n\t//}\n\t//abf := be.AddTrailingSlash(filepath.Dir(ex))\n\n\tbwconf, err := be.GetConfigStruct(\"\", true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to get CLIConfigStruct: %v \", err)\n\t}\n\tswitch bwconf.ProjectType {\n\tcase be.PTBitcoinPlus:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\treturn fmt.Errorf(\"Windows is not currently supported for BitcoinPlus: %v \", err)\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\treturn fmt.Errorf(\"ARM32 is not currently supported by BitcoinPlus: %v \", err)\n\t\t\tcase 
\"arm64\":\n\t\t\t\treturn fmt.Errorf(\"ARM64 is not currently supported by BitcoinPlus: %v \", err)\n\t\t\tcase \"386\":\n\t\t\t\tfilePath = abf + be.CDFFileLinux32BitcoinPlus\n\t\t\t\tfileURL = be.CDownloadURLBitcoinPlus + be.CDFFileLinux32BitcoinPlus\n\t\t\tcase \"amd64\":\n\t\t\t\tfilePath = abf + be.CDFFileLinux64BitcoinPlus\n\t\t\t\tfileURL = be.CDownloadURLBitcoinPlus + be.CDFFileLinux64BitcoinPlus\n\t\t\t}\n\t\t}\n\tcase be.PTDeVault:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFWindowsDeVault\n\t\t\tfileURL = be.CDownloadURLDeVault + be.CDFWindowsDeVault\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFRPiDeVault\n\t\t\tfileURL = be.CDownloadURLDeVault + be.CDFRPiDeVault\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFLinuxDeVault\n\t\t\tfileURL = be.CDownloadURLDeVault + be.CDFLinuxDeVault\n\t\t}\n\tcase be.PTDigiByte:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tfilePath = abf + be.CDFWindowsDigiByte\n\t\t\tfileURL = be.CDownloadURLDigiByte + be.CDFWindowsDigiByte\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\treturn fmt.Errorf(\"ARM32 is not currently supported by DigiByte: %v \", err)\n\t\t\tcase \"arm64\":\n\t\t\t\tfilePath = abf + be.CDFArm64DigiByte\n\t\t\t\tfileURL = be.CDownloadURLDigiByte + be.CDFArm64DigiByte\n\t\t\tcase \"386\":\n\t\t\t\tfilePath = abf + be.CDFLinuxDigiByte\n\t\t\t\tfileURL = be.CDownloadURLDigiByte + be.CDFLinuxDigiByte\n\t\t\tcase \"amd64\":\n\t\t\t\tfilePath = abf + be.CDFLinuxDigiByte\n\t\t\t\tfileURL = be.CDownloadURLDigiByte + be.CDFLinuxDigiByte\n\t\t\t}\n\t\t}\n\tcase be.PTDivi:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFDiviWindows\n\t\t\tfileURL = be.CDownloadURLDivi + be.CDFDiviWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFDiviRPi\n\t\t\tfileURL = be.CDownloadURLDivi + be.CDFDiviRPi\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFDiviLinux\n\t\t\tfileURL = 
be.CDownloadURLDivi + be.CDFDiviLinux\n\t\t}\n\tcase be.PTFeathercoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFFeathercoinWindows\n\t\t\tfileURL = be.CDownloadURLFeathercoin + be.CDFFeathercoinWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\treturn fmt.Errorf(\"ARM is not supported for this build: %v \", err)\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFFeathercoinLinux\n\t\t\tfileURL = be.CDownloadURLFeathercoin + be.CDFFeathercoinLinux\n\t\t}\n\tcase be.PTGroestlcoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFGroestlcoinWindows\n\t\t\tfileURL = be.CDownloadURLGroestlcoin + be.CDFGroestlcoinWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFGroestlcoinRPi\n\t\t\tfileURL = be.CDownloadURLGroestlcoin + be.CDFGroestlcoinRPi\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFGroestlcoinLinux\n\t\t\tfileURL = be.CDownloadURLGroestlcoin + be.CDFGroestlcoinLinux\n\t\t}\n\tcase be.PTPhore:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFPhoreWindows\n\t\t\tfileURL = be.CDownloadURLPhore + be.CDFPhoreWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFPhoreRPi\n\t\t\tfileURL = be.CDownloadURLPhore + be.CDFPhoreRPi\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFPhoreLinux\n\t\t\tfileURL = be.CDownloadURLPhore + be.CDFPhoreLinux\n\t\t}\n\tcase be.PTPIVX:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFPIVXFileWindows\n\t\t\tfileURL = be.CDownloadURLPIVX + be.CDFPIVXFileWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFPIVXFileArm32\n\t\t\tfileURL = be.CDownloadURLPIVX + be.CDFPIVXFileArm32\n\t\t} else if runtime.GOARCH == \"arm64\" {\n\t\t\tfilePath = abf + be.CDFPIVXFileArm64\n\t\t\tfileURL = be.CDownloadURLPIVX + be.CDFPIVXFileArm64\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFPIVXFileLinux\n\t\t\tfileURL = be.CDownloadURLPIVX + be.CDFPIVXFileLinux\n\t\t}\n\tcase be.PTRapids:\n\t\tif 
runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFRapidsFileWindows\n\t\t\tfileURL = be.CDownloadURLRapids + be.CDFRapidsFileWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\treturn fmt.Errorf(\"ARM is not currently supported by Rapids at present: %v \", err)\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFRapidsFileLinux\n\t\t\t//filePath2 = abf + be.CDFRapidsFileLinuxDaemon\n\t\t\tfileURL = be.CDownloadURLRapids + be.CDFRapidsFileLinux\n\t\t\t//fileURL2 = be.CDownloadURLRapids + be.CDFRapidsFileLinuxDaemon\n\t\t}\n\tcase be.PTReddCoin:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tfilePath = abf + be.CDFReddCoinWindows\n\t\t\tfileURL = be.CDownloadURLReddCoinGen + be.CDFReddCoinWindows\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tfilePath = abf + be.CDFReddCoinRPi\n\t\t\t\tfileURL = be.CDownloadURLReddCoinArm\n\t\t\tcase \"arm64\":\n\t\t\t\treturn fmt.Errorf(\"ARM64 is not currently supported by ReddCoin: %v \", err)\n\t\t\tcase \"386\":\n\t\t\t\tfilePath = abf + be.CDFReddCoinLinux32\n\t\t\t\tfileURL = be.CDownloadURLReddCoinGen + be.CDFReddCoinLinux32\n\t\t\tcase \"amd64\":\n\t\t\t\tfilePath = abf + be.CDFReddCoinLinux64\n\t\t\t\tfileURL = be.CDownloadURLReddCoinGen + be.CDFReddCoinLinux64\n\t\t\t}\n\t\t}\n\t\t//if runtime.GOOS == \"windows\" {\n\t\t//\tfilePath = abf + be.CDFReddCoinWindows\n\t\t//\tfileURL = be.CDownloadURLReddCoinGen + be.CDFReddCoinWindows\n\t\t//} else if runtime.GOARCH == \"arm\" {\n\t\t//\tfilePath = abf + be.CDFReddCoinRPi\n\t\t//\tfileURL = be.CDownloadURLReddCoinArm\n\t\t//} else {\n\t\t//\tfilePath = abf + be.CDFReddCoinLinux64\n\t\t//\tfileURL = be.CDownloadURLReddCoinGen + be.CDFReddCoinLinux64\n\t\t//}\n\tcase be.PTScala:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFScalaWindows\n\t\t\tfileURL = be.CDownloadURLScala + be.CDFScalaWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFScalaRPi\n\t\t\tfileURL = 
be.CDownloadURLScala + be.CDFScalaRPi\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFScalaLinux\n\t\t\tfileURL = be.CDownloadURLScala + be.CDFScalaLinux\n\t\t}\n\tcase be.PTSyscoin:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tfilePath = abf + be.CDFSyscoinFileWindows\n\t\t\tfileURL = be.CDownloadURLSyscoin + be.CDFSyscoinFileWindows\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\treturn fmt.Errorf(\"ARM32 is not currently supported by DigiByte: %v \", err)\n\t\t\tcase \"arm64\":\n\t\t\t\tfilePath = abf + be.CDFSyscoinFileArm64\n\t\t\t\tfileURL = be.CDownloadURLSyscoin + be.CDFSyscoinFileArm64\n\t\t\tcase \"386\":\n\t\t\t\tfilePath = abf + be.CDFSyscoinFileLinux\n\t\t\t\tfileURL = be.CDownloadURLSyscoin + be.CDFSyscoinFileLinux\n\t\t\tcase \"amd64\":\n\t\t\t\tfilePath = abf + be.CDFSyscoinFileLinux\n\t\t\t\tfileURL = be.CDownloadURLDigiByte + be.CDFSyscoinFileLinux\n\t\t\t}\n\t\t}\n\tcase be.PTTrezarcoin:\n\t\tbe.AddToLog(lf, \"TZC detected...\", false)\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tbe.AddToLog(lf, \"windows detected...\", false)\n\t\t\tfilePath = abf + be.CDFTrezarcoinWindows\n\t\t\tfileURL = be.CDownloadURLTC + be.CDFTrezarcoinWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tbe.AddToLog(lf, \"arm detected...\", false)\n\t\t\tfilePath = abf + be.CDFTrezarcoinRPi\n\t\t\tfileURL = be.CDownloadURLTC + be.CDFTrezarcoinRPi\n\t\t} else {\n\t\t\tbe.AddToLog(lf, \"linux detected...\", false)\n\t\t\tfilePath = abf + be.CDFTrezarcoinLinux\n\t\t\tfileURL = be.CDownloadURLTC + be.CDFTrezarcoinLinux\n\t\t}\n\tcase be.PTVertcoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tfilePath = abf + be.CDFVertcoinWindows\n\t\t\tfileURL = be.CDownloadURLVertcoin + be.CDFVertcoinWindows\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\tfilePath = abf + be.CDFVertcoinRPi\n\t\t\tfileURL = be.CDownloadURLVertcoin + be.CDFVertcoinRPi\n\t\t} else {\n\t\t\tfilePath = abf + be.CDFVertcoinLinux\n\t\t\tfileURL = 
be.CDownloadURLVertcoin + be.CDFVertcoinLinux\n\t\t}\n\tdefault:\n\t\terr = errors.New(\"unable to determine ProjectType\")\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error - %v\", err)\n\t}\n\n\tbe.AddToLog(lf, \"filePath=\"+filePath, false)\n\tbe.AddToLog(lf, \"fileURL=\"+fileURL, false)\n\tbe.AddToLog(lf, \"Downloading required files...\", true)\n\n\tif err := be.DownloadFile(filePath, fileURL); err != nil {\n\t\treturn fmt.Errorf(\"unable to download file: %v - %v\", filePath+fileURL, err)\n\t}\n\tdefer os.Remove(filePath)\n\n\tr, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open file: %v - %v\", filePath, err)\n\t}\n\n\t// Now, decompress the files...\n\tbe.AddToLog(lf, \"decompressing files...\", true)\n\tswitch bwconf.ProjectType {\n\tcase be.PTBitcoinPlus:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirWindowsBitcoinPlus)\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxBitcoinPlus)\n\t\t\tcase \"386\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\t//defer os.RemoveAll(abf + be.CExtractedDirLinuxBitcoinPlus)\n\t\t\tcase \"amd64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\t//defer os.RemoveAll(abf + be.CExtractedDirLinuxBitcoinPlus)\n\t\t\t}\n\t\t}\n\tcase be.PTDeVault:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := 
archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirWinDeVault)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxDeVault)\n\t\t} else {\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxDeVault)\n\t\t}\n\tcase be.PTDigiByte:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirWindowsDigiByte)\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxDigiByte)\n\t\t\tcase \"386\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxDigiByte)\n\t\t\tcase \"amd64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CExtractedDirLinuxDigiByte)\n\t\t\t}\n\t\t}\n\tcase be.PTDivi:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to 
unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CDiviExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CDiviExtractedDirLinux)\n\t\t} else {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CDiviExtractedDirLinux)\n\t\t}\n\tcase be.PTFeathercoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn fmt.Errorf(\"feathercoin is not supported on Windows at this point\")\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CFeathercoinExtractedDirLinux)\n\t\t} else {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CFeathercoinExtractedDirLinux)\n\t\t}\n\tcase be.PTGroestlcoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CGroestlcoinExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CGroestlcoinExtractedDirLinux)\n\t\t} else {\n\t\t\t//err = 
be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CGroestlcoinExtractedDirLinux)\n\t\t}\n\tcase be.PTPhore:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPhoreExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPhoreExtractedDirLinux)\n\t\t} else {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPhoreExtractedDirLinux)\n\t\t}\n\tcase be.PTPIVX:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPIVXExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPIVXExtractedDirArm)\n\t\t} else if runtime.GOARCH == \"arm64\" {\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPIVXExtractedDirArm)\n\t\t} else {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif 
err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CPIVXExtractedDirLinux)\n\t\t}\n\tcase be.PTRapids:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CRapidsExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CRapidsExtractedDirLinux)\n\t\t} else {\n\t\t\t// First the normal file...\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CRapidsExtractedDirLinux)\n\t\t}\n\tcase be.PTReddCoin:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\t//_, err = be.UnZip(filePath, abf)\n\t\t\t//if err != nil {\n\t\t\t//\treturn fmt.Errorf(\"unable to unzip file: %v - %v\", filePath, err)\n\t\t\t//}\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\n\t\t\tdefer os.RemoveAll(abf + be.CReddCoinExtractedDirWin)\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CReddCoinExtractedDirLinux)\n\t\t\tcase \"386\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + 
be.CReddCoinExtractedDirLinux)\n\t\t\tcase \"amd64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CReddCoinExtractedDirLinux)\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\t}\n\tcase be.PTScala:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\n\t\t\t// todo Correctly remove the Windows extracted dir below.\n\t\t\t//defer os.RemoveAll(abf + be.CScalaExtractedDirLinux)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CScalaExtractedDirLinux)\n\t\t} else {\n\t\t\tuz := unzip.New(filePath, abf)\n\t\t\terr := uz.Extract()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unzip file: %v - %v\", filePath, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(\"tmp\")\n\t\t}\n\tcase be.PTSyscoin:\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CSyscoinExtractedDirWindows)\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CSyscoinExtractedDirLinux)\n\t\t\tcase \"386\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v 
- %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CSyscoinExtractedDirLinux)\n\t\t\tcase \"amd64\":\n\t\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t\t}\n\t\t\t\tdefer os.RemoveAll(abf + be.CSyscoinExtractedDirLinux)\n\t\t\t}\n\t\t}\n\tcase be.PTTrezarcoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn fmt.Errorf(\"trezarcoin is not supported on Windows at this point\")\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CTrezarcoinRPiExtractedDir)\n\t\t} else {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CTrezarcoinLinuxExtractedDir)\n\t\t}\n\tcase be.PTVertcoin:\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tif err := archiver.Unarchive(filePath, abf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unarchive file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CVertcoinExtractedDirWindows)\n\t\t} else if runtime.GOARCH == \"arm\" {\n\t\t\t//err = be.ExtractTarGz(r)\n\t\t\terr = archiver.Unarchive(filePath, abf)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to extractTarGz file: %v - %v\", r, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(abf + be.CVertcoinExtractedDirLinux)\n\t\t} else {\n\t\t\tuz := unzip.New(filePath, abf)\n\t\t\terr := uz.Extract()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to unzip file: %v - %v\", filePath, err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(\"tmp\")\n\t\t}\n\tdefault:\n\t\terr = errors.New(\"unable to determine ProjectType\")\n\t}\n\n\tif err := be.AddToLog(lf, \"Installing 
files...\", true); err != nil {\n\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t}\n\n\t// Copy files to correct location\n\tvar srcPath, srcPathD, srcFileCLI, srcFileD, srcFileTX, srcPathSap, srcFileSap1, srcFileSap2 string\n\n\tswitch bwconf.ProjectType {\n\tcase be.PTBitcoinPlus:\n\t\tif err := be.AddToLog(lf, \"BitcoinPlus detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CExtractedDirWindowsBitcoinPlus\n\t\t\tsrcFileCLI = be.CCliFileWinBitcoinPlus\n\t\t\tsrcFileD = be.CDFileWinBitcoinPlus\n\t\t\tsrcFileTX = be.CTxFileWinBitcoinPlus\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\", \"arm64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxBitcoinPlus\n\t\t\t\tsrcFileCLI = be.CCliFileBitcoinPlus\n\t\t\t\tsrcFileD = be.CDFileBitcoinPlus\n\t\t\t\tsrcFileTX = be.CTxFileBitcoinPlus\n\t\t\tcase \"386\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux 386 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxBitcoinPlus\n\t\t\t\tsrcFileCLI = be.CCliFileBitcoinPlus\n\t\t\t\tsrcFileD = be.CDFileBitcoinPlus\n\t\t\t\tsrcFileTX = be.CTxFileBitcoinPlus\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxBitcoinPlus\n\t\t\t\tsrcFileCLI = be.CCliFileBitcoinPlus\n\t\t\t\tsrcFileD = be.CDFileBitcoinPlus\n\t\t\t\tsrcFileTX = be.CTxFileBitcoinPlus\n\t\t\t//srcFileBWCLI = 
be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTDeVault:\n\t\tif err := be.AddToLog(lf, \"DeVault detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CExtractedDirWinDeVault + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CCliFileWinDeVault\n\t\t\tsrcFileD = be.CDFileWinDeVault\n\t\t\tsrcFileTX = be.CTxFileWinDeVault\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxDeVault + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CCliFileDeVault\n\t\t\t\tsrcFileD = be.CDFileDeVault\n\t\t\t\tsrcFileTX = be.CTxFileDeVault\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxDeVault + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CCliFileDeVault\n\t\t\t\tsrcFileD = be.CDFileDeVault\n\t\t\t\tsrcFileTX = be.CTxFileDeVault\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTDigiByte:\n\t\tif err := be.AddToLog(lf, \"DigiByte detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CExtractedDirWindowsDigiByte 
+ \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CCliFileWinDigiByte\n\t\t\tsrcFileD = be.CDFileWinDigiByte\n\t\t\tsrcFileTX = be.CTxFileWinDigiByte\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\", \"arm64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxDigiByte + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CCliFileDigiByte\n\t\t\t\tsrcFileD = be.CDFileDigiByte\n\t\t\t\tsrcFileTX = be.CTxFileDigiByte\n\t\t\tcase \"386\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux 386 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxDigiByte + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CCliFileDigiByte\n\t\t\t\tsrcFileD = be.CDFileDigiByte\n\t\t\t\tsrcFileTX = be.CTxFileDigiByte\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CExtractedDirLinuxDigiByte + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CCliFileDigiByte\n\t\t\t\tsrcFileD = be.CDFileDigiByte\n\t\t\t\tsrcFileTX = be.CTxFileDigiByte\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTDivi:\n\t\tif err := be.AddToLog(lf, \"DIVI detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CDiviExtractedDirWindows + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CDiviCliFileWin\n\t\t\tsrcFileD = be.CDiviDFileWin\n\t\t\tsrcFileTX = 
be.CDiviTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CDiviExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CDiviCliFile\n\t\t\t\tsrcFileD = be.CDiviDFile\n\t\t\t\tsrcFileTX = be.CDiviTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CDiviExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CDiviCliFile\n\t\t\t\tsrcFileD = be.CDiviDFile\n\t\t\t\tsrcFileTX = be.CDiviTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTFeathercoin:\n\t\tif err := be.AddToLog(lf, \"Feathercoin detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CFeathercoinExtractedDirLinux\n\t\t\tsrcFileCLI = be.CFeathercoinCliFileWin\n\t\t\tsrcFileD = be.CFeathercoinDFileWin\n\t\t\tsrcFileTX = be.CFeathercoinTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CFeathercoinExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CFeathercoinCliFile\n\t\t\t\tsrcFileD = be.CFeathercoinDFile\n\t\t\t\tsrcFileTX = 
be.CFeathercoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CFeathercoinExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CFeathercoinCliFile\n\t\t\t\tsrcFileD = be.CFeathercoinDFile\n\t\t\t\tsrcFileTX = be.CFeathercoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTGroestlcoin:\n\t\tif err := be.AddToLog(lf, \"Groestlcoin detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CGroestlcoinExtractedDirWindows + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CGroestlcoinCliFileWin\n\t\t\tsrcFileD = be.CGroestlcoinDFileWin\n\t\t\tsrcFileTX = be.CGroestlcoinTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CGroestlcoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CGroestlcoinCliFile\n\t\t\t\tsrcFileD = be.CGroestlcoinDFile\n\t\t\t\tsrcFileTX = be.CGroestlcoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CGroestlcoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CGroestlcoinCliFile\n\t\t\t\tsrcFileD = be.CGroestlcoinDFile\n\t\t\t\tsrcFileTX = 
be.CGroestlcoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTPhore:\n\t\tif err := be.AddToLog(lf, \"Phore detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CPhoreExtractedDirLinux + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CPhoreCliFileWin\n\t\t\tsrcFileD = be.CPhoreDFileWin\n\t\t\tsrcFileTX = be.CPhoreTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CPhoreExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CPhoreCliFile\n\t\t\t\tsrcFileD = be.CPhoreDFile\n\t\t\t\tsrcFileTX = be.CPhoreTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CPhoreExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CPhoreCliFile\n\t\t\t\tsrcFileD = be.CPhoreDFile\n\t\t\t\tsrcFileTX = be.CPhoreTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTPIVX:\n\t\tif err := be.AddToLog(lf, \"PIVX detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + 
be.CPIVXExtractedDirWindows + \"bin\\\\\"\n\t\t\tsrcPathSap = abf + be.CPIVXExtractedDirWindows + \"share\\\\pivx\\\\\"\n\t\t\tsrcFileCLI = be.CPIVXCliFileWin\n\t\t\tsrcFileD = be.CPIVXDFileWin\n\t\t\tsrcFileTX = be.CPIVXTxFileWin\n\t\t\tsrcFileSap1 = be.CPIVXSapling1\n\t\t\tsrcFileSap2 = be.CPIVXSapling2\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\", \"arm64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CPIVXExtractedDirArm + \"bin/\"\n\t\t\t\tsrcPathSap = abf + be.CPIVXExtractedDirArm + \"share/pivx/\"\n\t\t\t\tsrcFileCLI = be.CPIVXCliFile\n\t\t\t\tsrcFileD = be.CPIVXDFile\n\t\t\t\tsrcFileTX = be.CPIVXTxFile\n\t\t\t\tsrcFileSap1 = be.CPIVXSapling1\n\t\t\t\tsrcFileSap2 = be.CPIVXSapling2\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tsrcPath = abf + be.CPIVXExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcPathSap = abf + be.CPIVXExtractedDirLinux + \"share/pivx/\"\n\t\t\t\tsrcFileCLI = be.CPIVXCliFile\n\t\t\t\tsrcFileD = be.CPIVXDFile\n\t\t\t\tsrcFileTX = be.CPIVXTxFile\n\t\t\t\tsrcFileSap1 = be.CPIVXSapling1\n\t\t\t\tsrcFileSap2 = be.CPIVXSapling2\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTRapids:\n\t\tif err := be.AddToLog(lf, \"ReddCoin detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CRapidsExtractedDirWindows\n\t\t\tsrcFileCLI = be.CRapidsCliFileWin\n\t\t\tsrcFileD = be.CRapidsDFileWin\n\t\t\tsrcFileTX = be.CRapidsTxFileWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase 
\"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CRapidsExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CRapidsCliFile\n\t\t\t\tsrcFileD = be.CRapidsDFile\n\t\t\t\tsrcFileTX = be.CRapidsTxFile\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CRapidsExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CRapidsCliFile\n\t\t\t\tsrcFileD = be.CRapidsDFile\n\t\t\t\tsrcFileTX = be.CRapidsTxFile\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTReddCoin:\n\t\tif err := be.AddToLog(lf, \"ReddCoin detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CReddCoinExtractedDirWin + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CReddCoinCliFileWin\n\t\t\tsrcFileD = be.CReddCoinDFileWin\n\t\t\tsrcFileTX = be.CReddCoinTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf\n\t\t\t\tsrcFileCLI = be.CReddCoinCliFile\n\t\t\t\tsrcFileD = be.CReddCoinDFile\n\t\t\t\tsrcFileTX = be.CReddCoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux 386 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CReddCoinExtractedDirLinux 
+ \"bin/\"\n\t\t\t\tsrcFileCLI = be.CReddCoinCliFile\n\t\t\t\tsrcFileD = be.CReddCoinDFile\n\t\t\t\tsrcFileTX = be.CReddCoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CReddCoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CReddCoinCliFile\n\t\t\t\tsrcFileD = be.CReddCoinDFile\n\t\t\t\tsrcFileTX = be.CReddCoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTScala:\n\t\tif err := be.AddToLog(lf, \"Scala detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CScalaExtractedDirLinux\n\t\t\tsrcFileCLI = be.CScalaCliFileWin\n\t\t\tsrcFileD = be.CScalaDFileWin\n\t\t\tsrcFileTX = be.CScalaTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CScalaExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CScalaCliFile\n\t\t\t\tsrcFileD = be.CScalaDFile\n\t\t\t\tsrcFileTX = be.CScalaTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CScalaExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CScalaCliFile\n\t\t\t\tsrcFileD = be.CScalaDFile\n\t\t\t\tsrcFileTX = 
be.CScalaTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTSyscoin:\n\t\tif err := be.AddToLog(lf, \"Syscoin detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CSyscoinExtractedDirWindows + \"bin\\\\\"\n\t\t\tsrcFileCLI = be.CSyscoinCliFileWin\n\t\t\tsrcFileD = be.CSyscoinDFileWin\n\t\t\tsrcFileTX = be.CSyscoinTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\", \"arm64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CSyscoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CSyscoinCliFile\n\t\t\t\tsrcFileD = be.CSyscoinDFile\n\t\t\t\tsrcFileTX = be.CSyscoinTxFile\n\t\t\tcase \"386\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux 386 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CSyscoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CSyscoinCliFile\n\t\t\t\tsrcFileD = be.CSyscoinDFile\n\t\t\t\tsrcFileTX = be.CSyscoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tcase \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CSyscoinExtractedDirLinux + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CSyscoinCliFile\n\t\t\t\tsrcFileD = be.CSyscoinDFile\n\t\t\t\tsrcFileTX = be.CSyscoinTxFile\n\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to 
determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTTrezarcoin:\n\t\tif err := be.AddToLog(lf, \"TZC detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\terr = errors.New(\"windows is not currently supported for Trezarcoin\")\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase \"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CTrezarcoinRPiExtractedDir + \"/\"\n\t\t\t\tsrcFileCLI = be.CTrezarcoinCliFile\n\t\t\t\tsrcFileD = be.CTrezarcoinDFile\n\t\t\t\tsrcFileTX = be.CTrezarcoinTxFile\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CTrezarcoinLinuxExtractedDir + \"bin/\"\n\t\t\t\tsrcFileCLI = be.CTrezarcoinCliFile\n\t\t\t\tsrcFileD = be.CTrezarcoinDFile\n\t\t\t\tsrcFileTX = be.CTrezarcoinTxFile\n\t\t\t\t//srcFileBWCLI = be.CAppFilename\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tcase be.PTVertcoin:\n\t\tif err := be.AddToLog(lf, \"VTC detected...\", false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t}\n\t\tswitch runtime.GOOS {\n\t\tcase \"windows\":\n\t\t\tsrcPath = abf + be.CVertcoinExtractedDirWindows\n\t\t\tsrcFileCLI = be.CVertcoinCliFileWin\n\t\t\tsrcFileD = be.CVertcoinDFileWin\n\t\t\tsrcFileTX = be.CVertcoinTxFileWin\n\t\t\t//srcFileBWCLI = be.CAppFilenameWin\n\t\tcase \"linux\":\n\t\t\tswitch runtime.GOARCH {\n\t\t\tcase 
\"arm\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux arm detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CVertcoinExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CVertcoinCliFile\n\t\t\t\tsrcFileD = be.CVertcoinDFile\n\t\t\t\tsrcFileTX = be.CVertcoinTxFile\n\t\t\tcase \"386\", \"amd64\":\n\t\t\t\tif err := be.AddToLog(lf, \"linux amd64 detected.\", false); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t\t\t\t}\n\t\t\t\tsrcPath = abf + be.CVertcoinExtractedDirLinux\n\t\t\t\tsrcFileCLI = be.CVertcoinCliFile\n\t\t\t\tsrcFileD = be.CVertcoinDFile\n\t\t\t\tsrcFileTX = be.CVertcoinTxFile\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unable to determine runtime.GOARCH \" + runtime.GOARCH)\n\t\t\t}\n\t\tdefault:\n\t\t\terr = errors.New(\"unable to determine runtime.GOOS\")\n\t\t}\n\tdefault:\n\t\terr = errors.New(\"unable to determine ProjectType\")\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error: - %v\", err)\n\t}\n\n\tif err := be.AddToLog(lf, \"srcPath=\"+srcPath, false); err != nil {\n\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t}\n\tif err := be.AddToLog(lf, \"srcFileCLI=\"+srcFileCLI, false); err != nil {\n\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t}\n\tif err := be.AddToLog(lf, \"srcFileD=\"+srcFileD, false); err != nil {\n\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t}\n\tif err := be.AddToLog(lf, \"srcFileTX=\"+srcFileTX, false); err != nil {\n\t\treturn fmt.Errorf(\"unable to add to log file: %v\", err)\n\t}\n\n\t// If it's PIVX, see if we need to copy the sapling files\n\tif bwconf.ProjectType == be.PTPIVX {\n\t\tdstSapDir, err := be.GetPIVXSaplingDir()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to call GetPIVXSaplingDir: %v\", err)\n\t\t}\n\n\t\t// Make sure the Sapling directory exists\n\t\tif err := os.MkdirAll(dstSapDir, os.ModePerm); err != nil 
{\n\t\t\tbe.AddToLog(lf, \"unable to make directory: \"+err.Error(), false)\n\t\t\treturn fmt.Errorf(\"unable to make dir: %v\", err)\n\t\t}\n\n\t\t// Sapling1\n\t\tif !be.FileExists(dstSapDir + srcFileSap1) {\n\t\t\tif err := be.FileCopy(srcPathSap+srcFileSap1, dstSapDir+srcFileSap1, false); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copyFile from: %v to %v - %v\", srcPathSap+srcFileSap1, dstSapDir+srcFileSap1, err)\n\t\t\t}\n\t\t}\n\t\tif err := os.Chmod(dstSapDir+srcFileSap1, 0777); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to chmod file: %v - %v\", dstSapDir+srcFileSap1, err)\n\t\t}\n\n\t\t// Sapling2\n\t\tif !be.FileExists(dstSapDir + srcFileSap2) {\n\t\t\tif err := be.FileCopy(srcPathSap+srcFileSap2, dstSapDir+srcFileSap2, false); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copyFile from: %v to %v - %v\", srcPathSap+srcFileSap2, dstSapDir+srcFileSap2, err)\n\t\t\t}\n\t\t}\n\t\tif err := os.Chmod(dstSapDir+srcFileSap2, 0777); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to chmod file: %v - %v\", dstSapDir+srcFileSap2, err)\n\t\t}\n\t}\n\n\t// coin-cli\n\tif !be.FileExists(abf + srcFileCLI) {\n\t\tif err := be.FileCopy(srcPath+srcFileCLI, abf+srcFileCLI, false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to copyFile from: %v to %v - %v\", srcPath+srcFileCLI, abf+srcFileCLI, err)\n\t\t}\n\t}\n\tif err := os.Chmod(abf+srcFileCLI, 0777); err != nil {\n\t\treturn fmt.Errorf(\"unable to chmod file: %v - %v\", abf+srcFileCLI, err)\n\t}\n\n\t// coind\n\tif !be.FileExists(abf + srcFileD) {\n\t\t// This is only required for Rapids on Linux because there are 2 different directory locations.\n\t\tif srcPathD != \"\" {\n\t\t\tif err := be.FileCopy(srcPathD+srcFileD, abf+srcFileD, false); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copyFile: %v - %v\", srcPathD+srcFileD, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := be.FileCopy(srcPath+srcFileD, abf+srcFileD, false); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to copyFile: %v - %v\", 
srcPath+srcFileD, err)\n\t\t\t}\n\t\t}\n\t}\n\terr = os.Chmod(abf+srcFileD, 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to chmod file: %v - %v\", abf+srcFileD, err)\n\t}\n\n\t// cointx\n\tif !be.FileExists(abf + srcFileTX) {\n\t\tif err := be.FileCopy(srcPath+srcFileTX, abf+srcFileTX, false); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to copyFile: %v - %v\", srcPath+srcFileTX, err)\n\t\t}\n\t}\n\terr = os.Chmod(abf+srcFileTX, 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to chmod file: %v - %v\", abf+srcFileTX, err)\n\t}\n\n\treturn nil\n}", "func RunInstall(cmd *cobra.Command, args []string) {\n\tc := LoadOperatorConf(cmd)\n\tutil.KubeCreateSkipExisting(c.NS)\n\tutil.KubeCreateSkipExisting(c.SA)\n\tutil.KubeCreateSkipExisting(c.SAEndpoint)\n\tutil.KubeCreateSkipExisting(c.Role)\n\tutil.KubeCreateSkipExisting(c.RoleEndpoint)\n\tutil.KubeCreateSkipExisting(c.RoleBinding)\n\tutil.KubeCreateSkipExisting(c.RoleBindingEndpoint)\n\tutil.KubeCreateSkipExisting(c.ClusterRole)\n\tutil.KubeCreateSkipExisting(c.ClusterRoleBinding)\n\n\ttestEnv, _ := cmd.Flags().GetBool(\"test-env\")\n\tif testEnv {\n\t\toperatorContainer := c.Deployment.Spec.Template.Spec.Containers[0]\n\t\toperatorContainer.Env = append(operatorContainer.Env, corev1.EnvVar{\n\t\t\tName: \"TEST_ENV\",\n\t\t\tValue: \"true\",\n\t\t})\n\t\tc.Deployment.Spec.Template.Spec.Containers[0].Env = operatorContainer.Env\n\t}\n\n\tadmission, _ := cmd.Flags().GetBool(\"admission\")\n\tif admission {\n\t\tLoadAdmissionConf(c)\n\t\tAdmissionWebhookSetup(c)\n\t\tutil.KubeCreateSkipExisting(c.WebhookConfiguration)\n\t\tutil.KubeCreateSkipExisting(c.WebhookSecret)\n\t\tutil.KubeCreateSkipExisting(c.WebhookService)\n\t\toperatorContainer := c.Deployment.Spec.Template.Spec.Containers[0]\n\t\toperatorContainer.Env = append(operatorContainer.Env, corev1.EnvVar{\n\t\t\tName: \"ENABLE_NOOBAA_ADMISSION\",\n\t\t\tValue: \"true\",\n\t\t})\n\t\tc.Deployment.Spec.Template.Spec.Containers[0].Env = 
operatorContainer.Env\n\t}\n\n\tnoDeploy, _ := cmd.Flags().GetBool(\"no-deploy\")\n\tif !noDeploy {\n\t\toperatorContainer := c.Deployment.Spec.Template.Spec.Containers[0]\n\t\toperatorContainer.Env = append(\n\t\t\toperatorContainer.Env,\n\t\t\tcorev1.EnvVar{\n\t\t\t\tName: \"NOOBAA_CLI_DEPLOYMENT\",\n\t\t\t\tValue: \"true\",\n\t\t\t},\n\t\t)\n\t\tc.Deployment.Spec.Template.Spec.Containers[0].Env = operatorContainer.Env\n\t\tutil.KubeCreateSkipExisting(c.Deployment)\n\t}\n}", "func DoSetup() {\n\tSetLoggerVerbosity()\n\tgplog.Verbose(\"Backup Command: %s\", os.Args)\n\n\tutils.CheckGpexpandRunning(utils.BackupPreventedByGpexpandMessage)\n\ttimestamp := history.CurrentTimestamp()\n\tCreateBackupLockFile(timestamp)\n\tInitializeConnectionPool()\n\n\tgplog.Info(\"Starting backup of database %s\", MustGetFlagString(options.DBNAME))\n\topts, err := options.NewOptions(cmdFlags)\n\tgplog.FatalOnError(err)\n\n\tvalidateFilterLists(opts)\n\n\terr = opts.ExpandIncludesForPartitions(connectionPool, cmdFlags)\n\tgplog.FatalOnError(err)\n\n\tsegConfig := cluster.MustGetSegmentConfiguration(connectionPool)\n\tglobalCluster = cluster.NewCluster(segConfig)\n\tsegPrefix := filepath.GetSegPrefix(connectionPool)\n\tglobalFPInfo = filepath.NewFilePathInfo(globalCluster, MustGetFlagString(options.BACKUP_DIR), timestamp, segPrefix)\n\tif MustGetFlagBool(options.METADATA_ONLY) {\n\t\t_, err = globalCluster.ExecuteLocalCommand(fmt.Sprintf(\"mkdir -p %s\", globalFPInfo.GetDirForContent(-1)))\n\t\tgplog.FatalOnError(err)\n\t} else {\n\t\tCreateBackupDirectoriesOnAllHosts()\n\t}\n\tglobalTOC = &toc.TOC{}\n\tglobalTOC.InitializeMetadataEntryMap()\n\tutils.InitializePipeThroughParameters(!MustGetFlagBool(options.NO_COMPRESSION), MustGetFlagInt(options.COMPRESSION_LEVEL))\n\tGetQuotedRoleNames(connectionPool)\n\n\tpluginConfigFlag := MustGetFlagString(options.PLUGIN_CONFIG)\n\n\tif pluginConfigFlag != \"\" {\n\t\tpluginConfig, err = 
utils.ReadPluginConfig(pluginConfigFlag)\n\t\tgplog.FatalOnError(err)\n\t\tconfigFilename := path.Base(pluginConfig.ConfigPath)\n\t\tconfigDirname := path.Dir(pluginConfig.ConfigPath)\n\t\tpluginConfig.ConfigPath = path.Join(configDirname, timestamp+\"_\"+configFilename)\n\t\t_ = cmdFlags.Set(options.PLUGIN_CONFIG, pluginConfig.ConfigPath)\n\t\tgplog.Info(\"Plugin config path: %s\", pluginConfig.ConfigPath)\n\t}\n\n\tInitializeBackupReport(*opts)\n\n\tif pluginConfigFlag != \"\" {\n\t\tbackupReport.PluginVersion = pluginConfig.CheckPluginExistsOnAllHosts(globalCluster)\n\t\tpluginConfig.CopyPluginConfigToAllHosts(globalCluster)\n\t\tpluginConfig.SetupPluginForBackup(globalCluster, globalFPInfo)\n\t}\n}", "func executeLaunch() {\n\tfmt.Println(\"Launching ...\")\n}", "func (p *PouchMigrator) doPrepare(ctx context.Context, meta *pouch.PouchContainer, takeOverContainer bool) error {\n\t// check image existance\n\timg := meta.Config.Image\n\t_, imageExist := p.images[img]\n\tif !imageExist {\n\t\tp.images[img] = struct{}{}\n\t}\n\n\t// if takeOverContainer set, just prepare containerd containers for running containers\n\tif takeOverContainer {\n\t\treturn p.prepareCtrdContainers(ctx, meta)\n\t}\n\n\t// Pull image\n\tif imageExist {\n\t\tlogrus.Infof(\"image %s has been downloaded, skip pull image\", img)\n\t} else {\n\t\tlogrus.Infof(\"Start pull image: %s\", img)\n\t\tif err := p.containerd.PullImage(ctx, img); err != nil {\n\t\t\tlogrus.Errorf(\"failed to pull image %s: %v\\n\", img, err)\n\t\t\treturn err\n\t\t}\n\t\tlogrus.Infof(\"End pull image: %s\", img)\n\t}\n\n\tlogrus.Infof(\"Start prepare snapshot %s\", meta.ID)\n\t_, err := p.containerd.GetSnapshot(ctx, meta.ID)\n\tif err == nil {\n\t\tlogrus.Infof(\"Snapshot %s already exists, delete it\", meta.ID)\n\t\tp.containerd.RemoveSnapshot(ctx, meta.ID)\n\t}\n\t// CreateSnapshot for new pouch container\n\tif err := p.containerd.CreateSnapshot(ctx, meta.ID, img); err != nil {\n\t\treturn err\n\t}\n\n\tupperDir, 
workDir, err := p.getOverlayFsDir(ctx, meta.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif upperDir == \"\" || workDir == \"\" {\n\t\treturn fmt.Errorf(\"snapshot mounts occurred an error: upperDir=%s, workDir=%s\", upperDir, workDir)\n\t}\n\n\t// If need convert docker container to pouch container,\n\t// we should also convert Snapshotter Data\n\tmeta.Snapshotter.Data = map[string]string{}\n\tmeta.Snapshotter.Data[\"UpperDir\"] = upperDir\n\n\t// Set diskquota for UpperDir and WorkDir.\n\tdiskQuota := \"\"\n\tif v, exists := meta.Config.Labels[\"DiskQuota\"]; exists {\n\t\tdiskQuota = v\n\t}\n\n\tfor _, dir := range []string{upperDir, workDir} {\n\t\tif err := p.setDirDiskQuota(diskQuota, meta.Config.QuotaID, dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogrus.Infof(\"Set diskquota for snapshot %s done\", meta.ID)\n\treturn nil\n}", "func run(ctx context.Context, state *core.BuildState, label core.AnnotatedOutputLabel, args []string, fork, quiet, remote, setenv, detach, tmpDir bool, dir, overrideCmd string) ([]byte, []byte, error) {\n\t// This is a bit strange as normally if you run a binary for another platform, this will fail. In some cases\n\t// this can be quite useful though e.g. 
to compile a binary for a target arch, then run an .sh script to\n\t// push that to docker.\n\tif state.TargetArch != cli.HostArch() {\n\t\tlabel.Subrepo = state.TargetArch.String()\n\t}\n\n\ttarget := state.Graph.TargetOrDie(label.BuildLabel)\n\t// Non binary targets can be run if an override command is passed in\n\tif !target.IsBinary && overrideCmd == \"\" {\n\t\tlog.Fatalf(\"Target %s cannot be run; it's not marked as binary\", label)\n\t}\n\tif label.Annotation == \"\" && len(target.Outputs()) != 1 {\n\t\tlog.Fatalf(\"Targets %s cannot be run as it has %d outputs.\", label, len(target.Outputs()))\n\t}\n\tif remote {\n\t\t// Send this off to be done remotely.\n\t\t// This deliberately misses the out_exe bit below, but also doesn't pick up whatever's going on with java -jar;\n\t\t// however that will be obsolete post #920 anyway.\n\t\tif state.RemoteClient == nil {\n\t\t\tlog.Fatalf(\"You must configure remote execution to use plz run --remote\")\n\t\t}\n\t\treturn nil, nil, state.RemoteClient.Run(target)\n\t}\n\n\tif tmpDir {\n\t\tvar err error\n\t\tif dir, err = prepareRunDir(state, target); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t// ReplaceSequences always quotes stuff in case it contains spaces or special characters,\n\t// that works fine if we interpret it as a shell but not to pass it as an argument here.\n\tswitch {\n\tcase overrideCmd != \"\":\n\t\tcommand, _ := core.ReplaceSequences(state, target, overrideCmd)\n\t\t// We don't care about passed in args when an override command is provided\n\t\targs = process.BashCommand(\"bash\", strings.Trim(command, \"\\\"\"), true)\n\tcase label.Annotation != \"\":\n\t\tentryPoint, ok := target.EntryPoints[label.Annotation]\n\t\tif !ok {\n\t\t\tlog.Fatalf(\"Cannot run %s as has no entry point %s\", label, label.Annotation)\n\t\t}\n\t\tvar command string\n\t\tif tmpDir {\n\t\t\tcommand = filepath.Join(dir, entryPoint)\n\t\t} else {\n\t\t\tcommand = filepath.Join(target.OutDir(), 
entryPoint)\n\t\t}\n\t\targs = append(strings.Split(command, \" \"), args...)\n\tdefault:\n\t\t// out_exe handles java binary stuff by invoking the .jar with java as necessary\n\t\tvar command string\n\t\tif tmpDir {\n\t\t\tcommand = filepath.Join(dir, target.Outputs()[0])\n\t\t} else {\n\t\t\tcommand, _ = core.ReplaceSequences(state, target, fmt.Sprintf(\"$(out_exe %s)\", target.Label))\n\t\t\tcommand = strings.Trim(command, \"\\\"\")\n\t\t}\n\t\targs = append(strings.Split(command, \" \"), args...)\n\t}\n\n\t// Handle targets where $(exe ...) returns something nontrivial\n\tif !strings.Contains(args[0], \"/\") {\n\t\t// Probably it's a java -jar, we need an absolute path to it.\n\t\tcmd, err := exec.LookPath(args[0])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Can't find binary %s\", args[0])\n\t\t}\n\t\targs[0] = cmd\n\t} else if dir != \"\" { // Find an absolute path before changing directory\n\t\tabs, err := filepath.Abs(args[0])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Couldn't calculate absolute path for %s: %s\", args[0], err)\n\t\t}\n\t\targs[0] = abs\n\t}\n\n\tlog.Info(\"Running target %s...\", strings.Join(args, \" \"))\n\toutput.SetWindowTitle(\"plz run: \" + strings.Join(args, \" \"))\n\tenv := environ(state, target, setenv, tmpDir)\n\n\tif !fork {\n\t\tif dir != \"\" {\n\t\t\terr := syscall.Chdir(dir)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error changing directory %s: %s\", dir, err)\n\t\t\t}\n\t\t}\n\t\t// Plain 'plz run'. One way or another we never return from the following line.\n\t\tmust(syscall.Exec(args[0], args, env), args)\n\t} else if detach {\n\t\t// Bypass the whole process management system since we explicitly aim not to manage this subprocess.\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tcmd.Dir = dir\n\t\treturn nil, nil, toExitError(cmd.Start(), args, nil)\n\t}\n\t// Run as a normal subcommand.\n\t// Note that we don't connect stdin. 
It doesn't make sense for multiple processes.\n\t// The process executor doesn't actually support not having a timeout, but the max is ~290 years so nobody\n\t// should know the difference.\n\tout, combined, err := process.New().ExecWithTimeout(ctx, nil, dir, env, time.Duration(math.MaxInt64), false, false, !quiet, false, process.NoSandbox, args)\n\treturn out, combined, toExitError(err, args, combined)\n}", "func Run(w io.Writer, workdir, outdir, platform string) error {\n\tif err := packages.Install(w); err != nil {\n\t\treturn err\n\t}\n\tif err := rump.PrepareRumpRepo(w, workdir); err != nil {\n\t\treturn err\n\t}\n\tif err := rump.BuildRump(w, workdir, outdir, platform); err != nil {\n\t\treturn err\n\t}\n\tif err := rump.ApplyPatches(filepath.Join(outdir, \"rumprun\"), platform); err != nil {\n\t\treturn err\n\t}\n\tif err := rump.BuildRump(w, workdir, outdir, platform); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Prepare(mgr *manager.Manager) error {\n\tmgr.Logger.Infoln(\"Downloading Installation Files\")\n\tcfg := mgr.Cluster\n\tcurrentDir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to get current directory\")\n\t}\n\n\tvar kubeVersion string\n\tif cfg.Kubernetes.Version == \"\" {\n\t\tkubeVersion = kubekeyapiv1alpha1.DefaultKubeVersion\n\t} else {\n\t\tkubeVersion = cfg.Kubernetes.Version\n\t}\n\n\tarchMap := make(map[string]bool)\n\tfor _, host := range mgr.Cluster.Hosts {\n\t\tswitch host.Arch {\n\t\tcase \"amd64\":\n\t\t\tarchMap[\"amd64\"] = true\n\t\tcase \"arm64\":\n\t\t\tarchMap[\"arm64\"] = true\n\t\tdefault:\n\t\t\treturn errors.New(fmt.Sprintf(\"Unsupported architecture: %s\", host.Arch))\n\t\t}\n\t}\n\n\tfor arch := range archMap {\n\t\tbinariesDir := fmt.Sprintf(\"%s/%s/%s/%s\", currentDir, kubekeyapiv1alpha1.DefaultPreDir, kubeVersion, arch)\n\t\tif err := util.CreateDir(binariesDir); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to create download target 
dir\")\n\t\t}\n\n\t\tif err := FilesDownloadHTTP(mgr, binariesDir, kubeVersion, arch); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (l *langManager) setup(ctx context.Context, pkgVenvPath, srcPath, python3Bin, pipBin, requiredPy string, passthru bool) error {\n\tswitch version.Compare(requiredPy, \"3.0.0\") {\n\tcase version.Greater, version.Equals:\n\t\t// Python 3.x required: build virtual environment\n\t\tlogger := log.FromContext(ctx)\n\n\t\tdefer func() {\n\t\t\tif !passthru {\n\t\t\t\tl.deactivateVirtualEnvironment(ctx, pkgVenvPath, requiredPy)\n\t\t\t}\n\t\t\tlogger.Debugf(\"All virtualenv dependencies successfully installed\")\n\t\t}()\n\n\t\tveExists, err := l.commandExecutor.FileExists(pkgVenvPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !passthru || !veExists {\n\t\t\tlogger.Debugf(\"the virtual environment %s does not exist yet - installing dependencies\", pkgVenvPath)\n\n\t\t\t// upgrade pip and setuptools\n\t\t\tif err := l.upgradePipAndSetuptools(ctx, python3Bin); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// create virtual environment\n\t\t\tif err := l.createVirtualEnvironment(ctx, python3Bin, pkgVenvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// activate virtual environment\n\t\tif err := l.activateVirtualEnvironment(ctx, pkgVenvPath); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// install packages from requirements.txt\n\t\tvePy, err := l.getVePython(pkgVenvPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := l.installVeRequirements(ctx, srcPath, pkgVenvPath, vePy); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase version.Smaller:\n\t\t// no virtualenv for python 2.x\n\t\treturn installPythonDepsPip(ctx, l.commandExecutor, pipBin, srcPath)\n\t}\n\n\treturn nil\n}", "func main() {\n\tbasedir := flag.String(\"basedir\", \"/tmp\", \"basedir of tmp C binary\")\n\tinput := flag.String(\"input\", \"<input>\", \"test case input\")\n\texpected := flag.String(\"expected\", 
\"<expected>\", \"test case expected\")\n\ttimeout := flag.String(\"timeout\", \"2000\", \"timeout in milliseconds\")\n\tmemory := flag.String(\"memory\", \"256\", \"memory limitation in MB\")\n\tflag.Parse()\n\n\tresult, u := new(model.Result), uuid.NewV4()\n\tif err := sandbox.InitCGroup(strconv.Itoa(os.Getpid()), u.String(), *memory); err != nil {\n\t\tresult, _ := json.Marshal(result.GetRuntimeErrorTaskResult())\n\t\t_, _ = os.Stdout.Write(result)\n\t\tos.Exit(0)\n\t}\n\n\tcmd := reexec.Command(\"justiceInit\", *basedir, *input, *expected, *timeout, *memory)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\tresult, _ := json.Marshal(result.GetRuntimeErrorTaskResult())\n\t\t_, _ = os.Stderr.WriteString(fmt.Sprintf(\"%s\\n\", err.Error()))\n\t\t_, _ = os.Stdout.Write(result)\n\t}\n\n\tos.Exit(0)\n}", "func (m *InstallManager) Run() error {\n\tprovision := &hivev1.ClusterProvision{}\n\tif err := m.loadClusterProvision(provision); err != nil {\n\t\tm.log.WithError(err).Fatal(\"error looking up cluster provision\")\n\t}\n\tswitch provision.Spec.Stage {\n\tcase hivev1.ClusterProvisionStageInitializing, hivev1.ClusterProvisionStageProvisioning:\n\tdefault:\n\t\t// This should not be possible but just in-case we can somehow\n\t\t// run the install job for a cluster provision that is already complete, exit early,\n\t\t// and don't delete *anything*.\n\t\tm.log.Warnf(\"provision is at stage 
%q, exiting\", provision.Spec.Stage)\n\t\tos.Exit(0)\n\t}\n\tcd, err := m.loadClusterDeployment(provision)\n\tif err != nil {\n\t\tm.log.WithError(err).Fatal(\"error looking up cluster deployment\")\n\t}\n\tif cd.Spec.Installed {\n\t\t// This should not be possible but just in-case we can somehow\n\t\t// run the install job for a cluster already installed, exit early,\n\t\t// and don't delete *anything*.\n\t\tm.log.Warn(\"cluster is already installed, exiting\")\n\t\tos.Exit(0)\n\t}\n\n\tm.ClusterName = cd.Spec.ClusterName\n\n\tm.waitForInstallerBinaries()\n\n\t// Generate an install-config.yaml:\n\tsshKey := os.Getenv(\"SSH_PUB_KEY\")\n\tpullSecret := os.Getenv(\"PULL_SECRET\")\n\tm.log.Info(\"generating install config\")\n\tic, err := install.GenerateInstallConfig(cd, sshKey, pullSecret, true)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error generating install-config\")\n\t\treturn err\n\t}\n\td, err := yaml.Marshal(ic)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error marshalling install-config.yaml\")\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(m.WorkDir, \"install-config.yaml\"), d, 0644)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error writing install-config.yaml to disk\")\n\t\treturn err\n\t}\n\n\t// If the cluster provision has an infraID set, this implies we failed an install\n\t// and are re-trying. 
Cleanup any resources that may have been provisioned.\n\tm.log.Info(\"cleaning up from past install attempts\")\n\tif err := m.cleanupFailedInstall(cd, provision); err != nil {\n\t\tm.log.WithError(err).Error(\"error while trying to preemptively clean up\")\n\t\treturn err\n\t}\n\n\t// Generate installer assets we need to modify or upload.\n\tm.log.Info(\"generating assets\")\n\tif err := m.generateAssets(provision); err != nil {\n\t\tm.log.Info(\"reading installer log\")\n\t\tinstallLog, readErr := m.readInstallerLog(provision, m)\n\t\tif readErr != nil {\n\t\t\tm.log.WithError(readErr).Error(\"error reading asset generation log\")\n\t\t\treturn err\n\t\t}\n\t\tm.log.Info(\"updating clusterprovision\")\n\t\tif err := m.updateClusterProvision(\n\t\t\tprovision,\n\t\t\tm,\n\t\t\tfunc(provision *hivev1.ClusterProvision) {\n\t\t\t\tprovision.Spec.InstallLog = pointer.StringPtr(installLog)\n\t\t\t},\n\t\t); err != nil {\n\t\t\tm.log.WithError(err).Error(\"error updating cluster provision with asset generation log\")\n\t\t\treturn err\n\t\t}\n\t\treturn err\n\t}\n\n\t// We should now have cluster metadata.json we can parse for the infra ID,\n\t// the kubeconfig, and the admin password. If we fail to read any of these or\n\t// to extract the infra ID and upload it, this is a critical failure and we\n\t// should restart. 
No cloud resources have been provisioned at this point.\n\tm.log.Info(\"setting cluster metadata\")\n\tmetadataBytes, metadata, err := m.readClusterMetadata(provision, m)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error reading cluster metadata\")\n\t\treturn errors.Wrap(err, \"error reading cluster metadata\")\n\t}\n\tkubeconfigSecret, err := m.uploadAdminKubeconfig(provision, m)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error uploading admin kubeconfig\")\n\t\treturn errors.Wrap(err, \"error trying to save admin kubeconfig\")\n\t}\n\n\tpasswordSecret, err := m.uploadAdminPassword(provision, m)\n\tif err != nil {\n\t\tm.log.WithError(err).Error(\"error uploading admin password\")\n\t\treturn errors.Wrap(err, \"error trying to save admin password\")\n\t}\n\tif err := m.updateClusterProvision(\n\t\tprovision,\n\t\tm,\n\t\tfunc(provision *hivev1.ClusterProvision) {\n\t\t\tprovision.Spec.Metadata = &runtime.RawExtension{Raw: metadataBytes}\n\t\t\tprovision.Spec.InfraID = pointer.StringPtr(metadata.InfraID)\n\t\t\tprovision.Spec.ClusterID = pointer.StringPtr(metadata.ClusterID)\n\t\t\tprovision.Spec.AdminKubeconfigSecret = &corev1.LocalObjectReference{\n\t\t\t\tName: kubeconfigSecret.Name,\n\t\t\t}\n\t\t\tprovision.Spec.AdminPasswordSecret = &corev1.LocalObjectReference{\n\t\t\t\tName: passwordSecret.Name,\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tm.log.WithError(err).Error(\"error updating cluster provision with cluster metadata\")\n\t\treturn errors.Wrap(err, \"error updating cluster provision with cluster metadata\")\n\t}\n\n\tm.log.Info(\"waiting for ClusterProvision to transition to provisioning\")\n\tif err := m.waitForProvisioningStage(provision, m); err != nil {\n\t\tm.log.WithError(err).Error(\"ClusterProvision failed to transition to provisioning\")\n\t\treturn errors.Wrap(err, \"failed to transition to provisioning\")\n\t}\n\n\tm.log.Info(\"provisioning cluster\")\n\tinstallErr := m.provisionCluster()\n\tif installErr != nil 
{\n\t\tm.log.WithError(installErr).Error(\"error running openshift-install, running deprovision to clean up\")\n\n\t\t// Fetch logs from all cluster machines:\n\t\tif m.isGatherLogsEnabled() {\n\t\t\tm.gatherLogs(cd)\n\t\t}\n\n\t\t// TODO: should we timebox this deprovision attempt in the event it gets stuck?\n\t\tif err := m.cleanupFailedInstall(cd, provision); err != nil {\n\t\t\t// Log the error but continue. It is possible we were not able to clear the infraID\n\t\t\t// here, but we will attempt this again anyhow when the next job retries. The\n\t\t\t// goal here is just to minimize running resources in the event of a long wait\n\t\t\t// until the next retry.\n\t\t\tm.log.WithError(err).Error(\"error while trying to deprovision after failed install\")\n\t\t}\n\t}\n\n\tif installLog, err := m.readInstallerLog(provision, m); err == nil {\n\t\tif err := m.updateClusterProvision(\n\t\t\tprovision,\n\t\t\tm,\n\t\t\tfunc(provision *hivev1.ClusterProvision) {\n\t\t\t\tprovision.Spec.InstallLog = pointer.StringPtr(installLog)\n\t\t\t},\n\t\t); err != nil {\n\t\t\tm.log.WithError(err).Warning(\"error updating cluster provision with installer log\")\n\t\t}\n\t} else {\n\t\tm.log.WithError(err).Error(\"error reading installer log\")\n\t}\n\n\tif installErr != nil {\n\t\tm.log.WithError(installErr).Error(\"failed due to install error\")\n\t\treturn installErr\n\t}\n\n\tm.log.Info(\"install completed successfully\")\n\n\treturn nil\n}", "func (s *Scheduler) prepareAndExec(r gaia.PipelineRun) {\n\t// Mark the scheduled run as running\n\tr.Status = gaia.RunRunning\n\tr.StartDate = time.Now()\n\n\t// Update entry in store\n\terr := s.storeService.PipelinePutRun(&r)\n\tif err != nil {\n\t\tgaia.Cfg.Logger.Debug(\"could not put pipeline run into store during executing work\", \"error\", err.Error())\n\t\treturn\n\t}\n\n\t// Get related pipeline from pipeline run\n\tpipeline, _ := s.storeService.PipelineGet(r.PipelineID)\n\n\t// Check if this pipeline has jobs declared\n\tif 
len(r.Jobs) == 0 {\n\t\t// Finish pipeline run\n\t\ts.finishPipelineRun(&r, gaia.RunSuccess)\n\t\treturn\n\t}\n\n\t// Check if circular dependency exists\n\tfor _, job := range r.Jobs {\n\t\tif _, err := s.checkCircularDep(job, []*gaia.Job{}, []*gaia.Job{}); err != nil {\n\t\t\tgaia.Cfg.Logger.Info(\"circular dependency detected\", \"pipeline\", pipeline)\n\t\t\tgaia.Cfg.Logger.Info(\"information\", \"info\", err.Error())\n\n\t\t\t// Update store\n\t\t\ts.finishPipelineRun(&r, gaia.RunFailed)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Create logs folder for this run\n\tpath := filepath.Join(gaia.Cfg.WorkspacePath, strconv.Itoa(r.PipelineID), strconv.Itoa(r.ID), gaia.LogsFolderName)\n\terr = os.MkdirAll(path, 0700)\n\tif err != nil {\n\t\tgaia.Cfg.Logger.Error(\"cannot create pipeline run folder\", \"error\", err.Error(), \"path\", path)\n\t}\n\n\t// Create the start command for the pipeline\n\tc := createPipelineCmd(pipeline)\n\tif c == nil {\n\t\tgaia.Cfg.Logger.Debug(\"cannot create pipeline start command\", \"error\", errCreateCMDForPipeline.Error())\n\t\ts.finishPipelineRun(&r, gaia.RunFailed)\n\t\treturn\n\t}\n\n\t// Create new plugin instance\n\tpS := s.pluginSystem.NewPlugin(s.ca)\n\n\t// Init the plugin\n\tpath = filepath.Join(path, gaia.LogsFileName)\n\tif err := pS.Init(c, &path); err != nil {\n\t\tgaia.Cfg.Logger.Debug(\"cannot initialize the plugin\", \"error\", err.Error(), \"pipeline\", pipeline)\n\t\ts.finishPipelineRun(&r, gaia.RunFailed)\n\t\treturn\n\t}\n\n\t// Validate the plugin(pipeline)\n\tif err := pS.Validate(); err != nil {\n\t\tgaia.Cfg.Logger.Debug(\"cannot validate pipeline\", \"error\", err.Error(), \"pipeline\", pipeline)\n\t\ts.finishPipelineRun(&r, gaia.RunFailed)\n\t\treturn\n\t}\n\tdefer pS.Close()\n\n\t// Schedule jobs and execute them.\n\t// Also update the run in the store.\n\ts.executeScheduledJobs(r, pS)\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() 
{\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func main() {\n\tcmd.Execute()\n}", "func Execute() {\n\t// cfg contains tenant related information, e.g. `travel0-dev`,\n\t// `travel0-prod`. some of its information can be sourced via:\n\t// 1. env var (e.g. AUTH0_API_KEY)\n\t// 2. global flag (e.g. --api-key)\n\t// 3. JSON file (e.g. api_key = \"...\" in ~/.config/auth0/config.json)\n\tcli := &cli{\n\t\trenderer: display.NewRenderer(),\n\t\ttracker: analytics.NewTracker(),\n\t}\n\n\trootCmd := buildRootCmd(cli)\n\n\trootCmd.SetUsageTemplate(namespaceUsageTemplate())\n\taddPersistentFlags(rootCmd, cli)\n\taddSubcommands(rootCmd, cli)\n\n\t// TODO(cyx): backport this later on using latest auth0/v5.\n\t// rootCmd.AddCommand(actionsCmd(cli))\n\t// rootCmd.AddCommand(triggersCmd(cli))\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\terr := fmt.Errorf(\"panic: %v\", v)\n\n\t\t\t// If we're in development mode, we should throw the\n\t\t\t// panic for so we have less surprises. For\n\t\t\t// non-developers, we'll swallow the panics.\n\t\t\tif instrumentation.ReportException(err) {\n\t\t\t\tfmt.Println(panicMessage)\n\t\t\t} else {\n\t\t\t\tpanic(v)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// platform specific terminal initialization:\n\t// this should run for all commands,\n\t// for most of the architectures there's no requirements:\n\tansi.InitConsole()\n\n\tcancelCtx := contextWithCancel()\n\tif err := rootCmd.ExecuteContext(cancelCtx); err != nil {\n\t\tcli.renderer.Heading(\"error\")\n\t\tcli.renderer.Errorf(err.Error())\n\n\t\tinstrumentation.ReportException(err)\n\t\tos.Exit(1)\n\t}\n\n\ttimeoutCtx, cancel := context.WithTimeout(cancelCtx, 3*time.Second)\n\t// defers are executed in LIFO order\n\tdefer cancel()\n\tdefer cli.tracker.Wait(timeoutCtx) // No event should be tracked after this has run, or it will panic e.g. 
in earlier deferred functions\n}", "func main() {\n\tcmd.Root().Execute()\n}", "func main() {\n\tb := specs.MustNewTasksCfgBuilder()\n\n\t// Create Tasks and Jobs.\n\tfor _, name := range JOBS {\n\t\tprocess(b, name)\n\t}\n\n\tb.MustFinish()\n}", "func Setup(ctx context.Context) error {\n\tif err := setupReadme(); err != nil {\n\t\treturn err\n\t}\n\tif err := setupDocker(); err != nil {\n\t\treturn err\n\t}\n\tif err := setupGitlab(); err != nil {\n\t\treturn err\n\t}\n\tif err := setupInfra(); err != nil {\n\t\treturn err\n\t}\n\tif err := setupGit(); err != nil {\n\t\treturn err\n\t}\n\tif err := setupService(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestExecutor_Start_NonExecutableBinaries(t *testing.T) {\n\tci.Parallel(t)\n\n\tfor name, factory := range executorFactories {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\n\t\t\ttmpDir := t.TempDir()\n\n\t\t\tnonExecutablePath := filepath.Join(tmpDir, \"nonexecutablefile\")\n\t\t\tioutil.WriteFile(nonExecutablePath,\n\t\t\t\t[]byte(\"#!/bin/sh\\necho hello world\"),\n\t\t\t\t0600)\n\n\t\t\ttestExecCmd := testExecutorCommand(t)\n\t\t\texecCmd, allocDir := testExecCmd.command, testExecCmd.allocDir\n\t\t\texecCmd.Cmd = nonExecutablePath\n\t\t\tfactory.configureExecCmd(t, execCmd)\n\n\t\t\texecutor := factory.new(testlog.HCLogger(t))\n\t\t\tdefer executor.Shutdown(\"\", 0)\n\n\t\t\t// need to configure path in chroot with that file if using isolation executor\n\t\t\tif _, ok := executor.(*UniversalExecutor); !ok {\n\t\t\t\ttaskName := filepath.Base(testExecCmd.command.TaskDir)\n\t\t\t\terr := allocDir.NewTaskDir(taskName).Build(true, map[string]string{\n\t\t\t\t\ttmpDir: tmpDir,\n\t\t\t\t})\n\t\t\t\trequire.NoError(err)\n\t\t\t}\n\n\t\t\tdefer allocDir.Destroy()\n\t\t\tps, err := executor.Launch(execCmd)\n\t\t\trequire.NoError(err)\n\t\t\trequire.NotZero(ps.Pid)\n\n\t\t\tps, err = 
executor.Wait(context.Background())\n\t\t\trequire.NoError(err)\n\t\t\trequire.NoError(executor.Shutdown(\"SIGINT\", 100*time.Millisecond))\n\n\t\t\texpected := \"hello world\"\n\t\t\ttu.WaitForResult(func() (bool, error) {\n\t\t\t\tact := strings.TrimSpace(string(testExecCmd.stdout.String()))\n\t\t\t\tif expected != act {\n\t\t\t\t\treturn false, fmt.Errorf(\"expected: '%s' actual: '%s'\", expected, act)\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t}, func(err error) {\n\t\t\t\tstderr := strings.TrimSpace(string(testExecCmd.stderr.String()))\n\t\t\t\tt.Logf(\"stderr: %v\", stderr)\n\t\t\t\trequire.NoError(err)\n\t\t\t})\n\t\t})\n\t}\n}", "func Execute(name string, args []string) {\n\topttar := false\n\topttgz := false\n\toptvhdx := false\n\toptvhdxgz := false\n\toptreg := false\n\tswitch len(args) {\n\tcase 0:\n\t\t_, _, flags, _ := wsllib.WslGetDistributionConfiguration(name)\n\t\tif flags&wsllib.FlagEnableWsl2 == wsllib.FlagEnableWsl2 {\n\t\t\toptvhdxgz = true\n\t\t\toptreg = true\n\t\t} else {\n\t\t\topttgz = true\n\t\t\toptreg = true\n\t\t}\n\n\tcase 1:\n\t\tswitch args[0] {\n\t\tcase \"--tar\":\n\t\t\topttar = true\n\t\tcase \"--tgz\":\n\t\t\topttgz = true\n\t\tcase \"--vhdx\":\n\t\t\toptvhdx = true\n\t\tcase \"--vhdxgz\":\n\t\t\toptvhdxgz = true\n\t\tcase \"--reg\":\n\t\t\toptreg = true\n\t\t}\n\n\tdefault:\n\t\tutils.ErrorExit(os.ErrInvalid, true, true, false)\n\t}\n\n\tif optreg {\n\t\terr := backupReg(name, \"backup.reg\")\n\t\tif err != nil {\n\t\t\tutils.ErrorExit(err, true, true, false)\n\t\t}\n\t}\n\tif opttar {\n\t\terr := backupTar(name, \"backup.tar\")\n\t\tif err != nil {\n\t\t\tutils.ErrorExit(err, true, true, false)\n\t\t}\n\n\t}\n\tif opttgz {\n\t\terr := backupTar(name, \"backup.tar.gz\")\n\t\tif err != nil {\n\t\t\tutils.ErrorExit(err, true, true, false)\n\t\t}\n\t}\n\tif optvhdx {\n\t\terr := backupExt4Vhdx(name, \"backup.ext4.vhdx\")\n\t\tif err != nil {\n\t\t\tutils.ErrorExit(err, true, true, false)\n\t\t}\n\t}\n\tif optvhdxgz {\n\t\terr := 
backupExt4Vhdx(name, \"backup.ext4.vhdx.gz\")\n\t\tif err != nil {\n\t\t\tutils.ErrorExit(err, true, true, false)\n\t\t}\n\t}\n}", "func Execute() {\r\n\r\n\t// Create a database connection (Don't require DB for now)\r\n\tif err := database.Connect(applicationName); err != nil {\r\n\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error connecting to database: %s\", err.Error()))\r\n\t} else {\r\n\t\t// Set this flag for caching detection\r\n\t\tdatabaseEnabled = true\r\n\r\n\t\t// Defer the database disconnection\r\n\t\tdefer func() {\r\n\t\t\tdbErr := database.GarbageCollection()\r\n\t\t\tif dbErr != nil {\r\n\t\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database GarbageCollection: %s\", dbErr.Error()))\r\n\t\t\t}\r\n\r\n\t\t\tif dbErr = database.Disconnect(); dbErr != nil {\r\n\t\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database Disconnect: %s\", dbErr.Error()))\r\n\t\t\t}\r\n\t\t}()\r\n\t}\r\n\r\n\t// Run root command\r\n\ter(rootCmd.Execute())\r\n\r\n\t// Generate documentation from all commands\r\n\tif generateDocs {\r\n\t\tgenerateDocumentation()\r\n\t}\r\n\r\n\t// Flush cache?\r\n\tif flushCache && databaseEnabled {\r\n\t\tif dbErr := database.Flush(); dbErr != nil {\r\n\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database Flush: %s\", dbErr.Error()))\r\n\t\t} else {\r\n\t\t\tchalker.Log(chalker.SUCCESS, \"Successfully flushed the local database cache\")\r\n\t\t}\r\n\t}\r\n}", "func Execute() {\n\t// redirect stderr to stdout (to capture panics)\n\tsyscall.Dup2(int(os.Stdout.Fd()), int(os.Stderr.Fd()))\n\n\t// we're speaking to the local server only ever\n\tserverAddr := fmt.Sprintf(\"localhost:%d\", *port)\n\tcreds := client.GetClientCreds()\n\tfmt.Printf(\"Connecting to local autodeploy server:%s...\\n\", serverAddr)\n\tconn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))\n\tif err != nil {\n\t\tfmt.Println(\"fail to dial: %v\", err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\tfmt.Println(\"Creating 
client...\")\n\tcl := pb.NewAutoDeployerClient(conn)\n\tctx := client.SetAuthToken()\n\n\t// the the server we're starting to deploy and get the parameters for deployment\n\tsr := pb.StartupRequest{Msgid: *msgid}\n\tsrp, err := cl.InternalStartup(ctx, &sr)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to startup: %s\\n\", err)\n\t\tos.Exit(10)\n\t}\n\tif srp.URL == \"\" {\n\t\tfmt.Printf(\"no download url in startup response\\n\")\n\t\tos.Exit(10)\n\t}\n\n\t// change to my working directory\n\terr = os.Chdir(srp.WorkingDir)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to Chdir() to %s: %s\\n\", srp.WorkingDir, err)\n\t}\n\tfmt.Printf(\"Chdir() to %s\\n\", srp.WorkingDir)\n\t// download the binary and/or archive\n\tbinary := \"executable\"\n\tif srp.Binary != \"\" {\n\t\tbinary = srp.Binary\n\t}\n\tfmt.Printf(\"Downloading binary from %s\\n\", srp.URL)\n\terr = DownloadBinary(srp.URL, binary, srp.DownloadUser, srp.DownloadPassword)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to download from %s: %s\\n\", srp.URL, err)\n\t\tos.Exit(10)\n\t}\n\n\t// execute the binary\n\tports := countPortCommands(srp.Args)\n\n\tfmt.Printf(\"Getting resources\\n\")\n\tresources, err := cl.AllocResources(ctx, &pb.ResourceRequest{Msgid: *msgid, Ports: int32(ports)})\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to alloc resources: %s\\n\", err)\n\t\tos.Exit(10)\n\t}\n\tfmt.Printf(\"Start commandline: %s %v (%d ports)\\n\", binary, srp.Args, ports)\n\trArgs := replacePorts(srp.Args, resources.Ports)\n\tfmt.Printf(\"Starting binary \\\"%s\\\" with %d args:\\n\", binary, len(srp.Args))\n\n\tfor _, s := range rArgs {\n\t\tfmt.Printf(\"Arg: \\\"%s\\\"\\n\", s)\n\t}\n\tpath := \"./\"\n\tfullb := fmt.Sprintf(\"%s/%s\", path, binary)\n\terr = os.Chmod(fullb, 0500)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to chmod %s: %s\\n\", fullb, err)\n\t\tos.Exit(10)\n\t}\n\n\tfmt.Printf(\"Starting user application..\\n\")\n\tcmd := exec.Command(fullb, rArgs...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = 
os.Stderr\n\terr = cmd.Start()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to start(): %s\\n\", err)\n\t\tos.Exit(10)\n\t}\n\t_, err = cl.Started(ctx, &pb.StartedRequest{Msgid: *msgid})\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to inform daemon about pending startup. aborting. (%s)\\n\", err)\n\t\tos.Exit(10)\n\t}\n\terr = cmd.Wait()\n\tif err == nil {\n\t\tfmt.Printf(\"Command completed with no error\\n\")\n\t} else {\n\t\tfmt.Printf(\"Command completed: %s\\n\", err)\n\t}\n\tfailed := err != nil\n\tcl.Terminated(ctx, &pb.TerminationRequest{Msgid: *msgid, Failed: failed})\n\tos.Exit(0)\n}", "func (e *dockerEngine) Prepare(cfg baetyl.ComposeAppConfig) {\n\tvar wg sync.WaitGroup\n\tss := cfg.Services\n\tfor _, s := range ss {\n\t\twg.Add(1)\n\t\tgo func(i string, w *sync.WaitGroup) {\n\t\t\tdefer w.Done()\n\t\t\te.pullImage(i)\n\t\t}(s.Image, &wg)\n\t}\n\twg.Add(1)\n\tgo func(nw map[string]baetyl.ComposeNetwork, w *sync.WaitGroup) {\n\t\tdefer w.Done()\n\t\te.initNetworks(nw)\n\t}(cfg.Networks, &wg)\n\n\twg.Add(1)\n\tgo func(vs map[string]baetyl.ComposeVolume, w *sync.WaitGroup) {\n\t\tdefer w.Done()\n\t\te.initVolumes(vs)\n\t}(cfg.Volumes, &wg)\n\twg.Wait()\n}", "func (b *Cowbuilder) cowbuilderCommand(d deb.Codename, a deb.Architecture, deps []*AptRepositoryAccess, command string, args ...string) (*exec.Cmd, error) {\n\n\tisUbuntu, err := b.isSupportedUbuntu(d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timagePath := b.imagePath(d, a)\n\tbaseCowPath := path.Join(imagePath, \"base.cow\")\n\tbuildPath := path.Join(imagePath, \"build\")\n\taptCache := path.Join(b.basepath, \"images/aptcache\")\n\tccache := path.Join(b.basepath, \"images/ccache\")\n\n\ttoClean := []string{b.confpath, b.hookspath}\n\ttoCreate := []string{buildPath, aptCache, ccache, b.hookspath}\n\n\tfor _, f := range toClean {\n\t\terr = os.RemoveAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, d := range toCreate {\n\t\terr = os.MkdirAll(d, 0755)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tbindmounts, err := b.setHooksForRepoDeps(d, deps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpreDebootstrapOpts := fmt.Sprintf(\"\\\"--arch\\\" \\\"%s\\\"\", a)\n\tvar mirror, components, mirrorsite, postDebootstrapOpts string\n\tif isUbuntu == true {\n\t\tmirror = \"http://ftp.ubuntu.com/ubuntu\"\n\t\tmirrorsite = \"http://ftp.ubuntu.com/ubuntu\"\n\t\tcomponents = \"main restricted universe multiverse\"\n\t\tpostDebootstrapOpts = \"\\\"--keyring=/usr/share/keyrings/ubuntu-archive-keyring.gpg\\\"\"\n\t} else {\n\t\tmirror = \"http://ftp.us.debian.org/debian\"\n\t\tmirrorsite = \"http://ftp.us.debian.org/debian\"\n\t\tcomponents = \"main contrib non-free\"\n\t\tpostDebootstrapOpts = \"\\\"--keyring=/usr/share/keyrings/debian-archive-keyring.gpg\\\"\"\n\t}\n\n\tcmd := exec.Command(\"cowbuilder\", command)\n\tcmd.Args = append(cmd.Args, args...)\n\n\tcmd.Env = append(b.maskedEnviron(), fmt.Sprintf(\"HOME=%s\", b.basepath))\n\n\tf, err := os.Create(b.confpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"BASEPATH\", baseCowPath)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"BUILDPLACE\", buildPath)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"HOOKDIR\", b.hookspath)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"DISTRIBUTION\", d)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"ARCHITECTURE\", a)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"APTCACHE\", aptCache)\n\tfmt.Fprintf(f, \"%s=(%s \\\"${DEBOOTSTRAPOPTS[@]}\\\" %s)\\n\", \"DEBOOTSTRAPOPTS\", preDebootstrapOpts, postDebootstrapOpts)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"MIRROR\", mirror)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"MIRRORSITE\", mirrorsite)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"COMPONENTS\", components)\n\tfmt.Fprintf(f, \"%s=\\\"%s\\\"\\n\", \"BINDMOUNTS\", strings.Join(bindmounts, \" \"))\n\n\treturn cmd, nil\n}", "func (instance *Host) runInstallPhase(ctx context.Context, phase userdata.Phase, userdataContent 
*userdata.Content, timeout time.Duration) (ferr fail.Error) {\n\tdefer temporal.NewStopwatch().OnExitLogInfo(ctx, fmt.Sprintf(\"Starting install phase %s on '%s'...\", phase, instance.GetName()), fmt.Sprintf(\"Ending phase %s on '%s' with err '%s' ...\", phase, instance.GetName(), ferr))()\n\n\tcontent, xerr := userdataContent.Generate(phase)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tfile := fmt.Sprintf(\"%s/user_data.%s.sh\", utils.TempFolder, phase)\n\txerr = instance.unsafePushStringToFileWithOwnership(ctx, string(content), file, userdataContent.Username, \"755\")\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\trounds := 10\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn fail.ConvertError(ctx.Err())\n\t\tdefault:\n\t\t}\n\n\t\trc, _, _, xerr := instance.unsafeRun(ctx, \"sudo sync\", outputs.COLLECT, 0, 10*time.Second)\n\t\tif xerr != nil {\n\t\t\trounds--\n\t\t\tcontinue\n\t\t}\n\n\t\tif rc == 126 {\n\t\t\tlogrus.WithContext(ctx).Debugf(\"Text busy happened\")\n\t\t}\n\n\t\tif rc == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif rc != 126 || rounds == 0 {\n\t\t\tif rc == 126 {\n\t\t\t\treturn fail.NewError(\"Text busy killed the script\")\n\t\t\t}\n\t\t}\n\n\t\trounds--\n\t}\n\n\tcommand := getCommand(ctx, file)\n\n\t// Executes the script on the remote Host\n\tretcode, stdout, stderr, xerr := instance.unsafeRun(ctx, command, outputs.COLLECT, 0, timeout)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn fail.Wrap(xerr, \"failed to apply configuration phase '%s'\", phase)\n\t}\n\tif retcode != 0 {\n\t\t// build new error\n\t\tproblem := fail.NewError(\"failed to execute install phase '%s' on Host '%s'\", phase, instance.GetName())\n\t\tproblem.Annotate(\"retcode\", retcode)\n\t\tproblem.Annotate(\"stdout\", stdout)\n\t\tproblem.Annotate(\"stderr\", stderr)\n\n\t\tif abstract.IsProvisioningError(problem) {\n\t\t\t// Rewrite stdout, probably has too much 
information\n\t\t\tif stdout != \"\" {\n\t\t\t\tlastMsg := \"\"\n\t\t\t\tlines := strings.Split(stdout, \"\\n\")\n\t\t\t\tfor _, line := range lines {\n\t\t\t\t\tif strings.Contains(line, \"+ echo '\") {\n\t\t\t\t\t\tlastMsg = line\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(lastMsg) > 0 {\n\t\t\t\t\tproblem = fail.NewError(\n\t\t\t\t\t\t\"failed to execute install phase '%s' on Host '%s': %s\", phase, instance.GetName(),\n\t\t\t\t\t\tlastMsg[8:len(lastMsg)-1],\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif stderr != \"\" {\n\t\t\t\tlastMsg := \"\"\n\t\t\t\tlines := strings.Split(stderr, \"\\n\")\n\t\t\t\tfor _, line := range lines {\n\t\t\t\t\tif strings.Contains(line, \"+ echo '\") {\n\t\t\t\t\t\tlastMsg = line\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(lastMsg) > 0 {\n\t\t\t\t\tproblem = fail.NewError(\n\t\t\t\t\t\t\"failed to execute install phase '%s' on Host '%s': %s\", phase, instance.GetName(),\n\t\t\t\t\t\tlastMsg[8:len(lastMsg)-1],\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn problem\n\t}\n\treturn nil\n}", "func (this *Task) Execute(zkc zk.ZK, dockerc *docker.Docker) error {\n\n\tfor i, action := range this.Actions {\n\t\topts := action.ContainerControl\n\n\t\t// inject metadata via labels\n\t\tif len(action.ContainerControl.Labels) == 0 {\n\t\t\taction.ContainerControl.Labels = map[string]string{}\n\t\t}\n\n\t\taction.ContainerControl.Labels[EnvDomain] = this.domain\n\t\taction.ContainerControl.Labels[EnvService] = string(this.service)\n\n\t\t// Get the image of the container\n\t\tvar pull *docker.Image\n\t\tif this.assignImage != nil {\n\t\t\timg, err := this.assignImage(i, &opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpull = img\n\t\t\topts.Image = img.Repository + \":\" + img.Tag\n\t\t} else if opts.Image != \"\" {\n\t\t\tk := strings.LastIndex(opts.Image, \":\")\n\t\t\trepository, tag := opts.Image[0:k], opts.Image[k+1:]\n\t\t\tpull = &docker.Image{\n\t\t\t\tRepository: repository,\n\t\t\t\tTag: tag,\n\t\t\t}\n\t\t}\n\n\t\tif 
pull == nil {\n\t\t\treturn ErrNoImage\n\t\t}\n\n\t\t// Pull Image -- blocking call\n\t\tlogin := this.AuthIdentity\n\t\tif this.DockerAuthInfoPath != \"\" {\n\t\t\tif l, err := fetchAuthIdentity(zkc, this.DockerAuthInfoPath); err == nil {\n\t\t\t\tlogin = l\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif login == nil {\n\t\t\treturn ErrNoImageRegistryAuth\n\t\t}\n\n\t\t// Get the name of the container\n\t\tif this.assignName != nil && action.ContainerNameTemplate != nil {\n\t\t\tif cn := this.assignName(i, *action.ContainerNameTemplate, &opts); cn != \"\" {\n\t\t\t\topts.ContainerName = cn\n\t\t\t}\n\t\t}\n\n\t\tglog.Infoln(\"START (\", this.service, \") ===========================================================\")\n\t\tglog.Infoln(\" Login:\", login)\n\t\tglog.Infoln(\" PullImage:\", *pull)\n\t\tglog.Infoln(\" StartContainer: Image=\", opts.Image, \"ContainerName=\", opts.ContainerName)\n\t\tglog.Infoln(\" StartContainer: ContainerControl=\", *opts.Config, \"HostConfig=\", *opts.HostConfig)\n\n\t\tstopped, err := dockerc.PullImage(login, pull)\n\t\tif err == nil {\n\t\t\t// Block until completion\n\t\t\tglog.Infoln(\"Starting download of\", *pull, \"with auth\", login)\n\t\t\tdownload_err := <-stopped\n\t\t\tglog.Infoln(\"Download of image\", pull.Repository+\":\"+pull.Tag, \"completed with err=\", download_err)\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\n\t\tglog.Infoln(\"Docker starting container: Name=\", opts.ContainerName, \"Opts=\", opts)\n\n\t\tcontainer, err := dockerc.StartContainer(login, &opts)\n\t\tif err != nil {\n\t\t\t// This case is different than the container fails right after start. 
This is\n\t\t\t// the case where dockerd cannot fork new processes (due to resource limits)\n\t\t\t// or because of container name conflicts.\n\t\t\tExceptionEvent(err, opts, \"Error starting container: Image=\", opts.Image)\n\t\t\treturn err\n\t\t}\n\t\tglog.Infoln(\"Started container\", container.Id[0:12], \"from\", container.Image, \":\", *container)\n\n\t}\n\treturn nil\n}", "func (p Plugin) Exec() error {\n\t// Install specified version of terraform\n\tif p.Terraform.Version != \"\" {\n\t\terr := installTerraform(p.Terraform.Version)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif p.Config.RoleARN != \"\" {\n\t\tassumeRole(p.Config.RoleARN)\n\t}\n\n\t// writing the .netrc file with Github credentials in it.\n\terr := writeNetrc(p.Netrc.Machine, p.Netrc.Login, p.Netrc.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar terraformDataDir string = \".terraform\"\n\tif p.Config.TerraformDataDir != \"\" {\n\t\tterraformDataDir = p.Config.TerraformDataDir\n\t\tos.Setenv(\"TF_DATA_DIR\", p.Config.TerraformDataDir)\n\t}\n\n\tvar commands []*exec.Cmd\n\n\tcommands = append(commands, exec.Command(\"terraform\", \"version\"))\n\n\tif p.Config.Cacert != \"\" {\n\t\tcommands = append(commands, installCaCert(p.Config.Cacert))\n\t}\n\n\tcommands = append(commands, deleteCache(terraformDataDir))\n\tcommands = append(commands, initCommand(p.Config.InitOptions))\n\tcommands = append(commands, getModules())\n\n\tfor _, c := range commands {\n\t\tif c.Dir == \"\" {\n\t\t\twd, err := os.Getwd()\n\t\t\tif err == nil {\n\t\t\t\tc.Dir = wd\n\t\t\t}\n\t\t}\n\t\tif p.Config.RootDir != \"\" {\n\t\t\tc.Dir = c.Dir + \"/\" + p.Config.RootDir\n\t\t}\n\t\tc.Stdout = os.Stdout\n\t\tc.Stderr = os.Stderr\n\t\tif !p.Config.Sensitive {\n\t\t\ttrace(c)\n\t\t}\n\n\t\terr := c.Run()\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t}).Fatal(\"Failed to execute a command\")\n\t\t}\n\t\tlogrus.Debug(\"Command completed 
successfully\")\n\t}\n\n\terr = tfOutput(p.Config)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"error\": err,\n\t\t}).Fatal(\"Failed to execute a command\")\n\t}\n\treturn nil\n}", "func runPolicyPackIntegrationTest(\n\tt *testing.T, testDirName string, runtime Runtime,\n\tinitialConfig map[string]string, scenarios []policyTestScenario) {\n\tt.Logf(\"Running Policy Pack Integration Test from directory %q\", testDirName)\n\n\t// Get the directory for the policy pack to run.\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"Error getting working directory\")\n\t}\n\trootDir := filepath.Join(cwd, testDirName)\n\n\t// The Pulumi project name matches the test dir name in these tests.\n\tos.Setenv(\"PULUMI_TEST_PROJECT\", testDirName)\n\n\tstackName := fmt.Sprintf(\"%s-%d\", testDirName, time.Now().Unix()%100000)\n\tos.Setenv(\"PULUMI_TEST_STACK\", stackName)\n\n\t// Copy the root directory to /tmp and run various operations within that directory.\n\te := ptesting.NewEnvironment(t)\n\tdefer func() {\n\t\tif !t.Failed() {\n\t\t\te.DeleteEnvironment()\n\t\t}\n\t}()\n\te.ImportDirectory(rootDir)\n\n\t// If there is a testcomponent directory, update dependencies and set the PATH envvar.\n\ttestComponentDir := filepath.Join(e.RootPath, \"testcomponent\")\n\tif _, err := os.Stat(testComponentDir); !os.IsNotExist(err) {\n\t\t// Install dependencies.\n\t\te.CWD = testComponentDir\n\t\te.RunCommand(\"go\", \"mod\", \"tidy\")\n\t\tabortIfFailed(t)\n\n\t\t// Set the PATH envvar to the path to the testcomponent so the provider is available\n\t\t// to the program.\n\t\te.Env = []string{pathEnvWith(testComponentDir)}\n\t}\n\n\t// Change to the Policy Pack directory.\n\tpackDir := filepath.Join(e.RootPath, \"policy-pack\")\n\te.CWD = packDir\n\n\t// Link @pulumi/policy.\n\te.RunCommand(\"yarn\", \"link\", \"@pulumi/policy\")\n\tabortIfFailed(t)\n\n\t// Get dependencies.\n\te.RunCommand(\"yarn\", \"install\")\n\tabortIfFailed(t)\n\n\t// Change to the 
Pulumi program directory.\n\tprogramDir := filepath.Join(e.RootPath, \"program\")\n\te.CWD = programDir\n\n\t// Create the stack.\n\te.RunCommand(\"pulumi\", \"login\", \"--local\")\n\tabortIfFailed(t)\n\n\te.RunCommand(\"pulumi\", \"stack\", \"init\", stackName)\n\tabortIfFailed(t)\n\n\t// Get dependencies.\n\tvar venvCreated bool\n\tswitch runtime {\n\tcase NodeJS:\n\t\te.RunCommand(\"yarn\", \"install\")\n\t\tabortIfFailed(t)\n\n\tcase Python:\n\t\te.RunCommand(\"pipenv\", \"--python\", \"3\")\n\t\tabortIfFailed(t)\n\t\te.RunCommand(\"pipenv\", \"run\", \"pip\", \"install\", \"-r\", \"requirements.txt\")\n\t\tabortIfFailed(t)\n\t\tvenvCreated = true\n\tdefault:\n\t\tt.Fatalf(\"Unexpected runtime value.\")\n\t}\n\n\t// If we have a Python policy pack, create the virtual environment (if one doesn't already exist),\n\t// and install dependencies into it. If the test uses a Python program, the virtual environment and\n\t// activation will be shared between the program and policy pack.\n\tvar hasPythonPack bool\n\tpythonPackDir := filepath.Join(e.RootPath, \"policy-pack-python\")\n\tif _, err := os.Stat(pythonPackDir); !os.IsNotExist(err) {\n\t\thasPythonPack = true\n\n\t\tif !venvCreated {\n\t\t\te.RunCommand(\"pipenv\", \"--python\", \"3\")\n\t\t\tabortIfFailed(t)\n\t\t}\n\n\t\tpythonPackRequirements := filepath.Join(pythonPackDir, \"requirements.txt\")\n\t\tif _, err := os.Stat(pythonPackRequirements); !os.IsNotExist(err) {\n\t\t\te.RunCommand(\"pipenv\", \"run\", \"pip\", \"install\", \"-r\", pythonPackRequirements)\n\t\t\tabortIfFailed(t)\n\t\t}\n\n\t\tdep := filepath.Join(\"..\", \"..\", \"sdk\", \"python\", \"env\", \"src\")\n\t\tdep, err = filepath.Abs(dep)\n\t\tassert.NoError(t, err)\n\t\te.RunCommand(\"pipenv\", \"run\", \"pip\", \"install\", \"-e\", dep)\n\t\tabortIfFailed(t)\n\t}\n\n\t// Initial configuration.\n\tfor k, v := range initialConfig {\n\t\te.RunCommand(\"pulumi\", \"config\", \"set\", k, v)\n\t}\n\n\t// After this point, we want be sure to 
cleanup the stack, so we don't accidentally leak\n\t// any cloud resources.\n\tdefer func() {\n\t\tt.Log(\"Cleaning up Stack\")\n\t\te.RunCommand(\"pulumi\", \"destroy\", \"--yes\")\n\t\te.RunCommand(\"pulumi\", \"stack\", \"rm\", \"--yes\")\n\t}()\n\n\tassert.True(t, len(scenarios) > 0, \"no test scenarios provided\")\n\trunScenarios := func(policyPackDirectoryPath string) {\n\t\tt.Run(policyPackDirectoryPath, func(t *testing.T) {\n\t\t\te.T = t\n\n\t\t\t// Clean up the stack after running through the scenarios, so that subsequent runs\n\t\t\t// begin on a clean slate.\n\t\t\tdefer func() {\n\t\t\t\te.RunCommand(\"pulumi\", \"destroy\", \"--yes\")\n\t\t\t\tabortIfFailed(t)\n\t\t\t}()\n\n\t\t\tfor idx, scenario := range scenarios {\n\t\t\t\t// Create a sub-test so go test will output data incrementally, which will let\n\t\t\t\t// a CI system like Travis know not to kill the job if no output is sent after 10m.\n\t\t\t\t// idx+1 to make it 1-indexed.\n\t\t\t\tscenarioName := fmt.Sprintf(\"scenario_%d\", idx+1)\n\t\t\t\tt.Run(scenarioName, func(t *testing.T) {\n\t\t\t\t\te.T = t\n\n\t\t\t\t\te.RunCommand(\"pulumi\", \"config\", \"set\", \"scenario\", fmt.Sprintf(\"%d\", idx+1))\n\n\t\t\t\t\tcmd := \"pulumi\"\n\t\t\t\t\targs := []string{\"up\", \"--yes\", \"--policy-pack\", policyPackDirectoryPath}\n\n\t\t\t\t\t// If there is config for the scenario, write it out to a file and pass the file path\n\t\t\t\t\t// as a --policy-pack-config argument.\n\t\t\t\t\tif len(scenario.PolicyPackConfig) > 0 {\n\t\t\t\t\t\t// Marshal the config to JSON, with indentation for easier debugging.\n\t\t\t\t\t\tbytes, err := json.MarshalIndent(scenario.PolicyPackConfig, \"\", \" \")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tt.Fatalf(\"error marshalling policy config to JSON: %v\", err)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Change to the config directory.\n\t\t\t\t\t\tconfigDir := filepath.Join(e.RootPath, \"config\", scenarioName)\n\t\t\t\t\t\te.CWD = configDir\n\n\t\t\t\t\t\t// Write the JSON 
to a file.\n\t\t\t\t\t\tfilename := \"policy-config.json\"\n\t\t\t\t\t\te.WriteTestFile(filename, string(bytes))\n\t\t\t\t\t\tabortIfFailed(t)\n\n\t\t\t\t\t\t// Add the policy config argument.\n\t\t\t\t\t\tpolicyConfigFile := filepath.Join(configDir, filename)\n\t\t\t\t\t\targs = append(args, \"--policy-pack-config\", policyConfigFile)\n\n\t\t\t\t\t\t// Change back to the program directory to proceed with the update.\n\t\t\t\t\t\te.CWD = programDir\n\t\t\t\t\t}\n\n\t\t\t\t\tif runtime == Python || hasPythonPack {\n\t\t\t\t\t\tcmd = \"pipenv\"\n\t\t\t\t\t\targs = append([]string{\"run\", \"pulumi\"}, args...)\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(scenario.WantErrors) == 0 {\n\t\t\t\t\t\tt.Log(\"No errors are expected.\")\n\t\t\t\t\t\te.RunCommand(cmd, args...)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tvar stdout, stderr string\n\t\t\t\t\t\tif scenario.Advisory {\n\t\t\t\t\t\t\tstdout, stderr = e.RunCommand(cmd, args...)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tstdout, stderr = e.RunCommandExpectError(cmd, args...)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfor _, wantErr := range scenario.WantErrors {\n\t\t\t\t\t\t\tinSTDOUT := strings.Contains(stdout, wantErr)\n\t\t\t\t\t\t\tinSTDERR := strings.Contains(stderr, wantErr)\n\n\t\t\t\t\t\t\tif !inSTDOUT && !inSTDERR {\n\t\t\t\t\t\t\t\tt.Errorf(\"Did not find expected error %q\", wantErr)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif t.Failed() {\n\t\t\t\t\t\t\tt.Logf(\"Command output:\\nSTDOUT:\\n%v\\n\\nSTDERR:\\n%v\\n\\n\", stdout, stderr)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n\trunScenarios(packDir)\n\tif hasPythonPack {\n\t\trunScenarios(pythonPackDir)\n\t}\n\n\te.T = t\n\tt.Log(\"Finished test scenarios.\")\n\t// Cleanup already registered via defer.\n}", "func (c *cookRun) run(ctx context.Context, args []string, env environ.Env) (*buildbucketpb.Build, int) {\n\tfail := func(err error) (*buildbucketpb.Build, int) {\n\t\treturn &buildbucketpb.Build{\n\t\t\tStatus: 
buildbucketpb.Status_INFRA_FAILURE,\n\t\t\tSummaryMarkdown: err.Error(),\n\t\t}, 1\n\t}\n\n\t// Process input.\n\tif len(args) != 0 {\n\t\treturn fail(errors.Reason(\"unexpected arguments: %v\", args).Err())\n\t}\n\tif _, err := os.Getwd(); err != nil {\n\t\treturn fail(errors.Reason(\"failed to resolve CWD: %s\", err).Err())\n\t}\n\tif err := c.normalizeFlags(); err != nil {\n\t\treturn fail(err)\n\t}\n\n\t// initialize temp dir.\n\tif c.TempDir == \"\" {\n\t\ttdir, err := ioutil.TempDir(\"\", \"kitchen\")\n\t\tif err != nil {\n\t\t\treturn fail(errors.Annotate(err, \"failed to create temporary directory\").Err())\n\t\t}\n\t\tc.TempDir = tdir\n\t\tdefer func() {\n\t\t\tif rmErr := os.RemoveAll(tdir); rmErr != nil {\n\t\t\t\tlog.Warningf(ctx, \"Failed to clean up temporary directory at [%s]: %s\", tdir, rmErr)\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Prepare recipe properties. Print them too.\n\tvar err error\n\tif c.engine.properties, c.kitchenProps, err = c.prepareProperties(env); err != nil {\n\t\treturn fail(err)\n\t}\n\tif err = c.reportProperties(ctx, \"recipe engine\", c.engine.properties); err != nil {\n\t\treturn fail(err)\n\t}\n\tif err = c.reportProperties(ctx, \"kitchen\", c.kitchenProps); err != nil {\n\t\treturn fail(err)\n\t}\n\n\tif err = c.updateEnv(env); err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to update the environment\").Err())\n\t}\n\n\t// Make kitchen use the new $PATH too. This is needed for exec.LookPath called\n\t// by kitchen to pick up binaries in the modified $PATH. 
In practice, we do it\n\t// so that kitchen uses the installed git wrapper.\n\t//\n\t// All other env modifications must be performed using 'env' object.\n\tpath, _ := env.Get(\"PATH\")\n\tif err = os.Setenv(\"PATH\", path); err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to update process PATH\").Err())\n\t}\n\n\t// Read BuildSecrets message from swarming secret bytes.\n\tif c.buildSecrets, err = readBuildSecrets(ctx); err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to read build secrets\").Err())\n\t}\n\n\t// Create systemAuth and recipeAuth authentication contexts, since we are\n\t// about to start making authenticated requests now.\n\tif err := c.setupAuth(ctx); err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to setup auth\").Err())\n\t}\n\tdefer c.recipeAuth.Close(ctx)\n\tdefer c.systemAuth.Close(ctx)\n\n\t// Must happen after c.systemAuth is initialized.\n\t// We create a build updater even if c.CallUpdateBuild is false because we use it to\n\t// construct the req.Build, which is needed by flushResult. 
Extracting the logic to construct\n\t// req.Build from the BuildUpdater would require large changes, and we plan to replace for\n\t// this code entirely with LUCI runner.\n\tc.bu, err = c.newBuildUpdater()\n\tif err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to create a build updater\").Err())\n\t}\n\n\t// Run the recipe.\n\tresult := c.runRecipe(ctx, env)\n\n\treq, err := c.bu.ParseAnnotations(ctx, result.Annotations)\n\tif err != nil {\n\t\treturn fail(errors.Annotate(err, \"failed to parse final annotations\").Err())\n\t}\n\n\t// Mark incomplete steps as canceled.\n\tendTime, err := ptypes.TimestampProto(clock.Now(ctx))\n\tif err != nil {\n\t\treturn fail(err)\n\t}\n\tfor _, s := range req.Build.Steps {\n\t\tif !protoutil.IsEnded(s.Status) {\n\t\t\ts.Status = buildbucketpb.Status_CANCELED\n\t\t\tif s.SummaryMarkdown != \"\" {\n\t\t\t\ts.SummaryMarkdown += \"\\n\"\n\t\t\t}\n\t\t\ts.SummaryMarkdown += \"step was canceled because it did not end before build ended\"\n\t\t\ts.EndTime = endTime\n\t\t}\n\t}\n\n\t// If the build failed, update the build status.\n\t// If it succeeded, do not set it just yet, since there are more ways\n\t// the swarming task can fail.\n\tswitch {\n\tcase result.InfraFailure != nil:\n\t\treq.Build.Status = buildbucketpb.Status_INFRA_FAILURE\n\t\treq.Build.SummaryMarkdown = result.InfraFailure.Text\n\t\treq.UpdateMask.Paths = append(req.UpdateMask.Paths, \"build.status\", \"build.summary_markdown\")\n\n\tcase result.RecipeResult.GetFailure() != nil:\n\t\t// Note: if this recipe failure is an infra failure,\n\t\t// result.InfraFailure above is non-nil.\n\t\treq.Build.Status = buildbucketpb.Status_FAILURE\n\t\treq.Build.SummaryMarkdown = result.RecipeResult.GetFailure().HumanReason\n\t\treq.UpdateMask.Paths = append(req.UpdateMask.Paths, \"build.status\", \"build.summary_markdown\")\n\t}\n\n\tif c.CallUpdateBuild {\n\t\t// The final UpdateBuild call is critical.\n\t\t// If it fails, it is fatal to the build.\n\t\tif err := 
c.bu.UpdateBuild(ctx, req); err != nil {\n\t\t\treturn fail(errors.Annotate(err, \"failed to send final build state to buildbucket\").Err())\n\t\t}\n\t}\n\n\trecipeExitCode := 1\n\tif result.RecipeExitCode != nil {\n\t\trecipeExitCode = int(result.RecipeExitCode.Value)\n\t}\n\t// After the call to UpdateBuild we can safely set the Build successful.\n\tif recipeExitCode == 0 {\n\t\treq.Build.Status = buildbucketpb.Status_SUCCESS\n\t}\n\treturn req.Build, recipeExitCode\n}", "func execute() {\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(0)\n\t}\n\n\t// viper.Debug()\n\n\tif !runSrv {\n\t\tos.Exit(0)\n\t}\n}", "func setup(){}", "func (gen *Generator) Exec() (err error) {\n\tif err := gen.Prompt(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := gen.Extends(); err != nil {\n\t\treturn err\n\t}\n\n\tif !gen.Options.PerformUpgrade {\n\t\t// run scripts in config.run_after array.\n\t\tif err := gen.RunBefore(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := filepath.Walk(gen.Template.Files, gen.WalkFiles); err != nil {\n\t\treturn err\n\t}\n\n\tif err := gen.Project.SaveState(); err != nil {\n\t\treturn err\n\t}\n\n\tif !gen.Options.PerformUpgrade {\n\t\t// run scripts in config.run_after array.\n\t\tif err := gen.RunAfter(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func (c *CLI) Execute() {\n\tc.LoadCredentials()\n\twr := &workflow.WorkflowResult{}\n\texecHandler := workflow.GetExecutorHandler()\n\texec, err := execHandler.Add(c.Workflow, wr.Callback)\n\tc.exitOnError(err)\n\texec.SetLogListener(c.logListener)\n\tstart := time.Now()\n\texec, err = execHandler.Execute(c.Workflow.WorkflowID)\n\tc.exitOnError(err)\n\tchecks := 0\n\toperation := \"\"\n\tif c.Params.InstallRequest != nil {\n\t\tif c.Params.AppCluster {\n\t\t\toperation = \"Installing application cluster\"\n\t\t} else {\n\t\t\toperation = \"Installing management cluster\"\n\t\t}\n\t} else if c.Params.UninstallRequest != nil {\n\t\tif 
c.Params.AppCluster {\n\t\t\toperation = \"Uninstalling application cluster\"\n\t\t} else {\n\t\t\toperation = \"Uninstalling management cluster\"\n\t\t}\n\t}\n\tfor !wr.Called {\n\t\ttime.Sleep(time.Second * 15)\n\t\tif checks%4 == 0 {\n\t\t\tfmt.Println(operation, string(exec.State), \"-\", time.Since(start).String())\n\t\t}\n\t\tchecks++\n\t}\n\telapsed := time.Since(start)\n\tfmt.Println(\"Operation took \", elapsed)\n\tif wr.Error != nil {\n\t\tfmt.Println(\"Operation failed due to \", wr.Error.Error())\n\t\tlog.Fatal().Str(\"error\", wr.Error.DebugReport()).Msg(fmt.Sprintf(\"%s failed\", operation))\n\t}\n}", "func main() {\n\tLogo(Version, true)\n\n\tif len(os.Args) > 1 {\n\t\tif err := ProcessArgument(os.Args[1:]...); err != nil {\n\t\t\tif err.Error() != \"deploy\" {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tLog(\"Checking contents...\")\n\n\tif config, err := CheckCurrentProjectFolder(); err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tNew(config)\n\n\t\tSetProjectName(config.Name)\n\n\t\tl := RunService()\n\t\tdefer l.Close()\n\n\t\te := RunEndpointService()\n\t\tdefer e.Close()\n\n\t\tStartAuthorizer(config)\n\n\t\tStartEndpoints(config)\n\t\tDeploy(config)\n\t}\n}", "func (e *Execution) Execute(ctx context.Context) error {\n\n\tvar err error\n\tswitch strings.ToLower(e.Operation.Name) {\n\tcase installOperation, \"standard.create\":\n\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\"Creating Job %q\", e.NodeName)\n\t\terr = e.createJob(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to create Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase uninstallOperation, \"standard.delete\":\n\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\"Deleting Job %q\", e.NodeName)\n\t\terr = 
e.deleteJob(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to delete Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase enableFileTransferOperation:\n\t\terr = e.enableFileTransfer(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to enable file transfer for Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase disableFileTransferOperation:\n\t\terr = e.disableFileTransfer(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to disable file transfer for Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase listChangedFilesOperation:\n\t\terr = e.listChangedFiles(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to list changed files for Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase tosca.RunnableSubmitOperationName:\n\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\"Submitting Job %q\", e.NodeName)\n\t\terr = e.submitJob(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to submit Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tcase tosca.RunnableCancelOperationName:\n\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\"Canceling Job %q\", e.NodeName)\n\t\terr = e.cancelJob(ctx)\n\t\tif err != nil {\n\t\t\tevents.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, e.DeploymentID).Registerf(\n\t\t\t\t\"Failed to cancel Job %q, error %s\", e.NodeName, err.Error())\n\n\t\t}\n\tdefault:\n\t\terr = 
errors.Errorf(\"Unsupported operation %q\", e.Operation.Name)\n\t}\n\n\treturn err\n}", "func CheckFirstPackageSet(parent interface{}) error {\n\tthis := parent.(*ActionSaver)\n\tneed_install := true\n\tneed_install_u := true\n\tneed_install_y := true\n\terr, _, _, stdout_l := executeCommand(makeArgsFromString(\"rpm -qa wget\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, val := range stdout_l {\n\t\tneed_install = (strings.Contains(val, \"wget\") == false)\n\t\tif need_install == false {\n\t\t\tbreak\n\t\t}\n\t}\n\tthis.SetParam(need_install, \"need_install\")\n\tif need_install == true {\n\t\terr, _, _, stdout_l = executeCommand(makeArgsFromString(\"yum install wget -y\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr, _, _, stdout_l = executeCommand(makeArgsFromString(\"rpm -qa unzip\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, val := range stdout_l {\n\t\tneed_install_u = (strings.Contains(val, \"unzip\") == false)\n\t\tif need_install_u == false {\n\t\t\tbreak\n\t\t}\n\t}\n\tthis.SetParam(need_install_u, \"need_install_u\")\n\tif need_install_u == true {\n\t\terr, _, _, stdout_l = executeCommand(makeArgsFromString(\"yum install unzip -y\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr, _, _, stdout_l = executeCommand(makeArgsFromString(\"rpm -qa yum-utils\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, val := range stdout_l {\n\t\tneed_install_y = (strings.Contains(val, \"yum-utils\") == false)\n\t\tif need_install_y == false {\n\t\t\tbreak\n\t\t}\n\t}\n\tthis.SetParam(need_install_y, \"need_install_y\")\n\tif need_install_y == true {\n\t\terr, _, _, stdout_l = executeCommand(makeArgsFromString(\"yum install yum-utils -y\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func runDebloater() {\n\tensureRunning()\n\trunScript(debloatScript, \"-SysPrep\", \"-Debloat\")\n}", "func (p ChrootSetupStep) Install(config parser.InstallConfig, ic *context.InstallContext) error {\n\tvar err error = 
nil\n\n\troot := ic.GetVar(\"root\")\n\n\terr = utils.StdoutCmd(\"mount\", \"proc\", fmt.Sprintf(\"%s/proc\", root), \"-v\", \"-t\", \"proc\", \"-o\", \"nosuid,noexec,nodev\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"sys\", fmt.Sprintf(\"%s/sys\", root), \"-v\", \"-t\", \"sysfs\", \"-o\", \"nosuid,noexec,nodev,ro\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"udev\", fmt.Sprintf(\"%s/dev\", root), \"-v\", \"-t\", \"devtmpfs\", \"-o\", \"mode=0755,nosuid\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"devpts\", fmt.Sprintf(\"%s/dev/pts\", root), \"-v\", \"-t\", \"devpts\", \"-o\", \"mode=1777,nosuid,nodev\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"shm\", fmt.Sprintf(\"%s/dev/shm\", root), \"-v\", \"-t\", \"tmpfs\", \"-o\", \"mode=1777,strictatime,nodev,nosuid\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"/run\", fmt.Sprintf(\"%s/run\", root), \"-v\", \"--bind\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.StdoutCmd(\"mount\", \"tmp\", fmt.Sprintf(\"%s/tmp\", root), \"-v\", \"-t\", \"tmpfs\", \"-o\", \"mode=1777,strictatime,nodev,nosuid\").Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error {\n\tib := stage.Builder\n\tnode := stage.Node\n\tcheckForLayers := true\n\tchildren := node.Children\n\tcommitName := b.output\n\tb.containerIDs = nil\n\n\tvar leftoverArgs []string\n\tfor arg := range b.builder.Args {\n\t\tif !builtinAllowedBuildArgs[arg] {\n\t\t\tleftoverArgs = append(leftoverArgs, arg)\n\t\t}\n\t}\n\tfor i, node := range node.Children {\n\t\tstep := ib.Step()\n\t\tif err := step.Resolve(node); err != nil {\n\t\t\treturn errors.Wrapf(err, \"error resolving step %+v\", *node)\n\t\t}\n\t\tlogrus.Debugf(\"Parsed Step: %+v\", 
*step)\n\t\tif step.Command == \"arg\" {\n\t\t\tfor index, arg := range leftoverArgs {\n\t\t\t\tfor _, Arg := range step.Args {\n\t\t\t\t\tlist := strings.SplitN(Arg, \"=\", 2)\n\t\t\t\t\tif arg == list[0] {\n\t\t\t\t\t\tleftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !b.quiet {\n\t\t\tb.log(\"%s\", step.Original)\n\t\t}\n\t\trequiresStart := false\n\t\tif i < len(node.Children)-1 {\n\t\t\trequiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]})\n\t\t}\n\n\t\tif !b.layers && !b.noCache {\n\t\t\terr := ib.Run(step, b, requiresStart)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"error building at step %+v\", *step)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif i < len(children)-1 {\n\t\t\tb.output = \"\"\n\t\t} else {\n\t\t\tb.output = commitName\n\t\t}\n\n\t\tvar (\n\t\t\tcacheID string\n\t\t\terr error\n\t\t\timgID string\n\t\t)\n\n\t\tb.copyFrom = \"\"\n\t\t// Check if --from exists in the step command of COPY or ADD\n\t\t// If it exists, set b.copyfrom to that value\n\t\tfor _, n := range step.Flags {\n\t\t\tif strings.Contains(n, \"--from\") && (step.Command == \"copy\" || step.Command == \"add\") {\n\t\t\t\tarr := strings.Split(n, \"=\")\n\t\t\t\tb.copyFrom = b.named[arr[1]].mountPoint\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// checkForLayers will be true if b.layers is true and a cached intermediate image is found.\n\t\t// checkForLayers is set to false when either there is no cached image or a break occurs where\n\t\t// the instructions in the Dockerfile change from a previous build.\n\t\t// Don't check for cache if b.noCache is set to true.\n\t\tif checkForLayers && !b.noCache {\n\t\t\tcacheID, err = b.layerExists(ctx, node, children[:i])\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"error checking if cached image exists from a previous build\")\n\t\t\t}\n\t\t}\n\n\t\tif cacheID != \"\" {\n\t\t\tfmt.Fprintf(b.out, \"--> Using cache %s\\n\", 
cacheID)\n\t\t}\n\n\t\t// If a cache is found for the last step, that means nothing in the\n\t\t// Dockerfile changed. Just create a copy of the existing image and\n\t\t// save it with the new name passed in by the user.\n\t\tif cacheID != \"\" && i == len(children)-1 {\n\t\t\tif err := b.copyExistingImage(ctx, cacheID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tb.containerIDs = append(b.containerIDs, b.builder.ContainerID)\n\t\t\tbreak\n\t\t}\n\n\t\tif cacheID == \"\" || !checkForLayers {\n\t\t\tcheckForLayers = false\n\t\t\terr := ib.Run(step, b, requiresStart)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"error building at step %+v\", *step)\n\t\t\t}\n\t\t}\n\n\t\t// Commit if no cache is found\n\t\tif cacheID == \"\" {\n\t\t\timgID, _, err = b.Commit(ctx, ib, getCreatedBy(node))\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"error committing container for step %+v\", *step)\n\t\t\t}\n\t\t\tif i == len(children)-1 {\n\t\t\t\tb.log(\"COMMIT %s\", b.output)\n\t\t\t}\n\t\t} else {\n\t\t\t// Cache is found, assign imgID the id of the cached image so\n\t\t\t// it is used to create the container for the next step.\n\t\t\timgID = cacheID\n\t\t}\n\t\t// Add container ID of successful intermediate container to b.containerIDs\n\t\tb.containerIDs = append(b.containerIDs, b.builder.ContainerID)\n\t\t// Prepare for the next step with imgID as the new base image.\n\t\tif i != len(children)-1 {\n\t\t\tif err := b.Prepare(ctx, stage, imgID); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"error preparing container for next step\")\n\t\t\t}\n\t\t}\n\t}\n\tif len(leftoverArgs) > 0 {\n\t\tfmt.Fprintf(b.out, \"[Warning] One or more build-args %v were not consumed\\n\", leftoverArgs)\n\t}\n\treturn nil\n}", "func main(){\n fmt.Println(\"INTRO MESSAGE HERE\")\n installUpdates()\n getVersion()\n getManufacturer()\n validateDNS()\n removeDeleteme()\n expandNova()\n validateHosts()\n}", "func doInitialSetup() {\n\terr := 
initLogging(os.Getenv(\"SENTRY_DSN\"))\n\tpanicOnError(err)\n\tconfPath := os.Getenv(\"OJC_CONFIG_PATH\")\n\tif len(confPath) == 0 {\n\t\tconfPath = \"/opt/ojc/ojc.yml\"\n\t}\n\tuserConfig, err = loadConfig(confPath) //userConfig is a global\n\tif ocDebug {\n\t\tlog.Println(userConfig)\n\t}\n\tpanicOnError(err)\n}", "func run(t *testing.T, name, repo, hatPath string, fns ...func(t *testing.T, p *params)) {\n\tt.Run(name, func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tp := &params{\n\t\t\trb: newRollback(),\n\t\t}\n\n\t\tdefer p.rb.run()\n\n\t\tconf := mustReadConfig(filepath.Join(metaRoot(), \".sail.toml\"))\n\n\t\trepo, err := parseRepo(\"ssh\", \"github.com\", \"\", repo)\n\t\trequire.NoError(t, err)\n\n\t\tp.proj = &project{\n\t\t\tconf: conf,\n\t\t\trepo: repo,\n\t\t}\n\n\t\t// Ensure our project repo is cloned to the local machine.\n\t\terr = p.proj.ensureDir()\n\t\trequire.NoError(t, err)\n\t\tp.rb.add(func() {\n\t\t\t// TODO: Do we want to remove this? I accidentally deleted\n\t\t\t// my own sail path that I was developing in...\n\t\t\t// err := os.RemoveAll(p.proj.localDir())\n\t\t\t// require.NoError(t, err)\n\t\t})\n\n\t\t// Use the project's custom sail image if one is built.\n\t\tbaseImage, isCustom, err := p.proj.buildImage()\n\t\trequire.NoError(t, err)\n\t\tif !isCustom {\n\t\t\tbaseImage = p.proj.conf.DefaultImage\n\t\t} else {\n\t\t\tp.rb.add(func() {\n\t\t\t\trequireImageRemove(t, baseImage)\n\t\t\t})\n\t\t}\n\n\t\timage := baseImage\n\n\t\t// Create the hat builder and apply the hat if one\n\t\t// is specified.\n\t\tp.bldr = &hatBuilder{\n\t\t\thatPath: hatPath,\n\t\t\tbaseImage: baseImage,\n\t\t}\n\n\t\tif hatPath != \"\" {\n\t\t\timage, err = p.bldr.applyHat()\n\t\t\trequire.NoError(t, err)\n\t\t\tp.rb.add(func() {\n\t\t\t\trequireImageRemove(t, image)\n\t\t\t})\n\t\t}\n\n\t\t// Construct our container runner and run\n\t\t// the container.\n\t\tp.port, err = xnet.FindAvailablePort()\n\t\trequire.NoError(t, err)\n\n\t\tp.runner = 
&runner{\n\t\t\tprojectName: p.proj.repo.BaseName(),\n\t\t\tprojectLocalDir: p.proj.localDir(),\n\t\t\tcntName: p.proj.cntName(),\n\t\t\thostname: p.proj.repo.BaseName(),\n\t\t\tport: p.port,\n\t\t}\n\n\t\terr = p.runner.runContainer(image)\n\t\trequire.NoError(t, err)\n\t\tp.rb.add(func() {\n\t\t\trequireContainerRemove(t, p.proj.cntName())\n\t\t})\n\n\t\t// Iterate through all the provided testing functions.\n\t\tfor _, fn := range fns {\n\t\t\tfn(t, p)\n\t\t}\n\t})\n}", "func ExecuteCollect(collectOptions *common.CollectOptions) error {\n\t//verification parameters\n\terr := VerificationParameters(collectOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Start collecting data\")\n\t// create tmp direction\n\ttmpName, timenow, err := makeDirTmp()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintDetail(fmt.Sprintf(\"create tmp file: %s\", tmpName))\n\n\terr = collectSystemData(fmt.Sprintf(\"%s/system\", tmpName))\n\tif err != nil {\n\t\tfmt.Printf(\"collect System data failed\")\n\t}\n\tprintDetail(\"collect systemd data finish\")\n\n\tedgeconfig, err := util.ParseEdgecoreConfig(collectOptions.Config)\n\n\tif err != nil {\n\t\tfmt.Printf(\"fail to load edgecore config: %s\", err.Error())\n\t}\n\terr = collectEdgecoreData(fmt.Sprintf(\"%s/edgecore\", tmpName), edgeconfig, collectOptions)\n\tif err != nil {\n\t\tfmt.Printf(\"collect edgecore data failed\")\n\t}\n\tprintDetail(\"collect edgecore data finish\")\n\n\tif edgeconfig.Modules.Edged.ContainerRuntime == constants.DefaultRuntimeType ||\n\t\tedgeconfig.Modules.Edged.ContainerRuntime == \"\" {\n\t\terr = collectRuntimeData(fmt.Sprintf(\"%s/runtime\", tmpName))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"collect runtime data failed\")\n\t\t\treturn err\n\t\t}\n\t\tprintDetail(\"collect runtime data finish\")\n\t} else {\n\t\tfmt.Printf(\"now runtime only support: docker\")\n\t\t// TODO\n\t\t// other runtime\n\t}\n\n\tOutputPath := collectOptions.OutputPath\n\tzipName := 
fmt.Sprintf(\"%s/edge_%s.tar.gz\", OutputPath, timenow)\n\terr = util.Compress(zipName, []string{tmpName})\n\tif err != nil {\n\t\treturn err\n\t}\n\tprintDetail(\"Data compressed successfully\")\n\n\t// delete tmp direction\n\tif err = os.RemoveAll(tmpName); err != nil {\n\t\treturn err\n\t}\n\n\tprintDetail(\"Remove tmp data finish\")\n\n\tfmt.Printf(\"Data collected successfully, path: %s\\n\", zipName)\n\treturn nil\n}", "func main() {\n\texecuteReadFile()\n\tfmt.Println(\"Nunca me ejecutare\")\n}", "func run(contextName string) error {\n\tcfg, err := config.Read(\"qaq15.yml\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// select context\n\tvar selectedContext config.Context\n\tfor _, ctx := range cfg.Contexts {\n\t\tif ctx.Name == contextName {\n\t\t\tselectedContext = ctx\n\t\t\tbreak\n\t\t}\n\t}\n\tif selectedContext.Name == \"\" {\n\t\treturn errors.Errorf(\"context not found no %s\", contextName)\n\t}\n\n\tcontainers, err := extendContainer(cfg.Containers)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range containers {\n\t\tif err := resolveEnv(&containers[i], cfg.Parameters); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnow := time.Now()\n\tlogPrefix, err := NewLogDir(cfg.Data, now)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tvar wg sync.WaitGroup\n\twg.Add(len(containers) + 1)\n\tmerr := errors.NewMultiErrSafe()\n\n\t// Run containers\n\tfor _, container := range containers {\n\t\tcontainer := container\n\t\tcontainer.Image = selectedContext.Image\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\treportError := func(err error) {\n\t\t\t\tmerr.Append(err)\n\t\t\t\tlog.Error(err)\n\t\t\t\tcancel() // TODO: is cancel go routine safe?\n\t\t\t}\n\n\t\t\t// Add a mount in $logdir/$container to /qaq16\n\t\t\tif p, err := NewMountDir(logPrefix, container.Name); err != nil {\n\t\t\t\treportError(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tcontainer.Mounts = 
append(container.Mounts, config.Mount{\n\t\t\t\t\tSrc: p,\n\t\t\t\t\tDst: \"/qaq16\",\n\t\t\t\t})\n\t\t\t}\n\n\t\t\texecCtx := ExecContext{log: FormatLog(logPrefix, container.Name)}\n\t\t\tif err := RunContainer(ctx, container, execCtx); err != nil {\n\t\t\t\treportError(err)\n\t\t\t}\n\t\t}()\n\t}\n\t// FIXME: hack to wait container ready\n\ttime.Sleep(1 * time.Second)\n\n\t// Run score\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\texecCtx := ExecContext{log: FormatLog(logPrefix, \"score\")}\n\t\tif err := RunScore(ctx, cfg.Score, execCtx); err != nil {\n\t\t\tmerr.Append(err)\n\t\t\tlog.Error(err)\n\t\t\tcancel()\n\t\t}\n\t}()\n\n\twg.Wait()\n\treturn merr.ErrorOrNil()\n}", "func (st *AddonStep) Execute(ctx context.Context, env []string) {\n\tlog := logr.FromContext(ctx).WithName(\"AddonStep\")\n\tctx = logr.NewContext(ctx, log)\n\tlog.Info(\"start\")\n\n\tst.update(v1.StateRunning, \"values yaml\")\n\n\t// Create values yaml\n\tvalues, err := st.valuesYamlIn(st.SourcePath)\n\tif err != nil {\n\t\tst.error2(err, \"values yaml\")\n\t\treturn\n\t}\n\n\tvar totals []addon.KTResult\n\tfor _, job := range st.JobPaths {\n\t\tst.update(v1.StateRunning, job)\n\n\t\t// Start kubectl-tmplt\n\t\tcmd, ch, err := st.Addon.Start(ctx, env, st.SourcePath, job, values, st.KCPath, st.MasterVaultPath)\n\t\tif err != nil {\n\t\t\tst.error2(err, \"start kubectl-tmplt\")\n\t\t\treturn\n\t\t}\n\n\t\t// notify sink while waiting for command completion.\n\t\tvar last *addon.KTResult\n\t\tfor r := range ch {\n\t\t\tlast = &r\n\t\t}\n\n\t\tif cmd != nil {\n\t\t\t// real cmd (fakes are nil).\n\t\t\terr := cmd.Wait()\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err, \"wait kubectl-tmplt\")\n\t\t\t}\n\t\t}\n\n\t\tif last == nil {\n\t\t\t// no data has been received from the channel since the Start().\n\t\t\tlog.Info(\"kubectl-tmplt no feedback received\")\n\n\t\t\tcontinue //TODO or exit loop?\n\t\t}\n\n\t\ttotals = append(totals, *last)\n\n\t\tif len(last.Errors) > 0 
{\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Return results.\n\tif len(totals) == 0 {\n\t\tst.error2(nil, \"nothing applied\")\n\t\treturn\n\t}\n\t// aggregate totals\n\tvar tE []string\n\tvar tA, tC, tD int\n\tfor _, t := range totals {\n\t\ttE = append(tE, t.Errors...)\n\t\ttA = +t.Added\n\t\ttC = +t.Changed\n\t\ttD = +t.Deleted\n\t}\n\tif len(tE) > 0 {\n\t\tst.error2(nil, strings.Join(tE, \", \"))\n\t\treturn\n\t}\n\n\tst.Added = tA\n\tst.Changed = tC\n\tst.Deleted = tD\n\n\tst.update(v1.StateReady, fmt.Sprintf(\"kubectl-tmplt errors=0 added=%d changed=%d deleted=%d\", tA, tC, tD))\n}", "func (o *initJobOpts) Execute() error {\n\tif !o.allowAppDowngrade {\n\t\tappVersionGetter, err := o.newAppVersionGetter(o.appName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := validateAppVersion(appVersionGetter, o.appName, o.templateVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Check for a valid healthcheck and add it to the opts.\n\tvar hc manifest.ContainerHealthCheck\n\tvar err error\n\tif o.dockerfilePath != \"\" {\n\t\thc, err = parseHealthCheck(o.initParser(o.dockerfilePath))\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"Cannot parse the HEALTHCHECK instruction from the Dockerfile: %v\\n\", err)\n\t\t}\n\t}\n\t// If the user passes in an image, their docker engine isn't necessarily running, and we can't do anything with the platform because we're not building the Docker image.\n\tif o.image == \"\" && !o.manifestExists {\n\t\tplatform, err := legitimizePlatform(o.dockerEngine, o.wkldType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif platform != \"\" {\n\t\t\to.platform = &platform\n\t\t}\n\t}\n\tenvs, err := envsWithPrivateSubnetsOnly(o.store, o.initEnvDescriber, o.appName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmanifestPath, err := o.init.Job(&initialize.JobProps{\n\t\tWorkloadProps: initialize.WorkloadProps{\n\t\t\tApp: o.appName,\n\t\t\tName: o.name,\n\t\t\tType: o.wkldType,\n\t\t\tDockerfilePath: o.dockerfilePath,\n\t\t\tImage: 
o.image,\n\t\t\tPlatform: manifest.PlatformArgsOrString{\n\t\t\t\tPlatformString: o.platform,\n\t\t\t},\n\t\t\tPrivateOnlyEnvironments: envs,\n\t\t},\n\n\t\tSchedule: o.schedule,\n\t\tHealthCheck: hc,\n\t\tTimeout: o.timeout,\n\t\tRetries: o.retries,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\to.manifestPath = manifestPath\n\treturn nil\n}", "func executeBuild() {\n\tfmt.Println(\"Building ...\")\n}", "func (m *JobExecutor) Setup() error {\n\tif m == nil {\n\t\treturn fmt.Errorf(\"JobExecutor is nil?\")\n\t}\n\tif m.RootTask == nil {\n\t\treturn fmt.Errorf(\"No task exists for this job\")\n\t}\n\treturn m.RootTask.Setup(0)\n}", "func (d Driver) Run(name, confTarget, hostVolume string, args []string) error {\n\td.containerID = fmt.Sprintf(\"maestro-%s\", name)\n\td.confTarget = confTarget\n\td.hostVolume = hostVolume\n\td.cmd = args\n\tneedToPull, checkErr := d.needToPull(context.Background())\n\tif checkErr != nil {\n\t\treturn checkErr\n\t}\n\tif needToPull {\n\t\tpullErr := d.pull(context.Background())\n\t\tif pullErr != nil {\n\t\t\treturn pullErr\n\t\t}\n\t}\n\tneedToRemoveOld, removalID, checkRemoveErr := d.needToRemove(context.Background())\n\tif checkRemoveErr != nil {\n\t\treturn checkRemoveErr\n\t}\n\tif needToRemoveOld {\n\t\tremoveErr := d.remove(context.Background(), removalID)\n\t\tif removeErr != nil {\n\t\t\treturn removeErr\n\t\t}\n\t}\n\tcreateErr := d.create(context.Background())\n\tif createErr != nil {\n\t\treturn createErr\n\t}\n\treturn d.start(context.Background())\n}", "func (a Adapter) initRunContainerSupervisord(component string, containers []types.Container) (err error) {\n\tfor _, container := range containers {\n\t\tif container.Labels[\"alias\"] == component && !strings.Contains(container.Command, common.SupervisordBinaryPath) {\n\t\t\tcommand := []string{common.SupervisordBinaryPath, \"-c\", common.SupervisordConfFile, \"-d\"}\n\t\t\tcompInfo := common.ComponentInfo{\n\t\t\t\tContainerName: container.ID,\n\t\t\t}\n\t\t\terr = 
exec.ExecuteCommand(&a.Client, compInfo, command, true, nil, nil)\n\t\t}\n\t}\n\n\treturn\n}", "func (ts *TaskService) Exec(requestCtx context.Context, req *taskAPI.ExecProcessRequest) (*types.Empty, error) {\n\tdefer logPanicAndDie(log.G(requestCtx))\n\n\ttaskID := req.ID\n\texecID := req.ExecID\n\n\tlogger := log.G(requestCtx).WithField(\"TaskID\", taskID).WithField(\"ExecID\", execID)\n\tlogger.Debug(\"exec\")\n\n\textraData, err := unmarshalExtraData(req.Spec)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal extra data\")\n\t}\n\n\t// Just provide runc the options it knows about, not our wrapper\n\treq.Spec = extraData.RuncOptions\n\n\tbundleDir := bundle.Dir(filepath.Join(containerRootDir, taskID))\n\n\tvar ioConnectorSet vm.IOProxy\n\n\tif vm.IsAgentOnlyIO(req.Stdout, logger) {\n\t\tioConnectorSet = vm.NewNullIOProxy()\n\t} else {\n\t\t// Override the incoming stdio FIFOs, which have paths from the host that we can't use\n\t\tfifoSet, err := cio.NewFIFOSetInDir(bundleDir.RootPath(), fifoName(taskID, execID), req.Terminal)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"failed opening stdio FIFOs\")\n\t\t\treturn nil, errors.Wrap(err, \"failed to open stdio FIFOs\")\n\t\t}\n\n\t\tvar stdinConnectorPair *vm.IOConnectorPair\n\t\tif req.Stdin != \"\" {\n\t\t\treq.Stdin = fifoSet.Stdin\n\t\t\tstdinConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.VSockAcceptConnector(extraData.StdinPort),\n\t\t\t\tWriteConnector: vm.FIFOConnector(fifoSet.Stdin),\n\t\t\t}\n\t\t}\n\n\t\tvar stdoutConnectorPair *vm.IOConnectorPair\n\t\tif req.Stdout != \"\" {\n\t\t\treq.Stdout = fifoSet.Stdout\n\t\t\tstdoutConnectorPair = &vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stdout),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StdoutPort),\n\t\t\t}\n\t\t}\n\n\t\tvar stderrConnectorPair *vm.IOConnectorPair\n\t\tif req.Stderr != \"\" {\n\t\t\treq.Stderr = fifoSet.Stderr\n\t\t\tstderrConnectorPair = 
&vm.IOConnectorPair{\n\t\t\t\tReadConnector: vm.FIFOConnector(fifoSet.Stderr),\n\t\t\t\tWriteConnector: vm.VSockAcceptConnector(extraData.StderrPort),\n\t\t\t}\n\t\t}\n\n\t\tioConnectorSet = vm.NewIOConnectorProxy(stdinConnectorPair, stdoutConnectorPair, stderrConnectorPair)\n\t}\n\n\tresp, err := ts.taskManager.ExecProcess(requestCtx, req, ts.runcService, ioConnectorSet)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"exec failed\")\n\t\treturn nil, err\n\t}\n\n\tlogger.Debug(\"exec succeeded\")\n\treturn resp, nil\n}", "func (*clusterPackagesExecutor) PreCheck(ctx context.Context) error {\n\treturn nil\n}", "func SetupDragonchainPreReqs(config *configuration.Configuration) error {\n\tif err := exec.Command(\"kubectl\", \"apply\", \"-f\", \"https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml\").Run(); err != nil {\n\t\treturn errors.New(\"Error creating local path provisioner:\\n\" + err.Error())\n\t}\n\tif exec.Command(\"kubectl\", \"get\", \"namespace\", \"dragonchain\", \"--context=\"+configuration.MinikubeContext).Run() != nil {\n\t\t// Create the dragonchain namespace if necessary\n\t\tfmt.Println(\"Creating dragonchain namespace\")\n\t\tcmd := exec.Command(\"kubectl\", \"create\", \"namespace\", \"dragonchain\", \"--context=\"+configuration.MinikubeContext)\n\t\tcmd.Stderr = os.Stderr\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn errors.New(\"Error creating dragonchain namespace:\\n\" + err.Error())\n\t\t}\n\t}\n\t// Set up l1 dependencies if needed\n\tif config.Level == 1 {\n\t\t// Set up openfaas\n\t\texists, err := doesHelmDeploymentExist(\"openfaas\", \"openfaas\")\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Error checking for existing openfaas installation:\\n\" + err.Error())\n\t\t}\n\t\tif !exists {\n\t\t\tfmt.Println(\"Openfaas does not appear to be installed. 
Installing now\")\n\t\t\tif err := createOpenFaasDeployment(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif !config.UseVM {\n\t\t\t// Try to backup old docker daemon config if it exists\n\t\t\tcmd := exec.Command(\"sudo\", \"mv\", \"/etc/docker/daemon.json\", \"/etc/docker/daemon.json.bak\")\n\t\t\tcmd.Stdin = os.Stdin\n\t\t\tcmd.Run()\n\t\t\t// If using native machine docker, need to ensure that insecure registry for the registry is set on the daemon\n\t\t\tdockerDaemonJSON := \"{\\\\\\\"insecure-registries\\\\\\\":[\\\\\\\"\" + configuration.RegistryIP + \":\" + strconv.Itoa(configuration.RegistryPort) + \"\\\\\\\"]}\"\n\t\t\tcmd = exec.Command(\"sh\", \"-c\", \"echo \"+dockerDaemonJSON+\" | sudo tee /etc/docker/daemon.json\")\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tcmd.Stdin = os.Stdin\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\treturn errors.New(\"Error setting insecure registry setting with docker daemon:\\n\" + err.Error())\n\t\t\t}\n\t\t\tcmd = exec.Command(\"sudo\", \"service\", \"docker\", \"restart\")\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tcmd.Stdin = os.Stdin\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\treturn errors.New(\"Error restarting docker daemon:\\n\" + err.Error())\n\t\t\t}\n\t\t\t// Briefly wait for containers to come back up after restarting\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t\t// Set up docker registry\n\t\texists, err = doesHelmDeploymentExist(\"registry\", \"registry\")\n\t\tif err != nil {\n\t\t\treturn errors.New(\"Error checking for existing container registry installation:\\n\" + err.Error())\n\t\t}\n\t\tif !exists {\n\t\t\tfmt.Println(\"Docker registry does not appear to be installed. Installing now\")\n\t\t\tif err := createDockerRegistryDeployment(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t// Set up openfaas builder service account\n\t\tif !openfaasServiceAccountExists() {\n\t\t\tfmt.Println(\"Openfaas builder service account doesn't exist. 
Creating now\")\n\t\t\tif err := createOpenfaasBuilderServiceAccount(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func TestYumInstallPackage(t *testing.T) {\n\tt.Parallel()\n\n\tt.Run(\"when installed\", func(t *testing.T) {\n\t\tpkg := \"foo1\"\n\t\trunner := newRunner(\"\", nil)\n\t\ty := &rpm.YumManager{Sys: runner}\n\t\t_, err := y.InstallPackage(pkg)\n\t\tassert.NoError(t, err)\n\t\trunner.AssertNumberOfCalls(t, \"Run\", 1)\n\t})\n\n\tt.Run(\"when not installed\", func(t *testing.T) {\n\t\tpkg := \"foo1\"\n\t\trunner := newRunner(\"\", makeExitError(\"\", 1))\n\t\ty := &rpm.YumManager{Sys: runner}\n\t\ty.InstallPackage(pkg)\n\t\trunner.AssertNumberOfCalls(t, \"Run\", 2)\n\t})\n\n\tt.Run(\"when installation error\", func(t *testing.T) {\n\t\tpkg := \"foo1\"\n\t\trunner := newRunner(\"\", makeExitError(\"\", 1))\n\t\ty := &rpm.YumManager{Sys: runner}\n\t\t_, err := y.InstallPackage(pkg)\n\t\tassert.Error(t, err)\n\t\trunner.AssertNumberOfCalls(t, \"Run\", 2)\n\t})\n}", "func runExec(serviceName string, operation string) (string, error) {\n\tbytes, err := exec.Command(Configuration.ExecutorPath, serviceName, operation).CombinedOutput()\n\treturn string(bytes), err\n}", "func Execute() {\n\n\tif len(flag.Args()) == 0 {\n\t\tshowHelp(nil)\n\t\tos.Exit(1)\n\t}\n\n\t// Check if the first argument is a native command\n\tfor _, nc := range nativeCmds {\n\t\tif nc.ID == flag.Arg(0) {\n\t\t\tnc.Cmd(flag.Args()[1:]...)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\tfor _, a := range flag.Args() {\n\t\tartifact.Call(a)\n\t}\n}", "func main() {\n\tcommand := os.Args[1]\n\tswitch command {\n\tcase \"run\":\n\t\trun()\n\tcase \"child\":\n\t\tchild()\n\tdefault:\n\t\tpanic(\"wat should I do with \" + command)\n\t}\n}", "func influunt_ExecutorRun(self, args *pyObject) *C.PyObject {\n\teCapsule, inputs, outputs := parse3ObjectFromArgs(args)\n\te := capsuleToPointer(eCapsule)\n\texec := pointer.Restore(e).(*executor.Executor)\n\n\tinputMap, err := 
convertPyDictNodeMap(inputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\toutputArr, err := convertPyListToNodeArr(outputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tres, err := exec.Run(inputMap, outputArr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresPyObj, err := convertGoTypeToPyObject(res)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn resPyObj\n}", "func beforeBuild() error {\n\terr := runPrepareScript()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func main() {\n\t/* Run shellscript: `$ sh create-lambda.sh` for docker deploy */\n\tlambda.Start(HandleRequest)\n\t// HandleRequest() // \ttesting:\n}", "func Main() {\n\t// delete temp files with substituted env vars when the program terminates\n\tdefer os.RemoveAll(tempFilesDir)\n\tdefer cleanup()\n\n\tsettings = s.Settings\n\t// set the kubecontext to be used Or create it if it does not exist\n\tif !setKubeContext(settings.KubeContext) {\n\t\tif r, msg := createContext(); !r {\n\t\t\tlog.Fatal(msg)\n\t\t}\n\t}\n\n\t// add repos -- fails if they are not valid\n\tif r, msg := addHelmRepos(s.HelmRepos); !r {\n\t\tlog.Fatal(msg)\n\t}\n\n\tif apply || dryRun || destroy {\n\t\t// add/validate namespaces\n\t\tif !noNs {\n\t\t\taddNamespaces(s.Namespaces)\n\t\t}\n\t}\n\n\tif !skipValidation {\n\t\t// validate charts-versions exist in defined repos\n\t\tif err := validateReleaseCharts(s.Apps); err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t} else {\n\t\tlog.Info(\"Skipping charts' validation.\")\n\t}\n\n\tlog.Info(\"Preparing plan...\")\n\tif destroy {\n\t\tlog.Info(\"--destroy is enabled. 
Your releases will be deleted!\")\n\t}\n\n\tcs := buildState()\n\tp := cs.makePlan(&s)\n\tif !keepUntrackedReleases {\n\t\tcs.cleanUntrackedReleases()\n\t}\n\n\tp.sortPlan()\n\tp.printPlan()\n\tp.sendPlanToSlack()\n\n\tif apply || dryRun || destroy {\n\t\tp.execPlan()\n\t}\n}", "func run_with_ns(opt *Opts) {\n\n\topts_debug(opt)\n\t/*\n\t\tMakes corresponding namespaces actions,\n\t\tif flag was set\n\t*/\n\tset_mnt(opt)\n\tset_uts(opt)\n\tset_ipc(opt)\n\tset_net(opt)\n\tset_pid(opt)\n\tset_uid(opt)\n\n\t//cmd := exec.Command(container_cmd(opt))\n\tcmd := exec.Command(shell)\n\tcmd.Env = []string{\"PS1=📦 [$(whoami)@$(hostname)] ~$(pwd) ‣ \"}\n\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tcmd.Run()\n\n\tunset_pid(opt)\n}", "func main() {\n\texecute.Execute()\n}" ]
[ "0.62555563", "0.61340934", "0.60766923", "0.58835596", "0.5860565", "0.5806928", "0.57558084", "0.57180995", "0.56419706", "0.56223327", "0.55992407", "0.5590488", "0.55770916", "0.5551372", "0.5526669", "0.5500388", "0.54963475", "0.54918987", "0.547027", "0.54582095", "0.5445252", "0.5440301", "0.54395264", "0.5425664", "0.5418762", "0.54144764", "0.5407591", "0.5407165", "0.53723663", "0.5361796", "0.53582907", "0.5352311", "0.5348603", "0.53454286", "0.53419137", "0.53370243", "0.533308", "0.53121483", "0.5303773", "0.5297912", "0.52936924", "0.5285752", "0.52816874", "0.52772766", "0.52772766", "0.52772766", "0.52772766", "0.52772766", "0.52772766", "0.52772766", "0.5274296", "0.52709347", "0.5261501", "0.52586997", "0.5254599", "0.52454406", "0.524334", "0.52399766", "0.5227602", "0.52052456", "0.52028906", "0.5198569", "0.51967007", "0.51948506", "0.51811475", "0.517909", "0.5172196", "0.5170612", "0.5160199", "0.51554465", "0.5153361", "0.5153276", "0.5149865", "0.51461047", "0.51454294", "0.51435065", "0.513989", "0.51374185", "0.51364946", "0.5134412", "0.51294476", "0.512919", "0.5123322", "0.51229393", "0.5120035", "0.51188123", "0.51179945", "0.5115441", "0.5111569", "0.5111397", "0.510276", "0.51020986", "0.5099761", "0.5095545", "0.5095417", "0.5094166", "0.5092933", "0.50879204", "0.50811756", "0.5080368" ]
0.61191183
2
MAIN FUNCTIONS Handle Cart Create
func (main *Main) Create(e echo.Context) (err error) { // get request and validate req := new(request.Create) e.Bind(req) if err = e.Validate(req); err != nil { return rest.ConstructErrorResponse(e, exception.NewInputValidationFailed(err.Error())) } // get user details userId := strconv.Itoa(req.UserID) _, excUser := UserModel.Get("id", userId) if excUser != nil { return rest.ConstructErrorResponse(e, excUser) } // get product details productId := strconv.Itoa(req.ProductID) _, excProduct := ProductModel.Get("id", productId) if excProduct != nil { return rest.ConstructErrorResponse(e, excProduct) } //check if product exist in cart if CartModel.CheckExistingProduct(userId, productId) == true { rest.ConstructErrorResponse(e, exception.NewDuplicateRecordFound("product")) } else { // map req to input data reqData := map[string]interface{}{ "UserID": req.UserID, "ProductID": req.ProductID, "Qty": req.Qty, } //insert data to db cart, exc := CartModel.Create(reqData) if exc != nil { return rest.ConstructErrorResponse(e, exc) } data := map[string]contract.Model{ "created_cart": cart, } return rest.ConstructSuccessResponse(e, data) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ch *CartHandler) Create(w http.ResponseWriter, r *http.Request) {\n\n\tp := new(Product)\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&p); err != nil {\n\t\tRespondWithError(w, http.StatusBadRequest, err)\n\t}\n\tdefer r.Body.Close()\n\n\tp, err := ch.Cart.Save(p)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusBadRequest, err)\n\t\t}\n\t}\n\n\tRespond(w, http.StatusCreated, p)\n}", "func CreateCart(cr cart.Repository) http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tkey := r.URL.Query().Get(\"key\")\n\t\tif key == \"\" {\n\t\t\thttp.Error(w, \"missing key in query string\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tnewCart := cart.New(key)\n\t\terr := cr.Store(newCart)\n\t\tif err != nil {\n\t\t\t//error handling\n\t\t}\n\t\tval := []byte{}\n\t\terr2 := json.Unmarshal(val, newCart)\n\t\tif err2 != nil {\n\t\t\t//\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write(val)\n\t})\n}", "func CreateCart(name string) error {\n\tcart := domain.NewCart()\n\tcart.SetName(name)\n\tst := storage.NewMemoryStore()\n\treturn st.Save(cart.ID, cart.UncommitedChanges())\n}", "func (handler *Handler) createShoppingCart(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tuser, err := handler.authUser(r)\n\tif err != nil {\n\t\thandler.Error(w, r, err)\n\t\treturn\n\t}\n\n\tcart := shoppingcart.ShoppingCart{\n\t\tUserID: user.ID,\n\t}\n\n\tif err := handler.shoppingCartService.Create(r.Context(), &cart); err != nil {\n\t\thandler.Error(w, r, err)\n\t\tlogrus.Errorf(\"Unable to create shopping cart for user %d: %s\", user.ID, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(cart); err != nil {\n\t\tlogrus.Errorf(\"Unable to respond with cart %s\", err)\n\t}\n}", "func CreateNewOrder(res 
http.ResponseWriter, req *http.Request) {\n\tdefer func() { //to handle potential panic situation\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Panic(\"Panic occured at create order:\", err)\n\t\t}\n\t}()\n\tmyUser := ses.GetUser(res, req)\n\tif !ses.AlreadyLoggedIn(req) {\n\t\thttp.Redirect(res, req, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\t//fmt.Println(Items)\n\tsortItems(ds.Items)\n\n\tvar newShoppingCart = []ds.Order{}\n\n\tif req.Method == http.MethodPost {\n\t\tnameRegExp := regexp.MustCompile(`^[\\w'\\-,.][^0-9_!¡?÷?¿/\\\\+=@#$%ˆ&*(){}|~<>;:[\\]]{2,30}$`) //name regexp to check for name pattern match\n\t\tname := strings.TrimSpace(req.FormValue(\"name\"))\n\t\tif !nameRegExp.MatchString(name) {\n\t\t\thttp.Error(res, \"You have entered an invalid name field.\", http.StatusBadRequest)\n\t\t\tlog.Warning(\"Invalid user input for name field\")\n\t\t\treturn\n\t\t}\n\t\tname = pol.Sanitize(name) //pol.Sanitize is used to sanitize inputs\n\n\t\taddRegExp := regexp.MustCompile(`^[\\w'\\-,.][^_!¡?÷?¿/\\\\+=$%ˆ&*(){}|~<>;:[\\]]{2,100}$`) ////address regexp to check for address pattern match\n\t\tadd := strings.TrimSpace(req.FormValue(\"address\"))\n\t\tif !addRegExp.MatchString(add) {\n\t\t\thttp.Error(res, \"You have entered an invalid address.\", http.StatusBadRequest)\n\t\t\tlog.Warning(\"Invalid user input for address field\")\n\t\t\treturn\n\t\t}\n\t\tadd = pol.Sanitize(add) //pol.Sanitize is used to sanitize inputs\n\n\t\tsday := req.FormValue(\"dday\") //sday is string day\n\t\tdayRegExp := regexp.MustCompile(`^[1-7]$`)\n\t\tif !dayRegExp.MatchString(sday) {\n\t\t\thttp.Error(res, \"You have entered an invalid delivery day.\", http.StatusBadRequest)\n\t\t\tlog.Warning(\"Invalid user input for delivery day\")\n\t\t\treturn\n\t\t}\n\n\t\tdday, _ := strconv.Atoi(sday)\n\n\t\tavailableDay := ds.IsDayAvailable(dday)\n\t\tif availableDay == false { //this checks if the order was placed on an unavailable day\n\t\t\terrorString := \"Sorry! 
There are no more available delivery slots for \" + ds.IntToDay(dday)\n\t\t\thttp.Error(res, errorString, http.StatusBadRequest)\n\t\t\tlog.Warning(\"There are no more available delivery slots for \" + ds.IntToDay(dday))\n\t\t\treturn\n\t\t}\n\n\t\torderQtyRegExp := regexp.MustCompile(`^[0-9]{1,2}$`) //order quantity reg exp to check for quantity pattern match\n\n\t\tfor i := 0; i < len(ds.Items); i++ {\n\t\t\tif !orderQtyRegExp.MatchString(req.FormValue(ds.Items[i].Name)) {\n\t\t\t\terrorString := \"You have entered an invalid order quantity for \" + ds.Items[i].Name + \".\"\n\t\t\t\thttp.Error(res, errorString, http.StatusBadRequest)\n\t\t\t\tlog.Warning(\"Invalid user input for order quantity\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tquantity, _ := strconv.Atoi(req.FormValue(ds.Items[i].Name)) //label for the form input is the item name, but returns a quantity of that item\n\t\t\tquantity64 := float64(quantity)\n\n\t\t\tif quantity64 > 0 {\n\t\t\t\titemAvailable := availableItem(ds.Items[i].Name)\n\t\t\t\tif itemAvailable == false { // this checks if the current item is in stock\n\t\t\t\t\terrorString := \"Oops, \" + ds.Items[i].Name + \" is no longer available for ordering.\"\n\t\t\t\t\thttp.Error(res, errorString, http.StatusBadRequest)\n\t\t\t\t\tlog.Warning(\"User overordered on item:\", ds.Items[i].Name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tavailableBalance := isBalanceEnough(ds.Items[i].Name, quantity64)\n\t\t\t\tif availableBalance == false { //this checks if the user over ordered on the item\n\t\t\t\t\terrorString := \"Oops, there is no sufficient balance of\" + ds.Items[i].Name + \" for ordering..\"\n\t\t\t\t\thttp.Error(res, errorString, http.StatusBadRequest)\n\t\t\t\t\tlog.Warning(\"User overordered on item:\", ds.Items[i].Name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsingleCart := ds.Order{\n\t\t\t\t\tOrderItem: ds.Items[i].Name,\n\t\t\t\t\tQty: quantity64,\n\t\t\t\t}\n\t\t\t\tnewShoppingCart = append(newShoppingCart, singleCart)\n\t\t\t}\n\t\t}\n\n\t\tif 
len(newShoppingCart) == 0 {\n\t\t\thttp.Error(res, \"Error: You cannot submit an empty shopping cart.\", http.StatusBadRequest)\n\t\t\tlog.Warning(\"User entered empty shopping cart.\")\n\t\t\treturn\n\t\t}\n\n\t\ton := atomic.AddInt64(&OrderNumber, 1) // use of atomic function to prevent multiple clients from possibly creating identical order number\n\t\tamt := ds.CalculateAmount(newShoppingCart)\n\t\tnewOrder := ds.OrderInfo{\n\t\t\tUsername: myUser.Username,\n\t\t\tName: name,\n\t\t\tAddress: add,\n\t\t\tDeliveryDay: dday,\n\t\t\tOrderNum: on,\n\t\t\tShoppingCart: newShoppingCart,\n\t\t\tAmount: amt,\n\t\t}\n\n\t\tds.OrderList.AddOrder(newOrder)\n\t\terr := UpdateWeeklySchedule(ds.OrderList)\n\t\tif err != nil {\n\t\t\thttp.Error(res, err.Error(), http.StatusBadRequest)\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\terr = UpdateWeeklyOrder(ds.OrderList)\n\t\tif err != nil {\n\t\t\thttp.Error(res, err.Error(), http.StatusBadRequest)\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\t//fmt.Println(weeklySchedule)\n\t\t//fmt.Println(items)\n\t\t//orderList.printAllOrderNodes()\n\n\t\thttp.Redirect(res, req, \"/menu\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\ttype balanceStruct struct {\n\t\tItem string\n\t\tQuantity float64\n\t}\n\n\tvar itemData []balanceStruct\n\n\tfor i := 0; i < len(ds.Items); i++ {\n\t\tremainingQuantity := ds.Items[i].WeeklyCapacity - ds.Items[i].WeeklyOrder\n\t\td := balanceStruct{\n\t\t\tItem: ds.Items[i].Name,\n\t\t\tQuantity: remainingQuantity}\n\t\titemData = append(itemData, d)\n\t}\n\n\terr := tpl.ExecuteTemplate(res, \"createOrder.gohtml\", itemData)\n\tif err != nil {\n\t\thttp.Error(res, err.Error(), http.StatusBadRequest)\n\t\tlog.Fatalln(err)\n\t}\n\n\tViewAvailableDays(res, req)\n\tshowRemainingBalance(res, req)\n}", "func (cartData *CartData) CreateCartItem(CartID, UserID uint64) error {\n\tconn, err := db.MySQLConnect()\n\t// defer conn.Close()\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tconn.Close()\n\t\treturn 
err\n\t}\n\n\tcartItem := CartItem{\n\t\tCartID: CartID,\n\t}\n\n\tlog.Printf(\"Duzina: %d\", len(cartData.Items))\n\tfor _, item := range cartData.Items {\n\t\tcartItem.ItemID = item.ItemID\n\t\tcartItem.Amount = item.Amount\n\t\tlog.Printf(\"ITEMID: %d\", item.ItemID)\n\t\tconn.Create(&cartItem)\n\t\t// if createErr != nil {\n\t\t// \tlog.Println(\"Drugi fail\")\n\t\t// \tlog.Println(createErr.Error)\n\t\t// \tconn.Close()\n\t\t// \treturn createErr.Error\n\t\t// }\n\t\t// portions := GetPortionByCategoryID(item.CategoryID)\n\t\t// image := GetImageByItemID(item.ItemID)\n\t\t// ingredients := GetIngredientsByItemID(item.ItemID)\n\t\t// homeItem := HomeItem{\n\t\t// \tItem: item,\n\t\t// \tPortion: portions,\n\t\t// \tIngredient: ingredients,\n\t\t// \tImage: image,\n\t\t// }\n\t\t// homeItems = append(homeItems, homeItem)\n\t}\n\n\tdeliveryAt := time.Now().Add(DefaultOrderWaitTime).Format(\"H:i:s\")\n\tparsedDeliveryAt, parseErr := time.Parse(\"H:i:s\", deliveryAt)\n\tif parseErr != nil {\n\t\tlog.Println(parseErr.Error())\n\t}\n\n\torder := Order{\n\t\tUserID: UserID,\n\t\tCartID: CartID,\n\t\tIsCanceled: 0,\n\t\tIsDelivered: 0,\n\t\tIsAccepted: \"pending\",\n\t\tDeliveryAt: parsedDeliveryAt,\n\t\tCreatedAt: time.Now(),\n\t}\n\n\tconn.Create(&order)\n\t// if createErr != nil {\n\t// \tlog.Println(\"CreateERR\")\n\t// \tlog.Println(createErr.Error)\n\t// }\n\tconn.Close()\n\n\treturn nil\n}", "func (rest *RestApi) AddToCart(w http.ResponseWriter, r *http.Request, cart_id int64, item_id int64, quantity int64) {\n\n //@ TODO: Need to check for quantity and increment if necessary\n\n cart := rest.GoCart.GetCart(cart_id)\n\n item := rest.GoCart.GetItem(item_id)\n item.SetItemQuantity(quantity)\n\n cart.Add(*item)\n rest.GoCart.SaveCart(*cart)\n}", "func (app *application) AddToCart(w http.ResponseWriter, r *http.Request) {\r\n\t// a seller does not have a shopping cart\r\n\tisSeller := app.isSeller(r)\r\n\tif isSeller 
{\r\n\t\tw.WriteHeader(http.StatusUnauthorized)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusUnauthorized),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// retrieve userid from session cookie\r\n\tuserid := app.session.GetString(r, \"userid\")\r\n\r\n\t// retrieve ProductID from url\r\n\t// the ProducID should be valid\r\n\tproductID, err := strconv.Atoi(r.URL.Query().Get(\"productid\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusBadRequest),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// perform the insert at the database\r\n\terr = app.cart.InsertItem(userid, productID)\r\n\tif err != nil {\r\n\t\tapp.errorLog.Println(err)\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\tapp.session.Put(r, \"flash\", \"Product successfully added to cart.\")\r\n\r\n\thttp.Redirect(w, r, r.Referer(), http.StatusSeeOther)\r\n}", "func (s *Server) CreateCart(ctx context.Context, req *proto.CartCreateRequest) (*proto.CartResponse, error) {\n\tcart, err := s.carts.Create(ctx, req.UserId)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to create the Cart: %s\", err)\n\t}\n\n\tpCart, err := toProtoCart(cart)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to convert the Cart: %s\", err)\n\t}\n\n\treturn &proto.CartResponse{Cart: pCart}, nil\n}", "func GenerateUniqueCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func AddItemToCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func AddCart() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trequestBody := 
model.ChatfuelCarts{}\n\t\tc.Bind(&requestBody)\n\n\t\tcart := model.Carts{\n\t\t\tMessengerUserID: requestBody.MessengerUserID,\n\t\t\tFirstName: requestBody.FirstName,\n\t\t\tProductID: requestBody.ProductID,\n\t\t\tProductName: requestBody.ProductName,\n\t\t\tQty: requestBody.Qty,\n\t\t\tPrice: requestBody.Price,\n\t\t}\n\n\t\tdb.Db.Create(&cart)\n\n\t\ttext := []model.Text{}\n\t\ttext = append(text, model.Text{\n\t\t\tText: \"加入購物車成功\",\n\t\t})\n\n\t\tmessage := model.Message{\n\t\t\tMessage: text,\n\t\t}\n\n\t\tc.JSON(http.StatusOK, message)\n\t}\n}", "func CreateProduct(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {\n /*\n Arguments:\n * ID\n * Name\n * HeatId\n * Quantity\n * Description\n * Organization\n * Unit\n * CreatedAt\n */\n\n ID := \"product-\" + args[0]\n quantity, err := strconv.ParseFloat(args[4], 64)\n if err != nil {\n return shim.Error(err.Error())\n }\n createdAt := time.Now().Format(\"2006-01-02 15:04:05 +0000 UTC\")\n if quantity < 0 {\n return shim.Error(\"Quantity should be positive\")\n }\n\n var product = types.Product{Class: \"Product\", ID: ID, Name: args[1], HeatID: args[2], Quantity: quantity, Description: args[4], Owner: args[5], Unit: args[6], CreatedAt: createdAt, ModifiedAt: createdAt}\n\n CreatedProductBytes, _ := json.Marshal(product)\n APIstub.PutState(ID, CreatedProductBytes)\n\n return shim.Success(CreatedProductBytes)\n}", "func addCartHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t/**\n\t\t\tMongo server setup\n\t\t**/\n\t\tsession, err := mgo.Dial(mongodb_server)\n if err != nil {\n fmt.Println(\"mongoserver panic\")\n }\n defer session.Close()\n session.SetMode(mgo.Monotonic, true)\n c := session.DB(mongodb_database).C(\"cart\")\n \n\t\t/**\n\t\t\tGet Post body\n\t\t**/ \n body, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tvar cart Cart\n\t\tjson.Unmarshal(body, 
&cart)\n\t\tc.Insert(cart)\n\t\t\n\t\tvar response Success\n\t\tresponse.Success = true\n \n\t\tformatter.JSON(w, http.StatusOK, response)\n\t}\n}", "func (cartItem CartItem) create() (Entity, error) {\n\n\t_item := cartItem\n\tif err := db.Create(&_item).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := _item.GetPreloadDb(false,false, nil).First(&_item,_item.Id).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := (Order{Id: _item.OrderId, AccountId: _item.AccountId}).UpdateDeliveryData(); err != nil {\n\t\tlog.Println(\"Error update cart item: \", err)\n\t}\n\tif err := (Order{Id: _item.OrderId, AccountId: _item.AccountId}).UpdateCost(); err != nil {\n\t\tlog.Println(\"Error update cart item: \", err)\n\t}\n\n\tvar entity Entity = &_item\n\n\treturn entity, nil\n}", "func (h *Stocks) Create(ctx context.Context, w http.ResponseWriter, r *http.Request, _ map[string]string) error {\n\n\tctxValues, err := webcontext.ContextValues(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclaims, err := auth.ClaimsFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//\n\treq := new(inventory.AddStockRequest)\n\tdata := make(map[string]interface{})\n\tf := func() (bool, error) {\n\t\tif r.Method == http.MethodPost {\n\t\t\terr := r.ParseForm()\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tdecoder := schema.NewDecoder()\n\t\t\tdecoder.IgnoreUnknownKeys(true)\n\n\t\t\tif err := decoder.Decode(req, r.PostForm); err != nil {\n\t\t\t\treturn false, errors.WithMessage(err, \"Something wrong\")\n\t\t\t}\n\n\t\t\tresp, err := h.Repo.AddStock(ctx, claims, *req, ctxValues.Now)\n\t\t\tif err != nil {\n\t\t\t\tswitch errors.Cause(err) {\n\t\t\t\tdefault:\n\t\t\t\t\tif verr, ok := weberror.NewValidationError(ctx, err); ok {\n\t\t\t\t\t\tdata[\"validationErrors\"] = verr.(*weberror.Error)\n\t\t\t\t\t\treturn false, nil\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Display a success message to 
the product.\n\t\t\twebcontext.SessionFlashSuccess(ctx,\n\t\t\t\t\"Inventory Created\",\n\t\t\t\t\"Inventory successfully created.\")\n\n\t\t\treturn true, web.Redirect(ctx, w, r, urlStocksView(resp.ID), http.StatusFound)\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\tend, err := f()\n\tif err != nil {\n\t\treturn web.RenderError(ctx, w, r, err, h.Renderer, TmplLayoutBase, TmplContentErrorGeneric, web.MIMETextHTMLCharsetUTF8)\n\t} else if end {\n\t\treturn nil\n\t}\n\n\tdata[\"products\"], err = h.ShopRepo.FindProduct(ctx, shop.ProductFindRequest{ Order: []string{\"name\"} })\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata[\"form\"] = req\n\tdata[\"urlStocksIndex\"] = urlStocksIndex()\n\n\tif verr, ok := weberror.NewValidationError(ctx, webcontext.Validator().Struct(inventory.AddStockRequest{})); ok {\n\t\tdata[\"validationDefaults\"] = verr.(*weberror.Error)\n\t}\n\n\treturn h.Renderer.Render(ctx, w, r, TmplLayoutBase, \"stocks-create.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)\n}", "func (main *Main) Create(e echo.Context) (err error) {\n\n\t// get request and validate\n\treq := new(request.Create)\n\te.Bind(req)\n\tif err = e.Validate(req); err != nil {\n\t\treturn rest.ConstructErrorResponse(e, exception.NewInputValidationFailed(err.Error()))\n\t}\n\t// map req to input data\n\treqData := input.NewNewProductCreate(\n\t\tmap[string]interface{}{\n\t\t\t\"Name\": req.Name,\n\t\t\t\"Qty\": req.Qty,\n\t\t\t\"Price\": req.Price,\n\t\t\t\"Weight\": req.Weight,\n\t\t\t\"Images\": req.Images,\n\t\t\t\"Description\": req.Description,\n\t\t},\n\t)\n\t//insert data to db\n\tproduct, exc := ProductModel.Create(reqData)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\tdata := map[string]contract.Model{\n\t\t\"created_product\": product,\n\t}\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func (store *Store) AddToCart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You are trying to add items to the cart.\")\n\treturn 
true, nil\n}", "func (t *Procure2Pay) CreateItems(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n\tvar objitem item\t\n\tvar err error\n\n\tfmt.Println(\"Entering CreateItems\")\n\n\tif (len(args) < 1) {\n\t\tfmt.Println(\"Invalid number of args\")\n\t\treturn shim.Error(err.Error())\n\t\t//return nil, errors.New(\"Expected atleast one arguments for initiate Transaction\")\n\t}\n\n\tfmt.Println(\"Args [0] is : %v\\n\",args[0])\n\t\n\t//unmarshal item data from UI to \"item\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objitem)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreateItem input item: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t//return nil, nil\n\t}\n\n\tfmt.Println(\"item object ItemID variable value is : %s\\n\",objitem.ItemID);\n\t\n\t\t// Data insertion for Couch DB starts here \n\t\ttransJSONasBytes, err := json.Marshal(objitem)\n\t\terr = stub.PutState(objitem.ItemID, transJSONasBytes)\n\t\t// Data insertion for Couch DB ends here \n\n\t\tfmt.Println(\"Create items Successfully Done\")\t\n\t\n\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"\\nUnable to make transevent inputs : %v \", err)\n\t\t\t\treturn shim.Error(err.Error())\n\t\t\t\t//return nil,nil\n\t\t\t}\n\treturn shim.Success(nil)\n\t//return nil, nil\n}", "func AddCartItem(service Service, userService users.Service) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\t\tlogger := loglib.GetLogger(ctx)\n\t\tusername, err := auth.GetLoggedInUsername(r)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusForbidden, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tuser, err := userService.RetrieveUserByUsername(ctx, username)\n\t\tif err != nil || user == nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusUnauthorized, errorcode.UserNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Infof(\"user is 
%v\", user.Username)\n\t\t// unmarshal request\n\t\treq := addCartItemRequest{}\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); (err != nil || req == addCartItemRequest{}) {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// validate request\n\t\tif err := req.Validate(); err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tcart, err := service.AddItemCart(ctx, user.ID, req.ProductID, req.Quantity)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusInternalServerError, \"internal_error\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\thttpresponse.RespondJSON(w, http.StatusOK, cart, nil)\n\t}\n}", "func (main *Main) Create(e echo.Context) (err error) {\n\n\t// get request and validate\n\treq := new(request.Create)\n\te.Bind(req)\n\tif err = e.Validate(req); err != nil {\n\t\treturn rest.ConstructErrorResponse(e, exception.NewInputValidationFailed(err.Error()))\n\t}\n\t// map req to input data\n\treqData := input.NewNewTransactionCreate(\n\t\tmap[string]interface{}{\n\t\t\t\"Name\": req.Name,\n\t\t\t\"Qty\": req.Qty,\n\t\t\t\"Price\": req.Price,\n\t\t\t\"Weight\": req.Weight,\n\t\t\t\"Images\": req.Images,\n\t\t\t\"Description\": req.Description,\n\t\t},\n\t)\n\t//insert data to db\n\ttransaction, exc := TransactionModel.Create(reqData)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\tdata := map[string]contract.Model{\n\t\t\"created_transaction\": transaction,\n\t}\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func (ctl *SaleCounterProductController) Create() {\n\tctl.Data[\"Action\"] = \"create\"\n\tctl.Data[\"Readonly\"] = false\n\tctl.PageAction = utils.MsgCreate\n\tctl.Data[\"FormField\"] = \"form-create\"\n\tctl.Layout = \"base/base.html\"\n\tctl.TplName = \"sale/sale_counter_product_form.html\"\n}", "func (t 
*Procure2Pay) CreatePurchaseOrder(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n var objpurchaseOrder purchaseOrder\n\tvar objitem item\n\tvar err error\n\t\n\tfmt.Println(\"Entering CreatePurchaseOrder\")\n\n\tif len(args) < 1 {\n\t\tfmt.Println(\"Invalid number of args\")\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Args [0] is : %v\\n\", args[0])\n\n\t//unmarshal customerInfo data from UI to \"customerInfo\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objpurchaseOrder)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.POID)\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.Quantity)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytes, err := json.Marshal(objpurchaseOrder)\n\terr = stub.PutState(objpurchaseOrder.POID, transJSONasBytes)\n\t// Data insertion for Couch DB ends here\n\n\t//unmarshal LoanTransactions data from UI to \"LoanTransactions\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objitem)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"item object Item ID variable value is : %s\\n\", objitem.ItemID)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytesLoan, err := json.Marshal(objitem)\n\terr = stub.PutState(objitem.ItemID, transJSONasBytesLoan)\n\t// Data insertion for Couch DB ends here\n\n\tfmt.Println(\"Create Purchase Order Successfully Done\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nUnable to make transevent inputs : %v \", err)\n\t\treturn shim.Error(err.Error())\n\t\t//return nil,nil\n\t}\n\treturn shim.Success(nil)\n}", "func CreateProduct(c *gin.Context) {\n\t// if os.Getenv(\"app_status\") != \"true\" 
{\n\t// \tc.JSON(http.StatusInternalServerError, \"Product service is temporarily down\")\n\t// \treturn\n\t// }\n\tdb := DbConn()\n\tvar prod Products\n\terr := c.BindJSON(&prod)\n\tfmt.Println(\"Error binding json: \", err)\n\tfmt.Println(\"prod: \", prod)\n\tif err != nil {\n\t\tfmt.Println(\"Error in req format\")\n\t\tc.JSON(http.StatusBadRequest, \"Error in req format\")\n\t}\n\terr = db.Create(&prod).Error\n\tif err != nil {\n\t\tfmt.Println(\"Error in inserting in database\")\n\t\tc.String(http.StatusServiceUnavailable, \"Error in inserting in database\")\n\t}\n\n\tc.String(http.StatusOK, \"Success\")\n\n}", "func CreateProduct(w http.ResponseWriter, r *http.Request) {\n var p dynamodbservice.Product\n w.Header().Add(\"Content-Type\", \"application/json\")\n if err := json.NewDecoder(r.Body).Decode(&p); err != nil {\n http.Error(w, err.Error(), http.StatusBadRequest)\n return\n }\n\n defer r.Body.Close()\n\n if err := dynamodbservice.Items.AddProduct(p); err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n return\n }\n\n w.WriteHeader(http.StatusCreated)\n json.NewEncoder(w).Encode(p)\n}", "func NewCart() console.Cartridge {\n\treturn &cartridge{\n\t\tBaseCartridge: console.NewBaseCart(),\n\t}\n}", "func prepareCartWithDeliveries(t *testing.T, e *httpexpect.Expect) {\n\tt.Helper()\n\thelper.GraphQlRequest(t, e, loadGraphQL(t, \"cart_add_to_cart\", map[string]string{\"MARKETPLACE_CODE\": \"fake_simple\", \"DELIVERY_CODE\": \"delivery1\"})).Expect().Status(http.StatusOK)\n\thelper.GraphQlRequest(t, e, loadGraphQL(t, \"cart_add_to_cart\", map[string]string{\"MARKETPLACE_CODE\": \"fake_simple\", \"DELIVERY_CODE\": \"delivery2\"})).Expect().Status(http.StatusOK)\n}", "func Create(c *gin.Context) {\n\tworkshopData := CreateWorkShop{}\n\tuser := c.MustGet(\"user\").(*entities.User)\n\n\terr := c.ShouldBind(&workshopData)\n\tif err == nil {\n\t\tr, errRegister := RegisterNewWorkShop(user, workshopData)\n\t\tif errRegister != nil 
{\n\t\t\tresponse := global.ResponseServices(workshopData, \"400\", errRegister.Error())\n\t\t\tc.JSON(400, response)\n\t\t\treturn\n\t\t}\n\t\tresponse := global.ResponseServices(r, \"200\", \"Se he creado el taller con exito\")\n\t\tc.JSON(http.StatusOK, response)\n\t\treturn\n\t}\n\tresponse := global.ResponseServices(workshopData, \"400\", err.Error())\n\tc.JSON(400, response)\n}", "func (store *Store) Cart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You have requested the cart.\")\n\treturn true, nil\n}", "func (rest *RestApi) Init() error {\n rest.GoCart.loadConfig()\n mysql := MysqlConnection{\n host: rest.GoCart.Config.Database.Host,\n port: rest.GoCart.Config.Database.Port,\n user: rest.GoCart.Config.Database.Username,\n password: rest.GoCart.Config.Database.Password,\n database: rest.GoCart.Config.Database.Database,\n table: rest.GoCart.Config.Database.Cart.Table,\n table_index: rest.GoCart.Config.Database.Cart.Mappings.Index,\n }\n mysql.EnsureCartTable()\n\n rest.GoCart = GoCart{\n Connection: mysql,\n }\n router := mux.NewRouter()\n\n /**\n * GET request\n */\n //http.HandleFunc(\"/gocart/getCart\", func(w http.ResponseWriter, r *http.Request) {\n router.HandleFunc(\"/gocart/getCart\", func(w http.ResponseWriter, r *http.Request) {\n cart_id, err := strconv.ParseInt(r.URL.Query().Get(\"cart_id\"), 10, 64)\n if err != nil {\n panic(err)\n }\n rest.GetCart(w, r, cart_id)\n }).Methods(\"GET\")\n\n /**\n * POST request\n */\n router.HandleFunc(\"/gocart/addToCart\", func(w http.ResponseWriter, r *http.Request) {\n cart_id, err := strconv.ParseInt(r.URL.Query().Get(\"cart_id\"), 10, 64)\n if err != nil {\n panic(err)\n }\n items_qsp := r.URL.Query().Get(\"items\")\n item_quantity := r.URL.Query().Get(\"quantity\")\n\n ids := strings.Split(items_qsp, \",\")\n for _, item_id := range ids {\n item_id, err := strconv.ParseInt(item_id, 10, 64)\n if err != nil {\n panic(err)\n }\n item_quantity, err := strconv.ParseInt(item_quantity, 10, 64)\n if err 
!= nil {\n panic(err)\n }\n rest.AddToCart(w, r, cart_id, item_id, item_quantity)\n }\n // @TODO: Print some error/success message\n }).Methods(\"POST\")\n\n log.Fatal(http.ListenAndServe(\":9090\", router))\n return nil\n}", "func Store(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\t//if !c.FormValid(\"name\") {\n\t//\tCreate(w, r)\n\t//\treturn\n\t//}\n\n\tif !u.IsPositiveInteger(r.FormValue(\"amount\")) {\n\t\tc.FlashNotice(\"Enter valid amount\")\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\t_, err := code.Create(c.DB, r.FormValue(\"amount\"), r.FormValue(\"details\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tCreate(w, r)\n\t\treturn\n\t}\n\n\tc.FlashSuccess(\"Item added.\")\n\tc.Redirect(uri)\n}", "func handleFuncCart(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t\n\tc.Infof(\"handleFuncCarts\")\n\tval, err := handleCarts(c, r)\n\tif err == nil {\n\t\terr = json.NewEncoder(w).Encode(val)\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(fmt.Sprintf(\"api error: %#v\", err)))\n\t\treturn\t\n\t}\n}", "func CreateItem(w http.ResponseWriter, r *http.Request) {\r\n\r\n\t// get the bearer token\r\n\tbearerHeader := r.Header.Get(\"Authorization\")\r\n\r\n\t// validate token, it will return a User{UserID, Name}\r\n\tuser, err := ValidateToken(bearerHeader)\r\n\r\n\tcheckErr(err)\r\n\r\n\t// get the db connection\r\n\tdb := getDBConn()\r\n\r\n\t// close db connection\r\n\tdefer db.Close()\r\n\r\n\tvar params item\r\n\r\n\t// decode the request parameters to item type\r\n\terr = json.NewDecoder(r.Body).Decode(&params)\r\n\r\n\tcheckErr(err)\r\n\r\n\t// insert into the items db\r\n\tstmt, err := db.Prepare(\"INSERT INTO items SET itemID=?,merchantID=?,name=?\")\r\n\r\n\t// close the stmt request\r\n\tdefer stmt.Close()\r\n\r\n\tcheckErr(err)\r\n\r\n\t// execute the insert statement\r\n\tres, err := stmt.Exec(params.ItemID, user.UserID, params.Name)\r\n\r\n\tcheckErr(err)\r\n\r\n\tid, err 
:= res.LastInsertId()\r\n\r\n\tcheckErr(err)\r\n\r\n\tfmt.Println(id)\r\n\r\n\t// return the order created msg in json format\r\n\tjson.NewEncoder(w).Encode(\"Item Created!\")\r\n}", "func (clgCtl *CatalogueController) Create(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Creating Catalogue.\\n\")\n\n\tauthClaims := r.Context().Value(contextkey.ClaimToken).(signinclaimresource.SignInClaimResource)\n\n\tnewClg := cataloguemodel.NewCatalogue()\n\terr := json.NewDecoder(r.Body).Decode(newClg)\n\tif err != nil {\n\t\tclgCtl.WriteResponse(w, http.StatusBadRequest, false, nil, \"Invalid create catalogue request.\")\n\t\treturn\n\t}\n\n\tvalid, message := newClg.DoValidate()\n\tif !valid {\n\t\tclgCtl.WriteResponse(w, http.StatusBadRequest, false, nil, message)\n\t\treturn\n\t}\n\n\tnewClg.Status = status.Active.String()\n\tnewClg.CreatedBy = authClaims.GetUsername()\n\tnewClg.ModifiedBy = authClaims.GetUsername()\n\tnewClg.Vers = 1\n\n\tclgRepo := cataloguerepository.NewCatalogueRepository()\n\tnbrRows, err := clgRepo.Create(r.Context(), newClg)\n\tif err != nil {\n\t\tclgCtl.WriteResponse(w, http.StatusInternalServerError, false, nil, err.Error())\n\t\treturn\n\t}\n\n\tif nbrRows == 0 {\n\t\tclgCtl.WriteResponse(w, http.StatusNotFound, false, nil, \"Catalogue was not created.\")\n\t\treturn\n\t}\n\n\tresult, err := clgRepo.GetByID(r.Context(), newClg.GetCode())\n\tif err != nil {\n\t\tclgCtl.WriteResponse(w, http.StatusInternalServerError, false, nil, err.Error())\n\t\treturn\n\t}\n\n\tif result != nil {\n\t\tfieldDefRepo := customfielddefinitionrepository.NewCustomFieldDefinitionRepository()\n\t\tfor _, newFieldDef := range newClg.GetAllCustomFieldDefinitions() {\n\t\t\tif newFieldDef.GetChangeMode() == changemode.Add {\n\t\t\t\tnewFieldDef.CatalogueCode = newClg.GetCode()\n\t\t\t\tnewFieldDef.CreatedBy = newClg.GetCreatedBy()\n\t\t\t\tnewFieldDef.CreatedAt = newClg.GetCreatedAt()\n\t\t\t\tnewFieldDef.ModifiedBy = 
newClg.GetModifiedBy()\n\t\t\t\tnewFieldDef.ModifiedAt = newClg.GetModifiedAt()\n\t\t\t\tnewFieldDef.Vers = 1\n\n\t\t\t\tnbrRows, err := fieldDefRepo.Create(r.Context(), newFieldDef)\n\t\t\t\tif err != nil {\n\t\t\t\t\tclgCtl.WriteResponse(w, http.StatusInternalServerError, false, nil, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif nbrRows == 0 {\n\t\t\t\t\tclgCtl.WriteResponse(w, http.StatusNotFound, false, nil, \"Custom Field Definition was not created.\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfieldDefs, err := fieldDefRepo.GetByCatalogue(r.Context(), newClg.GetCode())\n\t\tif err != nil {\n\t\t\tclgCtl.WriteResponse(w, http.StatusInternalServerError, false, nil, err.Error())\n\t\t\treturn\n\t\t}\n\t\tresult.CustomFieldDefinitions = fieldDefs\n\t}\n\n\tclgCtl.WriteResponse(w, http.StatusAccepted, true, result, \"Catalogue has been created.\")\n}", "func GetCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (c *PurchaseController) Create(ctx *app.CreatePurchaseContext) error {\n\n\t// sets the document id\n\tnewID := bson.NewObjectId()\n\tctx.Payload.ID = &newID\n\n\t// sets initial purchase status\n\tnewStatus := new(string)\n\t*newStatus = \"CREATED\"\n\n\tctx.Payload.Status = newStatus\n\n\t// reuse from connection pool\n\tsession := Database.Session.Copy()\n\tdefer session.Close()\n\n\t// inserts the document into Purchase collection\n\terr := session.DB(\"services-pos\").C(\"Purchase\").Insert(ctx.Payload)\n\n\t// ops! something went wrong...\n\tif err != nil {\n\t\tif mgo.IsDup(err) {\n\t\t\t// purchase already exists. (HTTP 409 - Conflict)\n\t\t\treturn ctx.Conflict()\n\t\t}\n\n\t\tService.LogError(err.Error())\n\n\t\t// HTTP 500 - Internal Server Error\n\t\treturn ctx.Err()\n\t}\n\n\t// indicates the new URI for the created resource (e.g. 
/purchases/{:id})\n\tctx.ResponseData.Header().Set(\"Location\", app.PurchaseHref(newID.Hex()))\n\n\t// HTTP 201 - Created\n\treturn ctx.Created()\n}", "func (s *service) Create(ctx context.Context, buyer string) (*Basket, error) {\n\n\t//TODO validation?\n\tbasket := &Basket{\n\t\tId: GenerateId(),\n\t\tBuyerId: buyer,\n\t\tItems: nil,\n\t\tCreatedAt: time.Now(),\n\t}\n\terr := s.repo.Create(ctx, basket)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Service:Failed to create basket\")\n\t}\n\treturn basket, nil\n\n}", "func (ac *AdminCtl) Create() {\n\tif ac.Ctx.Request().Method == \"POST\" {\n\t\tnumber, _ := strconv.Atoi(ac.Ctx.Request().FormValue(\"number\"))\n\t\tname := ac.Ctx.Request().FormValue(\"name\")\n\t\tmtype := ac.Ctx.Request().FormValue(\"type\")\n\t\tcount, _ := strconv.Atoi(strings.Trim(ac.Ctx.Request().FormValue(\"count\"), \" \")) // 去除空白字符\n\t\tprice, _ := strconv.Atoi(ac.Ctx.Request().FormValue(\"price\"))\n\t\thref := ac.Ctx.Request().FormValue(\"href\")\n\t\turl := ac.Ctx.Request().FormValue(\"url\")\n\t\tsnumber := ac.Ctx.Request().FormValue(\"number\")\n\n\t\tproduct := &models.Product{\n\t\t\tNumber: number,\n\t\t\tName: name,\n\t\t\tType: mtype,\n\t\t\tCount: count,\n\t\t\tPrice: price,\n\t\t\tHref: href,\n\t\t\tURL: url,\n\t\t\tBrief: \"/data/\" + snumber + \"/brief\",\n\t\t\tDetail: \"/data/\" + snumber + \"/detail\",\n\t\t}\n\t\tac.Ctx.DB.Create(&product)\n\t\tac.Ctx.Redirect(\"/admin\", http.StatusFound)\n\t} else {\n\t\tac.Ctx.Data[\"AddPage\"] = true\n\t\tac.Ctx.Template = \"admin-add\"\n\t\tac.HTML(http.StatusOK)\n\t}\n}", "func (s *SmartContract) createProduct(stub shim.ChaincodeStubInterface, args []string) peer.Response {\n\tidentity, err := GetInvokerIdentity(stub)\n\tif err != nil {\n\t\tshim.Error(fmt.Sprintf(\"Error getting invoker identity: %s\\n\", err.Error()))\n\t}\n\ts.logger.Infof(\"%+v\\n\", identity.Cert.Subject.String())\n\n\tif !identity.CanInvoke(\"createProduct\") {\n\t\treturn 
peer.Response{\n\t\t\tStatus: 403,\n\t\t\tMessage: fmt.Sprintf(\"You are not authorized to perform this transaction, cannot invoke createProduct\"),\n\t\t}\n\t}\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1\")\n\t}\n\n\t// Create ProductRequest struct from input JSON.\n\targBytes := []byte(args[0])\n\tvar request ProductRequest\n\tif err := json.Unmarshal(argBytes, &request); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t//Check if product state using id as key exsists\n\ttestProductAsBytes, err := stub.GetState(request.ID)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\t// Return 403 if item exisits\n\tif len(testProductAsBytes) != 0 {\n\t\treturn peer.Response{\n\t\t\tStatus: 403,\n\t\t\tMessage: fmt.Sprintf(\"Existing Product %s Found\", args[0]),\n\t\t}\n\t}\n\n\tproduct := Product{\n\t\tID: request.ID,\n\t\tType: \"product\",\n\t\tName: request.ProductName,\n\t\tHealth: \"\",\n\t\tMetadata: request.Metadata,\n\t\tLocation: request.Location,\n\t\tSold: false,\n\t\tRecalled: false,\n\t\tContainerID: \"\",\n\t\tCustodian: identity.Cert.Subject.String(),\n\t\tTimestamp: int64(s.clock.Now().UTC().Unix()),\n\t\tParticipants: request.Participants,\n\t}\n\n\tproduct.Participants = append(product.Participants, identity.Cert.Subject.String())\n\n\t// Put new Product onto blockchain\n\tproductAsBytes, _ := json.Marshal(product)\n\tif err := stub.PutState(product.ID, productAsBytes); err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tresponse := map[string]interface{}{\n\t\t\"generatedID\": product.ID,\n\t}\n\tbytes, _ := json.Marshal(response)\n\n\ts.logger.Infof(\"Wrote Product: %s\\n\", product.ID)\n\treturn shim.Success(bytes)\n}", "func (ctl *SaleCounterProductController) PostCreate() {\n\tresult := make(map[string]interface{})\n\tpostData := ctl.GetString(\"postData\")\n\tfmt.Printf(\"%+v\\n\", postData)\n\tcounterProduct := new(md.SaleCounterProduct)\n\tvar (\n\t\terr error\n\t\tid 
int64\n\t)\n\tif err = json.Unmarshal([]byte(postData), counterProduct); err == nil {\n\t\t// 获得struct表名\n\t\t// structName := reflect.Indirect(reflect.ValueOf(template)).Type().Name()\n\t\tif id, err = md.AddSaleCounterProduct(counterProduct, &ctl.User); err == nil {\n\t\t\tresult[\"code\"] = \"success\"\n\t\t\tresult[\"location\"] = ctl.URL + strconv.FormatInt(id, 10) + \"?action=detail\"\n\t\t} else {\n\t\t\tresult[\"code\"] = utils.FailedCode\n\t\t\tresult[\"message\"] = utils.FailedMsg\n\t\t\tresult[\"debug\"] = err.Error()\n\t\t}\n\t} else {\n\t\tresult[\"code\"] = utils.FailedCode\n\t\tresult[\"message\"] = utils.FailedData\n\t\tresult[\"debug\"] = err.Error()\n\t}\n\tctl.Data[\"json\"] = result\n\tctl.ServeJSON()\n\n}", "func (rest *RestApi) GetCart(w http.ResponseWriter, r *http.Request, id int64) error {\n gc := rest.GoCart\n cart := gc.GetCart(id)\n\n bytes, err := json.Marshal(cart)\n if err != nil {\n panic(err)\n }\n\n response := string(bytes)\n fmt.Fprintln(w, response)\n return nil\n}", "func (sc StoreController) Create(c *gin.Context) {\n\tlog.Debug().Caller().Msg(\"stores create\")\n\tp, err := sc.Storeservice.Create(c)\n\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"InvalidAddress\") {\n\t\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\t\tlog.Error().Caller().Err(err).Send()\n\t\t} else {\n\t\t\tc.AbortWithStatus(http.StatusBadRequest)\n\t\t\tlog.Error().Caller().Err(err).Send()\n\t\t}\n\t} else {\n\t\tc.JSON(http.StatusCreated, p)\n\t}\n}", "func (s *Store) Create(c *gin.Context) {\n\n}", "func Create(id, pid, name, foreman, email string) *Order {\n\treturn &Order{\n\t\tID: id,\n\t\tProject: Project{\n\t\t\tID: pid,\n\t\t\tName: name,\n\t\t\tForeman: foreman,\n\t\t\tForemanEmail: email,\n\t\t},\n\t\tItems: []Item{},\n\t\tSentDate: time.Now().Unix(),\n\t\tStatus: Draft,\n\t}\n}", "func (h Handler) create(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tvar co CreateOrder\n\n\tdefer r.Body.Close()\n\tif err := 
json.NewDecoder(r.Body).Decode(&co); err != nil {\n\t\tdhttputil.ErrorHandler(err, w, r)\n\t\treturn\n\t}\n\n\tif err := h.service.CreateOrder(ctx, co); err != nil {\n\t\tdhttputil.ErrorHandler(err, w, r)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}", "func CreateProduct(c *gin.Context) {\n\tvar product Models.Product\n\tc.BindJSON(&product)\n\terr := Models.CreateProduct(&product)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, product)\n\t}\n}", "func (purchase *Purchase) Create(p Purchase) (Purchase, error) {\n\treturn test, nil\n}", "func MakePurchase(c *gin.Context) {\n\n\tform := new(forms.Purchase)\n\tif err := c.Bind(form); err != nil {\n\t\tc.JSON(http.StatusBadRequest, forms.Response{\"bind\", \"can't bind params\"})\n\t\treturn\n\t}\n\n\t// validate\n\tif errs := form.Validate(); errs != nil {\n\t\tc.JSON(http.StatusBadRequest, errs)\n\t\treturn\n\t}\n\n\t// do\n\tcreds, err := form.Do()\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, forms.Response{\"do\", err.Error()})\n\t\treturn\n\t}\n\n\t// ok\n\tc.JSON(http.StatusOK, forms.Response{\"credits_remain\", creds})\n\treturn\n}", "func (c *Cart) Checkout() {\n\t// TODO\n\tfmt.Println(c.Items)\n fmt.Println(\"Total : \" + c.TotalPrice.getPriceInEuro())\n c = new(Cart)\n}", "func ShowCart(w http.ResponseWriter, r *http.Request) {\n\tHomeVars := NewHomePageVars(r)\n\tutils.GenerateTemplate(w, HomeVars, \"cart\")\n}", "func (mr *MockCartMockRecorder) CreateCart(ctx, cart interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateCart\", reflect.TypeOf((*MockCart)(nil).CreateCart), ctx, cart)\n}", "func Create() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trequestBody := productPostRequest{}\n\t\tc.Bind(&requestBody)\n\n\t\tproduct := model.Products{\n\t\t\tProductTypeID: requestBody.ProductTypeID,\n\t\t\tName: 
requestBody.Name,\n\t\t}\n\n\t\terr := db.Db.Create(&product)\n\n\t\tif product.ID == 0 {\n\t\t\tc.JSON(http.StatusBadRequest, err.Error)\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, product)\n\t}\n}", "func (c *Client) Create(ctx context.Context, params *razorpay.OrderParams) (*razorpay.Order, error) {\n\torder := &razorpay.Order{}\n\terr := c.Call(ctx, http.MethodPost, \"/orders\", params, order)\n\treturn order, err\n}", "func EmptyCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (ph *Handler) CreateProduct(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"App : POST /app/product API hit!\")\n\tvar request CreateProductRequest\n\tbody := json.NewDecoder(r.Body)\n\terr := body.Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.DecodeErrorCode, err.Error())\n\t\treturn\n\t}\n\tvalidator := validator.New()\n\terr = validator.Struct(request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tutils.Fail(w, 400, utils.ValidationErrorCode, err.Error())\n\t\treturn\n\t}\n\tproduct, err := ph.ps.CreateProduct(r.Context(), &request)\n\tif err != nil {\n\t\tlog.Println(\"Error : \", err.Error())\n\t\tif err.Error() == utils.ProductNameExistsError {\n\t\t\tutils.Fail(w, 200, utils.ProductNameExistsErrorCode, err.Error())\n\t\t\treturn\n\t\t}\n\t\tutils.Fail(w, 500, utils.DatabaseErrorCode, err.Error())\n\t\treturn\n\t}\n\tlog.Println(\"App : product created! 
id_product : \", product.ID)\n\tutils.Send(w, 200, product)\n}", "func CreateItem(c *gin.Context) {\n\t// Validate input\n\tvar input CreateItemInput\n\tif err := c.ShouldBindJSON(&input); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\t// Create item\n\titem := models.Item{\n\t\tName: input.Name,\n\t\tType: input.Type,\n\t\tRarity: input.Rarity,\n\t\tCost: input.Cost}\n\tmodels.DB.Create(&item)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": item})\n}", "func (e *engine) CreateProduct(c *httpEngine.ServerContext) {\n\t// check iid exists or not\n\tid, err := c.GetURLParam(\"iid\")\n\tif err != nil {\n\t\tc.ErrorHandler(400, err)\n\t\treturn\n\t}\n\tvar product = &domain.Product{}\n\t// check is valid json for product\n\terr = c.BindToJson(product)\n\tif err != nil {\n\t\tc.ErrorHandler(400, err)\n\t\treturn\n\n\t}\n\tproduct.Iid = id\n\tres, err := e.ProductLogic.NewProduct(product)\n\tif err != nil {\n\t\tc.ErrorHandler(400, err)\n\t\treturn\n\n\t}\n\tc.JSON(200, res)\n}", "func (handler *ProdukHandler) CreateProduk(echoCtx echo.Context) error {\n\tvar form CreateProdukBodyRequest\n\tif err := echoCtx.Bind(&form); err != nil {\n\t\terrorResponse := buildErrorResponse(err, entity.ErrInvalidInput)\n\t\treturn echoCtx.JSON(nethttp.StatusBadRequest, errorResponse)\n\t}\n\n\tprodukEntity := entity.NewProduk(\n\t\tuuid.Nil,\n\t\tform.KodeProduk,\n\t\tform.NamaProduk,\n\t\tint(form.Harga),\n\t\tint64(form.Stok),\n\t)\n\n\tif err := handler.service.Create(echoCtx.Request().Context(), produkEntity); err != nil {\n\t\terrorResponse := buildErrorResponse(err, entity.ErrInternalServerError)\n\t\treturn echoCtx.JSON(nethttp.StatusInternalServerError, errorResponse)\n\t}\n\n\tvar res = entity.NewResponse(nethttp.StatusCreated, \"Request processed successfully.\", produkEntity)\n\treturn echoCtx.JSON(res.Status, res)\n}", "func TestCartScenario(t *testing.T) {\n\t\n\te := httpexpect.New(t, API_URL)\n\t\n\tprintComment(\"SC20001\", 
\"Test Add 2 items of Unlimited 1 GB for $24.90\")\n\tcart := map[string]interface{}{\n\t\t\"code\": \"ult_small\",\n\t\t\"name\": \"Unlimited 1GB\",\n\t\t\"price\": 24.90,\n\t\t\"items\": 2,\n\t}\n\n\te.POST(\"/cart\").\n\t\tWithJSON(cart).\n\t\tExpect().\n\t\tStatus(http.StatusOK)\n\n}", "func (app *application) CheckOut(w http.ResponseWriter, r *http.Request) {\r\n\t// retrieve userid from session cookie\r\n\tuserid := app.session.GetString(r, \"userid\")\r\n\r\n\t// check that every item in the cart has legal qty\r\n\t// retrieve information from database\r\n\tshoppingcart, err := app.cart.CheckOut(userid)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// loop through the results to check if every item\r\n\t// passes the check\r\n\tvar pass bool = true\r\n\tfor _, item := range shoppingcart {\r\n\t\tif item.Invalid {\r\n\t\t\tpass = false\r\n\t\t}\r\n\t}\r\n\r\n\t// if any item fails the check, it is flagged\r\n\tif !pass {\r\n\t\tapp.render(w, r, \"shoppingcart.page.tmpl\", &templateData{\r\n\t\t\tShoppingCart: shoppingcart,\r\n\t\t})\r\n\t} else {\r\n\t\t// else proceed to create an order for every item\r\n\t\tfor _, v := range shoppingcart {\r\n\t\t\tv.UserID = userid\r\n\t\t\terr := app.orders.Create(v)\r\n\t\t\tif err != nil {\r\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\t\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t\t\t})\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t// then delete the user's shopping cart\r\n\t\terr = app.cart.DeleteAll(userid)\r\n\t\tif err != nil {\r\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\t\tError: 
http.StatusText(http.StatusInternalServerError),\r\n\t\t\t})\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tapp.render(w, r, \"success.page.tmpl\", &templateData{\r\n\t\t\tUser: &models.User{UserID: userid},\r\n\t\t})\r\n\t}\r\n}", "func Create(db *db.DB) *echo.Echo {\n\n\te := echo.New()\n\te.Validator = &CustomValidator{validator: validator.New()}\n\te.Logger.SetLevel(log.DEBUG)\n\te.Pre(middleware.RemoveTrailingSlash())\n\te.Use(middleware.Logger())\n\te.Use(middleware.CORSWithConfig(middleware.CORSConfig{\n\t\tAllowOrigins: []string{\"*\"},\n\t\tAllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept, echo.HeaderAuthorization},\n\t\tAllowMethods: []string{echo.GET, echo.HEAD, echo.PUT, echo.PATCH, echo.POST, echo.DELETE},\n\t}))\n\n\t// ticketStore := store.NewTicketStore(db)\n\t// ticketHandler := handler.NewTicketHandler(ticketStore)\n\n\te.POST(\"/ticket/buy\", controllers.BuyTicket)\n\n\treturn e\n}", "func (cart *Cart) SaveCart() (*Cart, error) {\n\terr := db.Create(&cart).Error\n\tif err != nil && err != gorm.ErrRecordNotFound {\n\t\treturn nil, err\n\t}\n\n\treturn cart, nil\n}", "func (s *OrderItemService) Create(ctx context.Context, no entity.NewOrderItem) (entity.OrderItem, error) {\n\treturn s.repo.Create(ctx, no)\n}", "func RemoveItemFromCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func CreateOrder(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func CreateProduct(stub shim.ChaincodeStubInterface, args []string) peer.Response {\n\n\tif len(args) != 5 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 5\")\n\t}\n\tvar products = Products{\n\t\tProductName: args[0],\n\t\tProductId: args[1],\n\t\tProductDescription: args[2],\n\t\tProductPrice: args[3],\n\t\tSellerID: args[4]\n\t\tStatus: args[5]\n\t\tCreatedAt: args[6]\n\t\tUpdatedAt: args[7]\n\t\tCategory: args[8]}\n\n\tproductAsBytes, _ := json.Marshal(products)\n\n\tvar uniqueID = args[1]\n\n\terr := stub.PutState(uniqueID, productAsBytes)\n\n\tif err != nil {\n\t\tfmt.Println(\"Erro in create product\")\n\t}\n\n\treturn shim.Success(nil)\n}", "func (dch *DealCashbackHandler) Create(context *gin.Context) {\n\ttokenData := context.MustGet(\"Token\").(map[string]string)\n\n\tdealCashbackData := CreateDealCashback{}\n\n\tif error := Binding.Bind(&dealCashbackData, context); error != nil {\n\t\tcontext.JSON(http.StatusUnprocessableEntity, error)\n\t\treturn\n\t}\n\n\tuserGUID := context.Param(\"guid\")\n\n\tif tokenData[\"user_guid\"] != userGUID {\n\t\tcontext.JSON(http.StatusUnauthorized, Error.TokenIdentityNotMatchError(\"add deal to list\"))\n\t\treturn\n\t}\n\n\tshoppingList := dch.ShoppingListRepository.GetByGUIDAndUserGUID(dealCashbackData.ShoppingListGUID, userGUID, \"\")\n\n\tif shoppingList.GUID == \"\" {\n\t\tcontext.JSON(http.StatusNotFound, Error.ResourceNotFoundError(\"Shopping List\", \"guid\", dealCashbackData.ShoppingListGUID))\n\t\treturn\n\t}\n\n\tdbTransaction := context.MustGet(\"DB\").(*gorm.DB).Begin()\n\n\terror := dch.DealCashbackService.CreateDealCashbackAndShoppingListItem(dbTransaction, userGUID, dealCashbackData)\n\n\tif error != nil {\n\t\tdbTransaction.Rollback()\n\t\terrorCode, _ := strconv.Atoi(error.Error.Status)\n\t\tcontext.JSON(errorCode, error)\n\t\treturn\n\t}\n\n\tdbTransaction.Commit()\n\n\tresult := make(map[string]string)\n\tresult[\"message\"] = \"Successfully add deal guid \" + dealCashbackData.DealGUID + \" to list.\"\n\n\tcontext.JSON(http.StatusOK, gin.H{\"data\": result})\n}", "func (m *MockCart) CreateCart(ctx context.Context, cart models.Cart) (int64, 
error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateCart\", ctx, cart)\n\tret0, _ := ret[0].(int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (v OrdersResource) Create(c buffalo.Context) error {\n // Allocate an empty Order\n order := &models.Order{}\n\n // Bind order to the html form elements\n if err := c.Bind(order); err != nil {\n return err\n }\n\n // Get the DB connection from the context\n tx, ok := c.Value(\"tx\").(*pop.Connection)\n if !ok {\n return fmt.Errorf(\"no transaction found\")\n }\n\n // Validate the data from the html form\n verrs, err := tx.ValidateAndCreate(order)\n if err != nil {\n return err\n }\n\n if verrs.HasAny() {\n return responder.Wants(\"html\", func (c buffalo.Context) error {\n // Make the errors available inside the html template\n c.Set(\"errors\", verrs)\n\n // Render again the new.html template that the user can\n // correct the input.\n c.Set(\"order\", order)\n\n return c.Render(http.StatusUnprocessableEntity, r.HTML(\"/orders/new.plush.html\"))\n }).Wants(\"json\", func (c buffalo.Context) error {\n return c.Render(http.StatusUnprocessableEntity, r.JSON(verrs))\n }).Wants(\"xml\", func (c buffalo.Context) error {\n return c.Render(http.StatusUnprocessableEntity, r.XML(verrs))\n }).Respond(c)\n }\n\n return responder.Wants(\"html\", func (c buffalo.Context) error {\n // If there are no errors set a success message\n c.Flash().Add(\"success\", T.Translate(c, \"order.created.success\"))\n\n // and redirect to the show page\n return c.Redirect(http.StatusSeeOther, \"/orders/%v\", order.ID)\n }).Wants(\"json\", func (c buffalo.Context) error {\n return c.Render(http.StatusCreated, r.JSON(order))\n }).Wants(\"xml\", func (c buffalo.Context) error {\n return c.Render(http.StatusCreated, r.XML(order))\n }).Respond(c)\n}", "func (app *application) listingCreate(w http.ResponseWriter, r *http.Request) {\n\tsession, err := app.sessionStore.Get(r, \"session-name\")\n\tif err != nil 
{\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\terr = r.ParseForm()\n\tif err != nil {\n\t\tapp.clientError(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\tform := forms.New(r.PostForm)\n\tform.Required(\"name\", \"description\", \"price\")\n\tif !form.Valid() {\n\t\tapp.render(w, r, \"listingcreate.page.tmpl\", &templateData{Form: form})\n\t\treturn\n\t}\n\tvendorID := app.authenticatedVendor(r)\n\tprice, err := strconv.Atoi(form.Get(\"price\"))\n\tif err != nil {\n\t\tform.Errors.Add(\"price\", \"enter valid integer\")\n\t}\n\terr = app.listings.Insert(\n\t\tvendorID,\n\t\tprice,\n\t\tform.Get(\"description\"),\n\t\tform.Get(\"name\"),\n\t)\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t}\n\n\tsession.AddFlash(\"Succesful Listed\")\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t}\n\thttp.Redirect(w, r, \"/vendor/listings\", http.StatusSeeOther)\n}", "func (db *ProductRepo) Create() (message string) {\n\tcreate := \"This is create function\"\n\treturn create\n}", "func (client *Client) OrderCreate(ctx context.Context, draft *OrderFromCartDraft, opts ...RequestOption) (result *Order, err error) {\n\tparams := url.Values{}\n\tfor _, opt := range opts {\n\t\topt(&params)\n\t}\n\n\tendpoint := \"orders\"\n\terr = client.create(ctx, endpoint, params, draft, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func InsertProductIntoCart(id_user, id_product int, product model.Product, qty int) (interface{}, error) {\n\t// insert and select data product to cart\n\tshoppingCart := model.Shopping_cart{\n\t\tUser_id: id_user,\n\t\tProduct_id: id_product,\n\t\tName: product.Name,\n\t\tCategory: product.Category,\n\t\tType: product.Type,\n\t\tPrice: product.Price,\n\t\tQty: qty,\n\t}\n\tif err := config.DB.Save(&shoppingCart).Error; err != nil {\n\t\treturn shoppingCart, err\n\t}\n\treturn shoppingCart, nil\n}", "func (*Products) Create(product *Products) (int64, error) {\n\n\tstmt, err := 
db.Prepare(\n\t\t\"insert into products (name, unit) \" +\n\t\t\t\"values ($1, $2) returning id\")\n\n\tif err != nil {\n\t\tlog.Println(\"(CreateProduct:Prepare)\", err)\n\t\treturn -1, err\n\t}\n\n\tdefer stmt.Close()\n\n\terr = stmt.QueryRow(product.Name,\n\t\tproduct.Unit).Scan(&product.ID)\n\n\tif err != nil {\n\t\tlog.Println(\"(CreateProduct:Exec)\", err)\n\t\treturn -1, err\n\t}\n\n\treturn product.ID, nil\n\n}", "func CreateBucket(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var ecsBucket ECSBucket\n err := decoder.Decode(&ecsBucket)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if ecsBucket.ReplicationGroup != \"\" {\n headers[\"x-emc-vpool\"] = []string{ecsBucket.ReplicationGroup}\n }\n if ecsBucket.MetadataSearch != \"\" {\n headers[\"x-emc-metadata-search\"] = []string{ecsBucket.MetadataSearch}\n }\n if ecsBucket.EnableADO {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-is-stale-allowed\"] = []string{\"false\"}\n }\n if ecsBucket.EnableFS {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-file-system-access-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableCompliance {\n headers[\"x-emc-compliance-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-compliance-enabled\"] = []string{\"false\"}\n }\n if ecsBucket.EnableEncryption {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"true\"}\n } else {\n headers[\"x-emc-server-side-encryption-enabled\"] = []string{\"false\"}\n }\n var bucketCreateResponse Response\n if ecsBucket.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = s3Request(s3, ecsBucket.Name, \"PUT\", \"/\", 
headers, \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code == 200 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"swift\" {\n bucketCreateResponse, err = swiftRequest(ecsBucket.Endpoint, ecsBucket.User, ecsBucket.Password, ecsBucket.Name, \"PUT\", \"/\", headers, \"\")\n log.Print(bucketCreateResponse)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, ecsBucket.Name)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n } else if ecsBucket.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n bucketCreateResponse, err = atmosRequest(ecsBucket.Endpoint, s3.AccessKey, s3.SecretKey, \"\", \"PUT\", \"/rest/subtenant\", headers, \"\")\n if err != nil {\n log.Print(err)\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketCreateResponse.Code >= 200 && bucketCreateResponse.Code < 300 {\n rendering.JSON(w, http.StatusOK, bucketCreateResponse.ResponseHeaders[\"Subtenantid\"][0])\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketCreateResponse.Body}\n }\n }\n\n return nil\n}", "func Create(reqDto *ReqCreateDto, custDto *ReqCustomerDto) (statusCode int, code string, respDto *RespBase, err error) {\r\n\treqDto.ServiceType = \"TMS_CREATE_ORDER\"\r\n\tbizData, err := 
xml.Marshal(reqDto.BizData)\r\n\tif err != nil {\r\n\t\tcode = E02\r\n\t\treturn\r\n\t}\r\n\tsignParam := string(bizData) + reqDto.PartnerKey\r\n\treqDto.Sign, err = sign.GetMD5Hash(signParam, true)\r\n\tif err != nil {\r\n\t\tcode = E02\r\n\t\treturn\r\n\t}\r\n\treqMap := make(map[string]string, 0)\r\n\treqMap[\"serviceType\"] = reqDto.ServiceType\r\n\treqMap[\"partnerID\"] = reqDto.PartnerID\r\n\treqMap[\"bizData\"] = string(bizData)\r\n\treqMap[\"sign\"] = reqDto.Sign\r\n\r\n\tdata := base.JoinMapString(reqMap)\r\n\r\n\treq := httpreq.New(http.MethodPost, custDto.Url, data, func(httpReq *httpreq.HttpReq) error {\r\n\t\thttpReq.ReqDataType = httpreq.FormType\r\n\t\thttpReq.RespDataType = httpreq.XmlType\r\n\t\treturn nil\r\n\t})\r\n\tstatusCode, err = req.Call(&respDto)\r\n\tif err != nil {\r\n\t\tcode = E01\r\n\t\treturn\r\n\t}\r\n\tif statusCode != http.StatusOK {\r\n\t\tcode = E01\r\n\t\terr = fmt.Errorf(\"http status exp:200,act:%v\", statusCode)\r\n\t\treturn\r\n\t}\r\n\tif respDto.Result != true {\r\n\t\tcode = E03\r\n\t\terr = fmt.Errorf(\"%v-%v\", respDto.ErrorCode, respDto.ErrorDescription)\r\n\t\treturn\r\n\t}\r\n\tcode = SUC\r\n\treturn\r\n}", "func createInvoice(w http.ResponseWriter, r *http.Request) {\n\tmaxId = maxId + 1\n\tvar newInvoice Invoice\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Kindly enter data of the invoice\")\n\t}\n\n\tjson.Unmarshal(reqBody, &newInvoice)\n\tvar newLines = arrayLines{}\n\tfor i, line := range newInvoice.Lines {\n\t\tvar lastPrice float64\n\t\tif line.Currency != \"CRC\" {\n\t\t\tvalue := getCurrencyValueSale()\n\t\t\tline.Price_crc = line.Price * value\n\t\t\tlastPrice = line.Price_crc\n\t\t\tnewInvoice.Lines = append(newInvoice.Lines[:i], line)\n\t\t} else {\n\t\t\tlastPrice = line.Price\n\t\t}\n\t\tnewLines = append(newLines, line)\n\t\tvar priceWithDiscount = lastPrice * (float64(line.Discount_rate) / 100)\n\t\tvar priceWithTaxt = lastPrice * (float64(line.Tax_rate) / 
100)\n\t\tnewInvoice.Tax_total = newInvoice.Tax_total + priceWithTaxt\n\t\tnewInvoice.Discount_total = newInvoice.Discount_total + priceWithDiscount\n\t\tnewInvoice.Subtotal = newInvoice.Subtotal + lastPrice\n\t\tnewInvoice.Total = newInvoice.Subtotal - priceWithDiscount + priceWithTaxt\n\t}\n\tnewInvoice.Balance = newInvoice.Balance - newInvoice.Total\n\tnewInvoice.Id = maxId\n\tnewInvoice.Lines = newLines\n\tinvoices = append(invoices, newInvoice)\n\tw.WriteHeader(http.StatusCreated)\n\n\tjson.NewEncoder(w).Encode(newInvoice)\n}", "func CreateProduct(rw http.ResponseWriter, r *http.Request) {\n\tvar product data.Product\n\terr := json.NewDecoder(r.Body).Decode(&product)\n\n\tif err != nil {\n\t\trw.WriteHeader(500)\n\t\terrorMessage := helper.ErrorMessage(err.Error())\n\t\tjson.NewEncoder(rw).Encode(errorMessage)\n\t\treturn\n\t}\n\n\terr = services.CreateProduct(&product)\n\n\tif err != nil {\n\t\trw.WriteHeader(500)\n\t\terrorMessage := helper.ErrorMessage(err.Error())\n\t\tjson.NewEncoder(rw).Encode(errorMessage)\n\t\treturn\n\t}\n\n\tjson.NewEncoder(rw).Encode(product)\n}", "func (service *Service) CreateProduct(c *gin.Context) error {\n\tname := c.PostForm(\"name\")\n\tdescription := c.PostForm(\"description\")\n\tuserID, err := jwt.VerifyToken(c)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar product Product\n\n\tproduct.SetName(name)\n\tproduct.SetDescription(description)\n\tproduct.SetUserID(userID)\n\n\terr = service.ProductRepository.Create(product)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func CreateShop(c *fiber.Ctx) {\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Shop DataShop\n\tvar Services ServiceNames\n\n\tif errorParse := c.BodyParser(&Shop); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error al parsear información\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tListCards := \"[\"\n\n\tfor i := 0; i < len(Shop.ListCards); i++ {\n\t\tif 
len(Shop.ListCards)-1 == i {\n\t\t\tListCards = fmt.Sprintf(`%s\"%v\"`, ListCards, Shop.ListCards[i])\n\t\t} else {\n\t\t\tListCards = fmt.Sprintf(`%s\"%v\",`, ListCards, Shop.ListCards[i])\n\t\t}\n\t}\n\n\tListCards = fmt.Sprintf(`%s]`, ListCards)\n\n\tShopID, errorInsertShop := sq.Insert(\"shop\").\n\t\tColumns(\n\t\t\t\"user_id\",\n\t\t\t\"shop_name\",\n\t\t\t\"address\",\n\t\t\t\"phone\",\n\t\t\t\"phone2\",\n\t\t\t\"description\",\n\t\t\t\"cover_image\",\n\t\t\t\"accept_card\",\n\t\t\t\"list_cards\",\n\t\t\t\"lat\",\n\t\t\t\"lon\",\n\t\t\t\"score_shop\",\n\t\t\t\"logo\",\n\t\t\t\"service_type_id\",\n\t\t\t\"sub_service_type_id\",\n\t\t\t\"status\",\n\t\t).\n\t\tValues(\n\t\t\tUserID,\n\t\t\tShop.ShopName,\n\t\t\tShop.Address,\n\t\t\tShop.Phone,\n\t\t\tShop.Phone2,\n\t\t\tShop.Description,\n\t\t\tShop.CoverImage,\n\t\t\tShop.AcceptCard,\n\t\t\tListCards,\n\t\t\tShop.Lat,\n\t\t\tShop.Lon,\n\t\t\t0,\n\t\t\tShop.Logo,\n\t\t\tShop.ServiceTypeID,\n\t\t\tShop.SubServiceTypeID,\n\t\t\tfalse,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif errorInsertShop != nil {\n\t\tfmt.Println(\"Error to save shop\", errorInsertShop)\n\t}\n\n\tIDLastShop, _ := ShopID.LastInsertId()\n\n\t_, errorInsertPages := sq.Insert(\"pages\").\n\t\tColumns(\n\t\t\t\"shop_id\",\n\t\t).\n\t\tValues(\n\t\t\tIDLastShop,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif errorInsertPages != nil {\n\t\tfmt.Println(\"Error to save page\", errorInsertPages)\n\t}\n\n\tErrorService := sq.Select(\n\t\t\"sub_service_name\",\n\t\t\"service_name\",\n\t).\n\t\tFrom(\"sub_service_type\").\n\t\tLeftJoin(\"service_type on sub_service_type.service_type_id = service_type.service_type_id\").\n\t\tWhere(\"sub_service_type.sub_service_type_id = ? AND sub_service_type.service_type_id = ? 
\", Shop.SubServiceTypeID, Shop.ServiceTypeID).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&Services.SubServiceName,\n\t\t\t&Services.ServiceName,\n\t\t)\n\n\tif ErrorService != nil {\n\t\tfmt.Println(\"Error to get service names\", ErrorService)\n\t}\n\n\tfor i := 0; i < len(Shop.ListImages); i++ {\n\t\t_, errorInsertShop := sq.Insert(\"images_shop\").\n\t\t\tColumns(\n\t\t\t\t\"url_image\",\n\t\t\t\t\"shop_id\",\n\t\t\t).\n\t\t\tValues(\n\t\t\t\tShop.ListImages[i],\n\t\t\t\tIDLastShop,\n\t\t\t).\n\t\t\tRunWith(database).\n\t\t\tExec()\n\t\tif errorInsertShop != nil {\n\t\t\tfmt.Println(\"Problem to insert url\", errorInsertShop)\n\t\t}\n\t}\n\n\t_, errorInsertSchedules := sq.Insert(\"shop_schedules\").\n\t\tColumns(\n\t\t\t\"LUN\",\n\t\t\t\"MAR\",\n\t\t\t\"MIE\",\n\t\t\t\"JUE\",\n\t\t\t\"VIE\",\n\t\t\t\"SAB\",\n\t\t\t\"DOM\",\n\t\t\t\"shop_id\",\n\t\t).\n\t\tValues(\n\t\t\tShop.ShopSchedules[0],\n\t\t\tShop.ShopSchedules[1],\n\t\t\tShop.ShopSchedules[2],\n\t\t\tShop.ShopSchedules[3],\n\t\t\tShop.ShopSchedules[4],\n\t\t\tShop.ShopSchedules[5],\n\t\t\tShop.ShopSchedules[6],\n\t\t\tIDLastShop,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif errorInsertSchedules != nil {\n\t\tfmt.Println(\"Problem with insert ShopSchedules\", errorInsertSchedules)\n\t}\n\n\tIDSInt := strconv.FormatInt(IDLastShop, 10)\n\tIDString := fmt.Sprintf(\"%s\", IDSInt)\n\n\t_, errInsertMongo := mongodb.Collection(\"shop\").InsertOne(context.TODO(), bson.M{\n\t\t\"name\": Shop.ShopName,\n\t\t\"shop_id\": IDString,\n\t\t\"location\": bson.M{\n\t\t\t\"type\": \"Point\",\n\t\t\t\"coordinates\": []float64{Shop.Lon, Shop.Lat},\n\t\t},\n\t\t\"category\": Services.ServiceName.String,\n\t\t\"sub_category\": Services.SubServiceName.String,\n\t\t\"status\": false,\n\t})\n\n\tif errInsertMongo != nil {\n\t\tfmt.Println(errInsertMongo, \"Error to Insert mongo\")\n\t}\n\n\tc.JSON(ResponseCreateShop{Message: \"Create shop success\", ShopID: IDLastShop, Status: 200})\n}", "func (s 
*service) Create(ctx context.Context, order ordersvc.Order) (string, error) {\n\tlogger := log.With(s.logger, \"method\", \"Create\")\n\tuuid, _ := uuid.NewV4()\n\tid := uuid.String()\n\torder.ID = id\n\torder.Status = \"Pending\"\n\torder.CreatedOn = time.Now().Unix()\n\n\tif err := s.repository.CreateOrder(ctx, order); err != nil {\n\t\tlevel.Error(logger).Log(\"err\", err)\n\t\treturn \"\", ordersvc.ErrCmdRepository\n\t}\n\treturn id, nil\n}", "func (app *application) ShoppingCart(w http.ResponseWriter, r *http.Request) {\r\n\t// a seller does not have a shopping cart\r\n\tisSeller := app.isSeller(r)\r\n\tif isSeller {\r\n\t\tw.WriteHeader(http.StatusUnauthorized)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusUnauthorized),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\t// retrieves userid from the session cookie\r\n\tuserID := app.session.GetString(r, \"userid\")\r\n\r\n\t// query shoppingcart table in the database\r\n\t// for the list of item in the user's cart\r\n\tcart, err := app.cart.Get(userID)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// display the list of items\r\n\tapp.render(w, r, \"shoppingcart.page.tmpl\", &templateData{\r\n\t\tUser: &models.User{UserID: userID},\r\n\t\tShoppingCart: cart,\r\n\t\tDiscounts: models.Discount,\r\n\t})\r\n}", "func createPayback(args []string) {\n\n\tpaybackRepo := payback.NewRepository(persistence.GetGormClient())\n\ttxnRepo := transaction.NewRepository(persistence.GetGormClient())\n\tpaybackSVC := payback.NewPaybackService(paybackRepo, txnRepo)\n\terr := paybackSVC.CreatePayback(context.Background(), args)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\tfmt.Println(\"Successfully added payback\")\n}", "func (c *Client) create(line string) {\n\targs := 
strings.Split(line, \" \")\n\targs = args[1:] // chop off the first word which should be \"create\"\n\n\treq := frontend.CreateRequest{Key: args[0], DataShapes: args[1], RowType: args[2]}\n\treqs := &frontend.MultiCreateRequest{\n\t\tRequests: []frontend.CreateRequest{req},\n\t}\n\tresponses := &frontend.MultiServerResponse{}\n\tvar err error\n\tif c.mode == local {\n\t\tds := frontend.DataService{}\n\t\terr = ds.Create(nil, reqs, responses)\n\t} else {\n\t\tvar respI interface{}\n\t\trespI, err = c.rc.DoRPC(\"Create\", reqs)\n\t\tif respI != nil {\n\t\t\tresponses = respI.(*frontend.MultiServerResponse)\n\t\t}\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Failed with error: %s\\n\", err.Error())\n\t\treturn\n\t}\n\n\tfor _, resp := range responses.Responses {\n\t\tif len(resp.Error) != 0 {\n\t\t\tfmt.Printf(\"Failed with error: %s\\n\", resp.Error)\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Printf(\"Successfully created a new catalog entry for bucket %s\\n\", args[0])\n}", "func addProduct(writer http.ResponseWriter, request *http.Request) {\n\ttransactionId := request.Header.Get(\"transactionId\")\n\tlog.Printf(\"Adding product for trasnsactionId %s!\", transactionId)\n\tdecoder := json.NewDecoder(request.Body)\n\taddProductCommand := command.AddProductCommand{}\n\terr := decoder.Decode(&addProductCommand)\n\tif err != nil {\n\t\twriteErrorResponse(writer, err)\n\t}\n\tproductHandler.AddProduct(transactionId, addProductCommand)\n\trenderResponse(writer, []byte(\"\"))\n}", "func Create(responseWriter http.ResponseWriter, request *http.Request) {\n\tfmt.Println(\"[ CreateOrder ]\")\n\tbody, _ := json.Marshal(request.Body)\n\tfmt.Println(\"[ CreateOrder ] Body=\" + string(body))\n\t//params := mux.Vars(request)\n\tvar orderEntity OrderEntity\n\t_ = json.NewDecoder(request.Body).Decode(&orderEntity)\n\n\tvar result OrderEntity = Insert(orderEntity)\n\n\tWriteMessages(result, Topic.TOPIC_SUCCESS)\n\n\tjson.NewEncoder(responseWriter).Encode(result)\n}", "func CreateCar(w 
http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"%v %v %v\\n\", r.RemoteAddr, r.Method, r.URL)\n\tif r.Method == http.MethodGet {\n\t\thttp.Redirect(w, r, \"http://localhost:\"+port+\"/\", http.StatusMovedPermanently)\n\t}\n\n\tvar car data.Car\n\tif r.Method == http.MethodPost {\n\t\tif r.Header.Get(\"Content-type\") == \"application/x-www-form-urlencoded\" {\n\t\t\tbrand := r.FormValue(\"brand\")\n\t\t\tm := r.FormValue(\"model\")\n\t\t\tcolor := r.FormValue(\"color\")\n\t\t\tprice, err := strconv.ParseFloat(r.FormValue(\"price\"), 32)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[ERROR] %v %v %v\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\t\t\thttp.Error(w, \"Incorrect numbers.\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcar = data.Car{\n\t\t\t\tBrand: brand,\n\t\t\t\tModel: m,\n\t\t\t\tColor: color,\n\t\t\t\tPrice: float32(price),\n\t\t\t}\n\t\t\tif ok := data.AddCar(&car); !ok {\n\t\t\t\tlog.Printf(\"[ERROR] %v %v %v\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\t\t\thttp.Error(w, \"Error adding an ad for a car sale.\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, \"(POST) SUCCESS! Added new car sale announcement.\")\n\t\t} else {\n\t\t\tif err := car.FromJSON(r.Body); err != nil {\n\t\t\t\tlog.Printf(\"[ERROR] %v %v %v\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\t\t\thttp.Error(w, \"Error retrieving data from JSON.\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif ok := data.AddCar(&car); !ok {\n\t\t\t\tlog.Printf(\"[ERROR] %v %v %v\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\t\t\thttp.Error(w, \"Error adding an ad for a car sale.\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfmt.Fprint(w, \"(JSON) SUCCESS! 
Added new car sale announcement.\")\n\t\t}\n\t}\n}", "func NewCartServer(\n\tlogger *logrus.Logger,\n\tcc creating.CartCreator,\n\tpa adding.ProductAdder,\n\tpl listing.ProductLister,\n) *CartServer {\n\n\tcs := new(CartServer)\n\n\tcs.cartCreator = cc\n\tcs.productAdder = pa\n\tcs.productLister = pl\n\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/carts\", cs.createCart).Methods(http.MethodPost)\n\trouter.HandleFunc(\"/carts/{cartID}/products\", cs.addProduct).Methods(http.MethodPost)\n\trouter.HandleFunc(\"/carts/{cartID}/products\", cs.getProducts).Methods(http.MethodGet)\n\n\tloggingMiddleware := logging.Middleware(logger)\n\tloggedRouter := loggingMiddleware(router)\n\n\tcs.Handler = loggedRouter\n\n\treturn cs\n}", "func (d *Distributor) CreatePO(ctx contractapi.TransactionContextInterface, poInfo []byte) (string, error) {\n\ttype POData struct {\n\t\tOrg\t\t\t\tstring `json:\"org\"`\t\n\t\tBuyerCRN\t\tstring `json:\"buyerCRN\"`\t\n\t\tSellerCRN\t\tstring `json:\"sellerCRN\"`\n\t\tDrugName\t\tstring `json:\"drugName\"`\n\t\tQuantity\t\tint `json:\"quantity\"`\n\t}\n\t\n\tvar poData POData\n\t\n\terr := json.Unmarshal(poInfo, &poData)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to unmarshal JSON: %v\", err)\n\t}\n\n\tpoCompositeKey, err := ctx.GetStub().CreateCompositeKey(\"po.pharma-net.com\", []string{poData.BuyerCRN, poData.DrugName})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create composite key: %v\", err)\n\t}\n\n\t\n\tbuyer, _ := getCompanyByPartialCompositeKey(ctx, poData.BuyerCRN, \"company.pharma-net.com\")\n\n\tseller, _ := getCompanyByPartialCompositeKey(ctx, poData.SellerCRN, \"company.pharma-net.com\")\n\t\n\tif buyer.HierarchyKey - seller.HierarchyKey != 1 {\n\t\treturn \"\", fmt.Errorf(\"%v not allowed to buy from %v\", buyer.Name, seller.Name)\n\t}\n\t//buyerCompositeKey, err := ctx.GetStub().CreateCompositeKey(\"company.pharma-net.com\", []string{buyer[0].Name, poData.BuyerCRN})\n\t//sellerCompositeKey, 
err := ctx.GetStub().CreateCompositeKey(\"company.pharma-net.com\", []string{seller[0].Name, poData.SellerCRN})\n\n\tnewPO := PurchaseOrder {\n\t\tOrg:\t\t\t\tpoData.Org,\t\n\t\tPOID:\t\t\t\tpoCompositeKey,\n\t\tDrugName:\t\t\tpoData.DrugName,\n\t\tQuantity:\t\t\tpoData.Quantity,\n\t\tBuyer:\t\t\t\tbuyer.CompanyID,\n\t\tSeller:\t\t\t\tseller.CompanyID,\n\n\t}\n\n\tmarshaledPO, err := json.Marshal(newPO)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to marshal PO into JSON: %v\", err)\n\t}\n\terr = ctx.GetStub().PutState(poCompositeKey, marshaledPO)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to put PO: %v\", err)\n\t}\n\t\n\treturn string(marshaledPO), nil\n\t\n}", "func TestCartScenario2(t *testing.T) {\n\t\n\te := httpexpect.New(t, API_URL)\n\t\n\tprintComment(\"SC20001\", \"Test Add 4 items of Unlimited 5 GB for $209.40\")\n\tcart := map[string]interface{}{\n\t\t\"code\": \"ult_large\",\n\t\t\"name\": \"Unlimited 5GB\",\n\t\t\"price\": 44.90,\n\t\t\"items\": 4,\n\t}\n\n\te.POST(\"/cart\").\n\t\tWithJSON(cart).\n\t\tExpect().\n\t\tStatus(http.StatusOK)\n\n}", "func (c *Chef) Create(v interface{}) (string, error) {\n\tpdt, ok := v.(model.Product)\n\tif !ok {\n\t\treturn \"\", ErrUnsupportedType\n\t}\n\tpdt.ID = uuid.NewV4().String()\n\n\tif err := pdt.Validate(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr := c.db.Exec(fmt.Sprintf(`INSERT INTO %s (\"id\", \"name\", \"price\", \"weight\", \"available\") VALUES('%s', '%s', %d, %d, %t)`,\n\t\tc.table, pdt.ID, pdt.Name, pdt.Price, pdt.Weight, pdt.Available,\n\t))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn pdt.ID, nil\n}", "func (a *ApiDB) CreateContract(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tp := MODELS.CREATE_UPDATE_CONTRACT_REQUEST{}\n\terr := json.NewDecoder(r.Body).Decode(&p)\n\tif err != nil {\n\t\tio.WriteString(w, `{\"message\": \"wrong format!\"}`)\n\t\treturn\n\t}\n\n\tresult := 
BUSINESS.CreateContract(a.Db, p)\n\tif result {\n\t\tio.WriteString(w, ` { \"status\": 200,\n \"message\": \"Create contract success\",\n \"data\": {\n \"status\": 1\n }\n}\n`)\n\t} else {\n\t\tio.WriteString(w, `{ \"message\": \"Can’t create contract\"}`)\n\t}\n}", "func (app *application) checkout(w http.ResponseWriter, r *http.Request) {\n\tsession, err := app.sessionStore.Get(r, \"session-name\")\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\tcustomerID := session.Values[\"customerID\"].(int)\n\tcart := app.carts[customerID]\n\tvendorID := 0\n\n\t// check if all items in cart are from same vendor\n\tfor listingID := range cart {\n\t\tlisting, err := app.listings.Get(listingID)\n\t\tif err != nil {\n\t\t\tapp.serverError(w, err)\n\t\t\treturn\n\t\t}\n\t\tif vendorID == 0 {\n\t\t\tvendorID = listing.VendorID\n\t\t} else if vendorID != listing.VendorID {\n\t\t\tsession.AddFlash(\"Please select items from only one vendor.\")\n\t\t\terr = session.Save(r, w)\n\t\t\tif err != nil {\n\t\t\t\tapp.serverError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"/customer/checkout\", http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t}\n\terr = r.ParseForm()\n\tif err != nil {\n\t\tapp.clientError(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\tform := forms.New(r.PostForm)\n\tform.Required(\"drop_long\", \"drop_lat\")\n\tdropLat, err := strconv.ParseFloat(form.Get(\"drop_lat\"), 64)\n\tif err != nil {\n\t\tform.Errors.Add(\"drop_lat\", \"enter valid floating point number\")\n\t}\n\tdropLong, err := strconv.ParseFloat(form.Get(\"drop_long\"), 64)\n\tif err != nil {\n\t\tform.Errors.Add(\"drop_long\", \"enter valid floating point number\")\n\t}\n\tif !form.Valid() {\n\t\tapp.render(w, r, \"checkout.page.tmpl\", &templateData{Form: form})\n\t\treturn\n\t}\n\n\tdeliveryID, err := app.deliveries.Insert(customerID, vendorID, time.Now(), dropLat, dropLong)\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\tfor listingID, quantity := range 
cart {\n\t\tlisting, err := app.listings.Get(listingID)\n\t\tif err != nil {\n\t\t\tapp.serverError(w, err)\n\t\t}\n\t\terr = app.orders.Insert(deliveryID, listingID, quantity, listing.Price*quantity)\n\t\tif err != nil {\n\t\t\tapp.serverError(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsession.AddFlash(\"Order placed! Track your order status here.\")\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/customer/activeorders\", http.StatusSeeOther)\n}", "func (s *SmartContract) CreateContract(ctx contractapi.TransactionContextInterface, id string, sellerID string, consumerID string, created string, contract string, signature string) error {\n\tctc := Contract{\n\t\tContext: \"http://wuldid.ddns.net\",\n\t\tSellerID: sellerID,\n\t\tConsumerID: consumerID,\n\t\tCreated: created,\n\t\tContract: contract,\n\t\tSignature: signature,\n\t}\n\n\texists, err := s.DidExists(ctx, id)\n\tif err != nil {\n\t\tfmt.Errorf(\"Unexpected error!! 
: %q\", err)\n\t}\n\tif !exists {\n\t\tctcJSON, _ := json.Marshal(ctc)\n\t\treturn ctx.GetStub().PutState(id, ctcJSON)\n\t} else {\n\t\treturn fmt.Errorf(\"Don't exsit did!\")\n\t}\n\n}", "func CreateProduct(c echo.Context) error {\n\tvar objRequest types.Product\n\tif err := c.Bind(&objRequest); err != nil {\n\t\tlog.Error(err)\n\t\treturn c.JSON(http.StatusBadRequest, types.ParseStatus(\"REQ_ERR\", \"Có lỗi xảy ra, vui lòng kiểm tra lại thông tin\"))\n\t}\n\tif err := c.Validate(&objRequest); err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, types.ParseStatus(\"REQ_INVALID\", err.Error()))\n\t}\n\n\tdata, err := db.CreateNewProduct(&objRequest)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusNotAcceptable, types.ParseStatus(\"NOT_ACCEPTED\", err.Error()))\n\t}\n\treturn c.JSON(http.StatusCreated, data)\n}", "func (c *CartHandler) AddItem() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\tlog.Printf(\"entering-AddItem\")\n\t\tdefer log.Printf(\"exiting-AddItem\")\n\n\t\taddItemRequest := resources.AddItemRequest{}\n\t\terr := utils.UnmarshalDataFromRequest(req, &addItemRequest)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error-unmarshalling-%s\", err.Error())\n\t\t\tutils.WriteResponse(w, 409, &resources.AddItemResponse{Error: err})\n\t\t\treturn\n\t\t}\n\n\t\tresp := c.cart.AddItem(addItemRequest)\n\t\tif resp.Error != nil {\n\t\t\tlog.Printf(\"error-writing-response-%s\", resp.Error.Error())\n\t\t\tutils.WriteResponse(w, 409, &resp)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"cart %#v\", c.cart)\n\t\tutils.WriteResponse(w, http.StatusOK, resp)\n\t}\n}", "func (pr ProductRepository) Create(ctx context.Context, product *domain.Product) error {\n\treturn pr.db.Insert(ctx, product)\n}", "func (slir *ShoppingListItemRepository) Create(dbTransaction *gorm.DB, data CreateShoppingListItem) (*ShoppingListItem, *systems.ErrorData) {\n\tdealGUID := data.DealGUID\n\n\tcashbackAmount := data.CashbackAmount\n\n\tshoppingListItem := 
&ShoppingListItem{\n\t\tGUID: Helper.GenerateUUID(),\n\t\tUserGUID: data.UserGUID,\n\t\tShoppingListGUID: data.ShoppingListGUID,\n\t\tName: data.Name,\n\t\tCategory: data.Category,\n\t\tSubCategory: data.SubCategory,\n\t\tQuantity: data.Quantity,\n\t\tAddedToCart: data.AddedToCart,\n\t\tAddedFromDeal: data.AddedFromDeal,\n\t\tDealGUID: &dealGUID,\n\t\tCashbackAmount: &cashbackAmount,\n\t}\n\n\tresult := dbTransaction.Create(shoppingListItem)\n\n\tif result.Error != nil || result.RowsAffected == 0 {\n\t\treturn nil, Error.InternalServerError(result.Error, systems.DatabaseError)\n\t}\n\n\treturn result.Value.(*ShoppingListItem), nil\n}", "func (ch *CartHandler) Checkout(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\terr := ch.Cart.Delete(id)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusNotFound, err)\n\t\t}\n\t}\n\n\tmsg := fmt.Sprintf(\"Item with id %s has been successfully checked out. 
Please check your email for confirmation.\", id)\n\tRespond(w, http.StatusOK, msg)\n}", "func CreateCertif(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n var certifi Certificate\r\n\t\r\n _ = json.NewDecoder(r.Body).Decode(&certifi) //Collect information entered in the 'Body' section by the user \r\n\t\r\n\t//Whatever the certificate ID entered by the user, we create the certificate with the next available ID (ex.: if 5 certificates in the database: available ID = 6)\r\n\tvar taille = len(certif) //Number of certificates\r\n\ttaille=taille+1\t//Available ID\r\n\tt:=strconv.Itoa(taille) //Convert an integer into string\r\n\t\r\n\t//Define Certificate ID and Owner ID, add it to the existing certificates\r\n\tcertifi.Id = t\r\n\tcertifi.Ownerid = clientnum\r\n\tcertifi.Transfer = \"Nil\"\r\n\tcertif=append(certif, certifi)\r\n\t//Then, we inform the user\r\n\tif params[\"id\"] != t {\r\n\t\tfmt.Fprintf(w, \"This certificate number was not available, we automatically created your new certificate with the ID:\"+\" \"+t)\r\n\t}\r\n}" ]
[ "0.75426686", "0.747326", "0.71038234", "0.6888138", "0.6783495", "0.66843", "0.66568315", "0.66497815", "0.6567787", "0.6509555", "0.6456917", "0.63474876", "0.626733", "0.6116991", "0.61137474", "0.6104265", "0.6075365", "0.6064078", "0.60196215", "0.60145146", "0.5993004", "0.5973354", "0.5955215", "0.5953684", "0.5795877", "0.5791607", "0.57901955", "0.57517135", "0.57300544", "0.5695458", "0.5693027", "0.5689151", "0.567906", "0.5650693", "0.56246126", "0.5623384", "0.55983514", "0.55652666", "0.5561182", "0.5541343", "0.55380034", "0.55310667", "0.54921323", "0.5489776", "0.5460293", "0.5454606", "0.5446171", "0.5443736", "0.54290533", "0.54289246", "0.5425293", "0.5404945", "0.5403134", "0.5397836", "0.5396096", "0.5378864", "0.53738207", "0.53631806", "0.5362518", "0.5359231", "0.5345024", "0.53409874", "0.5340658", "0.5320209", "0.53187704", "0.5316375", "0.5314887", "0.5314281", "0.5300812", "0.52969563", "0.52800053", "0.52679133", "0.5262104", "0.52417165", "0.5236767", "0.5228027", "0.5219386", "0.521073", "0.51947063", "0.51936126", "0.5191193", "0.51911783", "0.51899475", "0.5183756", "0.51770985", "0.51666355", "0.51593465", "0.5157815", "0.51569265", "0.5156483", "0.51550597", "0.51500833", "0.51437515", "0.51387924", "0.51386565", "0.5128561", "0.512824", "0.5123292", "0.5112472", "0.51059437" ]
0.7153766
2
Handle Cart Detail Show
func (main *Main) GetCartByUserId(e echo.Context) (err error) { UserID := e.Param("code") user, cart, exc := CartModel.GetAll("user_id", UserID) if exc != nil { return rest.ConstructErrorResponse(e, exc) } data := map[string]contract.Model{ "cart_detail": cart, "user_detail": user, } return rest.ConstructSuccessResponse(e, data) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ShowCart(w http.ResponseWriter, r *http.Request) {\n\tHomeVars := NewHomePageVars(r)\n\tutils.GenerateTemplate(w, HomeVars, \"cart\")\n}", "func (ctl *SaleCounterProductController) Detail() {\n\t//获取信息一样,直接调用Edit\n\tctl.Edit()\n\tctl.Data[\"Readonly\"] = true\n\tctl.Data[\"Action\"] = \"detail\"\n}", "func (g *Goods) Detail(c Context) {\n\t// TODO\n\tc.String(http.StatusOK, \"get goods detail\")\n}", "func GetCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func GetCart(c *gin.Context) {\r\n\tvar cart []Models.Cart\r\n\terr := Models.GetCart(&cart)\r\n\tif err != nil {\r\n\t\tc.AbortWithStatus(http.StatusNotFound)\r\n\t} else {\r\n\t\tc.JSON(http.StatusOK, cart)\r\n\t}\r\n}", "func (rest *RestApi) GetCart(w http.ResponseWriter, r *http.Request, id int64) error {\n gc := rest.GoCart\n cart := gc.GetCart(id)\n\n bytes, err := json.Marshal(cart)\n if err != nil {\n panic(err)\n }\n\n response := string(bytes)\n fmt.Fprintln(w, response)\n return nil\n}", "func (s *Service) getCatalogItemDetailsHandler(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tsku := vars[\"sku\"]\n\tstatus, err := s.fulfillmentClient.getFulfillmentStatus(sku)\n\tif err == nil {\n\t\ts.render.JSON(w, http.StatusOK, catalogItem{\n\t\t\tProductID: 1,\n\t\t\tSKU: sku,\n\t\t\tDescription: \"This is a fake product\",\n\t\t\tPrice: 1599, // $15.99\n\t\t\tShipsWithin: status.ShipsWithin,\n\t\t\tQuantityInStock: status.QuantityInStock,\n\t\t})\n\t} else {\n\t\ts.render.JSON(w, http.StatusInternalServerError, fmt.Sprintf(\"Fulfillment Client error: %s\", err.Error()))\n\t}\n}", "func CartList(c *gin.Context) {\n\tid, _ := strconv.Atoi(c.Query(\"cartId\"))\n\tvar CartitemViewModel []Cartitem\n\tif (id > 0) {\n\t\t//database.DBConn.Where(\"cart_id = ?\", id).Find(&Cartitems)\n\t\tdatabase.DBConn.Model(&Cartitem{}).Select(\" cart_id, item_id, count(cart_id) as count \").Group(` cart_id, item_id `).Having(\" cart_id = ?\", 
id).Find(&CartitemViewModel)\n\t} else {\n\t\tdatabase.DBConn.Model(&Cartitem{}).Select(\" cart_id, item_id, count(cart_id) as count \").Group(` cart_id, item_id `).Find(&CartitemViewModel)\n\t}\t\n\tc.JSON(200, CartitemViewModel)\n}", "func (store *Store) Cart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You have requested the cart.\")\n\treturn true, nil\n}", "func (c *PurchaseController) Show(ctx *app.ShowPurchaseContext) error {\n\n\tsession := Database.Session.Copy()\n\tdefer session.Close()\n\n\tresult := app.Purchase{}\n\n\terr := session.DB(\"services-pos\").C(\"Purchase\").FindId(bson.ObjectIdHex(ctx.TransactionID)).One(&result)\n\n\tif err != nil {\n\t\treturn ctx.NotFound()\n\t}\n\n\tresult.TransactionID = ctx.TransactionID\n\tresult.Href = app.PurchaseHref(ctx.TransactionID)\n\n\treturn ctx.OK(&result)\n}", "func (main *Main) GetDetail(e echo.Context) (err error) {\n\t// get path parameter\n\tproductCode := e.Param(\"code\")\n\n\t// get product details\n\tproductDetail, exc := ProductModel.Get(\"code\", productCode)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\n\t// prepare data\n\tdata := map[string]contract.Model{\n\t\t\"product_detail\": output.NewProductDetail(productDetail),\n\t}\n\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func GetCartByID(c *gin.Context) {\r\n\tid := c.Params.ByName(\"id\")\r\n\tvar cart []Models.Cart\r\n\terr := Models.GetCartByID(&cart, id)\r\n\tif err != nil {\r\n\t\tc.AbortWithStatus(http.StatusNotFound)\r\n\t} else {\r\n\t\tc.JSON(http.StatusOK, cart)\r\n\t}\r\n}", "func (ch *CartHandler) Query(w http.ResponseWriter, r *http.Request) {\n\n\tp, err := ch.Cart.FetchAll()\n\tif err != nil {\n\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t}\n\n\tRespond(w, http.StatusOK, p)\n}", "func (app *application) ShoppingCart(w http.ResponseWriter, r *http.Request) {\r\n\t// a seller does not have a shopping cart\r\n\tisSeller := app.isSeller(r)\r\n\tif isSeller 
{\r\n\t\tw.WriteHeader(http.StatusUnauthorized)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusUnauthorized),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\t// retrieves userid from the session cookie\r\n\tuserID := app.session.GetString(r, \"userid\")\r\n\r\n\t// query shoppingcart table in the database\r\n\t// for the list of item in the user's cart\r\n\tcart, err := app.cart.Get(userID)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// display the list of items\r\n\tapp.render(w, r, \"shoppingcart.page.tmpl\", &templateData{\r\n\t\tUser: &models.User{UserID: userID},\r\n\t\tShoppingCart: cart,\r\n\t\tDiscounts: models.Discount,\r\n\t})\r\n}", "func viewAnyOrderGet(c *gin.Context) { //admin also have the same view , later combine those two func TBD\n\tOrdID := c.Request.URL.Query()[\"ordid\"][0] // Getting Order ID passed with URL\n\t_, usrName := session.SessinStatus(c, \"user_session_cookie\")\n\tfmt.Println(\"Wnat to see the order details of order number \", OrdID)\n\toK, itemsList, date, status, PayMode, amt := db.GetSingleOredrDetails(OrdID)\n\tif !oK {\n\t\tfmt.Println(\"Something went wrong while picking Single Order Deatils ..Please have a look\")\n\t}\n\tfmt.Println(oK, itemsList, date, status, PayMode, amt)\n\t//\t\tsubTotalToFloat, _ := strconv.ParseFloat(singleCartItem.SubTotal, 64)\n\t//\t\tTotalAmt = TotalAmt + subTotalToFloat\n\t//\tTotalAmtInPaisa := TotalAmt * 100 // This is required while initate for payment in Razorpay\n\n\t//\tTotalAmtString := fmt.Sprintf(\"%.2f\", TotalAmt)\n\n\tc.HTML(\n\t\thttp.StatusOK,\n\t\t\"view_particular_order.html\",\n\t\tgin.H{\"title\": \"OrderDetail\",\n\t\t\t\"ItemsOrdered\": itemsList,\n\t\t\t\"OrdID\": OrdID,\n\t\t\t\"date\": date,\n\t\t\t\"PayMode\": PayMode,\n\t\t\t\"amt\": 
amt,\n\t\t\t\"OrdStatus\": status,\n\t\t\t\"usrName\": usrName,\n\n\t\t\t// \"TotalAmt\": TotalAmtString,\n\t\t\t// \"TotalAmtInPaisa\": TotalAmtInPaisa,\n\t\t},\n\t)\n\n}", "func (h *Handler) show(c echo.Context) (e error) {\n\tctx := c.(*cuxs.Context)\n\n\tvar id int64\n\tvar as *model.SalesReturn\n\tif id, e = common.Decrypt(ctx.Param(\"id\")); e == nil {\n\t\tif as, e = ShowSalesReturn(\"id\", id); e == nil {\n\t\t\tctx.Data(as)\n\t\t} else {\n\t\t\te = echo.ErrNotFound\n\t\t}\n\t}\n\treturn ctx.Serve(e)\n}", "func viewandedititemGet(c *gin.Context) {\n\tIsSectionActive, _ := session.SessinStatus(c, \"admin_session_cookie\")\n\tif !IsSectionActive {\n\t\tfmt.Println(\"No Active Sessions found \")\n\t\t// c.HTML(http.StatusOK, \"admin_login.html\", []string{\"a\", \"b\", \"c\"})\n\t\tc.HTML(\n\t\t\thttp.StatusOK,\n\t\t\t\"admin_login.html\",\n\t\t\tgin.H{\"title\": \"success login\",\n\t\t\t\t\"diplay\": \"none\",\n\t\t\t},\n\t\t)\n\t} else {\n\t\titemID := c.Request.URL.Query()[\"itemid\"][0] // Getting Order ID passed with URL\n\t\tfmt.Println(\"Initiating to View/Edit item ,having ID\", itemID)\n\t\t//populateCategoryItems(c, itemID)\n\t\t//GetItemDetails(itemID string) (itemDesc string, itemRate float64, unit string,itmID,itmStock int,itmBuyRate float64) {\n\t\t//Don't Confuse above function will redirect\n\t\t//to edit page, usual practice is giving here\n\t\t//but we achived this by modifying the existing\n\t\t//code so it happend so..\n\t\titmDesc, itmSelRate, itmUnit, itmID, itmStock, itmBuyPrice := db.GetItemDetails(itemID)\n\t\tc.HTML(\n\t\t\thttp.StatusOK,\n\t\t\t\"edit_item.html\", gin.H{\n\t\t\t\t\"delWarning\": \"none\",\n\t\t\t\t\"updateSucess\": \"none\",\n\t\t\t\t\"title\": \"Edit Item\",\n\t\t\t\t\"itmID\": itmID,\n\t\t\t\t\"itmDesc\": itmDesc,\n\t\t\t\t\"itmUnit\": itmUnit,\n\t\t\t\t\"itmBuyPrice\": itmBuyPrice,\n\t\t\t\t\"itmSelRate\": itmSelRate,\n\t\t\t\t\"itmStock\": itmStock,\n\t\t\t})\n\t}\n}", "func (ch *CartHandler) Checkout(w 
http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\terr := ch.Cart.Delete(id)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusNotFound, err)\n\t\t}\n\t}\n\n\tmsg := fmt.Sprintf(\"Item with id %s has been successfully checked out. Please check your email for confirmation.\", id)\n\tRespond(w, http.StatusOK, msg)\n}", "func (app *application) customerCart(w http.ResponseWriter, r *http.Request) {\n\tcustomerID := app.authenticatedCustomer(r)\n\tcart := app.carts[customerID]\n\n\tcartRowSlice := make([]cartRow, 0)\n\ttotal := 0\n\n\tfor listID, quantity := range cart {\n\t\tlisting, err := app.listings.Get(listID)\n\t\tif err != nil {\n\t\t\tapp.serverError(w, err)\n\t\t}\n\t\trow := cartRow{\n\t\t\tListingID: listing.ID,\n\t\t\tName: listing.Name,\n\t\t\tPrice: listing.Price,\n\t\t\tQuantity: quantity,\n\t\t\tAmount: quantity * listing.Price,\n\t\t}\n\t\tcartRowSlice = append(cartRowSlice, row)\n\t\ttotal += quantity * listing.Price\n\t}\n\tapp.render(w, r, \"customercart.page.tmpl\", &templateData{\n\t\tCart: cartRowSlice,\n\t\tCartTotal: total,\n\t})\n\treturn\n}", "func handleFuncCart(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\t\n\tc.Infof(\"handleFuncCarts\")\n\tval, err := handleCarts(c, r)\n\tif err == nil {\n\t\terr = json.NewEncoder(w).Encode(val)\n\t}\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(fmt.Sprintf(\"api error: %#v\", err)))\n\t\treturn\t\n\t}\n}", "func (s *Store) Show(c *gin.Context) {\n\n}", "func (c *Cart) Checkout() {\n\t// TODO\n\tfmt.Println(c.Items)\n fmt.Println(\"Total : \" + c.TotalPrice.getPriceInEuro())\n c = new(Cart)\n}", "func (app *application) AddToCart(w http.ResponseWriter, r *http.Request) {\r\n\t// a seller does not have a shopping cart\r\n\tisSeller := app.isSeller(r)\r\n\tif isSeller 
{\r\n\t\tw.WriteHeader(http.StatusUnauthorized)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusUnauthorized),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// retrieve userid from session cookie\r\n\tuserid := app.session.GetString(r, \"userid\")\r\n\r\n\t// retrieve ProductID from url\r\n\t// the ProducID should be valid\r\n\tproductID, err := strconv.Atoi(r.URL.Query().Get(\"productid\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusBadRequest),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// perform the insert at the database\r\n\terr = app.cart.InsertItem(userid, productID)\r\n\tif err != nil {\r\n\t\tapp.errorLog.Println(err)\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\tapp.session.Put(r, \"flash\", \"Product successfully added to cart.\")\r\n\r\n\thttp.Redirect(w, r, r.Referer(), http.StatusSeeOther)\r\n}", "func CreateCart(cr cart.Repository) http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tkey := r.URL.Query().Get(\"key\")\n\t\tif key == \"\" {\n\t\t\thttp.Error(w, \"missing key in query string\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tnewCart := cart.New(key)\n\t\terr := cr.Store(newCart)\n\t\tif err != nil {\n\t\t\t//error handling\n\t\t}\n\t\tval := []byte{}\n\t\terr2 := json.Unmarshal(val, newCart)\n\t\tif err2 != nil {\n\t\t\t//\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write(val)\n\t})\n}", "func (obj *ShopSys) StockCardDetail(input ReqId, _opt ...map[string]string) (output ShopStockCard, err error) {\n\tctx := context.Background()\n\treturn obj.StockCardDetailWithContext(ctx, input, _opt...)\n}", "func (obj *ShopSys) StockCardDetail(input ReqId, 
_opt ...map[string]string) (output ShopStockCard, err error) {\n\tctx := context.Background()\n\treturn obj.StockCardDetailWithContext(ctx, input, _opt...)\n}", "func (h *Stocks) View(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\n\tid := params[\"stock_id\"]\n\n\tctxValues, err := webcontext.ContextValues(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclaims, err := auth.ClaimsFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata := make(map[string]interface{})\n\tf := func() (bool, error) {\n\t\tif r.Method == http.MethodPost {\n\t\t\terr := r.ParseForm()\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tswitch r.PostForm.Get(\"action\") {\n\t\t\tcase \"archive\":\n\t\t\t\terr = h.Repo.Archive(ctx, claims, inventory.ArchiveRequest{\n\t\t\t\t\tID: id,\n\t\t\t\t}, ctxValues.Now)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\n\t\t\t\twebcontext.SessionFlashSuccess(ctx,\n\t\t\t\t\t\"Inventory Deleted\",\n\t\t\t\t\t\"Inventory successfully deleted.\")\n\n\t\t\t\treturn true, web.Redirect(ctx, w, r, urlStocksIndex(), http.StatusFound)\n\t\t\t}\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\tend, err := f()\n\tif err != nil {\n\t\treturn web.RenderError(ctx, w, r, err, h.Renderer, TmplLayoutBase, TmplContentErrorGeneric, web.MIMETextHTMLCharsetUTF8)\n\t} else if end {\n\t\treturn nil\n\t}\n\n\tprj, err := h.Repo.ReadByID(ctx, claims, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata[\"stock\"] = prj.Response(ctx)\n\tdata[\"urlStocksCreate\"] = urlStocksCreate()\n\tdata[\"urlStocksRemove\"] = urlStocksRemove()\n\tdata[\"urlStocksIndex\"] = urlStocksIndex()\n\tdata[\"urlStocksView\"] = urlStocksView(id)\n\tdata[\"urlStocksUpdate\"] = urlStocksUpdate(id)\n\n\treturn h.Renderer.Render(ctx, w, r, TmplLayoutBase, \"stocks-view.gohtml\", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)\n}", "func addCartHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w 
http.ResponseWriter, req *http.Request) {\n\t\t/**\n\t\t\tMongo server setup\n\t\t**/\n\t\tsession, err := mgo.Dial(mongodb_server)\n if err != nil {\n fmt.Println(\"mongoserver panic\")\n }\n defer session.Close()\n session.SetMode(mgo.Monotonic, true)\n c := session.DB(mongodb_database).C(\"cart\")\n \n\t\t/**\n\t\t\tGet Post body\n\t\t**/ \n body, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tvar cart Cart\n\t\tjson.Unmarshal(body, &cart)\n\t\tc.Insert(cart)\n\t\t\n\t\tvar response Success\n\t\tresponse.Success = true\n \n\t\tformatter.JSON(w, http.StatusOK, response)\n\t}\n}", "func Catalog(c *gin.Context) { // TODO: Delete this!\n\tif !auth.UserCanAccess(c, 0) {\n\t\tForbidden(c)\n\t\treturn\n\t}\n\tuser := db.UserGet(auth.GetLogin(c))\n\tdata := &catalogData{HeaderData: &web.HeaderData{}, UserMode: user.Mode, Username: user.Username}\n\tc.HTML(http.StatusOK, \"catalog.tmpl\", data)\n}", "func AddItemToCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (c *PlanController) PlanDetails() {\n\tplanViewModel := viewmodels.Plan{}\n\tr := c.Ctx.Request\n\tw := c.Ctx.ResponseWriter\n\tsessionValues, sessionStatus := SessionForPlan(w,r)\n\tplanViewModel.SessionFlag = sessionStatus\n\tplanViewModel.CompanyPlan = sessionValues.CompanyPlan\n\tplanViewModel.CompanyTeamName =sessionValues.CompanyTeamName\n\tc.Data[\"vm\"] = planViewModel\n\tc.TplName = \"template/plan.html\"\n}", "func (ch *CartHandler) QueryByID(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\tp, err := ch.Cart.Fetch(id)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusNotFound, err)\n\t\t}\n\t}\n\n\tRespond(w, http.StatusOK, p)\n}", "func (ch *CartHandler) Delete(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\tid := 
vars[\"id\"]\n\n\terr := ch.Cart.Delete(id)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusNotFound, err)\n\t\t}\n\t}\n\n\tRespond(w, http.StatusNoContent, nil)\n}", "func (main *Main) GetDetail(e echo.Context) (err error) {\n\t// get path parameter\n\ttransactionCode := e.Param(\"code\")\n\n\t// get transaction details\n\ttransactionDetail, exc := TransactionModel.Get(\"code\", transactionCode)\n\tif exc != nil {\n\t\treturn rest.ConstructErrorResponse(e, exc)\n\t}\n\n\t// prepare data\n\tdata := map[string]contract.Model{\n\t\t\"transaction_detail\": output.NewTransactionDetail(transactionDetail),\n\t}\n\n\treturn rest.ConstructSuccessResponse(e, data)\n}", "func CartsList(c *cli.Context) {\n\tcolor.Allow(c)\n\n\tconf := config.GetConfig()\n\tdefer conf.Flush()\n\n\tindex := 1\n\tcache := make(map[string]string)\n\tfor _, cart := range conf.Carts {\n\t\tfmt.Printf(\"(%s) %s\\n\", color.ShortID(strconv.Itoa(index)), cart.Name)\n\t\tcache[strconv.Itoa(index)] = cart.Name\n\t\tindex++\n\t}\n\tconf.ResultCache[\"Carts\"] = cache\n}", "func (obj *ShopSys) StockDetail(input ReqId, _opt ...map[string]string) (output ShopStock, err error) {\n\tctx := context.Background()\n\treturn obj.StockDetailWithContext(ctx, input, _opt...)\n}", "func (obj *ShopSys) StockDetail(input ReqId, _opt ...map[string]string) (output ShopStock, err error) {\n\tctx := context.Background()\n\treturn obj.StockDetailWithContext(ctx, input, _opt...)\n}", "func EmptyCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (client *Client) GetCatalogCart(req *Request) (*Response, error) {\n\treturn client.Execute(&Request{\n\t\tMethod: \"GET\",\n\t\tPath: ServiceCatalogPath,\n\t\tQueryParams: req.QueryParams,\n\t\tResult: &GetCatalogCartResult{},\n\t})\n}", "func orderAdminApproveGet(c *gin.Context) {\n\tOrdID := 
c.Request.URL.Query()[\"ordid\"][0] // Getting Order ID passed with URL\n\t_, usrName := session.SessinStatus(c, \"user_session_cookie\")\n\tfmt.Println(\"Wnat to see the order details of order number \", OrdID)\n\toK, itemsList, date, status, PayMode, amt := db.GetSingleOredrDetails(OrdID)\n\tif !oK {\n\t\tfmt.Println(\"Something went wrong while picking Single Order Deatils ..Please have a look\")\n\t}\n\tfmt.Println(oK, itemsList, date, status, PayMode, amt)\n\tc.HTML(\n\t\thttp.StatusOK,\n\t\t\"order_adminview.html\",\n\t\tgin.H{\"title\": \"OrderDetail\",\n\t\t\t\"ItemsOrdered\": itemsList,\n\t\t\t\"OrdID\": OrdID,\n\t\t\t\"date\": date,\n\t\t\t\"PayMode\": PayMode,\n\t\t\t\"amt\": amt,\n\t\t\t\"OrdStatus\": status,\n\t\t\t\"usrName\": usrName,\n\n\t\t\t// \"TotalAmt\": TotalAmtString,\n\t\t\t// \"TotalAmtInPaisa\": TotalAmtInPaisa,\n\t\t},\n\t)\n}", "func NewCartHandler(cs *ProductService) *CartHandler {\n\treturn &CartHandler{Cart: cs}\n\n}", "func (store *Store) AddToCart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You are trying to add items to the cart.\")\n\treturn true, nil\n}", "func (ch *CartHandler) Create(w http.ResponseWriter, r *http.Request) {\n\n\tp := new(Product)\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&p); err != nil {\n\t\tRespondWithError(w, http.StatusBadRequest, err)\n\t}\n\tdefer r.Body.Close()\n\n\tp, err := ch.Cart.Save(p)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase ErrorCorruptDb:\n\t\t\tRespondWithError(w, http.StatusInternalServerError, err)\n\t\tdefault:\n\t\t\tRespondWithError(w, http.StatusBadRequest, err)\n\t\t}\n\t}\n\n\tRespond(w, http.StatusCreated, p)\n}", "func (sc StoreController) Get(c *gin.Context) {\n\tlog.Debug().Caller().Msg(\"stores get\")\n\tid := c.Params.ByName(\"id\")\n\tp, err := sc.Storeservice.Get(id)\n\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Error().Caller().Err(err).Send()\n\t} else {\n\t\tc.JSON(http.StatusOK, p)\n\t}\n}", "func 
DetailHandler(c *gin.Context) {\n\tvar p form.BaseQueryParam\n\tif err := c.ShouldBind(&p); err != nil {\n\t\tutils.Error(c, err)\n\t\treturn\n\t}\n\n\tmatter, err := models.GetMatterByUUID(p.UUID)\n\tif err != nil {\n\t\tutils.Error(c, err)\n\t\treturn\n\t}\n\n\t// Add parent info\n\tif matter.PUUID != settings.MatterRootUUID {\n\t\tparent, _ := models.GetMatterByUUID(matter.PUUID)\n\t\tutils.Ok(c, form.SubDirDetailMatter{Matter: matter, Parent: parent})\n\t\treturn\n\t}\n\n\tutils.Ok(c, form.RootDirDetailMatter{Matter: matter, Parent: nil})\n}", "func CatalogHandler(w http.ResponseWriter, request *http.Request) {\n\tlog.Trace(fmt.Sprintf(\"%s /v2/catalog\", request.Method))\n\tswitch request.Method {\n\tcase \"GET\":\n\t\tc := Catalog{}\n\t\terr := c.Fetch()\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(`{\"status\": %d, \"description\": \"%s\"}`, http.StatusInternalServerError, err)\n\t\t\tlog.Error(msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tjsonCatalog, err := json.Marshal(c)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(`{\"status\": %d, \"description\": \"%s\"}`, http.StatusInternalServerError, err)\n\t\t\tlog.Error(msg)\n\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write(jsonCatalog)\n\t\t}\n\tdefault:\n\t\tmsg := fmt.Sprintf(`{\"status\": %d, \"description\": \"Allowed Methods: GET\"}`, http.StatusMethodNotAllowed)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusMethodNotAllowed)\n\t}\n\treturn\n}", "func ItemDetailsHandler(w http.ResponseWriter, r *http.Request, uniqueID string) {\n\n\trepo := CreateItemRepository()\n\titem, _ := repo.GetItem(uniqueID)\n\n\terr := templateparse.RenderTemplateFromFile(w, \"./webserver/items/itemDetails.html\",\n\t\t\"itemDetails.html\",\n\t\titem)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\n}", "func RemoveItemFromCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (h *PageHandler) GetDetail(c *fiber.Ctx) error {\n\tp, children, err := h.repository.FindBySlug(c.Params(\"slug\"))\n\n\tif err != nil {\n\t\tlog.Debugf(\"Error while getting page %s\", err)\n\t\treturn h.Error(404)\n\t}\n\n\treturn h.JSON(c, 200, &models.PageDetailResponse{\n\t\tPage: *p,\n\t\tChildren: children,\n\t})\n}", "func (c *TradeController) Show(ctx *app.ShowTradeContext) error {\n\t// TradeController_Show: start_implement\n\n\t// Put your logic here\n\tt, ok := tradeRegistry[ctx.TradeID]\n\tif !ok {\n\t\treturn ctx.NotFound()\n\t}\n\tres := &app.GoaTrade{\n\t\tTradeID: t.TradeID,\n\t\tContractID: t.ContractID,\n\t\tCounterpartyID: t.CounterpartyID,\n\t}\n\treturn ctx.OK(res)\n\t// TradeController_Show: end_implement\n}", "func (app *application) DeleteFromCart(w http.ResponseWriter, r *http.Request) {\r\n\t// a seller does not have a shopping cart\r\n\tisSeller := app.isSeller(r)\r\n\tif isSeller {\r\n\t\tw.WriteHeader(http.StatusUnauthorized)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusUnauthorized),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\t// retrieve userid from session cookie\r\n\tuserid := app.session.GetString(r, \"userid\")\r\n\r\n\t// retrieve ProductID from url\r\n\t// the ProducID should be valid\r\n\tproductid, err := strconv.Atoi(r.URL.Query().Get(\"productid\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusBadRequest),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// perform the delete at the database\r\n\terr = app.cart.DeleteItem(userid, productid)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: 
http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\tapp.session.Put(r, \"flash\", \"Cart item successfully deleted.\")\r\n\r\n\thttp.Redirect(w, r, \"/shoppingcart/\", http.StatusSeeOther)\r\n}", "func (c OrdersController) Show(orderID int) revel.Result {\n\tvar res models.Order\n\n\tfor _, order := range orders {\n\t\tif order.ID == orderID {\n\t\t\tres = order\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif res.ID == 0 {\n\t\treturn c.NotFound(\"Could not find beer\")\n\t}\n\n\treturn c.RenderJson(res)\n}", "func Checkout(c *cli.Context) {\n\tapi := api.Create(c.GlobalString(\"locale\"))\n\n\tconf := config.GetConfig()\n\tdefer conf.Flush()\n\n\tcartName := conf.CartNameFromCache(c.Args().First())\n\n\tif cart, exists := conf.Carts[cartName]; exists {\n\t\tif getResponse, getErr := api.CartGet(cart.CartID, cart.HMAC); getErr == nil {\n\t\t\tdelete(conf.Carts, cartName)\n\t\t\tbrowser.OpenURL(getResponse.Cart.PurchaseURL)\n\t\t} else {\n\t\t\tpanic(getErr)\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"Cart %s is unknown\\n\", cartName)\n\t\tos.Exit(1)\n\t}\n}", "func (r *CommerceCartQueryResolver) CommerceCart(ctx context.Context) (*dto.DecoratedCart, error) {\n\treq := web.RequestFromContext(ctx)\n\tdc, err := r.applicationCartReceiverService.ViewDecoratedCart(ctx, req.Session())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dto.NewDecoratedCart(dc), nil\n}", "func GetProductInCart() (interface{}, error) {\n\tvar shopping_carts []model.Shopping_cart\n\tif err := config.DB.Find(&shopping_carts).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn shopping_carts, nil\n}", "func (obj *ShopSys) CouponCardDetail(input ReqId, _opt ...map[string]string) (output ShopCouponCard, err error) {\n\tctx := context.Background()\n\treturn obj.CouponCardDetailWithContext(ctx, input, _opt...)\n}", "func (obj *ShopSys) CouponCardDetail(input ReqId, _opt ...map[string]string) (output ShopCouponCard, err error) {\n\tctx := 
context.Background()\n\treturn obj.CouponCardDetailWithContext(ctx, input, _opt...)\n}", "func Detail(id string) string {\r\n\tconfigData := config.LoadConfig()\r\n\r\n\treq, _ := http.NewRequest(\"GET\", \"https://api.vultr.com/v1/server/list?SUBID=\"+id, nil)\r\n\treq.Header.Set(\"API-Key\", configData.VultrApiKey)\r\n\tclient := new(http.Client)\r\n\tresp, err := client.Do(req)\r\n\tfmt.Println(err)\r\n\tbyteArray, _ := ioutil.ReadAll(resp.Body)\r\n\trespStr := string(byteArray)\r\n\trespStr = jsonedit.StripQ(respStr)\r\n\t//fmt.Println(respStr)\r\n\toutput := jsonedit.Key(\"vultrDetail\", respStr) // htmlをstringで取得\r\n\r\n\treturn output\r\n}", "func Show(c *gin.Context) {\r\n\tpost := getById(c)\r\n\tc.JSON(http.StatusOK, gin.H{\r\n\t\t\"messege\": \"\",\r\n\t\t\"data\": post,\r\n\t})\r\n}", "func (itemHandle) Retrieve(c *app.Context) error {\n\tvar items []item.Item\n\tids := strings.Split(c.Params[\"id\"], \",\")\n\titems, err := item.GetByIDs(c.SessionID, c.Ctx[\"DB\"].(*db.DB), ids)\n\tif err != nil {\n\t\tif err == item.ErrNotFound {\n\t\t\terr = app.ErrNotFound\n\t\t}\n\t\treturn err\n\t}\n\n\tc.Respond(items, http.StatusOK)\n\treturn nil\n}", "func AddCart() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trequestBody := model.ChatfuelCarts{}\n\t\tc.Bind(&requestBody)\n\n\t\tcart := model.Carts{\n\t\t\tMessengerUserID: requestBody.MessengerUserID,\n\t\t\tFirstName: requestBody.FirstName,\n\t\t\tProductID: requestBody.ProductID,\n\t\t\tProductName: requestBody.ProductName,\n\t\t\tQty: requestBody.Qty,\n\t\t\tPrice: requestBody.Price,\n\t\t}\n\n\t\tdb.Db.Create(&cart)\n\n\t\ttext := []model.Text{}\n\t\ttext = append(text, model.Text{\n\t\t\tText: \"加入購物車成功\",\n\t\t})\n\n\t\tmessage := model.Message{\n\t\t\tMessage: text,\n\t\t}\n\n\t\tc.JSON(http.StatusOK, message)\n\t}\n}", "func (dch *DealCashbackHandler) ViewByShoppingList(context *gin.Context) {\n\ttokenData := context.MustGet(\"Token\").(map[string]string)\n\n\tuserGUID := 
context.Param(\"guid\")\n\n\tif tokenData[\"user_guid\"] != userGUID {\n\t\tcontext.JSON(http.StatusUnauthorized, Error.TokenIdentityNotMatchError(\"view deal cashbacks in shopping list\"))\n\t\treturn\n\t}\n\n\tqueryStringValidationRules := map[string]string{\n\t\t\"page_number\": \"numeric\",\n\t\t\"page_limit\": \"numeric\",\n\t}\n\n\terror := Validation.Validate(context.Request.URL.Query(), queryStringValidationRules)\n\n\tif error != nil {\n\t\tcontext.JSON(http.StatusUnprocessableEntity, error)\n\t\treturn\n\t}\n\n\tshoppingListGUID := context.Param(\"shopping_list_guid\")\n\n\tshoppingList := dch.ShoppingListRepository.GetByGUIDAndUserGUID(shoppingListGUID, userGUID, \"\")\n\n\tif shoppingList.GUID == \"\" {\n\t\tcontext.JSON(http.StatusNotFound, Error.ResourceNotFoundError(\"Shopping List\", \"guid\", shoppingListGUID))\n\t\treturn\n\t}\n\n\tpageNumber := context.Query(\"page_number\")\n\tpageLimit := context.Query(\"page_limit\")\n\trelations := context.Query(\"include\")\n\n\ttransactionStatus := context.Query(\"transaction_status\")\n\n\tdbTransaction := context.MustGet(\"DB\").(*gorm.DB).Begin()\n\n\tuserDealCashbacks, totalUserDealCashback, error := dch.DealCashbackService.GetUserDealCashbacksByShoppingList(dbTransaction, userGUID, shoppingListGUID,\n\t\ttransactionStatus, pageNumber, pageLimit, relations)\n\n\tif error != nil {\n\t\terrorCode, _ := strconv.Atoi(error.Error.Status)\n\t\tcontext.JSON(errorCode, error)\n\t\treturn\n\t}\n\n\tdealCashbackResponse := dch.DealCashbackTransformer.transformCollection(context.Request, userDealCashbacks, totalUserDealCashback, pageLimit)\n\n\tcontext.JSON(http.StatusOK, gin.H{\"data\": dealCashbackResponse})\n}", "func RetrieveUserCart(service Service, userService users.Service) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\n\t\tusername, err := auth.GetLoggedInUsername(r)\n\t\tif err != nil 
{\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusForbidden, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tuser, err := userService.RetrieveUserByUsername(ctx, username)\n\t\tif err != nil || user == nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusUnauthorized, errorcode.UserNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\n\t\tcart, err := service.GetUserCart(ctx, user.ID)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusInternalServerError, \"internal_error\", err.Error())\n\t\t\treturn\n\t\t}\n\t\thttpresponse.RespondJSON(w, http.StatusOK, cart, nil)\n\t}\n}", "func (app *application) CheckOut(w http.ResponseWriter, r *http.Request) {\r\n\t// retrieve userid from session cookie\r\n\tuserid := app.session.GetString(r, \"userid\")\r\n\r\n\t// check that every item in the cart has legal qty\r\n\t// retrieve information from database\r\n\tshoppingcart, err := app.cart.CheckOut(userid)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t})\r\n\t\treturn\r\n\t}\r\n\r\n\t// loop through the results to check if every item\r\n\t// passes the check\r\n\tvar pass bool = true\r\n\tfor _, item := range shoppingcart {\r\n\t\tif item.Invalid {\r\n\t\t\tpass = false\r\n\t\t}\r\n\t}\r\n\r\n\t// if any item fails the check, it is flagged\r\n\tif !pass {\r\n\t\tapp.render(w, r, \"shoppingcart.page.tmpl\", &templateData{\r\n\t\t\tShoppingCart: shoppingcart,\r\n\t\t})\r\n\t} else {\r\n\t\t// else proceed to create an order for every item\r\n\t\tfor _, v := range shoppingcart {\r\n\t\t\tv.UserID = userid\r\n\t\t\terr := app.orders.Create(v)\r\n\t\t\tif err != nil {\r\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\t\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\t\t\tError: 
http.StatusText(http.StatusInternalServerError),\r\n\t\t\t\t})\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t// then delete the user's shopping cart\r\n\t\terr = app.cart.DeleteAll(userid)\r\n\t\tif err != nil {\r\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\t\tapp.render(w, r, \"error.page.tmpl\", &templateData{\r\n\t\t\t\tError: http.StatusText(http.StatusInternalServerError),\r\n\t\t\t})\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tapp.render(w, r, \"success.page.tmpl\", &templateData{\r\n\t\t\tUser: &models.User{UserID: userid},\r\n\t\t})\r\n\t}\r\n}", "func (tc *TransactionsController) Show(c *gin.Context) {\n\thash := common.HexToHash(c.Param(\"TxHash\"))\n\tif tx, err := tc.App.GetStore().FindTxByAttempt(hash); err == orm.ErrorNotFound {\n\t\tpublicError(c, http.StatusNotFound, errors.New(\"Transaction not found\"))\n\t} else if err != nil {\n\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t} else if doc, err := jsonapi.Marshal(presenters.NewTx(tx)); err != nil {\n\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t} else {\n\t\tc.Data(http.StatusOK, MediaType, doc)\n\t}\n}", "func Show(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\titem, _, err := summary.ByID(c.DB, c.Param(\"id\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"summary/show\")\n\tv.Vars[\"item\"] = item\n\tv.Render(w, r)\n}", "func (s *Server) GetCart(ctx context.Context, req *proto.CartRequest) (*proto.CartResponse, error) {\n\tcart, err := s.carts.Cart(ctx, req.Id)\n\tif err != nil {\n\t\tif err == errNotFound {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"cart with ID: %d not found\", req.Id)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get the Cart: %s\", err)\n\t}\n\n\tpCart, err := toProtoCart(cart)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to convert the Cart: %s\", err)\n\t}\n\n\treturn 
&proto.CartResponse{Cart: pCart}, nil\n}", "func (u *UseCase) ShowOrder(w http.ResponseWriter, r *http.Request) {\n\n\tmerchant := r.Header.Get(\"merchant_id\")\n\tuuid := mux.Vars(r)[\"order_id\"]\n\n\torder, err := cache.ShowOrder(merchant, uuid)\n\tif err == nil && order != nil {\n\t\trespondWithJSON(w, http.StatusOK, order)\n\t\treturn\n\t}\n\n\tvar dbOrders models.OrderPg\n\tu.DB.Conn.Table(\"orders\").Where(\"uuid = ?\", uuid).First(&dbOrders)\n\n\trespondWithJSON(w, http.StatusOK, dbOrders.Payload)\n}", "func (obj *ShopSys) CouponDetail(input ReqId, _opt ...map[string]string) (output ResShopCoupon, err error) {\n\tctx := context.Background()\n\treturn obj.CouponDetailWithContext(ctx, input, _opt...)\n}", "func (obj *ShopSys) CouponDetail(input ReqId, _opt ...map[string]string) (output ResShopCoupon, err error) {\n\tctx := context.Background()\n\treturn obj.CouponDetailWithContext(ctx, input, _opt...)\n}", "func getCatalogsPage(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\tpu, _ := GetUserFromSession(res, req)\n\tServeTemplateWithParams(res, \"catalogs.html\", pu)\n}", "func (store *Store) EmptyCart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You are trying empty the cart.\")\n\treturn true, nil\n}", "func showMenuItem(db *sql.DB, w http.ResponseWriter, r *http.Request) {\n var menuitem MenuItem\n drink_id := r.URL.Path[len(\"/menu/\"):]\n\n // Get basic receipe information\n\trow := db.QueryRow(\"select id, name, description from recipe where id = ?\", drink_id)\n\terr := row.Scan(&menuitem.Id, &menuitem.DrinkName, &menuitem.Description)\n if err == sql.ErrNoRows {\n http.NotFound(w, r)\n return\n }\n\n\tmenuitem.Dietary = \"\"\n tx, _ := db.Begin()\n defer tx.Rollback()\n if recipeContainsAlcohol(tx, strconv.Itoa(menuitem.Id)) {\n\t\tmenuitem.Dietary += \"One or more ingredients in this recipe contain alcohol. \"\n\t} else {\n\t\tmenuitem.Dietary += \"There is no alcohol in this recipe. 
\"\n }\n\n if recipeIsVegan(tx, strconv.Itoa(menuitem.Id)) {\n\t\tmenuitem.Dietary += \"This recipe is vegan. \"\n\t} else {\n\t\tmenuitem.Dietary += \"One or more ingredients in this recipe contain animal products. \"\n }\n\n\tmenuitem.Direct = Direct\n menuitem.Ingredients = getRecipeIngrediants(db, drink_id)\n\n t, _ := template.ParseFiles(\"menu_item.html\")\n t.Execute(w, menuitem)\n}", "func Show(w http.ResponseWriter, r *http.Request) {\n\tc := flight.Context(w, r)\n\n\titem, _, err := code.ByID(c.DB, c.Param(\"id\"))\n\tif err != nil {\n\t\tc.FlashErrorGeneric(err)\n\t\tc.Redirect(uri)\n\t\treturn\n\t}\n\n\tv := c.View.New(\"code/show\")\n\tv.Vars[\"item\"] = item\n\tv.Render(w, r)\n}", "func (store *Store) RemoveFromCart(ctx *gin.Context) (bool, error) {\n\tctx.String(200, \"You are trying to remove items from the cart.\")\n\treturn true, nil\n}", "func (s *Service) Show(ctx context.Context, req *ShowRequest) (*ShowReply, error) {\n\ti, err := s.getInstanceByToken(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := cid.Decode(req.GetCid())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := i.Show(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treply := &ShowReply{\n\t\tCidInfo: &CidInfo{\n\t\t\tJobID: info.JobID.String(),\n\t\t\tCid: info.Cid.String(),\n\t\t\tCreated: info.Created.UnixNano(),\n\t\t\tHot: &HotInfo{\n\t\t\t\tEnabled: info.Hot.Enabled,\n\t\t\t\tSize: int64(info.Hot.Size),\n\t\t\t\tIpfs: &IpfsHotInfo{\n\t\t\t\t\tCreated: info.Hot.Ipfs.Created.UnixNano(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tCold: &ColdInfo{\n\t\t\t\tFilecoin: &FilInfo{\n\t\t\t\t\tDataCid: info.Cold.Filecoin.DataCid.String(),\n\t\t\t\t\tProposals: make([]*FilStorage, len(info.Cold.Filecoin.Proposals)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor i, p := range info.Cold.Filecoin.Proposals {\n\t\treply.CidInfo.Cold.Filecoin.Proposals[i] = &FilStorage{\n\t\t\tProposalCid: p.ProposalCid.String(),\n\t\t\tRenewed: p.Renewed,\n\t\t\tDuration: 
p.Duration,\n\t\t\tActivationEpoch: p.ActivationEpoch,\n\t\t\tMiner: p.Miner,\n\t\t}\n\t}\n\treturn reply, nil\n}", "func UpdateCartItem(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (this *FamilyAccount) showDetails() {\n\tfmt.Println(\"------My Income and Expense Detail-------\")\n\tif this.flag {\n\t\t//因为我们用的是FamilyAccount结构体里传过来的字段,所以不能直接yongflag, 要用this. , 表示调用这个方法的结构体变量里面的字段\n\t\tfmt.Println(this.details)\n\t} else {\n\t\tfmt.Println(\"No current income and expenditure details!\")\n\t}\n}", "func GenerateUniqueCart(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func prepareCartWithDeliveries(t *testing.T, e *httpexpect.Expect) {\n\tt.Helper()\n\thelper.GraphQlRequest(t, e, loadGraphQL(t, \"cart_add_to_cart\", map[string]string{\"MARKETPLACE_CODE\": \"fake_simple\", \"DELIVERY_CODE\": \"delivery1\"})).Expect().Status(http.StatusOK)\n\thelper.GraphQlRequest(t, e, loadGraphQL(t, \"cart_add_to_cart\", map[string]string{\"MARKETPLACE_CODE\": \"fake_simple\", \"DELIVERY_CODE\": \"delivery2\"})).Expect().Status(http.StatusOK)\n}", "func TestgetCart(t *testing.T) {\n\n\tt.Run(\"Open\", func(tt *testing.T) {\n\t\t// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.\n\t\tw := httptest.NewRecorder()\n\n\t\t// Create a request to pass to our handler. 
We don't have any query parameters for now, so we'll\n\t\t// pass 'nil' as the third parameter.\n\t\tr, err := http.NewRequest(\"GET\", \"/carts/1\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdataBase, _ := db.OpenDB(\"db.json\")\n\t\tservice := NewService(dataBase)\n\t\thandler := http.HandlerFunc(service.getCart)\n\n\t\t// Our handlers satisfy http.Handler, so we can call their ServeHTTP method\n\t\t// directly and pass in our Request and ResponseRecorder.\n\t\thandler.ServeHTTP(w, r)\n\n\t\t// Check the status code is what we expect.\n\t\tfmt.Println(w.Code)\n\t\tif status := w.Code; status != http.StatusOK {\n\t\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\t\tstatus, http.StatusOK)\n\t\t}\n\t\t// Check the response body is what we expect.\n\t\t// expected := `{\"Id\": string}`\n\t\t// if rr.Body.String() != expected {\n\t\t// \tt.Errorf(\"handler returned unexpected body: got %v want %v\",\n\t\t// \t\trr.Body.String(), expected)\n\t\t// }\n\t})\n}", "func CatalogHandler(c *gin.Context) {\n\tconfigPath := os.Getenv(\"BROKER_CONFIG_PATH\")\n\tcatalogServices, err := config.ReadConfig(configPath)\n\tif err != nil {\n\t\tlog.Panicf(\"error reading config file %s\", err)\n\t}\n\n\t// filter with supported drivers?\n\tlibstorageServices, err := libstoragewrapper.GetServices(NewLibsClient())\n\tif err != nil {\n\t\tlog.Panicf(\"error retrieving services from libstorage host %s : (%s) \", os.Getenv(\"LIBSTORAGE_URI\"), err)\n\t}\n\n\tplansExists := DoPlansExistInLibstorage(catalogServices[0].Plans, libstorageServices)\n\tif !plansExists {\n\t\tlog.Panic(\"plan(s) do not exist in libstorage services.\")\n\t}\n\n\tcatalogServices[0].Plans, err = AddCatalogPlanIDs(catalogServices[0].Plans, libstorageHost)\n\tif err != nil {\n\t\tlog.Panic(\"could not modify plans' ids\")\n\t}\n\n\tc.JSON(http.StatusOK, model.Catalog{Services: catalogServices})\n}", "func (c *Client) ShowContract(ctx context.Context, path string) 
(*http.Response, error) {\n\treq, err := c.NewShowContractRequest(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}", "func (app *application) checkoutForm(w http.ResponseWriter, r *http.Request) {\n\tsession, err := app.sessionStore.Get(r, \"session-name\")\n\tif err != nil {\n\t\tapp.serverError(w, err)\n\t\treturn\n\t}\n\tcustomerID := session.Values[\"customerID\"].(int)\n\tcart := app.carts[customerID]\n\n\tcartRowSlice := make([]cartRow, 0)\n\ttotal := 0\n\n\tfor listID, quantity := range cart {\n\t\tlisting, err := app.listings.Get(listID)\n\t\tif err != nil {\n\t\t\tapp.serverError(w, err)\n\t\t\treturn\n\t\t}\n\t\trow := cartRow{\n\t\t\tListingID: listing.ID,\n\t\t\tName: listing.Name,\n\t\t\tPrice: listing.Price,\n\t\t\tQuantity: quantity,\n\t\t\tAmount: quantity * listing.Price,\n\t\t}\n\t\tcartRowSlice = append(cartRowSlice, row)\n\t\ttotal += quantity * listing.Price\n\t}\n\tapp.render(w, r, \"checkout.page.tmpl\", &templateData{\n\t\tCart: cartRowSlice,\n\t\tCartTotal: total,\n\t\tForm: forms.New(nil),\n\t})\n}", "func ViewAllProducts(c *gin.Context) {\n\tvar prod []Products\n\tdb := DbConn()\n\terr := db.Find(&prod).Error\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tc.JSON(http.StatusServiceUnavailable, err)\n\t}\n\n\tc.JSON(http.StatusOK, prod)\n}", "func IndexGet(ctx *iris.Context) {\n\n\tvar Send ProductoModel.SProducto\n\tNameUsrLoged, MenuPrincipal, MenuUsr, errSes := Session.GetDataSession(ctx) //Retorna los datos de la session\n\tSend.SSesion.Name = NameUsrLoged\n\tSend.SSesion.MenuPrincipal = template.HTML(MenuPrincipal)\n\tSend.SSesion.MenuUsr = template.HTML(MenuUsr)\n\tif errSes != nil {\n\t\tSend.SEstado = false\n\t\tSend.SMsj = errSes.Error()\n\t\tctx.Render(\"ZError.html\", Send)\n\t\treturn\n\t}\n\n\tvar Cabecera, Cuerpo string\n\tnumeroRegistros = ProductoModel.CountAll()\n\tpaginasTotales = MoGeneral.Totalpaginas(numeroRegistros, limitePorPagina)\n\tProductos := 
ProductoModel.GetAll()\n\n\tarrIDMgo = []bson.ObjectId{}\n\tfor _, v := range Productos {\n\t\tarrIDMgo = append(arrIDMgo, v.ID)\n\t}\n\tarrIDElastic = arrIDMgo\n\n\tif numeroRegistros <= limitePorPagina {\n\t\tCabecera, Cuerpo = ProductoModel.GeneraTemplatesBusqueda(Productos[0:numeroRegistros])\n\t} else if numeroRegistros >= limitePorPagina {\n\t\tCabecera, Cuerpo = ProductoModel.GeneraTemplatesBusqueda(Productos[0:limitePorPagina])\n\t}\n\n\tSend.SIndex.SCabecera = template.HTML(Cabecera)\n\tSend.SIndex.SBody = template.HTML(Cuerpo)\n\tSend.SIndex.SGrupo = template.HTML(CargaCombos.CargaComboMostrarEnIndex(limitePorPagina))\n\tPaginacion := MoGeneral.ConstruirPaginacion(paginasTotales, 1)\n\tSend.SIndex.SPaginacion = template.HTML(Paginacion)\n\tSend.SIndex.SResultados = true\n\n\tctx.Render(\"ProductoIndex.html\", Send)\n\n}", "func show(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Get the `employeeid` query string parameter from the request and\n\t// validate it.\n\temployeeid := req.QueryStringParameters[\"employeeid\"]\n\tif !employeeidRegexp.MatchString(employeeid) {\n\t\treturn clientError(http.StatusBadRequest)\n\t} //fetch a specific employee record from dynamodb in this case\n\n\t// Fetch the employee record from the database based on the employeeid value.\n\temp, err := getItem(employeeid)\n\tif err != nil {\n\t\treturn serverError(err)\n\t}\n\tif emp == nil {\n\t\treturn clientError(http.StatusNotFound)\n\t}\n\n\t// The APIGatewayProxyResponse.Body field needs to be a string, so\n\t// we marshal the employee record into JSON.\n\tjs, err := json.Marshal(emp)\n\tif err != nil {\n\t\treturn serverError(err)\n\t}\n\n\t// Return a response with a 200 OK status and the JSON employee record\n\t// as the body.\n\treturn events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: string(js),\n\t}, nil\n}", "func (rest *RestApi) Init() error {\n rest.GoCart.loadConfig()\n mysql := MysqlConnection{\n host: 
rest.GoCart.Config.Database.Host,\n port: rest.GoCart.Config.Database.Port,\n user: rest.GoCart.Config.Database.Username,\n password: rest.GoCart.Config.Database.Password,\n database: rest.GoCart.Config.Database.Database,\n table: rest.GoCart.Config.Database.Cart.Table,\n table_index: rest.GoCart.Config.Database.Cart.Mappings.Index,\n }\n mysql.EnsureCartTable()\n\n rest.GoCart = GoCart{\n Connection: mysql,\n }\n router := mux.NewRouter()\n\n /**\n * GET request\n */\n //http.HandleFunc(\"/gocart/getCart\", func(w http.ResponseWriter, r *http.Request) {\n router.HandleFunc(\"/gocart/getCart\", func(w http.ResponseWriter, r *http.Request) {\n cart_id, err := strconv.ParseInt(r.URL.Query().Get(\"cart_id\"), 10, 64)\n if err != nil {\n panic(err)\n }\n rest.GetCart(w, r, cart_id)\n }).Methods(\"GET\")\n\n /**\n * POST request\n */\n router.HandleFunc(\"/gocart/addToCart\", func(w http.ResponseWriter, r *http.Request) {\n cart_id, err := strconv.ParseInt(r.URL.Query().Get(\"cart_id\"), 10, 64)\n if err != nil {\n panic(err)\n }\n items_qsp := r.URL.Query().Get(\"items\")\n item_quantity := r.URL.Query().Get(\"quantity\")\n\n ids := strings.Split(items_qsp, \",\")\n for _, item_id := range ids {\n item_id, err := strconv.ParseInt(item_id, 10, 64)\n if err != nil {\n panic(err)\n }\n item_quantity, err := strconv.ParseInt(item_quantity, 10, 64)\n if err != nil {\n panic(err)\n }\n rest.AddToCart(w, r, cart_id, item_id, item_quantity)\n }\n // @TODO: Print some error/success message\n }).Methods(\"POST\")\n\n log.Fatal(http.ListenAndServe(\":9090\", router))\n return nil\n}", "func ShowPost(c buffalo.Context) error {\n\tdatabase := c.Value(\"tx\").(*pop.Connection)\n\n\tpost := &models.Post{}\n\n\tif txErr := database.Eager().Find(post, c.Param(\"post_id\")); txErr != nil {\n\n\t\tnotFoundResponse := utils.NewErrorResponse(\n\t\t\thttp.StatusNotFound,\n\t\t\t\"post_id\",\n\t\t\tfmt.Sprintf(\"The requested post %s is removed or move to somewhere else.\", 
c.Param(\"post_id\")),\n\t\t)\n\t\treturn c.Render(http.StatusNotFound, r.JSON(notFoundResponse))\n\t}\n\n\tpostResponse := PostResponse{\n\t\tCode: fmt.Sprintf(\"%d\", http.StatusOK),\n\t\tData: post,\n\t}\n\treturn c.Render(http.StatusOK, r.JSON(postResponse))\n}", "func (rest *RestApi) AddToCart(w http.ResponseWriter, r *http.Request, cart_id int64, item_id int64, quantity int64) {\n\n //@ TODO: Need to check for quantity and increment if necessary\n\n cart := rest.GoCart.GetCart(cart_id)\n\n item := rest.GoCart.GetItem(item_id)\n item.SetItemQuantity(quantity)\n\n cart.Add(*item)\n rest.GoCart.SaveCart(*cart)\n}", "func Show(c *eclcloud.ServiceClient, id string) (r ShowResult) {\n\t_, r.Err = c.Get(showURL(c, id), &r.Body, &eclcloud.RequestOpts{\n\t\tOkCodes: []int{200},\n\t})\n\n\treturn\n}", "func S3CmdInfo(cmd *cobra.Command, args []string) {\n\tcontainerNameToShow := args[0]\n\tcontainerName := containerNamePrefix + containerNameToShow\n\n\tnotExistCheck(containerName)\n\tnotRunningCheck(containerName)\n\tcommand := []string{\"s3cmd\", \"info\", \"s3://\" + args[1]}\n\tif debugS3 {\n\t\tcommand = append(command, \"--debug\")\n\t}\n\n\toutput := strings.TrimSuffix(string(execContainer(containerName, command)), \"\\n\") + \" on cluster \" + containerNameToShow\n\tfmt.Println(output)\n}", "func (j JSONPrinter) Detail(data interface{}) error {\n\treturn j.Print(data)\n}", "func (self *Controller) HandleProductOrder(product Product) (*Sale, error) {\n\n sale := NewSale(self.register.nextTransactionId(), 0.0)\n\n // product is an interface to a Product which can be of any type that conforms\n // to that interface.\n t, err := product.GetTotal(self.datastore)\n if err == nil {\n sale.Cost = t\n return sale, nil\n }\n return nil, err\n}", "func (prod *Product) printData() {\n\tfmt.Printf(\"ID: %v\\n\", prod.id)\n\tfmt.Printf(\"Title: %v\\n\", prod.title)\n\tfmt.Printf(\"Description: %v\\n\", prod.description)\n\tfmt.Printf(\"Price: $%.2f\\n\", prod.price)\n}", 
"func GetArtistCtrl(c *gin.Context) {\n\tartistId := c.Param(\"id\")\n\tid, err := strconv.Atoi(artistId)\n\tif err != nil {\n\t\tlog.Printf(\"Error Occur while Converting string %d to %T\", id, id)\n\t}\n\tartist := dao.GetArtist(id)\n\tlog.Printf(\"%+v\", artist)\n\t//TODO: Look at refactoring the json.Marshal & Unmarshal [DRY]\n\tresponse, err := json.Marshal(artist)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = json.Unmarshal(response, &artist)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"statusCode\": http.StatusOK, \"response\": artist})\n}", "func (fn RemoveFromCartHandlerFunc) Handle(params RemoveFromCartParams, principal interface{}) middleware.Responder {\n\treturn fn(params, principal)\n}", "func (c CategoryPostController) Show(ctx *fasthttp.RequestCtx) {\n\tvar post model.PostDEP\n\tvar postSlug model.PostSlug\n\tvar postDetail model.PostDetail\n\tvar postCategoryAssignment model.PostCategoryAssignment\n\tvar category model.Category\n\tvar user model.User\n\tc.GetDB().QueryRowWithModel(fmt.Sprintf(`\n\t\tSELECT \n\t\t\tp.id as id, p.author_id as author_id, u.username as author_username, \n\t\t\tp.inserted_at as inserted_at, ps.slug as slug, pd.title as title, \n\t\t\tpd.description as description, pd.content as content\n\t\tFROM %s AS p\n\t\tLEFT OUTER JOIN %s AS ps ON p.id = ps.post_id\n\t\tLEFT OUTER JOIN %s AS ps2 ON ps.post_id = ps2.post_id AND ps.id < ps2.id\n\t\tINNER JOIN %s AS pd ON p.id = pd.post_id\n\t\tLEFT OUTER JOIN %s AS pd2 ON pd.post_id = pd2.post_id AND pd.id < pd2.id\n\t\tINNER JOIN %s AS u ON p.author_id = u.id\n\t\tINNER JOIN %s AS pca ON p.id = pca.post_id\n\t\tINNER JOIN %s AS c ON pca.category_id = c.id\n\t\tWHERE ps2.id IS NULL AND pd2.id IS NULL AND (c.id::text = $1::text OR c.slug = $1) AND \n\t\t\t(p.id::text = $2::text OR ps.slug = $2)\n\t`, c.Model.TableName(), postSlug.TableName(), postSlug.TableName(), postDetail.TableName(),\n\t\tpostDetail.TableName(), user.TableName(), 
postCategoryAssignment.TableName(), category.TableName()),\n\t\t&post,\n\t\tphi.URLParam(ctx, \"categoryID\"),\n\t\tphi.URLParam(ctx, \"postID\")).Force()\n\n\tc.JSONResponse(ctx, model2.ResponseSuccessOne{\n\t\tData: post,\n\t}, fasthttp.StatusOK)\n}", "func (c *RarityController) Show(ctx *app.ShowRarityContext) error {\n\t// RarityController_Show: start_implement\n\n\tdataStore := &dal.DataStore{}\n\tdataStore.GetSession()\n\t// Close the session\n\tdefer dataStore.Close()\n\tdc := dal.NewDalRarity(dataStore)\n\tuuid, err := helpers.DecodeUUID(ctx.RarityID)\n\n\tif err != nil {\n\t\treturn ctx.NotFound()\n\t}\n\n\trarity, err := dc.Fetch(uuid)\n\n\tif err != nil {\n\t\tctx.ResponseData.Service.LogError(\"InternalServerError\", \"req_id\", middleware.ContextRequestID(ctx), \"ctrl\", \"Rarity\", \"action\", \"Show\", ctx.RequestData.Request.Method, ctx.RequestData.Request.URL, \"databaseError\", err.Error())\n\t\treturn ctx.InternalServerError()\n\t}\n\n\t// RarityController_Show: end_implement\n\tres, _ := factory.CreateRarity(rarity)\n\treturn ctx.OK(res)\n}", "func GetDetail(c *server.Context) error {\n\tvar (\n\t\terr error\n\t\tres *ware.Ware\n\t\treq struct {\n\t\t\tID uint32 `json:\"id\" validate:\"required\"`\n\t\t}\n\t)\n\n\terr = c.JSONBody(&req)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\terr = c.Validate(req)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\tconn, err := mysql.Pool.Get()\n\tdefer mysql.Pool.Release(conn)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\tres, err = ware.Service.GetByID(conn, req.ID)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\treturn core.WriteStatusAndDataJSON(c, constants.ErrSucceed, res)\n}" ]
[ "0.7177097", "0.64602536", "0.63461", "0.6326297", "0.6014537", "0.59876144", "0.5870068", "0.5844098", "0.57844645", "0.5701917", "0.5658653", "0.56194425", "0.559065", "0.5489284", "0.54830635", "0.5445919", "0.54083335", "0.5368832", "0.5322026", "0.523323", "0.5221796", "0.52107817", "0.5155989", "0.5119505", "0.5097529", "0.5097529", "0.50493723", "0.5040473", "0.5020432", "0.49883556", "0.4968056", "0.49633276", "0.49606696", "0.4938958", "0.49325064", "0.49106258", "0.49106258", "0.48952276", "0.48542258", "0.48523885", "0.4846845", "0.48353165", "0.4833541", "0.48031127", "0.47987798", "0.47903395", "0.47666734", "0.47603795", "0.47444463", "0.47429016", "0.47391802", "0.4732318", "0.47301108", "0.4701115", "0.46760947", "0.46665612", "0.46665612", "0.46649945", "0.4663775", "0.4656797", "0.4655621", "0.46532205", "0.46475646", "0.4642221", "0.46344578", "0.46335295", "0.4623258", "0.46188417", "0.46033293", "0.46033293", "0.4595384", "0.4585035", "0.45395672", "0.45323488", "0.45210522", "0.45193952", "0.45120895", "0.44921124", "0.4487267", "0.44830778", "0.44730604", "0.44705158", "0.44493994", "0.44312727", "0.44242573", "0.44095427", "0.43998942", "0.43962428", "0.43852967", "0.43847588", "0.43844926", "0.43724656", "0.43716738", "0.43668994", "0.43525743", "0.43506473", "0.43440303", "0.434124", "0.4336586", "0.43348625" ]
0.51809716
22
Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest is an autogenerated conversion function.
func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error { return autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func 
Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func NewVolumeFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn defaultVolumeQuietFormat\n\t\t}\n\t\treturn defaultVolumeTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `name: {{.Name}}`\n\t\t}\n\t\treturn `name: {{.Name}}\\ndriver: {{.Driver}}\\n`\n\t}\n\treturn Format(source)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out 
*internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := 
k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. 
Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: 
encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func 
Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func (c *UFSClient) NewDescribeUFSVolumePriceRequest() *DescribeUFSVolumePriceRequest {\n\treq := &DescribeUFSVolumePriceRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func From(vol *apis.ZFSVolume) *ZFSVolume {\n\treturn &ZFSVolume{\n\t\tObject: vol,\n\t}\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req 
*netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn 
nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func volumeToAPIType(v volume.Volume) *types.Volume {\n\treturn &types.Volume{\n\t\tName: v.Name(),\n\t\tDriver: v.DriverName(),\n\t\tMountpoint: v.Path(),\n\t}\n}", "func 
Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client 
config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *VolumeDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Force != nil {\n\n\t\t// query param force\n\t\tvar qrForce bool\n\t\tif o.Force != nil {\n\t\t\tqrForce = *o.Force\n\t\t}\n\t\tqForce := swag.FormatBool(qrForce)\n\t\tif qForce != \"\" {\n\t\t\tif err := r.SetQueryParam(\"force\", qForce); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: 
s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func VolumeType(volumeType string) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.VolumeType = volumeType\n\t\treturn nil\n\t}\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn 
autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func ToPbVolume(in api.Volume) *pb.Volume {\n\treturn &pb.Volume{\n\t\tID: in.ID,\n\t\tName: in.Name,\n\t\tSize: int32(in.Size),\n\t\tSpeed: pb.VolumeSpeed(in.Speed),\n\t}\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (c *UFSClient) NewRemoveUFSVolumeRequest() *RemoveUFSVolumeRequest {\n\treq := &RemoveUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UFSClient) NewCreateUFSVolumeRequest() 
*CreateUFSVolumeRequest {\n\treq := &CreateUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (c *restClient) CreateVolume(ctx context.Context, req *netapppb.CreateVolumeRequest, opts ...gax.CallOption) (*CreateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"volumeId\", fmt.Sprintf(\"%v\", req.GetVolumeId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := 
io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func FormatVolumeSize(inputSize int64, step int64) int64 {\n\tif inputSize <= Gib || step < 0 {\n\t\treturn Gib\n\t}\n\tremainder := inputSize % step\n\tif remainder != 0 {\n\t\treturn inputSize - remainder + step\n\t}\n\treturn inputSize\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func (o 
*GetContainersUUIDVolumesVolumeUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param volume_uuid\n\tif err := r.SetPathParam(\"volume_uuid\", o.VolumeUUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *QtreeCreateRequest) Volume() string {\n\tvar r string\n\tif o.VolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.VolumePtr\n\treturn r\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {\n\tif err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\tklog.Errorf(\"invalid create volume req: %v\", req)\n\t\treturn nil, err\n\t}\n\n\tvolumeCapabilities := req.GetVolumeCapabilities()\n\tname := req.GetName()\n\tif len(name) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"CreateVolume Name must be provided\")\n\t}\n\tif len(volumeCapabilities) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"CreateVolume Volume capabilities must be provided\")\n\t}\n\n\tvolSizeBytes := 
int64(req.GetCapacityRange().GetRequiredBytes())\n\trequestGiB := int(util.RoundUpGiB(volSizeBytes))\n\n\tparameters := req.GetParameters()\n\tvar storageAccountType, resourceGroup, location, accountName, containerName string\n\n\t// Apply ProvisionerParameters (case-insensitive). We leave validation of\n\t// the values to the cloud provider.\n\tfor k, v := range parameters {\n\t\tswitch strings.ToLower(k) {\n\t\tcase \"skuname\":\n\t\t\tstorageAccountType = v\n\t\tcase \"storageaccounttype\":\n\t\t\tstorageAccountType = v\n\t\tcase \"location\":\n\t\t\tlocation = v\n\t\tcase \"storageaccount\":\n\t\t\taccountName = v\n\t\tcase \"resourcegroup\":\n\t\t\tresourceGroup = v\n\t\tcase \"containername\":\n\t\t\tcontainerName = v\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid option %q\", k)\n\t\t}\n\t}\n\n\tif resourceGroup == \"\" {\n\t\tresourceGroup = d.cloud.ResourceGroup\n\t}\n\n\taccount, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.BlockBlobStorage), resourceGroup, location, goofysAccountNamePrefix)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get storage key for storage account %s: %v\", accountName, err)\n\t}\n\taccountName = account\n\n\tif containerName == \"\" {\n\t\tcontainerName = getValidContainerName(name)\n\t}\n\n\tklog.V(2).Infof(\"begin to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d)\", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB)\n\tclient, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblobClient := client.GetBlobService()\n\tcontainer := blobClient.GetContainerReference(containerName)\n\t_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create container(%s) on account(%s) type(%s) rg(%s) 
location(%s) size(%d), error: %v\", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err)\n\t}\n\n\tvolumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, containerName)\n\n\t/* todo: snapshot support\n\tif req.GetVolumeContentSource() != nil {\n\t\tcontentSource := req.GetVolumeContentSource()\n\t\tif contentSource.GetSnapshot() != nil {\n\t\t}\n\t}\n\t*/\n\tklog.V(2).Infof(\"create container %s on storage account %s successfully\", containerName, accountName)\n\n\treturn &csi.CreateVolumeResponse{\n\t\tVolume: &csi.Volume{\n\t\t\tVolumeId: volumeID,\n\t\t\tCapacityBytes: req.GetCapacityRange().GetRequiredBytes(),\n\t\t\tVolumeContext: parameters,\n\t\t},\n\t}, nil\n}", "func svformatFromQueryString(r *http.Request) SparseVolFormat {\n\tswitch r.URL.Query().Get(\"format\") {\n\tcase \"srles\":\n\t\treturn FormatStreamingRLE\n\tcase \"blocks\":\n\t\treturn FormatBinaryBlocks\n\tdefault:\n\t\treturn FormatLegacyRLE\n\t}\n}", "func (r ApiGetHyperflexVolumeListRequest) Filter(filter string) ApiGetHyperflexVolumeListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (c *UFSClient) 
NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func NewVolumeService(api api.ClientAPI) VolumeAPI {\n\treturn &VolumeService{\n\t\tprovider: providers.FromClient(api),\n\t}\n}", "func StorageFormatRequest(mc Control, parms interface{}, ch chan ClientResult) {\n\tsRes := StorageFormatResult{}\n\n\t// Maximum time limit for format is 2hrs to account for lengthy low\n\t// level formatting of multiple devices sequentially.\n\tctx, cancel := context.WithTimeout(context.Background(), 120*time.Minute)\n\tdefer cancel()\n\n\treq := &ctlpb.StorageFormatReq{}\n\tif parms != nil {\n\t\tif preq, ok := parms.(*ctlpb.StorageFormatReq); ok {\n\t\t\treq = preq\n\t\t}\n\t}\n\n\tstream, err := mc.getCtlClient().StorageFormat(ctx, req)\n\tif err != nil {\n\t\tch <- ClientResult{mc.getAddress(), nil, err}\n\t\treturn // stream err\n\t}\n\n\tfor {\n\t\tresp, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\terr := errors.Wrapf(err, msgStreamRecv, stream)\n\t\t\tmc.logger().Errorf(err.Error())\n\t\t\tch <- ClientResult{mc.getAddress(), nil, err}\n\t\t\treturn // recv err\n\t\t}\n\n\t\tsRes.Nvme = resp.Crets\n\t\tsRes.Scm = resp.Mrets\n\n\t\tch <- ClientResult{mc.getAddress(), sRes, nil}\n\t}\n}", "func (v *VolumeService) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(options.Name) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound and\n\t// check if the notfound should be ignored\n\tif strings.Contains(options.Name, \"notfound\") &&\n\t\t!strings.Contains(options.Name, \"ignorenotfound\") {\n\t\treturn types.Volume{},\n\t\t\t// 
nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// check if the volume is not-found and\n\t// check if the not-found should be ignored\n\tif strings.Contains(options.Name, \"not-found\") &&\n\t\t!strings.Contains(options.Name, \"ignore-not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: options.Driver,\n\t\tLabels: options.Labels,\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: options.Name,\n\t\tOptions: options.DriverOpts,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,\n\tquerySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {\n\tlog := logger.GetLogger(ctx)\n\tvar queryAsyncNotSupported bool\n\tvar queryResult *cnstypes.CnsQueryResult\n\tvar err error\n\tif useQueryVolumeAsync {\n\t\t// AsyncQueryVolume feature switch is enabled.\n\t\tqueryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)\n\t\tif err != nil {\n\t\t\tif err.Error() == cnsvsphere.ErrNotSupported.Error() {\n\t\t\t\tlog.Warn(\"QueryVolumeAsync is not supported. Invoking QueryVolume API\")\n\t\t\t\tqueryAsyncNotSupported = true\n\t\t\t} else { // Return for any other failures.\n\t\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\t\"queryVolumeAsync failed for queryFilter: %v. 
Err=%+v\", queryFilter, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif !useQueryVolumeAsync || queryAsyncNotSupported {\n\t\tqueryResult, err = m.QueryVolume(ctx, queryFilter)\n\t\tif err != nil {\n\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\"queryVolume failed for queryFilter: %+v. Err=%+v\", queryFilter, err.Error())\n\t\t}\n\t}\n\treturn queryResult, nil\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = 
append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func NewVolume() *Volume {\n\tthis := Volume{}\n\treturn &this\n}", "func validateVolume(volume *provider.Volume) (err error) {\n\tif volume == nil {\n\t\terr = userError.GetUserError(\"InvalidVolumeID\", nil, nil)\n\t\treturn\n\t}\n\n\tif IsValidVolumeIDFormat(volume.VolumeID) {\n\t\treturn nil\n\t}\n\terr = userError.GetUserError(\"InvalidVolumeID\", 
nil, volume.VolumeID)\n\treturn\n}", "func (r ApiGetHyperflexVolumeListRequest) Select_(select_ string) ApiGetHyperflexVolumeListRequest {\n\tr.select_ = &select_\n\treturn r\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func DiscoveryRequestToV3(r *xdspb2.DiscoveryRequest) *discoverypb.DiscoveryRequest {\n\tb := proto.NewBuffer(nil)\n\tb.SetDeterministic(true)\n\terr := b.Marshal(r)\n\n\terr = err\n\tx := &discoverypb.DiscoveryRequest{}\n\tif err := proto.Unmarshal(b.Bytes(), x); err != nil {\n\t\tlog.Fatalln(\"Failed to parse DiscoveryRequest:\", err)\n\t}\n\tx.TypeUrl = edsURL\n\tlog.Printf(\"REQUEST TO V3 %v\", x)\n\n\treturn x\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func VolumeSpec(vspec *api.VolumeSpec) corev1.PersistentVolumeClaimSpec {\n\treturn corev1.PersistentVolumeClaimSpec{\n\t\tStorageClassName: vspec.StorageClass,\n\t\tAccessModes: vspec.AccessModes,\n\t\tResources: corev1.ResourceRequirements{\n\t\t\tRequests: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceStorage: vspec.SizeParsed,\n\t\t\t},\n\t\t},\n\t}\n}", "func NewRequest(t Type, body io.WriterTo) *Request {\n\treq := &Request{\n\t\tBody: &copyReader{WriterTo: body},\n\t\tProto: \"OFP/1.3\",\n\t\tProtoMajor: 1, ProtoMinor: 3,\n\t}\n\n\treq.Header.Version = uint8(req.ProtoMajor + req.ProtoMinor)\n\treq.Header.Type = 
t\n\n\treturn req\n}", "func Convert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in *v1alpha1.GetServiceRequest, out *internal.GetServiceRequest) error {\n\treturn autoConvert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in, out)\n}", "func ensureVolumeOptions(v string) (vo cloudprovider.VolumeOptions) {\n\terr := json.Unmarshal([]byte(v), &vo)\n\tif err != nil {\n\t\tfatalf(\"Invalid json options: %s\", v)\n\t}\n\treturn\n}", "func ValidateVolumeSpec(volspec *api.VolumeSpecUpdate) error {\n\t// case of checking possible halevel flag combination\n\tif volspec.GetHaLevel() > 0 {\n\t\tif volspec.GetSize() > 0 || volspec.GetShared() || volspec.GetSticky() {\n\t\t\t// Please have unique msgs for each case so it's easy for use to identity the\n\t\t\t// flags mismatch combination.\n\t\t\treturn fmt.Errorf(\"Invalid halevel flag combination. Size, Shared or Sticky flag not supported \" +\n\t\t\t\t\"with halevel flag\")\n\t\t}\n\t}\n\treturn nil\n}", "func (c *UDiskClient) NewDescribeUDiskPriceRequest() *DescribeUDiskPriceRequest {\n\treq := &DescribeUDiskPriceRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (s *VolumeListener) Create(inctx context.Context, in *protocol.VolumeCreateRequest) (_ *protocol.VolumeInspectResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot create volume\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tname := in.GetName()\n\tjob, xerr := PrepareJob(inctx, in.GetTenantId(), fmt.Sprintf(\"/volume/%s/create\", name))\n\tif xerr != nil {\n\t\treturn nil, 
xerr\n\t}\n\tdefer job.Close()\n\n\tspeed := in.GetSpeed()\n\tsize := in.GetSize()\n\tctx := job.Context()\n\n\thandler := handlers.NewVolumeHandler(job)\n\tvolumeInstance, xerr := handler.Create(name, int(size), volumespeed.Enum(speed))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\treturn volumeInstance.ToProtocol(ctx)\n}", "func (o *CreateIscsiLunSnapshotParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ContentLanguage != nil {\n\n\t\t// header param content-language\n\t\tif err := r.SetHeaderParam(\"content-language\", *o.ContentLanguage); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif o.RequestBody != nil {\n\t\tif err := r.SetBodyParam(o.RequestBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (e *ExternalInterface) DeleteVolume(ctx context.Context, req *systemsproto.VolumeRequest, pc *PluginContact, taskID string) {\n\tvar resp response.RPC\n\tvar targetURI = \"/redfish/v1/Systems/\" + req.SystemID + \"/Storage/\" + req.StorageInstance + \"/Volumes\" + req.VolumeID\n\n\ttaskInfo := &common.TaskUpdateInfo{Context: ctx, TaskID: taskID, TargetURI: targetURI,\n\t\tUpdateTask: pc.UpdateTask, TaskRequest: string(req.RequestBody)}\n\tvar volume smodel.Volume\n\terr := JSONUnmarshalFunc(req.RequestBody, &volume)\n\tif err != nil {\n\t\terrorMessage := \"Error while unmarshaling the create volume request: \" + err.Error()\n\t\tl.LogWithFields(ctx).Error(errorMessage)\n\t\tcommon.GeneralError(http.StatusBadRequest, response.MalformedJSON, errorMessage, []interface{}{}, taskInfo)\n\t\treturn\n\t}\n\n\t// 
spliting the uuid and system id\n\trequestData := strings.SplitN(req.SystemID, \".\", 2)\n\tif len(requestData) != 2 || requestData[1] == \"\" {\n\t\terrorMessage := \"error: SystemUUID not found\"\n\t\tcommon.GeneralError(http.StatusNotFound, response.ResourceNotFound, errorMessage, []interface{}{\"System\", req.SystemID}, taskInfo)\n\t\treturn\n\t}\n\tuuid := requestData[0]\n\ttarget, gerr := e.DB.GetTarget(uuid)\n\tif gerr != nil {\n\t\tcommon.GeneralError(http.StatusNotFound, response.ResourceNotFound, gerr.Error(), []interface{}{\"System\", uuid}, taskInfo)\n\t\treturn\n\t}\n\t// Validating the storage instance\n\tif StringTrimSpace(req.VolumeID) == \"\" {\n\t\terrorMessage := \"error: Volume id is not found\"\n\t\tcommon.GeneralError(http.StatusBadRequest, response.ResourceNotFound, errorMessage, []interface{}{\"Volume\", req.VolumeID}, taskInfo)\n\t\treturn\n\t}\n\n\t// Validating the request JSON properties for case sensitive\n\tinvalidProperties, err := RequestParamsCaseValidatorFunc(req.RequestBody, volume)\n\tif err != nil {\n\t\terrMsg := \"error while validating request parameters for volume creation: \" + err.Error()\n\t\tl.LogWithFields(ctx).Error(errMsg)\n\t\tcommon.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, taskInfo)\n\t\treturn\n\t} else if invalidProperties != \"\" {\n\t\terrorMessage := \"error: one or more properties given in the request body are not valid, ensure properties are listed in uppercamelcase \"\n\t\tl.LogWithFields(ctx).Error(errorMessage)\n\t\tcommon.GeneralError(http.StatusBadRequest, response.PropertyUnknown, errorMessage, []interface{}{invalidProperties}, taskInfo)\n\t\treturn\n\t}\n\n\tkey := fmt.Sprintf(\"/redfish/v1/Systems/%s/Storage/%s/Volumes/%s\", req.SystemID, req.StorageInstance, req.VolumeID)\n\t_, dbErr := e.DB.GetResource(ctx, \"Volumes\", key)\n\tif dbErr != nil {\n\t\tl.LogWithFields(ctx).Error(\"error getting volumes details : \" + dbErr.Error())\n\t\terrorMessage := 
dbErr.Error()\n\t\tif errors.DBKeyNotFound == dbErr.ErrNo() {\n\t\t\tvar getDeviceInfoRequest = scommon.ResourceInfoRequest{\n\t\t\t\tURL: key,\n\t\t\t\tUUID: uuid,\n\t\t\t\tSystemID: requestData[1],\n\t\t\t\tContactClient: e.ContactClient,\n\t\t\t\tDevicePassword: e.DevicePassword,\n\t\t\t\tGetPluginStatus: e.GetPluginStatus,\n\t\t\t}\n\t\t\tvar err error\n\t\t\tif _, err = scommon.GetResourceInfoFromDevice(ctx, getDeviceInfoRequest, true); err != nil {\n\t\t\t\tcommon.GeneralError(http.StatusNotFound, response.ResourceNotFound, errorMessage, []interface{}{\"Volumes\", key}, taskInfo)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t} else {\n\t\t\tcommon.GeneralError(http.StatusInternalServerError, response.InternalError, errorMessage, nil, taskInfo)\n\t\t\treturn\n\t\t}\n\t}\n\n\tdecryptedPasswordByte, err := e.DevicePassword(target.Password)\n\tif err != nil {\n\t\terrorMessage := \"error while trying to decrypt device password: \" + err.Error()\n\t\tcommon.GeneralError(http.StatusInternalServerError, response.InternalError, errorMessage, nil, taskInfo)\n\t\treturn\n\t}\n\ttarget.Password = decryptedPasswordByte\n\t// Get the Plugin info\n\tplugin, gerr := e.DB.GetPluginData(target.PluginID)\n\tif gerr != nil {\n\t\terrorMessage := \"error while trying to get plugin details\"\n\t\tcommon.GeneralError(http.StatusInternalServerError, response.InternalError, errorMessage, nil, taskInfo)\n\t\treturn\n\t}\n\n\tvar contactRequest scommon.PluginContactRequest\n\tcontactRequest.ContactClient = e.ContactClient\n\tcontactRequest.Plugin = plugin\n\tcontactRequest.GetPluginStatus = e.GetPluginStatus\n\tif StringsEqualFold(plugin.PreferredAuthType, \"XAuthToken\") {\n\t\tvar err error\n\t\tcontactRequest.HTTPMethodType = http.MethodPost\n\t\tcontactRequest.DeviceInfo = map[string]interface{}{\n\t\t\t\"UserName\": plugin.Username,\n\t\t\t\"Password\": string(plugin.Password),\n\t\t}\n\t\tcontactRequest.OID = \"/ODIM/v1/Sessions\"\n\t\t_, token, _, getResponse, err := 
scommon.ContactPlugin(ctx, contactRequest, \"error while creating session with the plugin: \")\n\n\t\tif err != nil {\n\t\t\tcommon.GeneralError(getResponse.StatusCode, getResponse.StatusMessage, err.Error(), nil, taskInfo)\n\t\t\treturn\n\t\t}\n\t\tcontactRequest.Token = token\n\t} else {\n\t\tcontactRequest.BasicAuth = map[string]string{\n\t\t\t\"UserName\": plugin.Username,\n\t\t\t\"Password\": string(plugin.Password),\n\t\t}\n\n\t}\n\n\tif string(req.RequestBody) == \"null\" {\n\t\ttarget.PostBody = []byte{}\n\t} else {\n\t\ttarget.PostBody = req.RequestBody\n\n\t}\n\tcontactRequest.HTTPMethodType = http.MethodDelete\n\tcontactRequest.DeviceInfo = target\n\tcontactRequest.OID = fmt.Sprintf(\"/ODIM/v1/Systems/%s/Storage/%s/Volumes/%s\", requestData[1], req.StorageInstance, req.VolumeID)\n\n\tbody, location, pluginIP, getResponse, err := scommon.ContactPlugin(ctx, contactRequest, \"error while deleting a volume: \")\n\tif err != nil {\n\t\tresp.StatusCode = getResponse.StatusCode\n\t\tjson.Unmarshal(body, &resp.Body)\n\t\terrMsg := \"error while deleting volume: \" + err.Error()\n\t\tl.LogWithFields(ctx).Error(errMsg)\n\t\ttask := fillTaskData(taskID, targetURI, string(req.RequestBody), resp,\n\t\t\tcommon.Completed, common.Warning, 100, http.MethodPost)\n\t\tpc.UpdateTask(ctx, task)\n\t\treturn\n\t}\n\tif getResponse.StatusCode == http.StatusAccepted {\n\t\terr = pc.SavePluginTaskInfo(ctx, pluginIP, plugin.IP, taskID, location)\n\t\tif err != nil {\n\t\t\tl.LogWithFields(ctx).Error(err)\n\t\t}\n\t\treturn\n\t}\n\n\t// handling the status ok response from plugin\n\tif getResponse.StatusCode == http.StatusOK && body != nil {\n\t\tresp.StatusCode = getResponse.StatusCode\n\t\tjson.Unmarshal(body, &resp.Body)\n\t\ttask := fillTaskData(taskID, targetURI, string(req.RequestBody), resp,\n\t\t\tcommon.Completed, common.OK, 100, http.MethodDelete)\n\t\tpc.UpdateTask(ctx, task)\n\t\treturn\n\t}\n\n\t// delete a volume in db\n\tif derr := e.DB.DeleteVolume(ctx, key); derr 
!= nil {\n\t\terrMsg := \"error while trying to delete volume: \" + derr.Error()\n\t\tl.LogWithFields(ctx).Error(errMsg)\n\t\tif errors.DBKeyNotFound == derr.ErrNo() {\n\t\t\tcommon.GeneralError(http.StatusNotFound, response.ResourceNotFound, errMsg, []interface{}{\"Volumes\", key}, taskInfo)\n\t\t\treturn\n\t\t}\n\t\tcommon.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, taskInfo)\n\t\treturn\n\t}\n\n\t// adding volume collection uri and deleted volume uri to the AddSystemResetInfo\n\t// for avoiding storing or retrieving them from DB before a BMC reset.\n\tcollectionKey := fmt.Sprintf(\"/redfish/v1/Systems/%s/Storage/%s/Volumes\", req.SystemID, req.StorageInstance)\n\te.DB.AddSystemResetInfo(ctx, key, \"On\")\n\te.DB.AddSystemResetInfo(ctx, collectionKey, \"On\")\n\n\tresp.StatusCode = http.StatusNoContent\n\tresp.StatusMessage = response.Success\n\ttask := fillTaskData(taskID, targetURI, string(req.RequestBody), resp,\n\t\tcommon.Completed, common.OK, 100, http.MethodDelete)\n\tpc.UpdateTask(ctx, task)\n}" ]
[ "0.7557715", "0.70121026", "0.6958181", "0.69265646", "0.67332816", "0.651643", "0.64908344", "0.5972802", "0.58260036", "0.57331175", "0.57248485", "0.5501726", "0.5450384", "0.5362462", "0.5328478", "0.52477133", "0.52475905", "0.5200867", "0.5192905", "0.51742464", "0.5105107", "0.50752914", "0.5073951", "0.4982234", "0.49695638", "0.49497378", "0.49200442", "0.49167085", "0.4851285", "0.48487407", "0.4847571", "0.4842808", "0.4842252", "0.48208255", "0.48180306", "0.48162872", "0.48076096", "0.47952616", "0.47874364", "0.4784012", "0.47541967", "0.4730107", "0.4730107", "0.47039863", "0.46798146", "0.46521628", "0.464046", "0.46235353", "0.46188942", "0.45949984", "0.45806324", "0.45745033", "0.45680955", "0.4566307", "0.44998422", "0.44995272", "0.44772464", "0.44438702", "0.44354895", "0.4418633", "0.4417958", "0.43992966", "0.43968493", "0.43692037", "0.43331784", "0.42747927", "0.42690295", "0.4267934", "0.42575505", "0.4252701", "0.42509395", "0.4239338", "0.4232932", "0.42237124", "0.42204237", "0.41960752", "0.41884965", "0.4185727", "0.4183245", "0.41800675", "0.41668004", "0.41651928", "0.416086", "0.41596013", "0.41507265", "0.4143524", "0.41351008", "0.4127912", "0.41245347", "0.41140687", "0.41057986", "0.41011417", "0.40896106", "0.40869755", "0.4085266", "0.40766948", "0.4075076", "0.40693855", "0.4059296", "0.40551433" ]
0.8823009
0
Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest is an autogenerated conversion function.
func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error { return autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func 
Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func 
Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (o 
*ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func NewVolumeFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn defaultVolumeQuietFormat\n\t\t}\n\t\treturn defaultVolumeTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `name: {{.Name}}`\n\t\t}\n\t\treturn `name: {{.Name}}\\ndriver: 
{{.Driver}}\\n`\n\t}\n\treturn Format(source)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out 
*impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func 
Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func VolumeType(volumeType string) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.VolumeType = volumeType\n\t\treturn nil\n\t}\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in 
*internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer 
concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", 
s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func Convert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in *internal.StopServiceRequest, out *v1alpha1.StopServiceRequest) error {\n\treturn autoConvert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in, out)\n}", "func NewPatchStorageV1alpha1VolumeAttachment(ctx *middleware.Context, handler PatchStorageV1alpha1VolumeAttachmentHandler) *PatchStorageV1alpha1VolumeAttachment {\n\treturn &PatchStorageV1alpha1VolumeAttachment{Context: ctx, Handler: handler}\n}", "func Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1alpha1.VolumeAttachmentSpec, s conversion.Scope) error {\n\treturn autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in, out, s)\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = 
append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v1alpha3_FileSpec_To_v1alpha1_FileSpec(in *v1alpha3.FileSpec, out *FileSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha3_FileSpec_To_v1alpha1_FileSpec(in, out, s)\n}", 
"func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. 
Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, 
out)\n}", "func (c *UFSClient) NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (r *RequestAPI) CreateRequestV1(ctx context.Context, req *desc.CreateRequestV1Request) (*desc.CreateRequestV1Response, error) {\n\tlog.Printf(\"Got create request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"CreateRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.CreateEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewReq := models.NewRequest(\n\t\t0,\n\t\treq.UserId,\n\t\treq.Type,\n\t\treq.Text,\n\t)\n\tnewId, err := r.repo.Add(ctx, newReq)\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tStr(\"endpoint\", \"CreateRequestV1\").\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to create request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, newId, producer.CreateEvent, err))\n\tr.metrics.IncCreate(1, \"CreateRequestV1\")\n\treturn &desc.CreateRequestV1Response{\n\t\tRequestId: newId,\n\t}, nil\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, 
maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func Convert_v1alpha1_FileSpec_To_v1alpha3_FileSpec(in *FileSpec, out *v1alpha3.FileSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FileSpec_To_v1alpha3_FileSpec(in, out, s)\n}", "func (r *RequestAPI) ListRequestV1(ctx context.Context, req *desc.ListRequestsV1Request) (*desc.ListRequestsV1Response, error) {\n\tlog.Printf(\"Got list request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"ListRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\trequests []models.Request\n\t\terr error\n\t)\n\n\tif req.SearchQuery != \"\" { // ideally would move search to a separate endpoint, so it's easier to extend\n\t\trequests, err = r.searcher.Search(ctx, req.SearchQuery, req.Limit, req.Offset)\n\t} else {\n\t\trequests, err = r.repo.List(ctx, req.Limit, req.Offset)\n\t}\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tStr(\"endpoint\", \"ListRequestV1\").\n\t\t\tUint64(\"limit\", req.Limit).\n\t\t\tUint64(\"offset\", req.Offset).\n\t\t\tMsgf(\"Failed to list requests\")\n\t\tr.producer.Send(producer.NewEvent(ctx, 0, producer.ReadEvent, err))\n\t\treturn nil, err\n\t}\n\n\tret := make([]*desc.Request, 0, len(requests))\n\teventMsgs := make([]producer.EventMsg, 0, len(requests))\n\n\tfor _, req := range requests {\n\t\tret = append(ret, &desc.Request{\n\t\t\tId: req.Id,\n\t\t\tUserId: req.UserId,\n\t\t\tType: req.Type,\n\t\t\tText: req.Text,\n\t\t})\n\t\teventMsgs = append(eventMsgs, producer.NewEvent(ctx, req.Id, producer.ReadEvent, nil))\n\t\tr.producer.Send(eventMsgs...)\n\n\t}\n\tr.metrics.IncList(1, \"ListRequestV1\")\n\treturn &desc.ListRequestsV1Response{\n\t\tRequests: 
ret,\n\t}, nil\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (r ApiGetHyperflexVolumeListRequest) Filter(filter string) ApiGetHyperflexVolumeListRequest {\n\tr.filter = &filter\n\treturn r\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, 
\"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (r *RequestAPI) UpdateRequestV1(ctx context.Context, req *desc.UpdateRequestV1Request) (*desc.UpdateRequestV1Response, error) {\n\tlog.Printf(\"Got update request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"UpdateRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.UpdateEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := r.repo.Update(\n\t\tctx, models.NewRequest(req.RequestId, req.UserId, req.Type, req.Text),\n\t)\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, \"request does not exist\")\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tStr(\"endpoint\", \"UpdateRequestV1\").\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to update request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.UpdateEvent, err))\n\tr.metrics.IncUpdate(1, \"UpdateRequestV1\")\n\treturn &desc.UpdateRequestV1Response{}, nil\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in 
*v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func Convert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in *impl.CreateSymlinkRequest, out *v2alpha1.CreateSymlinkRequest) error {\n\treturn autoConvert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in, out)\n}", "func (c *restClient) CreateVolume(ctx context.Context, req *netapppb.CreateVolumeRequest, opts ...gax.CallOption) (*CreateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"volumeId\", fmt.Sprintf(\"%v\", req.GetVolumeId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), 
bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (client *KeyVaultClient) encryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientEncryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/encrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func Convert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in 
*impl.PathExistsRequest, out *v2alpha1.PathExistsRequest) error {\n\treturn autoConvert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in, out)\n}", "func (client *KeyVaultClient) encryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientEncryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/encrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateModifyHostAvailabilityRequest() (request *ModifyHostAvailabilityRequest) {\n\trequest = &ModifyHostAvailabilityRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"ModifyHostAvailability\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func Convert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in *FakeRequest, out *v1alpha2.FakeRequest, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in, out, s)\n}", "func (client *ApplicationTypeVersionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, options 
*ApplicationTypeVersionsClientListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func Convert_application_ApplicationSpec_To_v1alpha1_ApplicationSpec(in *application.ApplicationSpec, out *ApplicationSpec, s conversion.Scope) error {\n\treturn autoConvert_application_ApplicationSpec_To_v1alpha1_ApplicationSpec(in, out, s)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func (c 
*UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func NewGetL2OrderBookRequest(server string, symbol SymbolParam) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParamWithLocation(\"simple\", false, \"symbol\", runtime.ParamLocationPath, symbol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/l2/%s\", pathParam0)\n\tif operationPath[0] == '/' {\n\t\toperationPath = operationPath[1:]\n\t}\n\toperationURL := url.URL{\n\t\tPath: operationPath,\n\t}\n\n\tqueryURL := serverURL.ResolveReference(&operationURL)\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (client *KeyVaultClient) importKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters, options *KeyVaultClientImportKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := 
runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *KeyVaultClient) importKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters, options *KeyVaultClientImportKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func newFSFormatV1() (format *formatConfigV1) {\n\treturn &formatConfigV1{\n\t\tVersion: \"1\",\n\t\tFormat: \"fs\",\n\t\tFS: &fsFormat{\n\t\t\tVersion: \"1\",\n\t\t},\n\t}\n}", "func Convert_v1alpha1_SourceSpec_To_v1alpha3_SourceSpec(in *SourceSpec, out *v1alpha3.SourceSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_SourceSpec_To_v1alpha3_SourceSpec(in, out, s)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func From(vol *apis.ZFSVolume) *ZFSVolume {\n\treturn &ZFSVolume{\n\t\tObject: vol,\n\t}\n}", "func 
ValidateRequest(fromCurrency *models.CurrencyExchangeRequest) []string {\n\tvar validations []string\n\n\tif fromCurrency.FromCurrency == \"\" {\n\t\tvalidations = append(validations, EmptyFromCurrency)\n\t}\n\tif fromCurrency.ToCurrency == \"\" {\n\t\tvalidations = append(validations, EmptyToCurrency)\n\t}\n\treturn validations\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *UFSClient) NewDescribeUFSVolumePriceRequest() *DescribeUFSVolumePriceRequest {\n\treq := &DescribeUFSVolumePriceRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *CapacitiesClient) checkNameAvailabilityCreateRequest(ctx context.Context, location string, capacityParameters CheckCapacityNameAvailabilityParameters, options 
*CapacitiesClientCheckNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}/checkNameAvailability\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, capacityParameters)\n}", "func (r ApiGetHyperflexVolumeListRequest) Select_(select_ string) ApiGetHyperflexVolumeListRequest {\n\tr.select_ = &select_\n\treturn r\n}", "func (m *E2SmRcPreIndicationHeaderFormat1) Validate() error {\n\treturn m.validate(false)\n}", "func NewRequest(t Type, body io.WriterTo) *Request {\n\treq := &Request{\n\t\tBody: &copyReader{WriterTo: body},\n\t\tProto: \"OFP/1.3\",\n\t\tProtoMajor: 1, ProtoMinor: 3,\n\t}\n\n\treq.Header.Version = uint8(req.ProtoMajor + req.ProtoMinor)\n\treq.Header.Type = t\n\n\treturn req\n}", "func (client *AvailabilityGroupListenersClient) listByGroupCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, options *AvailabilityGroupListenersClientListByGroupOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func validateVanillaControllerExpandVolumeRequest(ctx context.Context, req *csi.ControllerExpandVolumeRequest) error {\n\treturn common.ValidateControllerExpandVolumeRequest(ctx, req)\n}", "func Convert_v1alpha3_SourceSpec_To_v1alpha1_SourceSpec(in *v1alpha3.SourceSpec, out *SourceSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha3_SourceSpec_To_v1alpha1_SourceSpec(in, out, s)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (m 
*UpdateAzureEncryptionParametersV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAzureResourceEncryptionParameters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
[ "0.73874754", "0.71819955", "0.71305686", "0.7091473", "0.7023427", "0.69751865", "0.59313124", "0.5856581", "0.58321905", "0.57898825", "0.5695062", "0.56714684", "0.5650184", "0.5641485", "0.56066394", "0.5495693", "0.5485299", "0.5477154", "0.54045326", "0.5344156", "0.5236967", "0.5220978", "0.5217857", "0.50943863", "0.49572292", "0.49398857", "0.493271", "0.49018028", "0.4891894", "0.4868181", "0.48342976", "0.47638905", "0.47391906", "0.47371072", "0.4733425", "0.47079325", "0.4696234", "0.46795934", "0.46657798", "0.4640356", "0.46323988", "0.46288976", "0.45956275", "0.45646498", "0.45632562", "0.45593643", "0.4522733", "0.44916648", "0.4486912", "0.4486338", "0.44733295", "0.44729638", "0.44706696", "0.44495392", "0.4439852", "0.4424226", "0.4423935", "0.44180435", "0.44108677", "0.43831727", "0.43633434", "0.43594098", "0.43486854", "0.43470305", "0.4344506", "0.4344502", "0.4339977", "0.4310332", "0.42927298", "0.42841375", "0.42748094", "0.42658365", "0.4262829", "0.42625508", "0.42590034", "0.42554456", "0.42237398", "0.420756", "0.42027056", "0.4200388", "0.4200388", "0.41871628", "0.41819388", "0.41785035", "0.41712603", "0.4170371", "0.41600692", "0.4157832", "0.41519955", "0.41347045", "0.4107178", "0.40999764", "0.40801558", "0.40792003", "0.40749487", "0.40733838", "0.40629292", "0.40620205", "0.40529823", "0.40527555" ]
0.8999532
0
Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse is an autogenerated conversion function.
func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error { return autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func 
Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func ResponseFormat(h http.Header) Format {\n\tct := h.Get(hdrContentType)\n\n\tmediatype, params, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn FmtUnknown\n\t}\n\n\tconst textType = \"text/plain\"\n\n\tswitch mediatype {\n\tcase ProtoType:\n\t\tif p, ok := 
params[\"proto\"]; ok && p != ProtoProtocol {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\tif e, ok := params[\"encoding\"]; ok && e != \"delimited\" {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtProtoDelim\n\n\tcase textType:\n\t\tif v, ok := params[\"version\"]; ok && v != TextVersion {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtText\n\t}\n\n\treturn FmtUnknown\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ToPbVolume(in api.Volume) *pb.Volume {\n\treturn &pb.Volume{\n\t\tID: in.ID,\n\t\tName: in.Name,\n\t\tSize: int32(in.Size),\n\t\tSpeed: pb.VolumeSpeed(in.Speed),\n\t}\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func 
Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func responseFormat(acceptHeader string) (Format, *protocolError) {\n\tif acceptHeader == \"\" {\n\t\treturn FormatBinary, nil\n\t}\n\n\tparsed, err := parseAccept(acceptHeader)\n\tif err != nil {\n\t\treturn FormatBinary, errorf(http.StatusBadRequest, \"Accept header: %s\", err)\n\t}\n\tformats := make(acceptFormatSlice, 0, len(parsed))\n\tfor _, at := range parsed {\n\t\tf, err := FormatFromMediaType(at.MediaType, at.MediaTypeParams)\n\t\tif err != nil {\n\t\t\t// Ignore invalid format. Check further.\n\t\t\tcontinue\n\t\t}\n\t\tformats = append(formats, acceptFormat{f, at.QualityFactor})\n\t}\n\tif len(formats) == 0 {\n\t\treturn FormatBinary, errorf(\n\t\t\thttp.StatusNotAcceptable,\n\t\t\t\"Accept header: specified media types are not not supported. 
Supported types: %q, %q, %q, %q.\",\n\t\t\tFormatBinary.MediaType(),\n\t\t\tFormatJSONPB.MediaType(),\n\t\t\tFormatText.MediaType(),\n\t\t\tContentTypeJSON,\n\t\t)\n\t}\n\tsort.Sort(formats) // order by quality factor and format preference.\n\treturn formats[0].Format, nil\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, 
out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o FioSpecOutput) Volume() FioSpecVolumeOutput {\n\treturn o.ApplyT(func(v FioSpec) FioSpecVolume { return v.Volume }).(FioSpecVolumeOutput)\n}", "func volumeToAPIType(v volume.Volume) *types.Volume {\n\treturn &types.Volume{\n\t\tName: v.Name(),\n\t\tDriver: v.DriverName(),\n\t\tMountpoint: v.Path(),\n\t}\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 {\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error 
{\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := 
io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty 
responses\n\n\trw.WriteHeader(404)\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *GetVMVolumeBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func (o *PostAPI24VolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPostApi24VolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostApi24VolumesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (v *VolumeService) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(options.Name) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound and\n\t// check if the notfound should be ignored\n\tif strings.Contains(options.Name, \"notfound\") &&\n\t\t!strings.Contains(options.Name, \"ignorenotfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// check if the volume is not-found and\n\t// check if the not-found should be ignored\n\tif strings.Contains(options.Name, \"not-found\") &&\n\t\t!strings.Contains(options.Name, 
\"ignore-not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: options.Driver,\n\t\tLabels: options.Labels,\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: options.Name,\n\t\tOptions: options.DriverOpts,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, 
o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o IopingSpecOutput) Volume() IopingSpecVolumeOutput {\n\treturn o.ApplyT(func(v IopingSpec) IopingSpecVolume { return v.Volume }).(IopingSpecVolumeOutput)\n}", "func NewVolumeFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn defaultVolumeQuietFormat\n\t\t}\n\t\treturn defaultVolumeTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `name: {{.Name}}`\n\t\t}\n\t\treturn `name: {{.Name}}\\ndriver: {{.Driver}}\\n`\n\t}\n\treturn Format(source)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func (o *VolumeCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewVolumeCreateCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewVolumeCreateInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) 
goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (o *GetStoragePureVolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetStoragePureVolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetStoragePureVolumesDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", 
\"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (o *PcloudPvminstancesVolumesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 202:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := 
NewPcloudPvminstancesVolumesDeleteForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func (c *restClient) CreateVolume(ctx context.Context, req *netapppb.CreateVolumeRequest, opts ...gax.CallOption) (*CreateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", 
\"json;enum-encoding=int\")\n\tparams.Add(\"volumeId\", fmt.Sprintf(\"%v\", req.GetVolumeId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer 
func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (p *volumeGetFormatter) JsonFormat() (string, error) {\n\tvols, err := p.volumes.GetVolumes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn util.ToJson(vols)\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {\n\tstatus := &DriverStatus{\n\t\tVolume: v1.PersistentVolume{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\tLabels: map[string]string{},\n\t\t\t}}}\n\tif err := json.Unmarshal(output, status); err != nil {\n\t\tglog.Errorf(\"Failed to unmarshal output for command: %s, output: %q, error: %s\", cmd, string(output), err.Error())\n\t\treturn nil, err\n\t} else if status.Status == StatusNotSupported 
{\n\t\tglog.V(5).Infof(\"%s command is not supported by the driver\", cmd)\n\t\treturn nil, errors.New(status.Status)\n\t} else if status.Status != StatusSuccess {\n\t\terrMsg := fmt.Sprintf(\"%s command failed, status: %s, reason: %s\", cmd, status.Status, status.Message)\n\t\tglog.Errorf(errMsg)\n\t\treturn nil, fmt.Errorf(\"%s\", errMsg)\n\t}\n\n\treturn status, nil\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func FormatResponse(o interface{}) string {\n\tout, err := json.MarshalIndent(o, \"\", \"\\t\")\n\tMust(err, `Command failed because an error occurred while prettifying output: %s`, err)\n\treturn string(out)\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (p *VolumesClientListPager) PageResponse() VolumesClientListResponse {\n\treturn p.current\n}", "func UnmarshalVersionResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(VersionResponse)\n\terr = core.UnmarshalPrimitive(m, \"builddate\", &obj.Builddate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"buildno\", &obj.Buildno)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"commitsha\", &obj.Commitsha)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"helm_provider_version\", &obj.HelmProviderVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"helm_version\", &obj.HelmVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"supported_template_types\", &obj.SupportedTemplateTypes)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_provider_version\", &obj.TerraformProviderVersion)\n\tif err != nil 
{\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_version\", &obj.TerraformVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func Convert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in *v2alpha1.PathExistsResponse, out *impl.PathExistsResponse) error {\n\treturn autoConvert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in, out)\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (h *ApiHandler) handleListVolumes(c echo.Context) error {\n\tbuilder := h.Builder(c)\n\n\tvar kalmPVCList v1.PersistentVolumeClaimList\n\tif err := builder.List(&kalmPVCList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar kalmPVList v1.PersistentVolumeList\n\tif err := builder.List(&kalmPVList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkalmPVMap := make(map[string]v1.PersistentVolume)\n\tfor _, kalmPV := range kalmPVList.Items {\n\t\tkalmPVMap[kalmPV.Name] = kalmPV\n\t}\n\n\trespVolumes := []resources.Volume{}\n\tfor _, kalmPVC := range kalmPVCList.Items {\n\t\trespVolume, err := builder.BuildVolumeResponse(kalmPVC, kalmPVMap[kalmPVC.Spec.VolumeName])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespVolumes = append(respVolumes, *respVolume)\n\t}\n\n\treturn c.JSON(200, respVolumes)\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in 
*v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func (r *Response) AsV2() *CheckResponseV2 {\n\tconvertHeaders := func(h http.Header) []*envoy_api_v2_core.HeaderValueOption {\n\t\tvar headers []*envoy_api_v2_core.HeaderValueOption\n\n\t\tfor k, v := range h {\n\t\t\theaders = append(headers,\n\t\t\t\t&envoy_api_v2_core.HeaderValueOption{\n\t\t\t\t\tHeader: &envoy_api_v2_core.HeaderValue{Key: k, Value: v[0]},\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\treturn headers\n\t}\n\n\tif r.Allow {\n\t\treturn &CheckResponseV2{\n\t\t\tStatus: &status.Status{Code: int32(codes.OK)},\n\t\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{\n\t\t\t\tOkResponse: &envoy_service_auth_v2.OkHttpResponse{\n\t\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &CheckResponseV2{\n\t\tStatus: &status.Status{Code: int32(codes.PermissionDenied)},\n\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_DeniedResponse{\n\t\t\tDeniedResponse: &envoy_service_auth_v2.DeniedHttpResponse{\n\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\tStatus: &envoy_type.HttpStatus{\n\t\t\t\t\tCode: envoy_type.StatusCode(r.Response.StatusCode),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (t *ApiVersionsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// ApiKeys\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.ApiKeys = make([]ApiVersionsResponseKey18, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item ApiVersionsResponseKey18\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.ApiKeys[i] = item\n\t\t}\n\t}\n\tif version >= 1 {\n\t\tt.ThrottleTimeMs, err = d.Int32()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func (p 
*Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (c *restClient) RevertVolume(ctx context.Context, req *netapppb.RevertVolumeRequest, opts ...gax.CallOption) (*RevertVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tjsonReq, err := m.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v:revert\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn 
nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &RevertVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (client *GalleryImageVersionsClient) updateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (o *CreateCoreV1PersistentVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewCreateCoreV1PersistentVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 201:\n\t\tresult := NewCreateCoreV1PersistentVolumeCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewCreateCoreV1PersistentVolumeAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewCreateCoreV1PersistentVolumeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func 
NewVolumeService(api api.ClientAPI) VolumeAPI {\n\treturn &VolumeService{\n\t\tprovider: providers.FromClient(api),\n\t}\n}", "func (*DemoteVolumeResponse) Descriptor() ([]byte, []int) {\n\treturn file_replication_proto_rawDescGZIP(), []int{7}\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. 
We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func DiscoveryResponseToV2(r *discoverypb.DiscoveryResponse) *xdspb2.DiscoveryResponse {\n\tb := proto.NewBuffer(nil)\n\tb.SetDeterministic(true)\n\terr := b.Marshal(r)\n\n\terr = err\n\tx := &xdspb2.DiscoveryResponse{}\n\tif err := proto.Unmarshal(b.Bytes(), x); err != nil {\n\t\tlog.Fatalln(\"Failed to parse DiscoveryResponse:\", err)\n\t}\n\n\tx.TypeUrl = v2edsURL\n\tfor i := range x.GetResources() {\n\t\tx.Resources[i].TypeUrl = v2edsURL\n\t}\n\tlog.Printf(\"RESPONSE TO V2 %v\", x)\n\n\treturn x\n}", "func (*VodGetHlsDecryptionKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{3}\n}", "func (cfr CreateFilesystemResponse) Response() *http.Response {\n\treturn cfr.rawResponse\n}", "func ParseDeleteaspecificSoundFileResponse(rsp *http.Response) (*DeleteaspecificSoundFileResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteaspecificSoundFileResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func CreateUpdateEndpointGroupResponse() (response *UpdateEndpointGroupResponse) {\n\tresponse = &UpdateEndpointGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}" ]
[ "0.78839433", "0.723757", "0.7031197", "0.69320405", "0.68155026", "0.6682518", "0.6516242", "0.6373819", "0.6219293", "0.6138538", "0.60458726", "0.5972684", "0.57703555", "0.5738681", "0.5348114", "0.5318249", "0.5293405", "0.52880096", "0.52173066", "0.52129215", "0.5194722", "0.51827323", "0.50979704", "0.5096284", "0.50842553", "0.5073773", "0.5071728", "0.50662774", "0.50657874", "0.50573605", "0.5040987", "0.5040648", "0.50266606", "0.5007349", "0.49773565", "0.49725175", "0.4925964", "0.49228448", "0.49131134", "0.49006796", "0.48838967", "0.48703936", "0.48434967", "0.48287404", "0.48210025", "0.48166937", "0.4812333", "0.47902837", "0.47822616", "0.47707972", "0.4766189", "0.47440687", "0.47374916", "0.47319278", "0.47281387", "0.47278935", "0.47001058", "0.4698525", "0.4693746", "0.46908903", "0.4672827", "0.46674925", "0.46548516", "0.46453637", "0.46382728", "0.4627639", "0.4624332", "0.462341", "0.46182346", "0.4614957", "0.4612686", "0.4602549", "0.459575", "0.45851424", "0.45479307", "0.45105436", "0.4498462", "0.4481516", "0.4460435", "0.44492996", "0.44492322", "0.4448459", "0.4446704", "0.44309294", "0.4428257", "0.44174266", "0.44109225", "0.4403657", "0.44029248", "0.4395098", "0.4385696", "0.43851", "0.43761668", "0.4374303", "0.43731293", "0.436341", "0.43472654", "0.4346891", "0.43428868", "0.43378508" ]
0.8726518
0
Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse is an autogenerated conversion function.
func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error { return autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, 
out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func 
Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func ResponseFormat(h http.Header) Format {\n\tct := h.Get(hdrContentType)\n\n\tmediatype, params, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn FmtUnknown\n\t}\n\n\tconst textType = \"text/plain\"\n\n\tswitch mediatype {\n\tcase ProtoType:\n\t\tif p, ok := params[\"proto\"]; ok && p != ProtoProtocol {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\tif e, ok := params[\"encoding\"]; ok && e != \"delimited\" {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtProtoDelim\n\n\tcase textType:\n\t\tif v, ok := params[\"version\"]; ok && v != TextVersion {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtText\n\t}\n\n\treturn FmtUnknown\n}", "func responseFormat(acceptHeader string) (Format, *protocolError) {\n\tif acceptHeader == \"\" {\n\t\treturn FormatBinary, nil\n\t}\n\n\tparsed, err := parseAccept(acceptHeader)\n\tif err != nil {\n\t\treturn FormatBinary, errorf(http.StatusBadRequest, \"Accept header: %s\", err)\n\t}\n\tformats := make(acceptFormatSlice, 0, len(parsed))\n\tfor _, at := range parsed {\n\t\tf, err := FormatFromMediaType(at.MediaType, at.MediaTypeParams)\n\t\tif err != nil {\n\t\t\t// Ignore invalid format. 
Check further.\n\t\t\tcontinue\n\t\t}\n\t\tformats = append(formats, acceptFormat{f, at.QualityFactor})\n\t}\n\tif len(formats) == 0 {\n\t\treturn FormatBinary, errorf(\n\t\t\thttp.StatusNotAcceptable,\n\t\t\t\"Accept header: specified media types are not not supported. Supported types: %q, %q, %q, %q.\",\n\t\t\tFormatBinary.MediaType(),\n\t\t\tFormatJSONPB.MediaType(),\n\t\t\tFormatText.MediaType(),\n\t\t\tContentTypeJSON,\n\t\t)\n\t}\n\tsort.Sort(formats) // order by quality factor and format preference.\n\treturn formats[0].Format, nil\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn 
autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the 
recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (r *Response) AsV2() *CheckResponseV2 {\n\tconvertHeaders := func(h http.Header) []*envoy_api_v2_core.HeaderValueOption {\n\t\tvar headers []*envoy_api_v2_core.HeaderValueOption\n\n\t\tfor k, v := range h {\n\t\t\theaders = append(headers,\n\t\t\t\t&envoy_api_v2_core.HeaderValueOption{\n\t\t\t\t\tHeader: &envoy_api_v2_core.HeaderValue{Key: k, Value: v[0]},\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\treturn headers\n\t}\n\n\tif r.Allow {\n\t\treturn &CheckResponseV2{\n\t\t\tStatus: &status.Status{Code: int32(codes.OK)},\n\t\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{\n\t\t\t\tOkResponse: &envoy_service_auth_v2.OkHttpResponse{\n\t\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &CheckResponseV2{\n\t\tStatus: &status.Status{Code: int32(codes.PermissionDenied)},\n\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_DeniedResponse{\n\t\t\tDeniedResponse: &envoy_service_auth_v2.DeniedHttpResponse{\n\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\tStatus: &envoy_type.HttpStatus{\n\t\t\t\t\tCode: envoy_type.StatusCode(r.Response.StatusCode),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (o *GetVMVolumeBadRequest) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 {\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in *impl.CreateSymlinkResponse, out *v2alpha1.CreateSymlinkResponse) error {\n\treturn autoConvert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in, out)\n}", "func (v *VolumeService) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(options.Name) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound and\n\t// check if the notfound should be ignored\n\tif strings.Contains(options.Name, \"notfound\") &&\n\t\t!strings.Contains(options.Name, \"ignorenotfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// check if the volume is not-found and\n\t// check if the not-found should be ignored\n\tif strings.Contains(options.Name, \"not-found\") &&\n\t\t!strings.Contains(options.Name, \"ignore-not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: options.Driver,\n\t\tLabels: options.Labels,\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: options.Name,\n\t\tOptions: options.DriverOpts,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func (c *restClient) CreateVolume(ctx context.Context, req 
*netapppb.CreateVolumeRequest, opts ...gax.CallOption) (*CreateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"volumeId\", fmt.Sprintf(\"%v\", req.GetVolumeId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateVolumeOperation{\n\t\tlro: 
longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (o *PostAPI24VolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPostApi24VolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostApi24VolumesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (o *VolumeCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewVolumeCreateCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewVolumeCreateInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func NewVolumeFormat(source string, quiet bool) Format {\n\tswitch source 
{\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn defaultVolumeQuietFormat\n\t\t}\n\t\treturn defaultVolumeTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `name: {{.Name}}`\n\t\t}\n\t\treturn `name: {{.Name}}\\ndriver: {{.Driver}}\\n`\n\t}\n\treturn Format(source)\n}", "func (p *volumeGetFormatter) JsonFormat() (string, error) {\n\tvols, err := p.volumes.GetVolumes()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn util.ToJson(vols)\n}", "func FormatResponse(o interface{}) string {\n\tout, err := json.MarshalIndent(o, \"\", \"\\t\")\n\tMust(err, `Command failed because an error occurred while prettifying output: %s`, err)\n\treturn string(out)\n}", "func (t ListOffsetPartitionResponse2) Encode(e *Encoder, version int16) {\n\te.PutInt32(t.PartitionIndex) // PartitionIndex\n\te.PutInt16(t.ErrorCode) // ErrorCode\n\tif version >= 0 && version <= 0 {\n\t\te.PutInt64Array(t.OldStyleOffsets) // OldStyleOffsets\n\t}\n\tif version >= 1 {\n\t\te.PutInt64(t.Timestamp) // Timestamp\n\t}\n\tif version >= 1 {\n\t\te.PutInt64(t.Offset) // Offset\n\t}\n\tif version >= 4 {\n\t\te.PutInt32(t.LeaderEpoch) // LeaderEpoch\n\t}\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, 
runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := 
&longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in *impl.PathExistsResponse, out *v2alpha1.PathExistsResponse) error {\n\treturn autoConvert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in, out)\n}", "func DiscoveryResponseToV2(r *discoverypb.DiscoveryResponse) *xdspb2.DiscoveryResponse {\n\tb := proto.NewBuffer(nil)\n\tb.SetDeterministic(true)\n\terr := b.Marshal(r)\n\n\terr = err\n\tx := &xdspb2.DiscoveryResponse{}\n\tif err := proto.Unmarshal(b.Bytes(), x); err != nil {\n\t\tlog.Fatalln(\"Failed to parse DiscoveryResponse:\", err)\n\t}\n\n\tx.TypeUrl = v2edsURL\n\tfor i := range x.GetResources() {\n\t\tx.Resources[i].TypeUrl = v2edsURL\n\t}\n\tlog.Printf(\"RESPONSE TO V2 %v\", x)\n\n\treturn x\n}", "func toAdmissionResponse(allowed bool, err error) *v1beta1.AdmissionResponse {\n\tresponse := &v1beta1.AdmissionResponse{\n\t\tAllowed: allowed,\n\t}\n\tif err != nil {\n\t\tresponse.Result = 
&metav1.Status{\n\t\t\tMessage: err.Error(),\n\t\t}\n\t}\n\treturn response\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func CreateListAvailableFileSystemTypesResponse() (response *ListAvailableFileSystemTypesResponse) {\n\tresponse = &ListAvailableFileSystemTypesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in *impl.IsSymlinkResponse, out *v2alpha1.IsSymlinkResponse) error {\n\treturn autoConvert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in, out)\n}", "func ParseCreateanewSoundFileResponse(rsp *http.Response) (*CreateanewSoundFileResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewSoundFileResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty22\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty 
responses\n\n\trw.WriteHeader(404)\n}", "func Convert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in *internal.StopServiceResponse, out *v1alpha1.StopServiceResponse) error {\n\treturn autoConvert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in, out)\n}", "func (o FioSpecOutput) Volume() FioSpecVolumeOutput {\n\treturn o.ApplyT(func(v FioSpec) FioSpecVolume { return v.Volume }).(FioSpecVolumeOutput)\n}", "func ToPbVolume(in api.Volume) *pb.Volume {\n\treturn &pb.Volume{\n\t\tID: in.ID,\n\t\tName: in.Name,\n\t\tSize: int32(in.Size),\n\t\tSpeed: pb.VolumeSpeed(in.Speed),\n\t}\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := 
url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (m MultiVersionResponse) DecodeResponse1() (resp MessagesResponse, err error) {\n\treturn resp, rlp.DecodeBytes(m.Response, &resp)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn 
autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (o *GetStoragePureVolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetStoragePureVolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetStoragePureVolumesDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func volumeToAPIType(v volume.Volume) *types.Volume {\n\treturn &types.Volume{\n\t\tName: v.Name(),\n\t\tDriver: v.DriverName(),\n\t\tMountpoint: v.Path(),\n\t}\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func CreateUpdateEndpointGroupResponse() (response *UpdateEndpointGroupResponse) {\n\tresponse = &UpdateEndpointGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func FormatRenameResponse(createResp *CreateResponse) RenameResponse {\n\tnewResp := RenameResponse{}\n\tnewResp.ContentLength = createResp.ContentLength\n\tnewResp.Continuation = createResp.Continuation\n\tnewResp.Date = createResp.Date\n\tnewResp.ETag = createResp.ETag\n\tnewResp.EncryptionKeySHA256 = 
createResp.EncryptionKeySHA256\n\tnewResp.IsServerEncrypted = createResp.IsServerEncrypted\n\tnewResp.LastModified = createResp.LastModified\n\tnewResp.RequestID = createResp.RequestID\n\tnewResp.Version = createResp.Version\n\treturn newResp\n}", "func CreateModifyDirectoryResponse() (response *ModifyDirectoryResponse) {\n\tresponse = &ModifyDirectoryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o JsonSerializationResponseOutput) Format() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v JsonSerializationResponse) *string { return v.Format }).(pulumi.StringPtrOutput)\n}", "func NewVolumeV2(ctx *pulumi.Context,\n\tname string, args *VolumeV2Args, opts ...pulumi.ResourceOption) (*VolumeV2, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Size == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Size'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource VolumeV2\n\terr := ctx.RegisterResource(\"openstack:blockstorage/volumeV2:VolumeV2\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func toAdmissionResponse(err error) *v1beta1.AdmissionResponse {\n\treturn &v1beta1.AdmissionResponse{\n\t\tResult: 
&metav1.Status{\n\t\t\tMessage: err.Error(),\n\t\t},\n\t}\n}", "func toAdmissionResponse(err error) *v1beta1.AdmissionResponse {\n\treturn &v1beta1.AdmissionResponse{\n\t\tResult: &metav1.Status{\n\t\t\tMessage: err.Error(),\n\t\t},\n\t}\n}", "func (*VodGetHlsDecryptionKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{3}\n}", "func (r *CustomResource) ToV1Alpha1() map[string]interface{} {\n\tresult := map[string]interface{}{}\n\tresult[\"name\"] = r.Name()\n\tresult[\"kind\"] = r.Kind()\n\tfor k, v := range r.Spec {\n\t\tresult[k] = v\n\t}\n\treturn result\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (o *PcloudPvminstancesVolumesDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 202:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteNotFound()\n\t\tif err := result.readResponse(response, consumer, 
o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesDeleteInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (p *VolumesClientListPager) PageResponse() VolumesClientListResponse {\n\treturn p.current\n}", "func ParseGetL2OrderBookResponse(rsp *http.Response) (*GetL2OrderBookResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetL2OrderBookResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest OrderBook\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func FormatStandardResponse(success bool, errorCode, errorSubcode, message string, w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tresponse := StandardResponse{Success: success, ErrorCode: errorCode, ErrorSubcode: errorSubcode, ErrorMessage: message}\n\n\tif !response.Success {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\t// Encode the response as JSON\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\tlog.Printf(\"Error forming the boolean response (%v)\\n. 
%v\", response, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func CreateListDAGVersionsResponse() (response *ListDAGVersionsResponse) {\n\tresponse = &ListDAGVersionsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (*VersionResponse) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{1}\n}", "func (*DemoteVolumeResponse) Descriptor() ([]byte, []int) {\n\treturn file_replication_proto_rawDescGZIP(), []int{7}\n}", "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}" ]
[ "0.73623383", "0.7316481", "0.7242547", "0.71427774", "0.70498836", "0.6979951", "0.6330165", "0.60542643", "0.5822024", "0.5761874", "0.57228094", "0.562423", "0.5611371", "0.5601447", "0.55586636", "0.5525151", "0.5371286", "0.5355247", "0.5310187", "0.5298041", "0.52764446", "0.52584094", "0.520589", "0.51925486", "0.50719565", "0.50554353", "0.4992193", "0.4987082", "0.4970764", "0.4934804", "0.49320078", "0.49013507", "0.48776528", "0.48398378", "0.48281777", "0.48103055", "0.4770276", "0.47503147", "0.4741436", "0.47224995", "0.47041163", "0.46921548", "0.46909198", "0.4673458", "0.46729153", "0.46559763", "0.4635424", "0.46176487", "0.46165702", "0.46146983", "0.461088", "0.46071985", "0.45933288", "0.4587701", "0.4576158", "0.4557091", "0.45481184", "0.45320293", "0.4524935", "0.45062023", "0.4506175", "0.4490622", "0.4488622", "0.44775134", "0.44358662", "0.4431029", "0.44162893", "0.4408418", "0.44042552", "0.44024697", "0.4376896", "0.43674493", "0.43654695", "0.4344461", "0.4341852", "0.4334106", "0.43330637", "0.43267024", "0.43178788", "0.43145975", "0.4311527", "0.4307888", "0.42959195", "0.428803", "0.42849925", "0.42840618", "0.42831668", "0.42831668", "0.42799646", "0.4279712", "0.42787513", "0.42780966", "0.4277206", "0.42668554", "0.42657605", "0.42603937", "0.42579183", "0.4247347", "0.42457342", "0.42451358" ]
0.89614576
0
Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest is an autogenerated conversion function.
func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error { return autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn 
autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func NewClosestDiffFinder(exp expectations.ReadOnly, dCounter digest_counter.DigestCounter, diffStore diff.DiffStore) *Impl {\n\treturn &Impl{\n\t\texpectations: exp,\n\t\tdCounter: dCounter,\n\t\tdiffStore: diffStore,\n\t}\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func (o ElastigroupScalingDownPolicyStepAdjustmentActionOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicyStepAdjustmentAction) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (o 
*SearchSLOResponseDataAttributesFacets) SetTarget(v []SearchSLOResponseDataAttributesFacetsObjectInt) {\n\to.Target = v\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryption() ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *ReplicatedVMManagedDiskTargetDiskEncryption {\n\t\treturn v.TargetDiskEncryption\n\t}).(ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput)\n}", "func (o *FileInfoCollectionGetParams) WithTarget(target *string) *FileInfoCollectionGetParams {\n\to.SetTarget(target)\n\treturn o\n}", "func (r *GetSLOHistoryOptionalParameters) WithTarget(target float64) *GetSLOHistoryOptionalParameters {\n\tr.Target = &target\n\treturn r\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func GetDeviceByMntPoint(targetPath string) string {\n\tdeviceCmd := fmt.Sprintf(\"mount | grep \\\"on %s\\\" | awk 'NR==1 {print $1}'\", targetPath)\n\tdeviceCmdOut, err := utils.Run(deviceCmd)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(deviceCmdOut)\n}", "func closestToTarget(arr []int, target int) int {\n\tmin := math.MaxInt32\n\tsize := len(arr)\n\n\tandProducts := make([]int, 0)\n\n\tfor r := 0; r < size; r++ {\n\t\tfor i := 0; i < len(andProducts); i++ {\n\t\t\tandProducts[i] &= arr[r]\n\t\t}\n\t\tandProducts = append(andProducts, arr[r])\n\t\tsort.Ints(andProducts)\n\t\tandProducts = dedup(andProducts)\n\n\t\tfor _, ap := range andProducts {\n\t\t\tdiff := myAbs(ap - target)\n\t\t\tif diff == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\n\t\t\tif min > diff {\n\t\t\t\tmin = diff\n\t\t\t} else if ap > target {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn min\n}", "func (s *DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o ElastigroupScalingUpPolicyStepAdjustmentActionOutput) 
MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingUpPolicyStepAdjustmentAction) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func getFoodTarget(p *Player, data PlayerInput) (mgl32.Vec2, bool) {\n\ttargetAcquired := false\n\tok := false\n\tvar target mgl32.Vec2\n\tvar closestFood []int\n\ttmpPos := p.Pos[0]\n\tmin[0], min[1] = float64(tmpPos[0]-p.viewRadius), float64(tmpPos[1]-p.viewRadius)\n\tmax[0], max[1] = float64(tmpPos[0]+p.viewRadius), float64(tmpPos[1]+p.viewRadius)\n\n\tdata.Food.Search(min, max,\n\t\tfunc(min, max []float64, value interface{}) bool {\n\t\t\tif data.FoodDict[value.(int)].P.Sub(tmpPos).Len() < p.viewRadius {\n\t\t\t\tclosestFood = append(closestFood, value.(int))\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\n\tfor _, f := range closestFood {\n\t\tif !targetAcquired || tmpPos.Sub(data.FoodDict[f].P).Len() < tmpPos.Sub(target).Len() {\n\t\t\ttarget = data.FoodDict[f].P\n\t\t\ttargetAcquired = true\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn target, ok\n}", "func (s *Attribute) SetTargetId(v string) *Attribute {\n\ts.TargetId = &v\n\treturn s\n}", "func newClosest() *Closest {\n\treturn &Closest{\n\t\tDigest: NoDigestFound,\n\t\tDiff: math.MaxFloat32,\n\t\tDiffPixels: math.MaxFloat32,\n\t\tMaxRGBA: [4]int{},\n\t}\n}", "func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *RepositorySyncDefinition) SetTarget(v string) *RepositorySyncDefinition {\n\ts.Target = &v\n\treturn s\n}", "func (o ElastigroupScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (s 
*InviteAccountToOrganizationInput) SetTarget(v *HandshakeParty) *InviteAccountToOrganizationInput {\n\ts.Target = v\n\treturn s\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *ResourceSyncAttempt) SetTarget(v string) *ResourceSyncAttempt {\n\ts.Target = &v\n\treturn s\n}", "func (t *transpiler) VisitTargetPath(ctx *parser.TargetPathContext) interface{} {\n\tp := ctx.TargetPathHead().Accept(t).(pathSpec)\n\tfor i := range ctx.AllTargetPathSegment() {\n\t\tp.field += ctx.TargetPathSegment(i).Accept(t).(string)\n\t}\n\n\tif ctx.OWMOD() != nil && ctx.OWMOD().GetText() != \"\" {\n\t\tp.field += ctx.OWMOD().GetText()\n\t}\n\n\t// Only one of p.arg and p.index can be filled.\n\tif (p.arg == \"\") == (p.index == \"\") {\n\t\tt.fail(ctx, fmt.Errorf(\"invalid target path - expected arg xor index but got both or neither (arg %s and index %s)\", p.arg, p.index))\n\t}\n\n\treturn p\n}", "func (o *RequestTarget) SetTarget(v ResourceReference) {\n\to.Target = &v\n}", "func (o AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecOutput) Target() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec) *int { return v.Target }).(pulumi.IntPtrOutput)\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is 
invalid, missing the app id provided as header or Basic auth)\n\t// However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - `http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn \"\", \"\"\n}", "func LoginIscsiTarget(targetName string, isInformationalSession bool, initiatorInstance *string, initiatorPortNumber *uint32, targetPortal *iscsidsc.Portal,\n\tsecurityFlags *iscsidsc.SecurityFlags, loginOptions *iscsidsc.LoginOptions, key *string, isPersistent bool) (*iscsidsc.SessionID, *iscsidsc.ConnectionID, error) {\n\ttargetNamePtr, err := windows.UTF16PtrFromString(targetName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"invalid target name: %q\", targetName)\n\t}\n\n\tinitiatorInstancePtr, initiatorPortNumberValue, err := internal.ConvertInitiatorArgs(initiatorInstance, initiatorPortNumber)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinternalPortal, err := internal.CheckAndConvertPortal(targetPortal)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid portal argument\")\n\t}\n\n\tvar securityFlagsValue iscsidsc.SecurityFlags\n\tif securityFlags != nil {\n\t\tsecurityFlagsValue = *securityFlags\n\t}\n\n\tinternalLoginOptions, userNamePtr, passwordPtr, err := 
internal.CheckAndConvertLoginOptions(loginOptions)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid loginOptions argument\")\n\t}\n\n\tkeyPtr, keySize, err := internal.CheckAndConvertKey(key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn callProcLoginIScsiTargetW(targetNamePtr, isInformationalSession, initiatorInstancePtr, initiatorPortNumberValue,\n\t\tinternalPortal, securityFlagsValue, internalLoginOptions, uintptr(unsafe.Pointer(userNamePtr)), uintptr(unsafe.Pointer(passwordPtr)),\n\t\tkeyPtr, keySize, isPersistent)\n}", "func (b *RoutePortApplyConfiguration) WithTargetPort(value intstr.IntOrString) *RoutePortApplyConfiguration {\n\tb.TargetPort = &value\n\treturn b\n}", "func (opts *CompactRangeOptions) TargetLevel() int32 {\n\treturn int32(C.rocksdb_compactoptions_get_target_level(opts.c))\n}", "func (a *HyperflexApiService) GetHyperflexTargetByMoid(ctx context.Context, moid string) ApiGetHyperflexTargetByMoidRequest {\n\treturn ApiGetHyperflexTargetByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (o MrScalarTaskScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MrScalarTaskScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func BestPath(g *graphs.Graph, query *data.Query, circleFinder *CircleFinder) *data.Path {\n\tfromVertices := circleFinder.VerticesInCircle(query.From, query.WalkingRadius) // O(V*log(D))\n\ttoVertices := circleFinder.VerticesInCircle(query.To, query.WalkingRadius) // O(V*log(D))\n\n\tbestTime, walkingDistance, drivingDistance := g.Dijkstra(fromVertices, toVertices) // O(E*log(V))\n\n\treturn &data.Path{\n\t\tDrivingDistance: drivingDistance,\n\t\tWalkingDistance: walkingDistance,\n\t\tTime: bestTime * 60,\n\t}\n}", "func (o VpcCniOptionsPtrOutput) WarmIpTarget() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *VpcCniOptions) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
v.WarmIpTarget\n\t}).(pulumi.IntPtrOutput)\n}", "func (o ApplicationSpecRolloutplanPtrOutput) TargetSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecRolloutplan) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.TargetSize\n\t}).(pulumi.IntPtrOutput)\n}", "func (plugin *IscsiPlugin) LoginTarget(blockDev model.BlockDeviceAccessInfo) (err error) {\n\tlog.Tracef(\">>>>> LoginTarget, TargetName=%v\", blockDev.TargetName)\n\tdefer log.Traceln(\"<<<<< LoginTarget\")\n\n\t// If the iSCSI iqn is not provided, fail the request\n\tif blockDev.TargetName == \"\" {\n\t\terr := cerrors.NewChapiError(cerrors.InvalidArgument, errorMessageMissingIscsiTargetName)\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t// If the IscsiAccessInfo object is not provided, fail the request\n\tif blockDev.IscsiAccessInfo == nil {\n\t\terr := cerrors.NewChapiError(cerrors.InvalidArgument, errorMessageMissingIscsiAccessInfo)\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t// Use the platform specific routine to login to the iSCSI target\n\terr = plugin.loginTarget(blockDev)\n\n\t// If there was an error logging into the iSCSI target, but connections remain, clean up\n\t// after ourselves by logging out the target.\n\tif err != nil {\n\t\tif loggedIn, _ := plugin.IsTargetLoggedIn(blockDev.TargetName); loggedIn == true {\n\t\t\tplugin.LogoutTarget(blockDev.TargetName)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Success!!!\n\treturn nil\n}", "func (o MrScalarCoreScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MrScalarCoreScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (o DeliveryPipelineSerialPipelineStageOutput) TargetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStage) *string { return v.TargetId }).(pulumi.StringPtrOutput)\n}", "func WithTargetSorter(sorter fab.TargetSorter) RequestOption {\n\treturn func(ctx context.Client, o 
*requestOptions) error {\n\t\to.TargetSorter = sorter\n\t\treturn nil\n\t}\n}", "func (s *DescribeMountTargetSecurityGroupsInput) SetMountTargetId(v string) *DescribeMountTargetSecurityGroupsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (o *RequestTarget) GetTarget() *ResourceReference {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\treturn o.Target\n}", "func TargetPath(src, dst Part) string {\n\tpath, err := filepath.Rel(filepath.Dir(src.Path()), dst.Path())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn path\n}", "func (o *SLOOverallStatuses) SetTarget(v float64) {\n\to.Target = &v\n}", "func (s *PolicyTargetSummary) SetTargetId(v string) *PolicyTargetSummary {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *DescribeTestSetDiscrepancyReportOutput) SetTarget(v *TestSetDiscrepancyReportResourceTarget) *DescribeTestSetDiscrepancyReportOutput {\n\ts.Target = v\n\treturn s\n}", "func DecodeRelayTarget(decryptedCell []byte, payloadLength int) string {\n\tstart := CmdLength + StreamIDLength + DigestLength + PayloadLength + (RelayDataLength - payloadLength)\n\tpayload := decryptedCell[start:]\n\tend := TargetLength\n\treturn DecodeAddr(payload[:end])\n}", "func (mr *MockIscsiReconcileUtilsMockRecorder) GetISCSIHostSessionMapForTarget(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetISCSIHostSessionMapForTarget\", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetISCSIHostSessionMapForTarget), arg0, arg1)\n}", "func assertClosest(t *testing.T, node, closest *Node, id byte) {\n\tremoteNode, err := node.closestPrecedingFingerRPC(node.Node, []byte{id})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while getting closest:%v\", err)\n\t} else if !idsEqual(remoteNode.Id, closest.Id) {\n\t\tt.Fatalf(\"Expected %v, got %v\", closest.Id, remoteNode.Id)\n\t}\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) 
pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func (d *Dao) Target(c context.Context, id int64) (res *model.Target, err error) {\n\tres = &model.Target{}\n\tif err = d.db.QueryRow(c, _targetSQL, id).Scan(&res.ID, &res.SubEvent, &res.Event, &res.Product, &res.Source, &res.GroupIDs, &res.Threshold, &res.Duration, &res.State, &res.Ctime, &res.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tres = nil\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"d.Target.Scan error(%+v), id(%d)\", err, id)\n\t}\n\tif res.GroupIDs != \"\" {\n\t\tvar gids []int64\n\t\tif gids, err = xstr.SplitInts(res.GroupIDs); err != nil {\n\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, res.GroupIDs)\n\t\t\treturn\n\t\t}\n\t\tif res.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (c *Calculator) ShortestPathTo(dstID string) ([]string, int, error) {\n\tvertMap := c.g.Vertices()\n\tv, exists := vertMap[dstID]\n\tif !exists {\n\t\treturn nil, 0, xerrors.Errorf(\"unknown vertex with ID %q\", dstID)\n\t}\n\n\tvar (\n\t\tminDist = v.Value().(*pathState).minDist\n\t\tpath []string\n\t)\n\n\tfor ; v.ID() != c.srcID; v = vertMap[v.Value().(*pathState).prevInPath] {\n\t\tpath = append(path, v.ID())\n\t}\n\tpath = append(path, c.srcID)\n\n\t// Reverse in place to get path from src->dst\n\tfor i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {\n\t\tpath[i], path[j] = path[j], path[i]\n\t}\n\treturn path, minDist, nil\n}", "func (o *KvmPolicyInventory) SetTargetMo(v MoBaseMoRelationship) {\n\to.TargetMo = &v\n}", "func GetEffectiveTarget(blockHeaderTarget uint32, minAnnTarget uint32, annCount uint64, packetCryptVersion int) uint32 {\n\tbnBlockHeaderTarget := CompactToBig(blockHeaderTarget)\n\tbnMinAnnTarget := CompactToBig(minAnnTarget)\n\n\tbnBlockHeaderWork := WorkForTarget(bnBlockHeaderTarget)\n\tbnMinAnnWork := WorkForTarget(bnMinAnnTarget)\n\n\tbnEffectiveWork := 
getEffectiveWorkRequirement(bnBlockHeaderWork, bnMinAnnWork, annCount, packetCryptVersion)\n\n\tbnEffectiveTarget := TargetForWork(bnEffectiveWork)\n\teffectiveTarget := BigToCompact(bnEffectiveTarget)\n\n\tif effectiveTarget > 0x207fffff {\n\t\treturn 0x207fffff\n\t}\n\treturn effectiveTarget\n}", "func (o ElastigroupScalingDownPolicyStepAdjustmentActionOutput) Target() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicyStepAdjustmentAction) *string { return v.Target }).(pulumi.StringPtrOutput)\n}", "func (s *ModifyMountTargetSecurityGroupsInput) SetMountTargetId(v string) *ModifyMountTargetSecurityGroupsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (o *HyperflexVmSnapshotInfoAllOf) SetTargetCompletionTimestamp(v int64) {\n\to.TargetCompletionTimestamp = &v\n}", "func (o ElastigroupScalingDownPolicyStepAdjustmentActionOutput) MaxTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicyStepAdjustmentAction) *string { return v.MaxTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (o ElastigroupMultaiTargetSetOutput) TargetSetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupMultaiTargetSet) string { return v.TargetSetId }).(pulumi.StringOutput)\n}", "func (u *User) QueryRequestTarget() *RequestTargetQuery {\n\treturn NewUserClient(u.config).QueryRequestTarget(u)\n}", "func newRouteFromTargetRouteDef(targetRouteDef *TargetRouteDef) *routev1.Route {\n\tannotations := map[string]string{\n\t\t\"openshift.io/generated-by\": operatorName,\n\t}\n\talternateBackends := []routev1.RouteTargetReference{}\n\tif len(targetRouteDef.canaryService.Name) > 0 {\n\t\tcanaryWeight := 100 - targetRouteDef.primaryService.Weight\n\t\talternateBackends = []routev1.RouteTargetReference{routev1.RouteTargetReference{\n\t\t\tKind: \"Service\",\n\t\t\tName: targetRouteDef.canaryService.Name,\n\t\t\tWeight: &canaryWeight,\n\t\t}}\n\t}\n\treturn &routev1.Route{\n\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\tName: targetRouteDef.routeName,\n\t\t\tNamespace: targetRouteDef.namespace,\n\t\t\tLabels: targetRouteDef.selector,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: routev1.RouteSpec{\n\t\t\tPort: &routev1.RoutePort{\n\t\t\t\tTargetPort: targetRouteDef.targetPort,\n\t\t\t},\n\t\t\tTo: routev1.RouteTargetReference{\n\t\t\t\tKind: \"Service\",\n\t\t\t\tName: targetRouteDef.primaryService.Name,\n\t\t\t\tWeight: &targetRouteDef.primaryService.Weight,\n\t\t\t},\n\t\t\tAlternateBackends: alternateBackends,\n\t\t},\n\t}\n}", "func (c *MockFileStorageClient) DeleteMountTarget(ctx context.Context, id string) error {\n\treturn nil\n}", "func (o *VnicEthAdapterPolicyInventory) SetTargetMo(v MoBaseMoRelationship) {\n\to.TargetMo = &v\n}", "func (b *PodNetworkConnectivityCheckSpecApplyConfiguration) WithTargetEndpoint(value string) *PodNetworkConnectivityCheckSpecApplyConfiguration {\n\tb.TargetEndpoint = &value\n\treturn b\n}", "func (o ArgoCDSpecServerAutoscaleHpaPtrOutput) ScaleTargetRef() ArgoCDSpecServerAutoscaleHpaScaleTargetRefPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerAutoscaleHpa) *ArgoCDSpecServerAutoscaleHpaScaleTargetRef {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.ScaleTargetRef\n\t}).(ArgoCDSpecServerAutoscaleHpaScaleTargetRefPtrOutput)\n}", "func GetTargetFilePath(ctx context.Context, resourcePath, arg string) string {\n\tLogc(ctx).Debug(\">>>> osutils_windows.GetTargetFilePath\")\n\tdefer Logc(ctx).Debug(\"<<<< osutils_windows.GetTargetFilePath\")\n\tfilePath := path.Join(resourcePath, arg)\n\treturn strings.Replace(filePath, \"/\", \"\\\\\", -1)\n}", "func disectPath(basePath string, newPath string) ([]string, []string, error) {\n\tbaseComps := splitPath(basePath)\n\tnewComps := splitPath(newPath)\n\tif len(baseComps) > len(newComps) {\n\t\terr := errors.New(\"Base Path is longer than requested path!\")\n\t\treturn nil, nil, err\n\t}\n\tfor i, _ := range baseComps {\n\t\tif strings.Compare(newComps[i], 
baseComps[i]) != 0 {\n\t\t\terr := errors.New(\"Base Path is not a prefix of newPath\")\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\trelPath := newComps[len(baseComps):]\n\tfor i, e := range relPath {\n\t\tif isCommand(e) {\n\t\t\treturn relPath[:i], relPath[i:], nil\n\t\t}\n\t}\n\treturn relPath, nil, nil\n}", "func (o GetRulesRuleTargetOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetRulesRuleTarget) string { return v.TargetId }).(pulumi.StringOutput)\n}", "func (g *Game) TargetCenter() intersect.Point { return g.target }", "func (plugin *IscsiPlugin) RescanIscsiTarget(lunID string) error {\n\tlog.Tracef(\">>>>> RescanIscsiTarget initiated for lunID %v\", lunID)\n\tdefer log.Traceln(\"<<<<< RescanIscsiTarget\")\n\treturn rescanIscsiTarget(lunID)\n}", "func (r *CachesIscsiVolume) TargetName() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetName\"])\n}", "func (mfs *MountedFS) Path2MpathInfo(path string) (info *MountpathInfo, relativePath string) {\n\tvar (\n\t\tmax int\n\t\tavailablePaths, _ = mfs.Get()\n\t\tcleanedPath = filepath.Clean(path)\n\t)\n\tfor mpath, mpathInfo := range availablePaths {\n\t\trel, ok := pathPrefixMatch(mpath, cleanedPath)\n\t\tif ok && len(mpath) > max {\n\t\t\tinfo = mpathInfo\n\t\t\tmax = len(mpath)\n\t\t\trelativePath = rel\n\t\t\tif relativePath == \".\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (r *CachesIscsiVolume) TargetArn() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetArn\"])\n}", "func (tm *tileManager) closest(target color.Color) tile {\n\tvar dmin float64 = 0xffffffffff\n\tvar closest tile\n\n\tfor _, t := range tm.tiles {\n\t\td := distance(t.avg, target)\n\t\tif d < dmin {\n\t\t\tclosest = t\n\t\t\tdmin = d\n\t\t}\n\t}\n\t//log.Printf(\"closest for %v is tile %v\\n\", target, closest.avg)\n\treturn closest\n}", "func findAbsoluteDeviceByIDPath(volumeName string) (string, error) {\n\tpath := getDeviceByIDPath(volumeName)\n\n\t// 
EvalSymlinks returns relative link if the file is not a symlink\n\t// so we do not have to check if it is symlink prior to evaluation\n\tresolved, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve symlink %q: %v\", path, err)\n\t}\n\n\tif !strings.HasPrefix(resolved, \"/dev\") {\n\t\treturn \"\", fmt.Errorf(\"resolved symlink %q for %q was unexpected\", resolved, path)\n\t}\n\n\treturn resolved, nil\n}", "func (req *ClientRequest) Target() *url.URL {\n\treturn req.target\n}", "func (m *ItemTranslateExchangeIdsPostRequestBody) GetTargetIdType()(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat) {\n val, err := m.GetBackingStore().Get(\"targetIdType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat)\n }\n return nil\n}", "func DiffToTarget(diff float64, powLimit *big.Int) (*big.Int, error) {\n\tif diff <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid pool difficulty %v (0 or less than \"+\n\t\t\t\"zero passed)\", diff)\n\t}\n\n\t// Round down in the case of a non-integer diff since we only support\n\t// ints (unless diff < 1 since we don't allow 0)..\n\tif diff < 1 {\n\t\tdiff = 1\n\t} else {\n\t\tdiff = math.Floor(diff)\n\t}\n\tdivisor := new(big.Int).SetInt64(int64(diff))\n\tmax := powLimit\n\ttarget := new(big.Int)\n\ttarget.Div(max, divisor)\n\n\treturn target, nil\n}", "func (o ContainerResourceMetricSourcePatchPtrOutput) Target() MetricTargetPatchPtrOutput {\n\treturn o.ApplyT(func(v *ContainerResourceMetricSourcePatch) *MetricTargetPatch {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(MetricTargetPatchPtrOutput)\n}", "func (a *AdminApiService) GetTarget(ctx _context.Context, id string) (Target, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName 
string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Target\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, 
localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (s *DescribeMountTargetsInput) SetMountTargetId(v string) *DescribeMountTargetsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (o 
FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {\n\tif s.schematree == nil {\n\t\t// This should not be possible if the calling code generation is\n\t\t// well structured and builds the schematree during parsing of YANG\n\t\t// files.\n\t\treturn nil, fmt.Errorf(\"could not map leafref path: %v, from contextEntry: %v\", path, contextEntry)\n\t}\n\n\tfixedPath, err := fixSchemaTreePath(path, contextEntry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := s.schematree.GetLeafValue(fixedPath)\n\tif e == nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve leafref path: %v from %v, tree: %v\", fixedPath, contextEntry, s.schematree)\n\t}\n\n\ttarget, ok := e.(*yang.Entry)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid element returned from schema tree, must be a yang.Entry for path %v from %v\", path, contextEntry)\n\t}\n\n\treturn target, nil\n}", "func (s *CreateTestSetDiscrepancyReportOutput) SetTarget(v *TestSetDiscrepancyReportResourceTarget) *CreateTestSetDiscrepancyReportOutput {\n\ts.Target = v\n\treturn s\n}", "func (s *DeleteMountTargetInput) SetMountTargetId(v string) *DeleteMountTargetInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (launcher *Launcher) GetTarget() string {\n\tlauncher.Mutex.RLock()\n\targ := launcher.target\n\tlauncher.Mutex.RUnlock()\n\treturn arg\n}", "func (ktu *KqiTargetUpdate) SetKqiTargetFk(k *Kqi) *KqiTargetUpdate {\n\treturn ktu.SetKqiTargetFkID(k.ID)\n}", "func extractPeer(target string) (peer.ID, multiaddr.Multiaddr) {\n\tipfsAddr, err := multiaddr.NewMultiaddr(target)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer ipfs addr, \", err)\n\t}\n\n\tpproto, err := 
ipfsAddr.ValueForProtocol(multiaddr.P_IPFS)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get protocol, \", err)\n\t}\n\n\tpeerid, err := peer.IDB58Decode(pproto)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer id, \", err)\n\t}\n\n\tpeerAddrStr := fmt.Sprintf(\"/ipfs/%s\", peer.IDB58Encode(peerid))\n\tpeerAddr, err := multiaddr.NewMultiaddr(peerAddrStr)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer addr, \", err)\n\t}\n\n\ttargetAddr := ipfsAddr.Decapsulate(peerAddr)\n\n\treturn peerid, targetAddr\n}", "func (entity Entity) ClosestPointTo(target Entity, minDistance float64) Entity {\n\tdist := entity.CalculateDistanceTo(target) - target.Radius - minDistance\n\tangle := target.CalculateRadAngleTo(entity)\n\tx := target.X + dist*math.Cos(angle)\n\ty := target.Y + dist*math.Sin(angle)\n\treturn Entity{\n\t\tX: x,\n\t\tY: y,\n\t\tRadius: 0,\n\t\tHealth: 0,\n\t\tOwner: -1,\n\t\tID: -1,\n\t}\n}", "func (c *Client) ClosestPrecedingFinger(id string) (*pb.Node, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout)\n\tdefer cancel()\n\treq := &pb.ID{Id: id}\n\treturn c.rpcClient.ClosestPrecedingFinger(ctx, req)\n}" ]
[ "0.76947486", "0.7617694", "0.696665", "0.6809457", "0.6168926", "0.5618812", "0.5035765", "0.45517573", "0.41923484", "0.41883287", "0.40918034", "0.40671036", "0.40392116", "0.39618465", "0.38991943", "0.38789403", "0.38784805", "0.38142687", "0.37997288", "0.37832952", "0.37580422", "0.37492803", "0.3724782", "0.3699754", "0.3675274", "0.36438483", "0.36265934", "0.36166647", "0.3614325", "0.36075583", "0.36021248", "0.35948053", "0.3583285", "0.3577454", "0.35746506", "0.356023", "0.35474652", "0.35432562", "0.3532136", "0.35301694", "0.3493316", "0.34857062", "0.34680536", "0.34674665", "0.34669313", "0.3456838", "0.34564847", "0.34564084", "0.3456151", "0.3442751", "0.34374267", "0.34361523", "0.3434656", "0.34242433", "0.34222478", "0.3415357", "0.3407225", "0.33959633", "0.3395151", "0.33890212", "0.33868328", "0.3381846", "0.336944", "0.33659106", "0.33538762", "0.3353446", "0.33462667", "0.334357", "0.3341961", "0.33391705", "0.3337883", "0.33371466", "0.33342907", "0.33318877", "0.33286566", "0.33219984", "0.33193633", "0.3315397", "0.33123824", "0.33123478", "0.33104146", "0.33057708", "0.32955235", "0.32943496", "0.3293889", "0.32929915", "0.32877013", "0.32858506", "0.32828036", "0.32796267", "0.3269439", "0.32676515", "0.3265394", "0.32619295", "0.3260262", "0.3259265", "0.3259093", "0.32536638", "0.32456312", "0.32414886" ]
0.87738425
0
Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest is an autogenerated conversion function.
func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error { return autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn 
autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in *RecoveryTarget, out *v1alpha2.RecoveryTarget, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in, out, s)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (o ElastigroupScalingDownPolicyStepAdjustmentActionOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicyStepAdjustmentAction) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", 
"func (o ElastigroupScalingUpPolicyStepAdjustmentActionOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingUpPolicyStepAdjustmentAction) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func Convert_v1alpha2_RecoveryTarget_To_v1alpha1_RecoveryTarget(in *v1alpha2.RecoveryTarget, out *RecoveryTarget, s conversion.Scope) error {\n\treturn autoConvert_v1alpha2_RecoveryTarget_To_v1alpha1_RecoveryTarget(in, out, s)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (o HTTP2HealthCheckPtrOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.RequestPath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o HTTP2HealthCheckOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.RequestPath }).(pulumi.StringPtrOutput)\n}", "func (o HTTP2HealthCheckResponsePtrOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RequestPath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *SearchSLOResponseDataAttributesFacets) SetTarget(v 
[]SearchSLOResponseDataAttributesFacetsObjectInt) {\n\to.Target = v\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func closestToTarget(arr []int, target int) int {\n\tmin := math.MaxInt32\n\tsize := len(arr)\n\n\tandProducts := make([]int, 0)\n\n\tfor r := 0; r < size; r++ {\n\t\tfor i := 0; i < len(andProducts); i++ {\n\t\t\tandProducts[i] &= arr[r]\n\t\t}\n\t\tandProducts = append(andProducts, arr[r])\n\t\tsort.Ints(andProducts)\n\t\tandProducts = dedup(andProducts)\n\n\t\tfor _, ap := range andProducts {\n\t\t\tdiff := myAbs(ap - target)\n\t\t\tif diff == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\n\t\t\tif min > diff {\n\t\t\t\tmin = diff\n\t\t\t} else if ap > target {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn min\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func (o HTTP2HealthCheckResponseOutput) RequestPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.RequestPath }).(pulumi.StringOutput)\n}", "func Convert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in *impl.PathExistsRequest, out *v2alpha1.PathExistsRequest) error {\n\treturn autoConvert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in, out)\n}", "func (o *FileInfoCollectionGetParams) WithTarget(target *string) *FileInfoCollectionGetParams {\n\to.SetTarget(target)\n\treturn o\n}", "func getFoodTarget(p *Player, data PlayerInput) (mgl32.Vec2, bool) {\n\ttargetAcquired := false\n\tok := false\n\tvar target mgl32.Vec2\n\tvar closestFood []int\n\ttmpPos := p.Pos[0]\n\tmin[0], min[1] = float64(tmpPos[0]-p.viewRadius), float64(tmpPos[1]-p.viewRadius)\n\tmax[0], max[1] = float64(tmpPos[0]+p.viewRadius), float64(tmpPos[1]+p.viewRadius)\n\n\tdata.Food.Search(min, max,\n\t\tfunc(min, max []float64, value interface{}) bool {\n\t\t\tif 
data.FoodDict[value.(int)].P.Sub(tmpPos).Len() < p.viewRadius {\n\t\t\t\tclosestFood = append(closestFood, value.(int))\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\n\tfor _, f := range closestFood {\n\t\tif !targetAcquired || tmpPos.Sub(data.FoodDict[f].P).Len() < tmpPos.Sub(target).Len() {\n\t\t\ttarget = data.FoodDict[f].P\n\t\t\ttargetAcquired = true\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn target, ok\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func GetDeviceByMntPoint(targetPath string) string {\n\tdeviceCmd := fmt.Sprintf(\"mount | grep \\\"on %s\\\" | awk 'NR==1 {print $1}'\", targetPath)\n\tdeviceCmdOut, err := utils.Run(deviceCmd)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(deviceCmdOut)\n}", "func (o ElastigroupScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (c *Calculator) ShortestPathTo(dstID string) ([]string, int, error) {\n\tvertMap := c.g.Vertices()\n\tv, exists := vertMap[dstID]\n\tif !exists {\n\t\treturn nil, 0, xerrors.Errorf(\"unknown vertex with ID %q\", dstID)\n\t}\n\n\tvar (\n\t\tminDist = v.Value().(*pathState).minDist\n\t\tpath []string\n\t)\n\n\tfor ; v.ID() != c.srcID; v = vertMap[v.Value().(*pathState).prevInPath] {\n\t\tpath = append(path, v.ID())\n\t}\n\tpath = append(path, c.srcID)\n\n\t// Reverse in place to get path from src->dst\n\tfor i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {\n\t\tpath[i], path[j] = path[j], path[i]\n\t}\n\treturn path, minDist, nil\n}", "func (a *HyperflexApiService) GetHyperflexTargetByMoid(ctx context.Context, moid string) ApiGetHyperflexTargetByMoidRequest {\n\treturn ApiGetHyperflexTargetByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (s 
*DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (r *GetSLOHistoryOptionalParameters) WithTarget(target float64) *GetSLOHistoryOptionalParameters {\n\tr.Target = &target\n\treturn r\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is invalid, missing the app id provided as header or Basic auth)\n\t// However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - `http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn \"\", \"\"\n}", "func NewClosestDiffFinder(exp expectations.ReadOnly, 
dCounter digest_counter.DigestCounter, diffStore diff.DiffStore) *Impl {\n\treturn &Impl{\n\t\texpectations: exp,\n\t\tdCounter: dCounter,\n\t\tdiffStore: diffStore,\n\t}\n}", "func (c *Client) ClosestPrecedingFinger(id string) (*pb.Node, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout)\n\tdefer cancel()\n\treq := &pb.ID{Id: id}\n\treturn c.rpcClient.ClosestPrecedingFinger(ctx, req)\n}", "func LoginIscsiTarget(targetName string, isInformationalSession bool, initiatorInstance *string, initiatorPortNumber *uint32, targetPortal *iscsidsc.Portal,\n\tsecurityFlags *iscsidsc.SecurityFlags, loginOptions *iscsidsc.LoginOptions, key *string, isPersistent bool) (*iscsidsc.SessionID, *iscsidsc.ConnectionID, error) {\n\ttargetNamePtr, err := windows.UTF16PtrFromString(targetName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"invalid target name: %q\", targetName)\n\t}\n\n\tinitiatorInstancePtr, initiatorPortNumberValue, err := internal.ConvertInitiatorArgs(initiatorInstance, initiatorPortNumber)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinternalPortal, err := internal.CheckAndConvertPortal(targetPortal)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid portal argument\")\n\t}\n\n\tvar securityFlagsValue iscsidsc.SecurityFlags\n\tif securityFlags != nil {\n\t\tsecurityFlagsValue = *securityFlags\n\t}\n\n\tinternalLoginOptions, userNamePtr, passwordPtr, err := internal.CheckAndConvertLoginOptions(loginOptions)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid loginOptions argument\")\n\t}\n\n\tkeyPtr, keySize, err := internal.CheckAndConvertKey(key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn callProcLoginIScsiTargetW(targetNamePtr, isInformationalSession, initiatorInstancePtr, initiatorPortNumberValue,\n\t\tinternalPortal, securityFlagsValue, internalLoginOptions, uintptr(unsafe.Pointer(userNamePtr)), uintptr(unsafe.Pointer(passwordPtr)),\n\t\tkeyPtr, keySize, 
isPersistent)\n}", "func (t *transpiler) VisitTargetPath(ctx *parser.TargetPathContext) interface{} {\n\tp := ctx.TargetPathHead().Accept(t).(pathSpec)\n\tfor i := range ctx.AllTargetPathSegment() {\n\t\tp.field += ctx.TargetPathSegment(i).Accept(t).(string)\n\t}\n\n\tif ctx.OWMOD() != nil && ctx.OWMOD().GetText() != \"\" {\n\t\tp.field += ctx.OWMOD().GetText()\n\t}\n\n\t// Only one of p.arg and p.index can be filled.\n\tif (p.arg == \"\") == (p.index == \"\") {\n\t\tt.fail(ctx, fmt.Errorf(\"invalid target path - expected arg xor index but got both or neither (arg %s and index %s)\", p.arg, p.index))\n\t}\n\n\treturn p\n}", "func (plugin *IscsiPlugin) LoginTarget(blockDev model.BlockDeviceAccessInfo) (err error) {\n\tlog.Tracef(\">>>>> LoginTarget, TargetName=%v\", blockDev.TargetName)\n\tdefer log.Traceln(\"<<<<< LoginTarget\")\n\n\t// If the iSCSI iqn is not provided, fail the request\n\tif blockDev.TargetName == \"\" {\n\t\terr := cerrors.NewChapiError(cerrors.InvalidArgument, errorMessageMissingIscsiTargetName)\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t// If the IscsiAccessInfo object is not provided, fail the request\n\tif blockDev.IscsiAccessInfo == nil {\n\t\terr := cerrors.NewChapiError(cerrors.InvalidArgument, errorMessageMissingIscsiAccessInfo)\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t// Use the platform specific routine to login to the iSCSI target\n\terr = plugin.loginTarget(blockDev)\n\n\t// If there was an error logging into the iSCSI target, but connections remain, clean up\n\t// after ourselves by logging out the target.\n\tif err != nil {\n\t\tif loggedIn, _ := plugin.IsTargetLoggedIn(blockDev.TargetName); loggedIn == true {\n\t\t\tplugin.LogoutTarget(blockDev.TargetName)\n\t\t}\n\t\treturn err\n\t}\n\n\t// Success!!!\n\treturn nil\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn 
autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func disectPath(basePath string, newPath string) ([]string, []string, error) {\n\tbaseComps := splitPath(basePath)\n\tnewComps := splitPath(newPath)\n\tif len(baseComps) > len(newComps) {\n\t\terr := errors.New(\"Base Path is longer than requested path!\")\n\t\treturn nil, nil, err\n\t}\n\tfor i, _ := range baseComps {\n\t\tif strings.Compare(newComps[i], baseComps[i]) != 0 {\n\t\t\terr := errors.New(\"Base Path is not a prefix of newPath\")\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\trelPath := newComps[len(baseComps):]\n\tfor i, e := range relPath {\n\t\tif isCommand(e) {\n\t\t\treturn relPath[:i], relPath[i:], nil\n\t\t}\n\t}\n\treturn relPath, nil, nil\n}", "func (s *Attribute) SetTargetId(v string) *Attribute {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func newClosest() *Closest {\n\treturn &Closest{\n\t\tDigest: NoDigestFound,\n\t\tDiff: math.MaxFloat32,\n\t\tDiffPixels: math.MaxFloat32,\n\t\tMaxRGBA: [4]int{},\n\t}\n}", "func (mfs *MountedFS) Path2MpathInfo(path string) (info *MountpathInfo, relativePath string) {\n\tvar (\n\t\tmax int\n\t\tavailablePaths, _ = mfs.Get()\n\t\tcleanedPath = filepath.Clean(path)\n\t)\n\tfor mpath, mpathInfo := range availablePaths {\n\t\trel, ok := pathPrefixMatch(mpath, cleanedPath)\n\t\tif ok && len(mpath) > max {\n\t\t\tinfo = mpathInfo\n\t\t\tmax = len(mpath)\n\t\t\trelativePath = rel\n\t\t\tif relativePath == \".\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func assertClosest(t *testing.T, node, closest *Node, id byte) {\n\tremoteNode, err := node.closestPrecedingFingerRPC(node.Node, []byte{id})\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error 
while getting closest:%v\", err)\n\t} else if !idsEqual(remoteNode.Id, closest.Id) {\n\t\tt.Fatalf(\"Expected %v, got %v\", closest.Id, remoteNode.Id)\n\t}\n}", "func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func (r *Request) FromV2(c *CheckRequestV2) (*Request, error) {\n\tpathURL, err := url.Parse(c.GetAttributes().GetRequest().GetHttp().GetPath())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryFromRequest, err := url.ParseQuery(c.GetAttributes().GetRequest().GetHttp().GetQuery())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The actual query is often in `GetPath()`. And, `GetQuery()` can return\n\t// an empty string in this case. Thus, we should merge the query we get from\n\t// both of them. Otherwise, the actual query in `GetPath()` will be\n\t// URL-encoded / escaped if we pass it to `URL.Path` below, resulting\n\t// in the query params being doubley encoded / escaped when using `RequestURI()`.\n\tmergedQuery := pathURL.Query()\n\tfor k, v := range queryFromRequest {\n\t\tif existing, exists := mergedQuery[k]; exists {\n\t\t\tmergedQuery[k] = append(existing, v...)\n\t\t} else {\n\t\t\tmergedQuery[k] = v\n\t\t}\n\t}\n\n\tr.Request = http.Request{\n\t\tHost: c.GetAttributes().GetRequest().GetHttp().GetHost(),\n\t\tURL: &url.URL{\n\t\t\tScheme: c.GetAttributes().GetRequest().GetHttp().GetScheme(),\n\t\t\tHost: c.GetAttributes().GetRequest().GetHttp().GetHost(),\n\t\t\tPath: pathURL.Path,\n\t\t\tRawQuery: mergedQuery.Encode(),\n\t\t\tFragment: c.GetAttributes().GetRequest().GetHttp().GetFragment(),\n\t\t},\n\t\tHeader: http.Header{},\n\t\tMethod: c.GetAttributes().GetRequest().GetHttp().GetMethod(),\n\t\tProto: c.GetAttributes().GetRequest().GetHttp().GetProtocol(),\n\t}\n\n\tfor k, v := range c.GetAttributes().GetRequest().GetHttp().GetHeaders() {\n\t\t// Ignore SPDY headers as it is not valid sequence in HTTP header naming\n\t\t// and can cause problems for the forward auth 
service.\n\t\tif strings.HasPrefix(k, \":\") {\n\t\t\tcontinue\n\t\t}\n\t\tr.Request.Header.Add(k, v)\n\t}\n\n\tr.ID = c.GetAttributes().GetRequest().GetHttp().GetId()\n\tr.Context = c.GetAttributes().GetContextExtensions()\n\n\treturn r, nil\n}", "func (o *HyperflexVmSnapshotInfoAllOf) SetTargetCompletionTimestamp(v int64) {\n\to.TargetCompletionTimestamp = &v\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *DescribeMountTargetSecurityGroupsInput) SetMountTargetId(v string) *DescribeMountTargetSecurityGroupsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o MrScalarCoreScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MrScalarCoreScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func (o MrScalarTaskScalingDownPolicyOutput) MinTargetCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v MrScalarTaskScalingDownPolicy) *string { return v.MinTargetCapacity }).(pulumi.StringPtrOutput)\n}", "func GetAgedAnnTarget(target, annAgeBlocks uint32, packetCryptVersion int) uint32 {\n\tif packetCryptVersion >= 2 {\n\t\treturn getAgedAnnTarget2(target, annAgeBlocks)\n\t}\n\tif annAgeBlocks < util.Conf_PacketCrypt_ANN_WAIT_PERIOD {\n\t\t// announcement is not ready yet\n\t\treturn 0xffffffff\n\t}\n\tbnAnnTar := CompactToBig(target)\n\tif annAgeBlocks == util.Conf_PacketCrypt_ANN_WAIT_PERIOD {\n\t\t// fresh ann, no aging\n\t\treturn BigToCompact(bnAnnTar)\n\t}\n\tannAgeBlocks -= util.Conf_PacketCrypt_ANN_WAIT_PERIOD\n\tbnAnnWork := WorkForTarget(bnAnnTar)\n\tbnAnnWork.Div(bnAnnWork, big.NewInt(int64(annAgeBlocks)))\n\tbnAnnAgedTar := TargetForWork(bnAnnWork)\n\tout := BigToCompact(bnAnnAgedTar)\n\tif out > 0x207fffff {\n\t\treturn 0xffffffff\n\t}\n\treturn out\n}", "func ValidateRequestFromContext(c 
*fiber.Ctx, router routers.Router, options *Options) error {\n\n\tr, err := adaptor.ConvertRequest(c, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\troute, pathParams, err := router.FindRoute(r)\n\n\t// We failed to find a matching route for the request.\n\tif err != nil {\n\t\tswitch e := err.(type) {\n\t\tcase *routers.RouteError:\n\t\t\t// We've got a bad request, the path requested doesn't match\n\t\t\t// either server, or path, or something.\n\t\t\treturn errors.New(e.Reason)\n\t\tdefault:\n\t\t\t// This should never happen today, but if our upstream code changes,\n\t\t\t// we don't want to crash the server, so handle the unexpected error.\n\t\t\treturn fmt.Errorf(\"error validating route: %s\", err.Error())\n\t\t}\n\t}\n\n\t// Validate request\n\trequestValidationInput := &openapi3filter.RequestValidationInput{\n\t\tRequest: r,\n\t\tPathParams: pathParams,\n\t\tRoute: route,\n\t}\n\n\t// Pass the fiber context into the request validator, so that any callbacks\n\t// which it invokes make it available.\n\trequestContext := context.WithValue(context.Background(), ctxKeyFiberContext{}, c) //nolint:staticcheck\n\n\tif options != nil {\n\t\trequestValidationInput.Options = &options.Options\n\t\trequestValidationInput.ParamDecoder = options.ParamDecoder\n\t\trequestContext = context.WithValue(requestContext, ctxKeyUserData{}, options.UserData) //nolint:staticcheck\n\t}\n\n\terr = openapi3filter.ValidateRequest(requestContext, requestValidationInput)\n\tif err != nil {\n\t\tme := openapi3.MultiError{}\n\t\tif errors.As(err, &me) {\n\t\t\terrFunc := getMultiErrorHandlerFromOptions(options)\n\t\t\treturn errFunc(me)\n\t\t}\n\n\t\tswitch e := err.(type) {\n\t\tcase *openapi3filter.RequestError:\n\t\t\t// We've got a bad request\n\t\t\t// Split up the verbose error by lines and return the first one\n\t\t\t// openapi errors seem to be multi-line with a decent message on the first\n\t\t\terrorLines := strings.Split(e.Error(), \"\\n\")\n\t\t\treturn fmt.Errorf(\"error 
in openapi3filter.RequestError: %s\", errorLines[0])\n\t\tcase *openapi3filter.SecurityRequirementsError:\n\t\t\treturn fmt.Errorf(\"error in openapi3filter.SecurityRequirementsError: %s\", e.Error())\n\t\tdefault:\n\t\t\t// This should never happen today, but if our upstream code changes,\n\t\t\t// we don't want to crash the server, so handle the unexpected error.\n\t\t\treturn fmt.Errorf(\"error validating request: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (r ApiPatchHyperflexTargetRequest) HyperflexTarget(hyperflexTarget HyperflexTarget) ApiPatchHyperflexTargetRequest {\n\tr.hyperflexTarget = &hyperflexTarget\n\treturn r\n}", "func BestPath(g *graphs.Graph, query *data.Query, circleFinder *CircleFinder) *data.Path {\n\tfromVertices := circleFinder.VerticesInCircle(query.From, query.WalkingRadius) // O(V*log(D))\n\ttoVertices := circleFinder.VerticesInCircle(query.To, query.WalkingRadius) // O(V*log(D))\n\n\tbestTime, walkingDistance, drivingDistance := g.Dijkstra(fromVertices, toVertices) // O(E*log(V))\n\n\treturn &data.Path{\n\t\tDrivingDistance: drivingDistance,\n\t\tWalkingDistance: walkingDistance,\n\t\tTime: bestTime * 60,\n\t}\n}", "func WithTargetSorter(sorter fab.TargetSorter) RequestOption {\n\treturn func(ctx 
context.Client, o *requestOptions) error {\n\t\to.TargetSorter = sorter\n\t\treturn nil\n\t}\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryption() ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *ReplicatedVMManagedDiskTargetDiskEncryption {\n\t\treturn v.TargetDiskEncryption\n\t}).(ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput)\n}", "func (o ElastigroupMultaiTargetSetOutput) TargetSetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupMultaiTargetSet) string { return v.TargetSetId }).(pulumi.StringOutput)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func PathToAttributePath(p cty.Path) *tftypes.AttributePath {\n\tif p == nil || len(p) < 1 {\n\t\treturn nil\n\t}\n\tap := tftypes.NewAttributePath()\n\tfor _, step := range p {\n\t\tswitch selector := step.(type) {\n\t\tcase cty.GetAttrStep:\n\t\t\tap = ap.WithAttributeName(selector.Name)\n\n\t\tcase cty.IndexStep:\n\t\t\tkey := selector.Key\n\t\t\tswitch key.Type() {\n\t\t\tcase cty.String:\n\t\t\t\tap = ap.WithElementKeyString(key.AsString())\n\t\t\tcase cty.Number:\n\t\t\t\tv, _ := key.AsBigFloat().Int64()\n\t\t\t\tap = ap.WithElementKeyInt(int(v))\n\t\t\tdefault:\n\t\t\t\t// We'll bail early if we encounter anything else, and just\n\t\t\t\t// return the valid prefix.\n\t\t\t\treturn ap\n\t\t\t}\n\t\t}\n\t}\n\treturn ap\n}", "func (s *ModifyMountTargetSecurityGroupsInput) SetMountTargetId(v string) *ModifyMountTargetSecurityGroupsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (r ApiUpdateHyperflexTargetRequest) HyperflexTarget(hyperflexTarget HyperflexTarget) ApiUpdateHyperflexTargetRequest {\n\tr.hyperflexTarget = &hyperflexTarget\n\treturn r\n}", "func (o HTTPSHealthCheckResponsePtrOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
*HTTPSHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RequestPath\n\t}).(pulumi.StringPtrOutput)\n}", "func parseTargetPath(target, serverUrl string) string {\n\tif strings.Contains(target, serverUrl) {\n\t\treturn target[len(serverUrl):]\n\t}\n\treturn target\n}", "func parseTargetPath(target, serverUrl string) string {\n\tif strings.Contains(target, serverUrl) {\n\t\treturn target[len(serverUrl):]\n\t}\n\treturn target\n}", "func FindFirstVolumeMountPoint(lpszRootPathName string, lpszVolumeMountPoint LPWSTR, cchBufferLength DWORD) HANDLE {\n\tlpszRootPathNameStr := unicode16FromString(lpszRootPathName)\n\tret1 := syscall3(findFirstVolumeMountPoint, 3,\n\t\tuintptr(unsafe.Pointer(&lpszRootPathNameStr[0])),\n\t\tuintptr(unsafe.Pointer(lpszVolumeMountPoint)),\n\t\tuintptr(cchBufferLength))\n\treturn HANDLE(ret1)\n}", "func ValidatePath(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := ValidPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn \"\", errors.New(\"Invalid ID. 
IDs must only contain alpha characters.\")\n\t}\n\treturn m[2], nil\n}", "func GetEffectiveTarget(blockHeaderTarget uint32, minAnnTarget uint32, annCount uint64, packetCryptVersion int) uint32 {\n\tbnBlockHeaderTarget := CompactToBig(blockHeaderTarget)\n\tbnMinAnnTarget := CompactToBig(minAnnTarget)\n\n\tbnBlockHeaderWork := WorkForTarget(bnBlockHeaderTarget)\n\tbnMinAnnWork := WorkForTarget(bnMinAnnTarget)\n\n\tbnEffectiveWork := getEffectiveWorkRequirement(bnBlockHeaderWork, bnMinAnnWork, annCount, packetCryptVersion)\n\n\tbnEffectiveTarget := TargetForWork(bnEffectiveWork)\n\teffectiveTarget := BigToCompact(bnEffectiveTarget)\n\n\tif effectiveTarget > 0x207fffff {\n\t\treturn 0x207fffff\n\t}\n\treturn effectiveTarget\n}", "func (s *genState) resolveLeafrefTarget(path string, contextEntry *yang.Entry) (*yang.Entry, error) {\n\tif s.schematree == nil {\n\t\t// This should not be possible if the calling code generation is\n\t\t// well structured and builds the schematree during parsing of YANG\n\t\t// files.\n\t\treturn nil, fmt.Errorf(\"could not map leafref path: %v, from contextEntry: %v\", path, contextEntry)\n\t}\n\n\tfixedPath, err := fixSchemaTreePath(path, contextEntry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := s.schematree.GetLeafValue(fixedPath)\n\tif e == nil {\n\t\treturn nil, fmt.Errorf(\"could not resolve leafref path: %v from %v, tree: %v\", fixedPath, contextEntry, s.schematree)\n\t}\n\n\ttarget, ok := e.(*yang.Entry)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid element returned from schema tree, must be a yang.Entry for path %v from %v\", path, contextEntry)\n\t}\n\n\treturn target, nil\n}", "func TargetPath(src, dst Part) string {\n\tpath, err := filepath.Rel(filepath.Dir(src.Path()), dst.Path())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn path\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn 
autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (a *HyperflexApiService) PatchHyperflexTarget(ctx context.Context, moid string) ApiPatchHyperflexTargetRequest {\n\treturn ApiPatchHyperflexTargetRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (a *HyperflexApiService) UpdateHyperflexTarget(ctx context.Context, moid string) ApiUpdateHyperflexTargetRequest {\n\treturn ApiUpdateHyperflexTargetRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func findAbsoluteDeviceByIDPath(volumeName string) (string, error) {\n\tpath := getDeviceByIDPath(volumeName)\n\n\t// EvalSymlinks returns relative link if the file is not a symlink\n\t// so we do not have to check if it is symlink prior to evaluation\n\tresolved, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve symlink %q: %v\", path, err)\n\t}\n\n\tif !strings.HasPrefix(resolved, \"/dev\") {\n\t\treturn \"\", fmt.Errorf(\"resolved symlink %q for %q was unexpected\", resolved, path)\n\t}\n\n\treturn resolved, nil\n}", "func (o HTTPSHealthCheckResponseOutput) RequestPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.RequestPath }).(pulumi.StringOutput)\n}", "func (s *PolicyTargetSummary) SetTargetId(v string) *PolicyTargetSummary {\n\ts.TargetId = &v\n\treturn s\n}", "func (plugin *IscsiPlugin) RescanIscsiTarget(lunID string) error {\n\tlog.Tracef(\">>>>> RescanIscsiTarget initiated for lunID %v\", lunID)\n\tdefer log.Traceln(\"<<<<< RescanIscsiTarget\")\n\treturn rescanIscsiTarget(lunID)\n}", "func openPath(currentPath string, targetPath string) error {\n\tfullPath := path.Resolve(currentPath, targetPath)\n\n\todir, err := opendir.New(fullPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = odir.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o DeliveryPipelineSerialPipelineStageOutput) TargetId() 
pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStage) *string { return v.TargetId }).(pulumi.StringPtrOutput)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (f *FeeWindow) MinimumFeeRate(confirmationTarget uint) (float64, error) {\n\n\tif confirmationTarget <= 0 {\n\t\treturn 0, fmt.Errorf(\"can't get feeRate. Expected positive confirmation target, got %v\", confirmationTarget)\n\t}\n\n\t// Walk the available targets backwards, finding the highest target below the given one:\n\tfor closestTarget := confirmationTarget; closestTarget > 0; closestTarget-- {\n\t\tif feeRate, containsKey := f.TargetedFees[closestTarget]; containsKey {\n\t\t\t// Found! This is the lowest fee rate that hits the given target.\n\t\t\treturn feeRate, nil\n\t\t}\n\t}\n\n\t// No result? This is odd, but not illogical. It means *all* of our available targets\n\t// are above the requested one. 
Let's use the fastest:\n\treturn f.FastestFeeRate(), nil\n}", "func (client *ImplicitClient) getRequiredPathCreateRequest(ctx context.Context, pathParameter string, options *ImplicitClientGetRequiredPathOptions) (*policy.Request, error) {\n\turlPath := \"/reqopt/implicit/required/path/{pathParameter}\"\n\tif pathParameter == \"\" {\n\t\treturn nil, errors.New(\"parameter pathParameter cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{pathParameter}\", url.PathEscape(pathParameter))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func extractPeer(target string) (peer.ID, multiaddr.Multiaddr) {\n\tipfsAddr, err := multiaddr.NewMultiaddr(target)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer ipfs addr, \", err)\n\t}\n\n\tpproto, err := ipfsAddr.ValueForProtocol(multiaddr.P_IPFS)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get protocol, \", err)\n\t}\n\n\tpeerid, err := peer.IDB58Decode(pproto)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer id, \", err)\n\t}\n\n\tpeerAddrStr := fmt.Sprintf(\"/ipfs/%s\", peer.IDB58Encode(peerid))\n\tpeerAddr, err := multiaddr.NewMultiaddr(peerAddrStr)\n\tif err != nil {\n\t\tlog.Fatalln(\"fatal: could not get peer addr, \", err)\n\t}\n\n\ttargetAddr := ipfsAddr.Decapsulate(peerAddr)\n\n\treturn peerid, targetAddr\n}", "func (o HTTPHealthCheckResponsePtrOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTPHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RequestPath\n\t}).(pulumi.StringPtrOutput)\n}", "func (kademliaID *KademliaID) CalcDistance(target *KademliaID) *KademliaID {\n\n\tresult := KademliaID{}\n\n\tfor i := 0; i < IDLength; i++ {\n\n\t\tresult[i] = kademliaID[i] ^ target[i]\n\t}\n\n\treturn &result\n}", "func (a 
*HyperflexApiService) GetHyperflexTargetList(ctx context.Context) ApiGetHyperflexTargetListRequest {\n\treturn ApiGetHyperflexTargetListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func WithTargetFilter(filter fab.TargetFilter) RequestOption {\n\treturn func(ctx context.Client, o *requestOptions) error {\n\t\to.TargetFilter = filter\n\t\treturn nil\n\t}\n}", "func (o *RequestTarget) SetTarget(v ResourceReference) {\n\to.Target = &v\n}", "func (s *IncorrectMountTargetState) RequestID() string {\n\treturn s.RespMetadata.RequestID\n}", "func symlinkTargetPath(targetDir string, path string) string {\n\ttarget := filepath.Clean(filepath.Join(targetDir, filepath.Base(path)))\n\tif rt.GOOS != \"windows\" {\n\t\treturn target\n\t}\n\n\toldExt := filepath.Ext(target)\n\treturn target[0:len(target)-len(oldExt)] + \".lnk\"\n}" ]
[ "0.7660567", "0.7514063", "0.74158865", "0.64221466", "0.6145906", "0.60564154", "0.48408124", "0.4334877", "0.42377296", "0.41459686", "0.40470752", "0.39127514", "0.3888388", "0.37636432", "0.37533844", "0.37227678", "0.3700313", "0.36923417", "0.36777872", "0.36534095", "0.3648196", "0.3626326", "0.361302", "0.3607843", "0.35829484", "0.35786527", "0.35644346", "0.35621613", "0.35431778", "0.35393405", "0.35330012", "0.3527516", "0.35138267", "0.34833276", "0.34813645", "0.34791413", "0.34774297", "0.34663814", "0.34614345", "0.3456847", "0.3451537", "0.3426517", "0.34245896", "0.34191957", "0.34179282", "0.34078506", "0.34030536", "0.33966166", "0.33944923", "0.33899733", "0.33851483", "0.33658633", "0.33576024", "0.33571705", "0.3348808", "0.3343321", "0.3343109", "0.3336693", "0.3315426", "0.33049044", "0.33034113", "0.32947686", "0.32823133", "0.32809684", "0.32780743", "0.3271717", "0.3271099", "0.32671523", "0.3266187", "0.32638723", "0.3258421", "0.3256653", "0.3249092", "0.32326838", "0.32326838", "0.32316962", "0.3223943", "0.32109788", "0.32058364", "0.32029715", "0.31950498", "0.31905648", "0.3187371", "0.31717575", "0.31693217", "0.31690004", "0.3168595", "0.31651348", "0.31601706", "0.31581622", "0.31506246", "0.31450033", "0.31400943", "0.3137176", "0.31367433", "0.31362018", "0.313487", "0.31261292", "0.31257877", "0.3119848" ]
0.87185645
0
Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse is an autogenerated conversion function.
func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error { return autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn 
autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func (o DistributionPolicyResponsePtrOutput) TargetShape() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DistributionPolicyResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.TargetShape\n\t}).(pulumi.StringPtrOutput)\n}", "func NewClosestDiffFinder(exp expectations.ReadOnly, dCounter digest_counter.DigestCounter, diffStore diff.DiffStore) *Impl {\n\treturn &Impl{\n\t\texpectations: exp,\n\t\tdCounter: dCounter,\n\t\tdiffStore: diffStore,\n\t}\n}", "func (o InstanceGroupManagerStatusResponsePtrOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponsePtrOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManagerStatusResponse) *InstanceGroupManagerStatusVersionTargetResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponsePtrOutput)\n}", "func (o *SearchSLOResponseDataAttributesFacets) SetTarget(v []SearchSLOResponseDataAttributesFacetsObjectInt) {\n\to.Target = v\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor 
{\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", "func (o UserFacingErrorResponseOutput) Target() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v UserFacingErrorResponse) *string { return v.Target }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o DistributionPolicyResponseOutput) TargetShape() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DistributionPolicyResponse) string { return v.TargetShape }).(pulumi.StringOutput)\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func (o IopingSpecVolumeVolumeSourceDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o InstanceGroupManagerStatusResponseOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerStatusResponse) InstanceGroupManagerStatusVersionTargetResponse {\n\t\treturn v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponseOutput)\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func (o UserFacingErrorResponsePtrOutput) Target() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *UserFacingErrorResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(pulumi.StringPtrOutput)\n}", "func (o EndpointResponseOutput) Target() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointResponse) string { return v.Target }).(pulumi.StringOutput)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryption() ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *ReplicatedVMManagedDiskTargetDiskEncryption {\n\t\treturn v.TargetDiskEncryption\n\t}).(ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput)\n}", "func (o *Operation) WithResponseTarget(v interface{}) *Operation {\n\t//Checking the value is a pointer. Need some better error handling here and this just swallows\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Ptr || rv.IsNil() {\n\t\treturn o\n\t}\n\to.responsePtr = v\n\treturn o\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (a *AdminApiService) GetTarget(ctx _context.Context, id string) (Target, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Target\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := 
[]string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o InstanceGroupManagerVersionResponseOutput) TargetSize() FixedOrPercentResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerVersionResponse) FixedOrPercentResponse { return v.TargetSize }).(FixedOrPercentResponseOutput)\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (o AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecOutput) Target() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec) *int { return v.Target }).(pulumi.IntPtrOutput)\n}", "func 
UnmarshalSharedTargetDataResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(SharedTargetDataResponse)\n\terr = core.UnmarshalPrimitive(m, \"cluster_id\", &obj.ClusterID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"cluster_name\", &obj.ClusterName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"entitlement_keys\", &obj.EntitlementKeys)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"namespace\", &obj.Namespace)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"region\", &obj.Region)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"resource_group_id\", &obj.ResourceGroupID)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (s *DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target 
*string) {\n\to.Target = target\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func findAbsoluteDeviceByIDPath(volumeName string) (string, error) {\n\tpath := getDeviceByIDPath(volumeName)\n\n\t// EvalSymlinks returns relative link if the file is not a symlink\n\t// so we do not have to check if it is symlink prior to evaluation\n\tresolved, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve symlink %q: %v\", path, err)\n\t}\n\n\tif !strings.HasPrefix(resolved, \"/dev\") {\n\t\treturn \"\", fmt.Errorf(\"resolved symlink %q for %q was unexpected\", resolved, path)\n\t}\n\n\treturn resolved, nil\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func DecodeRelayTarget(decryptedCell []byte, payloadLength int) string {\n\tstart := CmdLength + StreamIDLength + DigestLength + PayloadLength + (RelayDataLength - payloadLength)\n\tpayload := decryptedCell[start:]\n\tend := TargetLength\n\treturn DecodeAddr(payload[:end])\n}", "func (o GetRulesRuleTargetOutput) TargetId() pulumi.StringOutput 
{\n\treturn o.ApplyT(func(v GetRulesRuleTarget) string { return v.TargetId }).(pulumi.StringOutput)\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func (sw *scrapeWork) getTargetResponse() ([]byte, error) {\n\t// use stream reader when stream mode enabled\n\tif *streamParse || sw.Config.StreamParse || sw.mustSwitchToStreamParseMode(sw.prevBodyLen) {\n\t\t// Read the response in stream mode.\n\t\tsr, err := sw.GetStreamReader()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata, err := io.ReadAll(sr)\n\t\tsr.MustClose()\n\t\treturn data, err\n\t}\n\t// Read the response in usual mode.\n\treturn sw.ReadData(nil)\n}", "func (r *GetSLOHistoryOptionalParameters) WithTarget(target float64) *GetSLOHistoryOptionalParameters {\n\tr.Target = &target\n\treturn r\n}", "func (s *Attribute) SetTargetId(v string) *Attribute {\n\ts.TargetId = &v\n\treturn s\n}", "func (o DeliveryPipelineSerialPipelineStageOutput) TargetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStage) *string { return v.TargetId }).(pulumi.StringPtrOutput)\n}", "func (d *MinioDriver) Path(r volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func getFoodTarget(p *Player, data PlayerInput) (mgl32.Vec2, bool) {\n\ttargetAcquired := false\n\tok := false\n\tvar target mgl32.Vec2\n\tvar closestFood []int\n\ttmpPos := p.Pos[0]\n\tmin[0], min[1] = float64(tmpPos[0]-p.viewRadius), float64(tmpPos[1]-p.viewRadius)\n\tmax[0], max[1] = float64(tmpPos[0]+p.viewRadius), float64(tmpPos[1]+p.viewRadius)\n\n\tdata.Food.Search(min, max,\n\t\tfunc(min, max 
[]float64, value interface{}) bool {\n\t\t\tif data.FoodDict[value.(int)].P.Sub(tmpPos).Len() < p.viewRadius {\n\t\t\t\tclosestFood = append(closestFood, value.(int))\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\n\tfor _, f := range closestFood {\n\t\tif !targetAcquired || tmpPos.Sub(data.FoodDict[f].P).Len() < tmpPos.Sub(target).Len() {\n\t\t\ttarget = data.FoodDict[f].P\n\t\t\ttargetAcquired = true\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn target, ok\n}", "func (r *CachesIscsiVolume) TargetArn() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetArn\"])\n}", "func (o ApplicationSpecRolloutplanPtrOutput) TargetSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecRolloutplan) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.TargetSize\n\t}).(pulumi.IntPtrOutput)\n}", "func (a *AdminApiService) GetTargetSegment(ctx _context.Context, id string) (TargetSegment, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue TargetSegment\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target-segment/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = 
err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func GetDeviceByMntPoint(targetPath string) string {\n\tdeviceCmd := fmt.Sprintf(\"mount | grep \\\"on %s\\\" | awk 'NR==1 {print $1}'\", targetPath)\n\tdeviceCmdOut, err := utils.Run(deviceCmd)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(deviceCmdOut)\n}", "func (o HostRuleResponseOutput) PathMatcher() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HostRuleResponse) string { return v.PathMatcher }).(pulumi.StringOutput)\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (c Client) decodeResponse(endpoint, verb string, params *url.Values, target interface{}) (err error) {\n\tfullURL, err := c.api(endpoint, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc.l.WithFields(log.Fields{\n\t\t\"url\": fullURL.String(), // TODO: remove sensitive 
data\n\t\t\"HTTPverb\": verb,\n\t}).Debug(\"hitting API\")\n\n\tvar resp = &http.Response{}\n\tswitch verb {\n\tcase \"GET\":\n\t\tresp, err = c.httpclient.Get(fullURL.String())\n\tcase \"POST\":\n\t\tresp, err = c.httpclient.Post(fullURL.String(), \"application/x-www-form-urlencoded\", nil)\n\tcase \"DELETE\":\n\t\treq, _ := http.NewRequest(\"DELETE\", fullURL.String(), nil)\n\t\tresp, err = c.httpclient.Do(req)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(target)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (a *AdminApiService) DeleteTarget(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept 
!= \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (d *Dao) Target(c context.Context, id int64) (res *model.Target, err error) {\n\tres = &model.Target{}\n\tif err = d.db.QueryRow(c, _targetSQL, id).Scan(&res.ID, &res.SubEvent, &res.Event, &res.Product, &res.Source, &res.GroupIDs, &res.Threshold, &res.Duration, &res.State, &res.Ctime, &res.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tres = nil\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"d.Target.Scan error(%+v), id(%d)\", err, id)\n\t}\n\tif res.GroupIDs != \"\" {\n\t\tvar gids []int64\n\t\tif gids, err = xstr.SplitInts(res.GroupIDs); err != nil {\n\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, res.GroupIDs)\n\t\t\treturn\n\t\t}\n\t\tif res.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ArgoCDSpecServerAutoscaleHpaPtrOutput) ScaleTargetRef() ArgoCDSpecServerAutoscaleHpaScaleTargetRefPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerAutoscaleHpa) *ArgoCDSpecServerAutoscaleHpaScaleTargetRef {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn &v.ScaleTargetRef\n\t}).(ArgoCDSpecServerAutoscaleHpaScaleTargetRefPtrOutput)\n}", "func (mfs *MountedFS) Path2MpathInfo(path string) (info *MountpathInfo, relativePath string) {\n\tvar (\n\t\tmax int\n\t\tavailablePaths, _ = mfs.Get()\n\t\tcleanedPath = filepath.Clean(path)\n\t)\n\tfor mpath, mpathInfo := range availablePaths {\n\t\trel, ok := pathPrefixMatch(mpath, cleanedPath)\n\t\tif ok && len(mpath) > max {\n\t\t\tinfo = mpathInfo\n\t\t\tmax = len(mpath)\n\t\t\trelativePath = rel\n\t\t\tif relativePath == \".\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (client *IscsiTargetsClient) getHandleResponse(resp *http.Response) (IscsiTargetsClientGetResponse, error) {\n\tresult := IscsiTargetsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IscsiTarget); err != nil {\n\t\treturn IscsiTargetsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o IncludedPathResponseOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IncludedPathResponse) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (a *BackendOptionsApiService) GetLogTarget(ctx _context.Context, id int32, parentName string, parentType string, localVarOptionals *GetLogTargetOpts) (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/services/haproxy/configuration/log_targets/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", id)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tlocalVarQueryParams.Add(\"parent_name\", parameterToString(parentName, 
\"\"))\n\tlocalVarQueryParams.Add(\"parent_type\", parameterToString(parentType, \"\"))\n\tif localVarOptionals != nil && localVarOptionals.TransactionId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"transaction_id\", parameterToString(localVarOptionals.TransactionId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v ModelError\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o GetAppTemplateContainerVolumeMountOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o ApplicationSpecRolloutplanOutput) TargetSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplan) *int { return v.TargetSize }).(pulumi.IntPtrOutput)\n}", "func (s *PolicyTargetSummary) SetTargetId(v string) *PolicyTargetSummary {\n\ts.TargetId = &v\n\treturn s\n}", "func (o SecurityPolicyRuleRedirectOptionsResponseOutput) Target() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SecurityPolicyRuleRedirectOptionsResponse) string { return v.Target }).(pulumi.StringOutput)\n}", "func ConvertPath(c *Client, path string) (*Response, error) 
{\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn c.Convert(f, f.Name())\n}", "func (a *AdminApiService) DeleteTargetSegment(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target-segment/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (plugin *EditCommandPlugin) EditTargetCommand(targetCommand *models.Command, targetResponse string) error {\n\n\t// Build default response with the given message.\n\tdefaultResponse := parser.ParseResponse(targetResponse)\n\terr := parser.Validate(defaultResponse)\n\tif err != nil {\n\t\tlog.Println(\"Failed to validate target 
response\")\n\t\treturn err\n\t}\n\n\ttargetCommand.Responses[models.DefaultResponseKey] = *defaultResponse\n\treturn nil\n}", "func (o *SearchSLOResponseDataAttributesFacets) GetTargetOk() (*[]SearchSLOResponseDataAttributesFacetsObjectInt, bool) {\n\tif o == nil || o.Target == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Target, true\n}", "func (r *CachesIscsiVolume) TargetName() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetName\"])\n}", "func (o CachesIscsiVolumeOutput) TargetArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.TargetArn }).(pulumi.StringOutput)\n}", "func (o ElastigroupMultaiTargetSetOutput) TargetSetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupMultaiTargetSet) string { return v.TargetSetId }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (m *Mounter) HasTarget(targetPath string) (string, bool) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tfor k, v := range m.mounts {\n\t\tfor _, p := range v.Mountpoint {\n\t\t\tif p.Path == targetPath {\n\t\t\t\treturn k, true\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}", "func (s UpdateMaintenanceWindowTargetOutput) SDKResponseMetadata() aws.Response {\n\treturn s.responseMetadata\n}", "func (o ReplicatedVMManagedDiskOutput) TargetResourceGroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) string { return v.TargetResourceGroupId }).(pulumi.StringOutput)\n}", "func TargetPath(src, dst Part) string {\n\tpath, err := 
filepath.Rel(filepath.Dir(src.Path()), dst.Path())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn path\n}", "func (o RuleTargetOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v RuleTarget) string { return v.TargetId }).(pulumi.StringOutput)\n}", "func (o NetworkPacketCaptureOutput) TargetResourceId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NetworkPacketCapture) pulumi.StringOutput { return v.TargetResourceId }).(pulumi.StringOutput)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func (s *DescribeMountTargetSecurityGroupsInput) SetMountTargetId(v string) *DescribeMountTargetSecurityGroupsInput {\n\ts.MountTargetId = &v\n\treturn s\n}", "func (agent *Agent) GetTargetDiff() float64 {\n\tagent.rwLock.RLock()\n\tdefer agent.rwLock.RUnlock()\n\treturn agent.targetDiff\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *FileInfoCollectionGetParams) WithTarget(target *string) *FileInfoCollectionGetParams {\n\to.SetTarget(target)\n\treturn o\n}", "func (o ArgoCDSpecServerAutoscaleHpaOutput) ScaleTargetRef() ArgoCDSpecServerAutoscaleHpaScaleTargetRefOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerAutoscaleHpa) ArgoCDSpecServerAutoscaleHpaScaleTargetRef {\n\t\treturn v.ScaleTargetRef\n\t}).(ArgoCDSpecServerAutoscaleHpaScaleTargetRefOutput)\n}", "func (o CachesIscsiVolumeOutput) TargetName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.TargetName }).(pulumi.StringOutput)\n}", "func UnmarshalSourceResponse(m 
map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(SourceResponse)\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enabled\", &obj.Enabled)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is invalid, missing the app id provided as header or Basic auth)\n\t// However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - 
`http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn \"\", \"\"\n}", "func (c *MockFileStorageClient) DeleteMountTarget(ctx context.Context, id string) error {\n\treturn nil\n}" ]
[ "0.78340524", "0.7618291", "0.72192645", "0.6639702", "0.6438889", "0.612518", "0.54205424", "0.43133488", "0.41862682", "0.41504344", "0.4117307", "0.40790424", "0.40690252", "0.40327424", "0.39886332", "0.39636314", "0.39553627", "0.39446902", "0.3943201", "0.39154643", "0.39126107", "0.39110538", "0.39039436", "0.38750935", "0.38683763", "0.38658398", "0.38526464", "0.3850013", "0.38355675", "0.3831995", "0.3818834", "0.3811492", "0.38110283", "0.37996626", "0.37957537", "0.37946218", "0.37591755", "0.37529817", "0.37470403", "0.37425563", "0.37358728", "0.37327215", "0.37262666", "0.37106943", "0.3698583", "0.3689213", "0.36809558", "0.36716968", "0.3671249", "0.3670999", "0.3668192", "0.36661094", "0.36587662", "0.36546943", "0.36504093", "0.36458677", "0.3638044", "0.3636818", "0.3630804", "0.3626958", "0.36224112", "0.36207008", "0.3607405", "0.36020228", "0.36007884", "0.3597149", "0.35734013", "0.35665733", "0.35661712", "0.3557594", "0.35566512", "0.3551832", "0.35497087", "0.35407525", "0.35388058", "0.35360166", "0.35350627", "0.3529371", "0.35281408", "0.35218924", "0.35172424", "0.35144612", "0.3512759", "0.35035467", "0.35027215", "0.34902126", "0.3487332", "0.3481609", "0.34815535", "0.34803116", "0.34789", "0.34743264", "0.34696314", "0.34685618", "0.3467808", "0.3465406", "0.3463776", "0.34629917", "0.3460551", "0.3444998" ]
0.88408476
0
Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse is an autogenerated conversion function.
func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error { return autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn 
autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourceDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId 
}).(pulumi.StringOutput)\n}", "func Convert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in *RecoveryTarget, out *v1alpha2.RecoveryTarget, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in, out, s)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func Convert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in *impl.PathExistsResponse, out *v2alpha1.PathExistsResponse) error {\n\treturn autoConvert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o InstanceGroupManagerStatusResponsePtrOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponsePtrOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManagerStatusResponse) *InstanceGroupManagerStatusVersionTargetResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponsePtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceProjectedSourcesDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (o InstanceGroupManagerStatusResponseOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerStatusResponse) InstanceGroupManagerStatusVersionTargetResponse {\n\t\treturn v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponseOutput)\n}", "func (o *SearchSLOResponseDataAttributesFacets) SetTarget(v 
[]SearchSLOResponseDataAttributesFacetsObjectInt) {\n\to.Target = v\n}", "func Convert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in *impl.IsSymlinkResponse, out *v2alpha1.IsSymlinkResponse) error {\n\treturn autoConvert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in, out)\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", "func (o HTTP2HealthCheckResponseOutput) RequestPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.RequestPath }).(pulumi.StringOutput)\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o HostRuleResponseOutput) PathMatcher() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HostRuleResponse) string { return v.PathMatcher }).(pulumi.StringOutput)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (o UserFacingErrorResponseOutput) Target() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v UserFacingErrorResponse) *string { return v.Target }).(pulumi.StringPtrOutput)\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func findAbsoluteDeviceByIDPath(volumeName string) (string, error) {\n\tpath := 
getDeviceByIDPath(volumeName)\n\n\t// EvalSymlinks returns relative link if the file is not a symlink\n\t// so we do not have to check if it is symlink prior to evaluation\n\tresolved, err := filepath.EvalSymlinks(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve symlink %q: %v\", path, err)\n\t}\n\n\tif !strings.HasPrefix(resolved, \"/dev\") {\n\t\treturn \"\", fmt.Errorf(\"resolved symlink %q for %q was unexpected\", resolved, path)\n\t}\n\n\treturn resolved, nil\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func (o *Operation) WithResponseTarget(v interface{}) *Operation {\n\t//Checking the value is a pointer. 
Need some better error handling here and this just swallows\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() != reflect.Ptr || rv.IsNil() {\n\t\treturn o\n\t}\n\to.responsePtr = v\n\treturn o\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func (mfs *MountedFS) Path2MpathInfo(path string) (info *MountpathInfo, relativePath string) {\n\tvar (\n\t\tmax int\n\t\tavailablePaths, _ = mfs.Get()\n\t\tcleanedPath = filepath.Clean(path)\n\t)\n\tfor mpath, mpathInfo := range availablePaths {\n\t\trel, ok := pathPrefixMatch(mpath, cleanedPath)\n\t\tif ok && len(mpath) > max {\n\t\t\tinfo = mpathInfo\n\t\t\tmax = len(mpath)\n\t\t\trelativePath = rel\n\t\t\tif relativePath == \".\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (s *DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o IncludedPathResponseOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IncludedPathResponse) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (o *AnalyzeRecipeInstructions200ResponseParsedInstructionsInnerStepsInnerIngredientsInner) GetIdOk() (*float32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Id, true\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (o EndpointResponseOutput) Target() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointResponse) string { return v.Target }).(pulumi.StringOutput)\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func (d *MinioDriver) Path(r 
volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (o ArgoCDSpecServerAutoscaleHpaScaleTargetRefOutput) ApiVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerAutoscaleHpaScaleTargetRef) *string { return v.ApiVersion }).(pulumi.StringPtrOutput)\n}", "func (o UserFacingErrorResponsePtrOutput) Target() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *UserFacingErrorResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Target\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func (o HTTP2HealthCheckResponsePtrOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *HTTP2HealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.RequestPath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ElastigroupMultaiTargetSetOutput) TargetSetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupMultaiTargetSet) string { return v.TargetSetId }).(pulumi.StringOutput)\n}", "func Convert_v1alpha2_RecoveryTarget_To_v1alpha1_RecoveryTarget(in *v1alpha2.RecoveryTarget, out *RecoveryTarget, s conversion.Scope) error {\n\treturn autoConvert_v1alpha2_RecoveryTarget_To_v1alpha1_RecoveryTarget(in, out, s)\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := 
KeyVaultClientDecryptResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func NewGetDiscoverDeploymentTargetOK() *GetDiscoverDeploymentTargetOK {\n\treturn &GetDiscoverDeploymentTargetOK{}\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o GroupContainerLivenessProbeHttpGetOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerLivenessProbeHttpGet) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func UnmarshalSourceResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(SourceResponse)\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enabled\", 
&obj.Enabled)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (s *Attribute) SetTargetId(v string) *Attribute {\n\ts.TargetId = &v\n\treturn s\n}", "func getFoodTarget(p *Player, data PlayerInput) (mgl32.Vec2, bool) {\n\ttargetAcquired := false\n\tok := false\n\tvar target mgl32.Vec2\n\tvar closestFood []int\n\ttmpPos := p.Pos[0]\n\tmin[0], min[1] = float64(tmpPos[0]-p.viewRadius), float64(tmpPos[1]-p.viewRadius)\n\tmax[0], max[1] = float64(tmpPos[0]+p.viewRadius), float64(tmpPos[1]+p.viewRadius)\n\n\tdata.Food.Search(min, max,\n\t\tfunc(min, max []float64, value interface{}) bool {\n\t\t\tif data.FoodDict[value.(int)].P.Sub(tmpPos).Len() < p.viewRadius {\n\t\t\t\tclosestFood = append(closestFood, value.(int))\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\n\tfor _, f := range closestFood {\n\t\tif !targetAcquired || tmpPos.Sub(data.FoodDict[f].P).Len() < tmpPos.Sub(target).Len() {\n\t\t\ttarget = data.FoodDict[f].P\n\t\t\ttargetAcquired = true\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn target, ok\n}", "func (o *RequestsDeploymentScheduledBackup) GetBackupTargetIdOk() (*string, bool) {\n\tif o == nil || o.BackupTargetId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.BackupTargetId, true\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o *HyperflexVmSnapshotInfoAllOf) SetTargetCompletionTimestamp(v int64) {\n\to.TargetCompletionTimestamp = &v\n}", "func (o DeliveryPipelineSerialPipelineStageOutput) 
TargetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStage) *string { return v.TargetId }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func GetDeviceByMntPoint(targetPath string) string {\n\tdeviceCmd := fmt.Sprintf(\"mount | grep \\\"on %s\\\" | awk 'NR==1 {print $1}'\", targetPath)\n\tdeviceCmdOut, err := utils.Run(deviceCmd)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(deviceCmdOut)\n}", "func (a *AdminApiService) GetTarget(ctx _context.Context, id string) (Target, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Target\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := 
a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif 
localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o DistributionPolicyResponsePtrOutput) TargetShape() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DistributionPolicyResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.TargetShape\n\t}).(pulumi.StringPtrOutput)\n}", "func (a *AdminApiService) DeleteTarget(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/target/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine 
the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (client ModelClient) GetIntentSuggestionsResponder(resp *http.Response) (result ListIntentsSuggestionExample, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o GetRulesRuleTargetOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetRulesRuleTarget) string { return v.TargetId }).(pulumi.StringOutput)\n}", "func (o *LinkRouteTableResponse) GetLinkRouteTableIdOk() (*string, bool) {\n\tif o == nil || o.LinkRouteTableId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LinkRouteTableId, true\n}", "func (o ExcludedPathResponseOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ExcludedPathResponse) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func ConvertPath(c *Client, path string) (*Response, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn c.Convert(f, f.Name())\n}", "func (d *VolumeDriver) Path(r volume.Request) volume.Response {\n\treturn volume.Response{Mountpoint: getMountPoint(r.Name)}\n}", "func (s *DescribeMountTargetSecurityGroupsInput) SetMountTargetId(v string) *DescribeMountTargetSecurityGroupsInput 
{\n\ts.MountTargetId = &v\n\treturn s\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func (o ArgoCDSpecServerIngressOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerIngress) *string { return v.Path }).(pulumi.StringPtrOutput)\n}", "func (r *GetSLOHistoryOptionalParameters) WithTarget(target float64) *GetSLOHistoryOptionalParameters {\n\tr.Target = &target\n\treturn r\n}", "func (o *SearchSLOResponseDataAttributesFacets) GetTargetOk() (*[]SearchSLOResponseDataAttributesFacetsObjectInt, bool) {\n\tif o == nil || o.Target == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Target, true\n}", "func (o AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecOutput) Target() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec) *int { return v.Target }).(pulumi.IntPtrOutput)\n}", "func (r *CachesIscsiVolume) TargetArn() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetArn\"])\n}", "func (a *BackendOptionsApiService) GetLogTarget(ctx _context.Context, id int32, parentName string, parentType string, localVarOptionals *GetLogTargetOpts) (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/services/haproxy/configuration/log_targets/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", id)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tlocalVarQueryParams.Add(\"parent_name\", parameterToString(parentName, 
\"\"))\n\tlocalVarQueryParams.Add(\"parent_type\", parameterToString(parentType, \"\"))\n\tif localVarOptionals != nil && localVarOptionals.TransactionId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"transaction_id\", parameterToString(localVarOptionals.TransactionId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, 
localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v ModelError\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is invalid, missing the app id provided as header or Basic auth)\n\t// 
However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - `http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn \"\", \"\"\n}", "func (o HTTPSHealthCheckResponseOutput) RequestPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.RequestPath }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (plugin *EditCommandPlugin) EditTargetCommand(targetCommand *models.Command, targetResponse string) error {\n\n\t// Build default response with the given message.\n\tdefaultResponse := parser.ParseResponse(targetResponse)\n\terr := parser.Validate(defaultResponse)\n\tif err != nil {\n\t\tlog.Println(\"Failed to validate target response\")\n\t\treturn 
err\n\t}\n\n\ttargetCommand.Responses[models.DefaultResponseKey] = *defaultResponse\n\treturn nil\n}", "func (o UrlMapTestResponseOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v UrlMapTestResponse) string { return v.Path }).(pulumi.StringOutput)\n}", "func (client MultipleResponsesClient) Get200ModelA400ValidResponder(resp *http.Response) (result A, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (o VolumeV2Output) SourceVolId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VolumeV2) pulumi.StringPtrOutput { return v.SourceVolId }).(pulumi.StringPtrOutput)\n}", "func (o HTTP2HealthCheckOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.RequestPath }).(pulumi.StringPtrOutput)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryption() ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *ReplicatedVMManagedDiskTargetDiskEncryption {\n\t\treturn v.TargetDiskEncryption\n\t}).(ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput)\n}", "func (o GetAppTemplateContainerVolumeMountOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Path }).(pulumi.StringOutput)\n}", "func ValidatePath(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := ValidPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn \"\", errors.New(\"Invalid ID. 
IDs must only contain alpha characters.\")\n\t}\n\treturn m[2], nil\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (s *PolicyTargetSummary) SetTargetId(v string) *PolicyTargetSummary {\n\ts.TargetId = &v\n\treturn s\n}", "func (o CachesIscsiVolumeOutput) TargetArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.TargetArn }).(pulumi.StringOutput)\n}" ]
[ "0.7777929", "0.7622105", "0.75057214", "0.65937686", "0.65636253", "0.6525607", "0.54358727", "0.44602463", "0.44493836", "0.42463046", "0.4175468", "0.40197146", "0.40040386", "0.39929202", "0.3983291", "0.39802206", "0.39770278", "0.39255393", "0.3922548", "0.3912627", "0.38464808", "0.3830771", "0.37726745", "0.37710482", "0.37171534", "0.36700678", "0.3669592", "0.36678192", "0.36671522", "0.36536852", "0.3644332", "0.36330462", "0.3631665", "0.36089352", "0.3607414", "0.36021718", "0.3595059", "0.35935396", "0.358156", "0.35674793", "0.356579", "0.35541925", "0.35507196", "0.3541566", "0.35366267", "0.35316834", "0.35310936", "0.3524973", "0.3517406", "0.35076714", "0.3505804", "0.3504065", "0.3501468", "0.35009858", "0.34965968", "0.34955424", "0.3495039", "0.3493089", "0.34902617", "0.34893176", "0.348764", "0.34801692", "0.34746355", "0.34701946", "0.34665966", "0.34651896", "0.34555298", "0.34553277", "0.3454851", "0.34467816", "0.34343693", "0.34205124", "0.3418405", "0.3410537", "0.3407237", "0.34064367", "0.33999807", "0.33933696", "0.3393047", "0.3391234", "0.33909726", "0.3389811", "0.33895198", "0.3385384", "0.33813125", "0.33737552", "0.33718672", "0.336946", "0.3367383", "0.33661845", "0.3358601", "0.33567002", "0.33536538", "0.33520904", "0.33512", "0.33489653", "0.33443078", "0.33428854", "0.33413237", "0.33365732" ]
0.8767852
0
Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest is an autogenerated conversion function.
func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error { return autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn 
autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func (osUtils *OsUtils) GetDiskID(pubCtx map[string]string, log *zap.SugaredLogger) (string, error) {\n\tvar diskID string\n\tvar ok bool\n\tif diskID, ok = pubCtx[common.AttributeFirstClassDiskUUID]; !ok {\n\t\treturn \"\", logger.LogNewErrorCodef(log, codes.InvalidArgument,\n\t\t\t\"attribute: %s required in publish context\",\n\t\t\tcommon.AttributeFirstClassDiskUUID)\n\t}\n\treturn diskID, nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (o *StoragePhysicalDiskExtension) SetDiskId(v int64) {\n\to.DiskId = &v\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in 
*impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (o *StoragePhysicalDisk) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (o *StoragePhysicalDisk) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func DiskNumber(disk syscall.Handle, number *int64) error {\n\tvar bytes uint32\n\tdevNum := StorageDeviceNumber{}\n\tbuflen := uint32(unsafe.Sizeof(devNum.DeviceType)) + uint32(unsafe.Sizeof(devNum.DeviceNumber)) + uint32(unsafe.Sizeof(devNum.PartitionNumber))\n\n\terr := syscall.DeviceIoControl(disk, IOCTL_STORAGE_GET_DEVICE_NUMBER, nil, 0, (*byte)(unsafe.Pointer(&devNum)), buflen, &bytes, nil)\n\n\tfmt.Printf(\"devNum: %v \\n\", devNum)\n\n\tif err == nil {\n\t\t*number = int64(devNum.DeviceNumber)\n\t}\n\treturn err\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {\n\tif vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) {\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\treturn volDevPath, nil\n\t}\n\n\treturn \"\", ErrNotSupported\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {\n\ttokens := strings.Split(id, \"/\")\n\tif len(tokens) != totalIDElements {\n\t\treturn nil, fmt.Errorf(\"volume id %q unexpected format: 
got %v token(s) instead of %v\", id, len(tokens), totalIDElements)\n\t}\n\n\treturn &nfsVolume{\n\t\tid: id,\n\t\tserver: tokens[1],\n\t\tbaseDir: tokens[2],\n\t\tsubDir: tokens[3],\n\t}, nil\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *StoragePhysicalDiskExtension) GetDiskId() int64 {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (c *clustermgrClient) GetDiskInfo(ctx context.Context, diskID proto.DiskID) (ret *DiskInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\tret = &DiskInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func (o *StoragePhysicalDiskAllOf) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func 
Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func (o FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func GetVolume(volumeID string) (*apis.ZFSVolume, error) {\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).\n\t\tGet(volumeID, metav1.GetOptions{})\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o IopingSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ReplicatedVMManagedDiskOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) string { return v.DiskId }).(pulumi.StringOutput)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (o IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func NewGetVMVolumeBadRequest() *GetVMVolumeBadRequest {\n\n\treturn &GetVMVolumeBadRequest{}\n}", "func getVirtualDeviceByDiskID(ctx context.Context, vm *object.VirtualMachine, diskID string) (vim25types.BaseVirtualDevice, error) {\n\tvmname, err := vm.Common.ObjectName(ctx)\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\tvmDevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\tframework.Logf(\"Failed to get the devices for VM: %q. 
err: %+v\", vmname, err)\n\t\treturn nil, err\n\t}\n\tfor _, device := range vmDevices {\n\t\tif vmDevices.TypeName(device) == \"VirtualDisk\" {\n\t\t\tif virtualDisk, ok := device.(*vim25types.VirtualDisk); ok {\n\t\t\t\tif virtualDisk.VDiskId != nil && virtualDisk.VDiskId.Id == diskID {\n\t\t\t\t\tframework.Logf(\"Found FCDID %q attached to VM %q\", diskID, vmname)\n\t\t\t\t\treturn device, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tframework.Logf(\"Failed to find FCDID %q attached to VM %q\", diskID, vmname)\n\treturn nil, nil\n}", "func OpenDisk(filename string) (SectorDisk, error) {\n\text := strings.ToLower(path.Ext(filename))\n\tswitch ext {\n\tcase \".dsk\":\n\t\treturn LoadDSK(filename)\n\t}\n\treturn nil, fmt.Errorf(\"Unimplemented/unknown disk file extension %q\", ext)\n}", "func (s *Simple) DiskInfo(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar args CPUInfoArgs\n\tif err := req.UnmarshalArgs(&args); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif args.GuestID == \"\" {\n\t\treturn nil, nil, errors.New(\"missing guest_id\")\n\t}\n\n\tresult := &DiskInfoResult{\n\t\t&DiskInfo{\n\t\t\tDevice: \"vda1\",\n\t\t\tSize: 10 * (1024 * 1024 * 1024), // 10 GB in bytes\n\t\t},\n\t}\n\n\treturn result, nil, nil\n}", "func (o *StoragePhysicalDisk) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (d *Data) GetVolume(v dvid.VersionID, vox *Labels, supervoxels bool, scale uint8, roiname dvid.InstanceName) ([]byte, error) {\n\tr, err := imageblk.GetROI(v, roiname, vox)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.GetLabels(v, supervoxels, scale, vox, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn vox.Data(), nil\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourcePortworxVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
&v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (o IopingSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (cs *ControllerServer) getVolumeIDFromNfsVol(vol *nfsVolume) string {\n\tidElements := make([]string, totalIDElements)\n\tidElements[idServer] = strings.Trim(vol.server, \"/\")\n\tidElements[idBaseDir] = strings.Trim(vol.baseDir, \"/\")\n\tidElements[idSubDir] = strings.Trim(vol.subDir, \"/\")\n\treturn strings.Join(idElements, \"/\")\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func GetVolume(id string, name string) (*xmsv3.Volume, error) {\n\tvolume, err := xms.GetVolume(id, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn volume.Content, nil\n}", "func NewLunGetSerialNumberRequest() 
*LunGetSerialNumberRequest {\n\treturn &LunGetSerialNumberRequest{}\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func NewGetVMVolumeDefault(code int) *GetVMVolumeDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetVMVolumeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (m *Membership) GetNodeWithVolumeByID(vid string) (*client.Client, error) {\n\tm.nodesLock.RLock()\n\tdefer m.nodesLock.RUnlock()\n\tfor _, n := range m.nodes {\n\t\tif _, ok := n.meta.Volumes[vid]; ok {\n\t\t\treturn n.conn, nil\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"not found\")\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourcePortworxVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *StoragePhysicalDiskExtension) GetDiskIdOk() (*int64, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in 
*v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func (o DiskReplicaPairOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DiskReplicaPair) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, &cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func ValidateDiskNumber(disk string) error {\n\tif _, err := strconv.Atoi(disk); err != nil {\n\t\treturn fmt.Errorf(\"wrong disk number format: %q, err: %v\", disk, 
err)\n\t}\n\treturn nil\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. 
\\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (_InboxHelperTester *InboxHelperTesterCaller) RequestID(opts *bind.CallOpts, messageNum *big.Int, rollup common.Address) ([32]byte, error) {\n\tvar out []interface{}\n\terr := _InboxHelperTester.contract.Call(opts, &out, \"requestID\", messageNum, rollup)\n\n\tif err != nil {\n\t\treturn *new([32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\n\treturn out0, err\n\n}", "func DiskNUMANodeID(disk string) int {\n\tmsg := `\nThe DiskNUMANodeID() function has been DEPRECATED and will be\nremoved in the 1.0 release of ghw. Please use the Disk.NUMANodeID attribute.\n`\n\twarn(msg)\n\tctx := contextFromEnv()\n\treturn ctx.diskNUMANodeID(disk)\n}", "func decodeGetDealByDIDRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\tvars := mux.Vars(r)\n\tid, ok := vars[\"dId\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"not a valid dId\")\n\t}\n\treq := endpoint.GetDealByDIDRequest{\n\t\tId: id,\n\t}\n\treturn req, nil\n}", "func gceDiskToVolume(gceDisk *compute.Disk) *csp.Volume {\n\tv := &csp.Volume{\n\t\tCSPDomainType: CSPDomainType,\n\t\tStorageTypeName: VolTypeToCSPStorageType(gceDisk.Type),\n\t\tIdentifier: VolumeIdentifierCreate(ServiceGCE, gceDisk.Name), // GCE disk names are immutable\n\t\tType: gceDisk.Type,\n\t\tSizeBytes: gceDisk.SizeGb * units.GiB,\n\t\tRaw: gceDisk,\n\t}\n\tif i := strings.LastIndex(v.Type, \"/\"); i >= 0 {\n\t\t// volType is typically a URL in the form of volTypeURL (see gc.go), actual type is the final part of the path\n\t\tv.Type = v.Type[i+1:]\n\t}\n\tvar vps csp.VolumeProvisioningState\n\tswitch gceDisk.Status {\n\tcase 
\"CREATING\":\n\t\tfallthrough\n\tcase \"RESTORING\":\n\t\tvps = csp.VolumeProvisioningProvisioning\n\tcase \"READY\":\n\t\tvps = csp.VolumeProvisioningProvisioned\n\tcase \"DELETING\":\n\t\tvps = csp.VolumeProvisioningUnprovisioning\n\tcase \"FAILED\":\n\t\tvps = csp.VolumeProvisioningError\n\t}\n\tv.ProvisioningState = vps\n\tv.Tags = gceLabelsToModel(gceDisk.Labels)\n\tv.Attachments = make([]csp.VolumeAttachment, len(gceDisk.Users))\n\tfor i, user := range gceDisk.Users {\n\t\tif i := strings.LastIndex(user, \"/\"); i >= 0 { // format: projects/project/zones/zone/instances/instance\n\t\t\tuser = user[i+1:]\n\t\t}\n\t\tv.Attachments[i] = csp.VolumeAttachment{\n\t\t\tNodeIdentifier: user,\n\t\t\tDevice: fmt.Sprintf(diskPathFormat, gceDisk.Name),\n\t\t\tState: csp.VolumeAttachmentAttached, // GCE does not track this outside an active compute.Operation so assume attached\n\t\t}\n\t}\n\treturn v\n}", "func (d *VolumeDriver) GetVolume(name string) (map[string]interface{}, error) {\n\tvar statusMap map[string]interface{}\n\tstatusMap = make(map[string]interface{})\n\tlog.Errorf(\"VolumeDriver GetVolume to be implemented\")\n\treturn statusMap, nil\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (o SnapshotOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Snapshot) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func NewGetDepositByIdRequest(server string, depositId string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParamWithLocation(\"simple\", false, \"depositId\", runtime.ParamLocationPath, depositId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/deposits/%s\", pathParam0)\n\tif operationPath[0] == '/' {\n\t\toperationPath = 
operationPath[1:]\n\t}\n\toperationURL := url.URL{\n\t\tPath: operationPath,\n\t}\n\n\tqueryURL := serverURL.ResolveReference(&operationURL)\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (d Disk) GetID() string {\n\treturn d.Serial\n}", "func (s *Stack) GetVolume(id string) (*resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif id == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"id\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", id), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tr := volumesv2.Get(s.VolumeClient, id)\n\tvolume, err := r.Extract()\n\tif err != nil {\n\t\tif _, ok := err.(gc.ErrDefault404); ok {\n\t\t\treturn nil, resources.ResourceNotFoundError(\"volume\", id)\n\t\t}\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error getting volume: %s\", ProviderErrorToString(err)))\n\t}\n\n\tav := resources.Volume{\n\t\tID: volume.ID,\n\t\tName: volume.Name,\n\t\tSize: volume.Size,\n\t\tSpeed: s.getVolumeSpeed(volume.VolumeType),\n\t\tState: toVolumeState(volume.Status),\n\t}\n\treturn &av, nil\n}", "func 
Convert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in *v1beta1.VolumeDiskNumberResponse, out *internal.VolumeDiskNumberResponse) error {\n\treturn autoConvert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in, out)\n}", "func decodeGetKeyPersonByIDRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*pb.GetKeyPersonByIDRequest)\n\treturn endpoints.GetKeyPersonByIDRequest{ID: req.Id}, nil\n}", "func (o *QtreeCreateRequest) Volume() string {\n\tvar r string\n\tif o.VolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.VolumePtr\n\treturn r\n}", "func (digitalocean DigitalOcean) GetVolume(id string) (*godo.Volume, error) {\n\tdoc, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolume, _, err := doc.client.Storage.GetVolume(doc.context, id)\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t}\n\n\treturn volume, err\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}" ]
[ "0.78430533", "0.65811366", "0.6314425", "0.5931806", "0.584158", "0.5502955", "0.5465722", "0.5376929", "0.53634536", "0.50806665", "0.49934435", "0.4913398", "0.48624837", "0.48392114", "0.48272622", "0.4803466", "0.48016518", "0.47957292", "0.47828266", "0.4764811", "0.4745335", "0.47386298", "0.4704475", "0.46836257", "0.46836257", "0.46689767", "0.46635148", "0.46542692", "0.46483314", "0.4628247", "0.4622995", "0.46229538", "0.46194363", "0.46057174", "0.45711517", "0.457068", "0.45552096", "0.4535177", "0.4522693", "0.45177802", "0.4503275", "0.44853324", "0.44569534", "0.44039303", "0.44039303", "0.439674", "0.437468", "0.4353695", "0.43480948", "0.43416724", "0.43326515", "0.430825", "0.43072858", "0.4303891", "0.43036094", "0.4296138", "0.4295488", "0.42884701", "0.42695212", "0.42491248", "0.4233909", "0.42229167", "0.42180786", "0.4213799", "0.4204681", "0.4196894", "0.4186364", "0.418356", "0.41799712", "0.41778275", "0.417034", "0.41656217", "0.4163054", "0.41467217", "0.41428095", "0.41423097", "0.41285917", "0.41277596", "0.41246632", "0.41234136", "0.4104133", "0.4103878", "0.4100203", "0.4098997", "0.40975654", "0.4094176", "0.4094019", "0.4089744", "0.40817502", "0.40653792", "0.40648168", "0.4064561", "0.40455437", "0.4044258", "0.40418386", "0.4039501", "0.40312344", "0.4028603", "0.4017774", "0.40120494" ]
0.84491986
0
Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest is an autogenerated conversion function.
func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error { return autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func 
Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func 
Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, 
out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func DiskNumber(disk syscall.Handle, number *int64) error {\n\tvar bytes uint32\n\tdevNum := StorageDeviceNumber{}\n\tbuflen := uint32(unsafe.Sizeof(devNum.DeviceType)) + uint32(unsafe.Sizeof(devNum.DeviceNumber)) + uint32(unsafe.Sizeof(devNum.PartitionNumber))\n\n\terr := syscall.DeviceIoControl(disk, IOCTL_STORAGE_GET_DEVICE_NUMBER, nil, 0, (*byte)(unsafe.Pointer(&devNum)), buflen, &bytes, nil)\n\n\tfmt.Printf(\"devNum: %v \\n\", devNum)\n\n\tif err == nil {\n\t\t*number = int64(devNum.DeviceNumber)\n\t}\n\treturn err\n}", "func (c *clustermgrClient) GetDiskInfo(ctx context.Context, diskID proto.DiskID) (ret *DiskInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], 
err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\tret = &DiskInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func NewLunGetSerialNumberRequest() *LunGetSerialNumberRequest {\n\treturn &LunGetSerialNumberRequest{}\n}", "func (osUtils *OsUtils) GetDiskID(pubCtx map[string]string, log *zap.SugaredLogger) (string, error) {\n\tvar diskID string\n\tvar ok bool\n\tif diskID, ok = pubCtx[common.AttributeFirstClassDiskUUID]; !ok {\n\t\treturn \"\", logger.LogNewErrorCodef(log, codes.InvalidArgument,\n\t\t\t\"attribute: %s required in publish context\",\n\t\t\tcommon.AttributeFirstClassDiskUUID)\n\t}\n\treturn diskID, nil\n}", "func GetVolume(volumeID string) (*apis.ZFSVolume, error) {\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).\n\t\tGet(volumeID, metav1.GetOptions{})\n}", "func NewGetVMVolumeBadRequest() *GetVMVolumeBadRequest {\n\n\treturn &GetVMVolumeBadRequest{}\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {\n\tif vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) {\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\treturn volDevPath, nil\n\t}\n\n\treturn \"\", ErrNotSupported\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func ValidateDiskNumber(disk string) error {\n\tif _, err := strconv.Atoi(disk); err != nil {\n\t\treturn fmt.Errorf(\"wrong 
disk number format: %q, err: %v\", disk, err)\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {\n\ttokens := strings.Split(id, \"/\")\n\tif len(tokens) != totalIDElements {\n\t\treturn nil, fmt.Errorf(\"volume id %q unexpected format: got %v token(s) instead of %v\", id, len(tokens), totalIDElements)\n\t}\n\n\treturn &nfsVolume{\n\t\tid: id,\n\t\tserver: tokens[1],\n\t\tbaseDir: tokens[2],\n\t\tsubDir: tokens[3],\n\t}, nil\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func (o *StoragePhysicalDiskExtension) SetDiskId(v int64) {\n\to.DiskId = &v\n}", "func OpenDisk(filename string) (SectorDisk, error) {\n\text := strings.ToLower(path.Ext(filename))\n\tswitch ext {\n\tcase \".dsk\":\n\t\treturn LoadDSK(filename)\n\t}\n\treturn nil, fmt.Errorf(\"Unimplemented/unknown disk file extension %q\", ext)\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := 
volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. \\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func (o ReplicatedVMManagedDiskOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) string { return v.DiskId }).(pulumi.StringOutput)\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *StoragePhysicalDisk) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v1alpha4_ManagedDisk_To_v1alpha3_ManagedDisk(in *v1alpha4.ManagedDisk, out *ManagedDisk, s apiconversion.Scope) error {\n\treturn autoConvert_v1alpha4_ManagedDisk_To_v1alpha3_ManagedDisk(in, out, s)\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (o *StoragePhysicalDisk) 
GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (s *Simple) DiskInfo(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar args CPUInfoArgs\n\tif err := req.UnmarshalArgs(&args); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif args.GuestID == \"\" {\n\t\treturn nil, nil, errors.New(\"missing guest_id\")\n\t}\n\n\tresult := &DiskInfoResult{\n\t\t&DiskInfo{\n\t\t\tDevice: \"vda1\",\n\t\t\tSize: 10 * (1024 * 1024 * 1024), // 10 GB in bytes\n\t\t},\n\t}\n\n\treturn result, nil, nil\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func DiskNUMANodeID(disk string) int {\n\tmsg := `\nThe DiskNUMANodeID() function has been DEPRECATED and will be\nremoved in the 1.0 release of ghw. Please use the Disk.NUMANodeID attribute.\n`\n\twarn(msg)\n\tctx := contextFromEnv()\n\treturn ctx.diskNUMANodeID(disk)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (d *Data) GetVolume(v dvid.VersionID, vox *Labels, supervoxels bool, scale uint8, roiname dvid.InstanceName) ([]byte, error) {\n\tr, err := imageblk.GetROI(v, roiname, vox)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.GetLabels(v, supervoxels, scale, vox, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn vox.Data(), nil\n}", "func (c *Controller) GetVolumeName(getVolumeNameRequest k8sresources.FlexVolumeGetVolumeNameRequest) k8sresources.FlexVolumeResponse 
{\n\tc.logger.Println(\"controller-isAttached-start\")\n\tdefer c.logger.Println(\"controller-isAttached-end\")\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func (o *StoragePhysicalDisk) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func NewGetDepositByIdRequest(server string, depositId string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParamWithLocation(\"simple\", false, \"depositId\", runtime.ParamLocationPath, depositId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/deposits/%s\", pathParam0)\n\tif operationPath[0] == '/' {\n\t\toperationPath = operationPath[1:]\n\t}\n\toperationURL := url.URL{\n\t\tPath: operationPath,\n\t}\n\n\tqueryURL := serverURL.ResolveReference(&operationURL)\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func decodeGetDealByDIDRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\tvars := mux.Vars(r)\n\tid, ok := vars[\"dId\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"not a valid dId\")\n\t}\n\treq := endpoint.GetDealByDIDRequest{\n\t\tId: id,\n\t}\n\treturn req, nil\n}", "func (o 
IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (_InboxHelperTester *InboxHelperTesterCaller) RequestID(opts *bind.CallOpts, messageNum *big.Int, rollup common.Address) ([32]byte, error) {\n\tvar out []interface{}\n\terr := _InboxHelperTester.contract.Call(opts, &out, \"requestID\", messageNum, rollup)\n\n\tif err != nil {\n\t\treturn *new([32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\n\treturn out0, err\n\n}", "func (svc *Service) getRequestByEncryptedID(requestIDEncoded string) (types.WhitelistRequest, int, error) {\n\tlog := svc.logger\n\trequestID, err := utils.DecodeAndDecrypt(requestIDEncoded, viper.GetString(\"passphrase\"))\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t\t\"urlParam\": requestIDEncoded,\n\t\t}).Warn(\"Unable to decode requestID token\")\n\t\treturn types.WhitelistRequest{}, http.StatusBadRequest, errors.New(\"Unable to decode token\")\n\t}\n\n\t_id, _ := primitive.ObjectIDFromHex(string(requestID))\n\trequests, err := svc.dbService.GetRequests(1, bson.D{{\"_id\", _id}})\n\tif err != nil {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t\t\"requestID\": requestID,\n\t\t}).Error(\"Unable to get reqeuest by ID\")\n\t\treturn types.WhitelistRequest{}, http.StatusInternalServerError, errors.New(\"Unable to get reqeuest by ID\")\n\t}\n\tif len(requests) == 0 {\n\t\treturn types.WhitelistRequest{}, http.StatusBadRequest, errors.New(\"Resource not found\")\n\t}\n\trequest := requests[0]\n\treturn request, http.StatusOK, nil\n}", "func (o 
IopingSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *StoragePhysicalDiskAllOf) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *StoragePhysicalDiskExtension) GetDiskId() int64 {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func decodeGetKeyPersonByIDRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*pb.GetKeyPersonByIDRequest)\n\treturn endpoints.GetKeyPersonByIDRequest{ID: req.Id}, nil\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func (o *StoragePhysicalDiskExtension) GetDiskIdOk() (*int64, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (o DiskReplicaPairOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DiskReplicaPair) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func (o 
FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, 
e\n\t}\n\treturn resp, nil\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func (o IopingSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func IDLT(id int) predicate.DeviceRequest {\n\treturn predicate.DeviceRequest(sql.FieldLT(FieldID, id))\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func GetVolumeV2(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *VolumeV2State, opts ...pulumi.ResourceOption) (*VolumeV2, error) {\n\tvar resource VolumeV2\n\terr := ctx.ReadResource(\"openstack:blockstorage/volumeV2:VolumeV2\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o 
IopingSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func gceDiskToVolume(gceDisk *compute.Disk) *csp.Volume {\n\tv := &csp.Volume{\n\t\tCSPDomainType: CSPDomainType,\n\t\tStorageTypeName: VolTypeToCSPStorageType(gceDisk.Type),\n\t\tIdentifier: VolumeIdentifierCreate(ServiceGCE, gceDisk.Name), // GCE disk names are immutable\n\t\tType: gceDisk.Type,\n\t\tSizeBytes: gceDisk.SizeGb * units.GiB,\n\t\tRaw: gceDisk,\n\t}\n\tif i := strings.LastIndex(v.Type, \"/\"); i >= 0 {\n\t\t// volType is typically a URL in the form of volTypeURL (see gc.go), actual type is the final part of the path\n\t\tv.Type = v.Type[i+1:]\n\t}\n\tvar vps csp.VolumeProvisioningState\n\tswitch gceDisk.Status {\n\tcase \"CREATING\":\n\t\tfallthrough\n\tcase \"RESTORING\":\n\t\tvps = csp.VolumeProvisioningProvisioning\n\tcase \"READY\":\n\t\tvps = csp.VolumeProvisioningProvisioned\n\tcase \"DELETING\":\n\t\tvps = csp.VolumeProvisioningUnprovisioning\n\tcase \"FAILED\":\n\t\tvps = csp.VolumeProvisioningError\n\t}\n\tv.ProvisioningState = vps\n\tv.Tags = gceLabelsToModel(gceDisk.Labels)\n\tv.Attachments = make([]csp.VolumeAttachment, len(gceDisk.Users))\n\tfor i, user := range gceDisk.Users {\n\t\tif i := strings.LastIndex(user, \"/\"); i >= 0 { // format: projects/project/zones/zone/instances/instance\n\t\t\tuser = user[i+1:]\n\t\t}\n\t\tv.Attachments[i] = csp.VolumeAttachment{\n\t\t\tNodeIdentifier: user,\n\t\t\tDevice: fmt.Sprintf(diskPathFormat, gceDisk.Name),\n\t\t\tState: csp.VolumeAttachmentAttached, // GCE does not track this outside an active compute.Operation so assume attached\n\t\t}\n\t}\n\treturn v\n}", "func NewBlockIDRequest(blockID iotago.BlockID, msIndex iotago.MilestoneIndex) *Request {\n\treturn &Request{RequestType: RequestTypeBlockID, BlockID: blockID, MilestoneIndex: msIndex}\n}", "func 
NewGetVMVolumeDefault(code int) *GetVMVolumeDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetVMVolumeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func getVolumeAndAttachmentByVolumeId(volId string) (*model.VolumeSpec, *model.VolumeAttachmentSpec, error) {\n\tif r := getReplicationByVolume(volId); r != nil {\n\t\tvolId = r.Metadata[KAttachedVolumeId]\n\t}\n\n\tvol, err := client.GetVolume(volId)\n\tif nil != err || nil == vol {\n\t\tmsg := fmt.Sprintf(\"volume does not exist, %v\", err)\n\t\tglog.Error(msg)\n\t\treturn nil, nil, status.Error(codes.NotFound, msg)\n\t}\n\n\tattachments, err := client.ListVolumeAttachments()\n\tif nil != err {\n\t\tmsg := fmt.Sprintf(\"list volume attachments failed, %v\", err)\n\t\tglog.Error(msg)\n\t\treturn nil, nil, status.Error(codes.NotFound, msg)\n\t}\n\n\tvar attachment *model.VolumeAttachmentSpec\n\n\thostName, err := connector.GetHostName()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"faild to get host name %v\", err)\n\t\tglog.Error(msg)\n\t\treturn nil, nil, status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\tfor _, attach := range attachments {\n\t\tif attach.VolumeId == volId && attach.Host == hostName {\n\t\t\tattachment = attach\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif attachment == nil {\n\t\tmsg := fmt.Sprintf(\"attachment is not exist\")\n\t\tglog.Error(msg)\n\t\treturn nil, nil, status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\treturn vol, attachment, nil\n}", "func GetVolume(id string, name string) (*xmsv3.Volume, error) {\n\tvolume, err := xms.GetVolume(id, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn volume.Content, nil\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in 
*v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}" ]
[ "0.7107712", "0.68261075", "0.66954076", "0.5990659", "0.597668", "0.5967294", "0.58322895", "0.56966877", "0.5631915", "0.5500076", "0.5259046", "0.5254814", "0.5236093", "0.5232025", "0.5219707", "0.52102387", "0.5059303", "0.50411695", "0.49812785", "0.4970905", "0.4818848", "0.46827525", "0.455881", "0.45542553", "0.4492583", "0.4448769", "0.44415504", "0.44260567", "0.44163758", "0.43930942", "0.43764225", "0.43432128", "0.43326584", "0.43288207", "0.43236834", "0.4315377", "0.42922297", "0.42805147", "0.42790073", "0.4269688", "0.42658824", "0.42563912", "0.4246003", "0.4229873", "0.42234954", "0.42234954", "0.4222434", "0.4174081", "0.41722482", "0.41606954", "0.4136187", "0.413456", "0.4134517", "0.41301656", "0.41293615", "0.41288224", "0.41143903", "0.41074282", "0.41058522", "0.41038957", "0.40904662", "0.40752587", "0.40664384", "0.4064076", "0.40439847", "0.40401208", "0.4038562", "0.40355834", "0.40333787", "0.40255213", "0.40222633", "0.40219632", "0.40218994", "0.402006", "0.402006", "0.4015342", "0.39830756", "0.39824632", "0.39795646", "0.39737523", "0.39677775", "0.3967352", "0.39544985", "0.3954453", "0.39486167", "0.3941456", "0.39386308", "0.3937799", "0.39163035", "0.39039388", "0.39011937", "0.38995722", "0.38990283", "0.38946208", "0.38913816", "0.3889698", "0.38876092", "0.38845396", "0.3884349", "0.38741127" ]
0.82834536
0
Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse is an autogenerated conversion function.
func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error { return autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func Convert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in *v1beta1.VolumeDiskNumberResponse, out *internal.VolumeDiskNumberResponse) error {\n\treturn autoConvert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error 
{\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (o *StoragePhysicalDisk) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {\n\ttokens := strings.Split(id, \"/\")\n\tif len(tokens) != totalIDElements {\n\t\treturn nil, fmt.Errorf(\"volume id %q unexpected format: got %v token(s) instead of %v\", id, len(tokens), totalIDElements)\n\t}\n\n\treturn &nfsVolume{\n\t\tid: id,\n\t\tserver: tokens[1],\n\t\tbaseDir: tokens[2],\n\t\tsubDir: tokens[3],\n\t}, nil\n}", "func (o *StoragePhysicalDiskExtension) SetDiskId(v int64) 
{\n\to.DiskId = &v\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o *StoragePhysicalDisk) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (o ReplicatedVMManagedDiskOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) string { return v.DiskId }).(pulumi.StringOutput)\n}", "func (c *clustermgrClient) GetDiskInfo(ctx context.Context, diskID proto.DiskID) (ret *DiskInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\tret = &DiskInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID 
}).(pulumi.StringOutput)\n}", "func (o *StoragePhysicalDiskExtension) GetDiskId() int64 {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func GetVolume(volumeID string) (*apis.ZFSVolume, error) {\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).\n\t\tGet(volumeID, metav1.GetOptions{})\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (o IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (o *StoragePhysicalDisk) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func (o DiskReplicaPairOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DiskReplicaPair) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func 
Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *StoragePhysicalDiskAllOf) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {\n\tif vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) {\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\treturn volDevPath, nil\n\t}\n\n\treturn \"\", ErrNotSupported\n}", "func (o *StoragePhysicalDiskExtension) GetDiskIdOk() (*int64, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (digitalocean DigitalOcean) GetVolume(id string) 
(*godo.Volume, error) {\n\tdoc, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolume, _, err := doc.client.Storage.GetVolume(doc.context, id)\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t}\n\n\treturn volume, err\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (o IopingSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (d *Data) GetVolume(v dvid.VersionID, vox *Labels, supervoxels bool, scale uint8, roiname dvid.InstanceName) ([]byte, error) {\n\tr, err := imageblk.GetROI(v, roiname, vox)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.GetLabels(v, supervoxels, scale, vox, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn vox.Data(), nil\n}", "func (o SnapshotOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Snapshot) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (o LookupOpenZfsSnapshotResultOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOpenZfsSnapshotResult) string { return v.VolumeId }).(pulumi.StringOutput)\n}", "func GetVolume(id string, name string) (*xmsv3.Volume, error) {\n\tvolume, err := xms.GetVolume(id, name)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn volume.Content, nil\n}", "func DiskNumber(disk syscall.Handle, number *int64) error {\n\tvar bytes uint32\n\tdevNum := StorageDeviceNumber{}\n\tbuflen := uint32(unsafe.Sizeof(devNum.DeviceType)) + uint32(unsafe.Sizeof(devNum.DeviceNumber)) + uint32(unsafe.Sizeof(devNum.PartitionNumber))\n\n\terr := syscall.DeviceIoControl(disk, IOCTL_STORAGE_GET_DEVICE_NUMBER, nil, 0, (*byte)(unsafe.Pointer(&devNum)), buflen, &bytes, nil)\n\n\tfmt.Printf(\"devNum: %v \\n\", devNum)\n\n\tif err == nil {\n\t\t*number = int64(devNum.DeviceNumber)\n\t}\n\treturn err\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (client *Client) GetVolume(id string) (*api.Volume, error) {\n\tvol, err := volumes.Get(client.Volume, id).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting volume: %s\", ProviderErrorToString(err))\n\t}\n\tav := api.Volume{\n\t\tID: vol.ID,\n\t\tName: vol.Name,\n\t\tSize: vol.Size,\n\t\tSpeed: client.getVolumeSpeed(vol.VolumeType),\n\t\tState: toVolumeState(vol.Status),\n\t}\n\treturn &av, nil\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourcePortworxVolume) *string {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (osUtils *OsUtils) GetDiskID(pubCtx map[string]string, log *zap.SugaredLogger) (string, error) {\n\tvar diskID string\n\tvar ok bool\n\tif diskID, ok = pubCtx[common.AttributeFirstClassDiskUUID]; !ok {\n\t\treturn \"\", logger.LogNewErrorCodef(log, codes.InvalidArgument,\n\t\t\t\"attribute: %s required in publish context\",\n\t\t\tcommon.AttributeFirstClassDiskUUID)\n\t}\n\treturn diskID, nil\n}", "func (s *Stack) GetVolume(id string) (*resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif id == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"id\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", id), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tr := volumesv2.Get(s.VolumeClient, id)\n\tvolume, err := r.Extract()\n\tif err != nil {\n\t\tif _, ok := err.(gc.ErrDefault404); ok {\n\t\t\treturn nil, resources.ResourceNotFoundError(\"volume\", id)\n\t\t}\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error getting volume: %s\", ProviderErrorToString(err)))\n\t}\n\n\tav := resources.Volume{\n\t\tID: volume.ID,\n\t\tName: volume.Name,\n\t\tSize: volume.Size,\n\t\tSpeed: s.getVolumeSpeed(volume.VolumeType),\n\t\tState: toVolumeState(volume.Status),\n\t}\n\treturn &av, nil\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, &cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, 
err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func (v *VolumeService) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(volumeID) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound\n\tif strings.Contains(volumeID, \"notfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// check if the volume is not-found\n\tif strings.Contains(volumeID, \"not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: \"local\",\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: volumeID,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func (s *OsdCsiServer) ControllerGetVolume(\n\tctx context.Context,\n\treq *csi.ControllerGetVolumeRequest,\n) (*csi.ControllerGetVolumeResponse, error) {\n\n\tclogger.WithContext(ctx).Tracef(\"ControllerGetVolume request received. 
VolumeID: %s\", req.GetVolumeId())\n\n\tvol, err := s.driverGetVolume(ctx, req.GetVolumeId())\n\tif err != nil {\n\t\tif s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {\n\t\t\treturn &csi.ControllerGetVolumeResponse{\n\t\t\t\tVolume: &csi.Volume{\n\t\t\t\t\tVolumeId: req.GetVolumeId(),\n\t\t\t\t},\n\t\t\t\tStatus: &csi.ControllerGetVolumeResponse_VolumeStatus{\n\t\t\t\t\tVolumeCondition: &csi.VolumeCondition{\n\t\t\t\t\t\tAbnormal: true,\n\t\t\t\t\t\tMessage: fmt.Sprintf(\"Volume ID %s not found\", req.GetVolumeId()),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn &csi.ControllerGetVolumeResponse{\n\t\tVolume: &csi.Volume{\n\t\t\tCapacityBytes: int64(vol.Spec.Size),\n\t\t\tVolumeId: vol.Id,\n\t\t},\n\t\tStatus: &csi.ControllerGetVolumeResponse_VolumeStatus{\n\t\t\tVolumeCondition: getVolumeCondition(vol),\n\t\t},\n\t}, nil\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourcePortworxVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs 
{\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. \\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func getVirtualDeviceByDiskID(ctx context.Context, vm *object.VirtualMachine, diskID string) (vim25types.BaseVirtualDevice, error) {\n\tvmname, err := vm.Common.ObjectName(ctx)\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\tvmDevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\tframework.Logf(\"Failed to get the devices for VM: %q. 
err: %+v\", vmname, err)\n\t\treturn nil, err\n\t}\n\tfor _, device := range vmDevices {\n\t\tif vmDevices.TypeName(device) == \"VirtualDisk\" {\n\t\t\tif virtualDisk, ok := device.(*vim25types.VirtualDisk); ok {\n\t\t\t\tif virtualDisk.VDiskId != nil && virtualDisk.VDiskId.Id == diskID {\n\t\t\t\t\tframework.Logf(\"Found FCDID %q attached to VM %q\", diskID, vmname)\n\t\t\t\t\treturn device, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tframework.Logf(\"Failed to find FCDID %q attached to VM %q\", diskID, vmname)\n\treturn nil, nil\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (d *VolumeDriver) GetVolume(name string) (map[string]interface{}, error) {\n\tvar statusMap map[string]interface{}\n\tstatusMap = 
make(map[string]interface{})\n\tlog.Errorf(\"VolumeDriver GetVolume to be implemented\")\n\treturn statusMap, nil\n}", "func (r *RPCTractserverTalker) GetDiskInfo(ctx context.Context, addr string) ([]core.FsStatus, core.Error) {\n\treq := core.GetDiskInfoReq{}\n\tvar reply core.GetDiskInfoReply\n\tif err := r.cc.Send(ctx, addr, core.GetDiskInfoMethod, req, &reply); err != nil {\n\t\tlog.Errorf(\"GetDiskInfo RPC error on tractserver %s: %s\", addr, err)\n\t\treturn nil, core.ErrRPC\n\t}\n\tif reply.Err != core.NoError {\n\t\tlog.Errorf(\"GetDiskInfo error on tractserver %s: %s\", addr, reply.Err)\n\t}\n\treturn reply.Disks, reply.Err\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (cs *ControllerServer) getVolumeIDFromNfsVol(vol *nfsVolume) string {\n\tidElements := make([]string, totalIDElements)\n\tidElements[idServer] = strings.Trim(vol.server, \"/\")\n\tidElements[idBaseDir] = strings.Trim(vol.baseDir, \"/\")\n\tidElements[idSubDir] = strings.Trim(vol.subDir, \"/\")\n\treturn strings.Join(idElements, \"/\")\n}", "func getSmbVolFromID(id string) (*smbVolume, error) {\n\tsegments := strings.Split(id, separator)\n\tif len(segments) < 2 {\n\t\treturn nil, fmt.Errorf(\"could not split %q into server and subDir\", id)\n\t}\n\tsource := segments[0]\n\tif !strings.HasPrefix(segments[0], \"//\") {\n\t\tsource = \"//\" + source\n\t}\n\tvol := &smbVolume{\n\t\tid: id,\n\t\tsource: source,\n\t\tsubDir: segments[1],\n\t}\n\tif len(segments) >= 3 {\n\t\tvol.uuid = segments[2]\n\t}\n\treturn vol, nil\n}", "func (o IopingSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func 
Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (a *Client) GetContainersUUIDVolumesVolumeUUID(params *GetContainersUUIDVolumesVolumeUUIDParams, authInfo runtime.ClientAuthInfoWriter) (*GetContainersUUIDVolumesVolumeUUIDOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetContainersUUIDVolumesVolumeUUIDParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetContainersUUIDVolumesVolumeUUID\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/containers/{uuid}/volumes/{volume_uuid}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetContainersUUIDVolumesVolumeUUIDReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetContainersUUIDVolumesVolumeUUIDOK), nil\n\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (d *driver) GetVolumeName(id *csi.VolumeID) (string, error) {\n\tidVal, ok := id.Values[\"id\"]\n\tif !ok {\n\t\treturn \"\", errMissingIDKeyPath\n\t}\n\n\topts := &apitypes.VolumeInspectOpts{\n\t\tOpts: apiutils.NewStore(),\n\t}\n\n\tvol, err := 
d.client.Storage().VolumeInspect(d.ctx, idVal, opts)\n\tif err != nil {\n\n\t\t// If the volume is not found then return an empty string\n\t\t// for the name to indicate such.\n\t\tif isNotFoundErr(err) {\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\treturn \"\", err\n\t}\n\n\treturn vol.Name, nil\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn 
nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn 
nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (s *Simple) DiskInfo(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar args CPUInfoArgs\n\tif err := req.UnmarshalArgs(&args); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif args.GuestID == \"\" {\n\t\treturn nil, nil, errors.New(\"missing guest_id\")\n\t}\n\n\tresult := &DiskInfoResult{\n\t\t&DiskInfo{\n\t\t\tDevice: \"vda1\",\n\t\t\tSize: 10 * (1024 * 1024 * 1024), // 10 GB in bytes\n\t\t},\n\t}\n\n\treturn result, nil, nil\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func (d Disk) GetID() string {\n\treturn d.Serial\n}", "func GetBootDiskID(client *ovc.Client, id int) (int, error) {\n\tmachineInfo, err := client.Machines.Get(id)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfor _, disk := range machineInfo.Disks {\n\t\tif disk.Type == \"B\" {\n\t\t\treturn disk.ID, nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"Machine %s has no boot disk\", machineInfo.Name)\n}", "func (c *Core) GetVolume(id types.VolumeID) (*types.Volume, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\treturn c.getVolume(id)\n}", "func (d *driver) GetVolumeInfo(name string) (*csi.VolumeInfo, error) {\n\ttd, ok := 
d.client.Storage().(apitypes.StorageDriverVolInspectByName)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"stor driver not by name: %T\", d.client.Storage())\n\t}\n\n\topts := &apitypes.VolumeInspectOpts{\n\t\tOpts: apiutils.NewStore(),\n\t}\n\n\tvol, err := td.VolumeInspectByName(d.ctx, name, opts)\n\tif err != nil {\n\n\t\t// If the volume is not found then return nil for the\n\t\t// volume info to indicate such.\n\t\tif isNotFoundErr(err) {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn toVolumeInfo(vol), nil\n}", "func GetVolumesFromNASIDV2(id string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?nas.export_policy.id=\" + id\n\treturn getVolumesV2(query)\n}", "func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}" ]
[ "0.77099955", "0.708421", "0.6664196", "0.5974895", "0.5782266", "0.5770552", "0.5733449", "0.5544065", "0.55222964", "0.52940583", "0.5191995", "0.5148249", "0.5148249", "0.50481355", "0.50211746", "0.5013893", "0.50044495", "0.5000257", "0.49990043", "0.49971843", "0.49969724", "0.49933052", "0.49814254", "0.49626434", "0.49429274", "0.4925776", "0.49101052", "0.48720306", "0.48629916", "0.48611167", "0.48396367", "0.48342463", "0.483326", "0.48275095", "0.4819454", "0.48083124", "0.47994176", "0.47844493", "0.47769237", "0.47769237", "0.47601822", "0.47592425", "0.47583726", "0.4738563", "0.47380033", "0.47279033", "0.47171882", "0.4716873", "0.47007445", "0.46991327", "0.46511146", "0.46492645", "0.46461573", "0.4631865", "0.46186095", "0.46177804", "0.46127072", "0.46071002", "0.46015877", "0.45909885", "0.45496687", "0.4535918", "0.4524044", "0.4510038", "0.45043647", "0.44966218", "0.44958362", "0.44753045", "0.44646505", "0.44606602", "0.44567335", "0.44516882", "0.4422281", "0.44148123", "0.44136566", "0.44120649", "0.44035232", "0.43920186", "0.4388512", "0.43690413", "0.43679506", "0.43649217", "0.43406639", "0.43374738", "0.43308374", "0.43200204", "0.43036044", "0.42967418", "0.42959884", "0.42934406", "0.4289801", "0.428677", "0.42856085", "0.42831326", "0.4261426", "0.4256861", "0.42557627", "0.42534348", "0.42470855", "0.42382005" ]
0.82611114
0
Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse is an autogenerated conversion function.
func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error { return autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn 
autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in 
*internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in *v1beta1.VolumeDiskNumberResponse, out *internal.VolumeDiskNumberResponse) error {\n\treturn autoConvert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in, out)\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o 
ReplicatedVMManagedDiskOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) string { return v.DiskId }).(pulumi.StringOutput)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func GetVolume(volumeID string) (*apis.ZFSVolume, error) {\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).\n\t\tGet(volumeID, metav1.GetOptions{})\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (c *clustermgrClient) GetDiskInfo(ctx context.Context, diskID proto.DiskID) (ret *DiskInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfo, err := c.client.DiskInfo(ctx, diskID)\n\tif 
err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\tret = &DiskInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func (o DiskReplicaPairOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DiskReplicaPair) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. 
\\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {\n\ttokens := strings.Split(id, \"/\")\n\tif len(tokens) != totalIDElements {\n\t\treturn nil, fmt.Errorf(\"volume id %q unexpected format: got %v token(s) instead of %v\", id, len(tokens), totalIDElements)\n\t}\n\n\treturn &nfsVolume{\n\t\tid: id,\n\t\tserver: tokens[1],\n\t\tbaseDir: tokens[2],\n\t\tsubDir: tokens[3],\n\t}, nil\n}", "func (o *StoragePhysicalDisk) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (o *StoragePhysicalDiskExtension) GetDiskIdOk() (*int64, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (o LookupOpenZfsSnapshotResultOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOpenZfsSnapshotResult) string { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
IopingSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o *StoragePhysicalDisk) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (o IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o *StoragePhysicalDiskExtension) SetDiskId(v int64) {\n\to.DiskId = &v\n}", "func (d *Data) GetVolume(v dvid.VersionID, vox *Labels, supervoxels bool, scale uint8, roiname dvid.InstanceName) ([]byte, error) {\n\tr, err := imageblk.GetROI(v, roiname, vox)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.GetLabels(v, supervoxels, scale, vox, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn vox.Data(), nil\n}", "func (o *StoragePhysicalDisk) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskIdOk() (*string, bool) {\n\tif o == nil || o.DiskId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DiskId, true\n}", "func (digitalocean 
DigitalOcean) GetVolume(id string) (*godo.Volume, error) {\n\tdoc, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolume, _, err := doc.client.Storage.GetVolume(doc.context, id)\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t}\n\n\treturn volume, err\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func GetVolumeV2(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *VolumeV2State, opts ...pulumi.ResourceOption) (*VolumeV2, error) {\n\tvar resource VolumeV2\n\terr := ctx.ReadResource(\"openstack:blockstorage/volumeV2:VolumeV2\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client *Client) GetVolume(id string) (*api.Volume, error) {\n\tvol, err := volumes.Get(client.Volume, id).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting volume: %s\", ProviderErrorToString(err))\n\t}\n\tav := api.Volume{\n\t\tID: vol.ID,\n\t\tName: vol.Name,\n\t\tSize: vol.Size,\n\t\tSpeed: client.getVolumeSpeed(vol.VolumeType),\n\t\tState: toVolumeState(vol.Status),\n\t}\n\treturn &av, nil\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn 
autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func GetVolumesFromNASIDV2(id string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?nas.export_policy.id=\" + id\n\treturn getVolumesV2(query)\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func (s *Stack) GetVolume(id string) (*resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif id == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"id\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", id), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tr := volumesv2.Get(s.VolumeClient, id)\n\tvolume, err := r.Extract()\n\tif err != nil {\n\t\tif _, ok := err.(gc.ErrDefault404); ok {\n\t\t\treturn nil, resources.ResourceNotFoundError(\"volume\", id)\n\t\t}\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error getting volume: %s\", ProviderErrorToString(err)))\n\t}\n\n\tav := resources.Volume{\n\t\tID: volume.ID,\n\t\tName: volume.Name,\n\t\tSize: volume.Size,\n\t\tSpeed: s.getVolumeSpeed(volume.VolumeType),\n\t\tState: toVolumeState(volume.Status),\n\t}\n\treturn &av, nil\n}", "func (o *StoragePhysicalDiskAllOf) GetDiskId() string {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func (d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {\n\tif vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) {\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\treturn volDevPath, nil\n\t}\n\n\treturn 
\"\", ErrNotSupported\n}", "func (v *VolumeService) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(volumeID) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound\n\tif strings.Contains(volumeID, \"notfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// check if the volume is not-found\n\tif strings.Contains(volumeID, \"not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: \"local\",\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: volumeID,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func (o SnapshotOutput) DiskId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Snapshot) pulumi.StringOutput { return v.DiskId }).(pulumi.StringOutput)\n}", "func (o *StoragePhysicalDiskExtension) GetDiskId() int64 {\n\tif o == nil || o.DiskId == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.DiskId\n}", "func GetVolumesFromUUIDV2(uuid string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?uuid=\" + uuid\n\treturn getVolumesV2(query)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (s *OsdCsiServer) 
ControllerGetVolume(\n\tctx context.Context,\n\treq *csi.ControllerGetVolumeRequest,\n) (*csi.ControllerGetVolumeResponse, error) {\n\n\tclogger.WithContext(ctx).Tracef(\"ControllerGetVolume request received. VolumeID: %s\", req.GetVolumeId())\n\n\tvol, err := s.driverGetVolume(ctx, req.GetVolumeId())\n\tif err != nil {\n\t\tif s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {\n\t\t\treturn &csi.ControllerGetVolumeResponse{\n\t\t\t\tVolume: &csi.Volume{\n\t\t\t\t\tVolumeId: req.GetVolumeId(),\n\t\t\t\t},\n\t\t\t\tStatus: &csi.ControllerGetVolumeResponse_VolumeStatus{\n\t\t\t\t\tVolumeCondition: &csi.VolumeCondition{\n\t\t\t\t\t\tAbnormal: true,\n\t\t\t\t\t\tMessage: fmt.Sprintf(\"Volume ID %s not found\", req.GetVolumeId()),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn &csi.ControllerGetVolumeResponse{\n\t\tVolume: &csi.Volume{\n\t\t\tCapacityBytes: int64(vol.Spec.Size),\n\t\t\tVolumeId: vol.Id,\n\t\t},\n\t\tStatus: &csi.ControllerGetVolumeResponse_VolumeStatus{\n\t\t\tVolumeCondition: getVolumeCondition(vol),\n\t\t},\n\t}, nil\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", 
\"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func GetVolume(id string, name string) (*xmsv3.Volume, error) {\n\tvolume, err := xms.GetVolume(id, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn volume.Content, nil\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (o FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (o IopingSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
*IopingSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceAwsElasticBlockStoreOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceAwsElasticBlockStore) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o *StoragePhysicalDiskAllOf) SetDiskId(v string) {\n\to.DiskId = &v\n}", "func (r *RPCTractserverTalker) GetDiskInfo(ctx context.Context, addr string) ([]core.FsStatus, core.Error) {\n\treq := core.GetDiskInfoReq{}\n\tvar reply core.GetDiskInfoReply\n\tif err := r.cc.Send(ctx, addr, core.GetDiskInfoMethod, req, &reply); err != nil {\n\t\tlog.Errorf(\"GetDiskInfo RPC error on tractserver %s: %s\", addr, err)\n\t\treturn nil, core.ErrRPC\n\t}\n\tif reply.Err != core.NoError {\n\t\tlog.Errorf(\"GetDiskInfo error on tractserver %s: %s\", addr, reply.Err)\n\t}\n\treturn reply.Disks, reply.Err\n}", "func DiskNumber(disk syscall.Handle, number *int64) error {\n\tvar bytes uint32\n\tdevNum := StorageDeviceNumber{}\n\tbuflen := uint32(unsafe.Sizeof(devNum.DeviceType)) + uint32(unsafe.Sizeof(devNum.DeviceNumber)) + uint32(unsafe.Sizeof(devNum.PartitionNumber))\n\n\terr := syscall.DeviceIoControl(disk, IOCTL_STORAGE_GET_DEVICE_NUMBER, nil, 0, (*byte)(unsafe.Pointer(&devNum)), buflen, &bytes, nil)\n\n\tfmt.Printf(\"devNum: %v \\n\", devNum)\n\n\tif err == nil {\n\t\t*number = int64(devNum.DeviceNumber)\n\t}\n\treturn err\n}", "func Get(c *golangsdk.ServiceClient, server_id string, volume_id string) (r GetResult) {\n\t_, r.Err = c.Get(getURL(c, server_id, volume_id), &r.Body, nil)\n\treturn\n}", "func (m *Info) GetEBSVolumeID(devName string) string {\n\tif m.ebsVolume != nil {\n\t\treturn m.ebsVolume.getEBSVolumeID(devName)\n\t}\n\n\treturn \"\"\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out 
*internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, 
&cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func (o IopingSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func (c *Controller) GetVolumeName(getVolumeNameRequest k8sresources.FlexVolumeGetVolumeNameRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-isAttached-start\")\n\tdefer c.logger.Println(\"controller-isAttached-end\")\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func (o FioSpecVolumeVolumeSourceAwsElasticBlockStorePtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceAwsElasticBlockStore) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
&v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *StackEbrc) GetVolumeAttachment(serverID, id string) (*abstract.VolumeAttachment, fail.Error) {\n\tlogrus.Debugf(\">>> stacks.ebrc::GetVolumeAttachment(%s)\", id)\n\tdefer logrus.Debugf(\"<<< stacks.ebrc::GetVolumeAttachment(%s)\", id)\n\n\tvats, err := s.ListVolumeAttachments(serverID)\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error getting attachment\"))\n\t}\n\n\tfor _, vat := range vats {\n\t\tif vat.ID == id && vat.ServerID == serverID {\n\t\t\treturn &vat, nil\n\t\t}\n\t}\n\n\treturn nil, fail.Errorf(fmt.Sprintf(\"Attachment [%s] to [%s] not found\", id, serverID), nil)\n}", "func (d DobsClient) GetVolume(ctx Context, name string) (*APIVolume, error) {\n\n\tapiVolume, err := d.getVolumeByName(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvol := &APIVolume{\n\t\tName: apiVolume.Name,\n\t\tID: apiVolume.ID,\n\t\t// DropletID: apiVolume.DropletIDs[0],\n\t}\n\n\treturn vol, nil\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {\n\targs := []string{\"-p\", \"-s\", \"TYPE\", \"-s\", \"PTTYPE\", \"-o\", \"export\", disk}\n\tmounter.Logger.Infof(\"Attempting to determine if disk %q is 
formatted using blkid with args: (%v)\", disk, args)\n\tcmd := mounter.Runner.Command(\"blkid\", args...)\n\tdataOut, err := cmd.CombinedOutput()\n\toutput := string(dataOut)\n\tmounter.Logger.Infof(\"Output: %q, err: %v\", output, err)\n\n\tif err != nil {\n\t\tif exit, ok := err.(utilexec.ExitError); ok {\n\t\t\tif exit.ExitStatus() == 2 {\n\t\t\t\t// Disk device is unformatted.\n\t\t\t\t// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was\n\t\t\t\t// not found, or no (specified) devices could be identified, an\n\t\t\t\t// exit code of 2 is returned.\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t}\n\t\tmounter.Logger.Errorf(\"Could not determine if disk %q is formatted (%v)\", disk, err)\n\t\treturn \"\", err\n\t}\n\n\tvar fstype, pttype string\n\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, l := range lines {\n\t\tif len(l) <= 0 {\n\t\t\t// Ignore empty line.\n\t\t\tcontinue\n\t\t}\n\t\tcs := strings.Split(l, \"=\")\n\t\tif len(cs) != 2 {\n\t\t\treturn \"\", fmt.Errorf(\"blkid returns invalid output: %s\", output)\n\t\t}\n\t\t// TYPE is filesystem type, and PTTYPE is partition table type, according\n\t\t// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.\n\t\tif cs[0] == \"TYPE\" {\n\t\t\tfstype = cs[1]\n\t\t} else if cs[0] == \"PTTYPE\" {\n\t\t\tpttype = cs[1]\n\t\t}\n\t}\n\n\tif len(pttype) > 0 {\n\t\tmounter.Logger.Infof(\"Disk %s detected partition table type: %s\", disk, pttype)\n\t\t// Returns a special non-empty string as filesystem type, then kubelet\n\t\t// will not format it.\n\t\treturn \"unknown data, probably partitions\", nil\n\t}\n\n\treturn fstype, nil\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}" ]
[ "0.7162575", "0.70565593", "0.66211486", "0.6366637", "0.5995894", "0.59791255", "0.5870378", "0.57527435", "0.5721588", "0.54494816", "0.5445511", "0.54289883", "0.538686", "0.53238314", "0.53045374", "0.5237207", "0.52239496", "0.5141038", "0.50503945", "0.49240676", "0.48260102", "0.48182446", "0.4809545", "0.47582942", "0.47404557", "0.47359133", "0.47316363", "0.4704718", "0.4704718", "0.47016215", "0.4675547", "0.46712238", "0.46538183", "0.46393573", "0.46364492", "0.4626431", "0.46049306", "0.46031296", "0.457289", "0.45668182", "0.45617577", "0.4561241", "0.45560175", "0.4549383", "0.4548807", "0.45475847", "0.45413142", "0.45359594", "0.4500755", "0.4483377", "0.44689092", "0.44625163", "0.44553018", "0.4443734", "0.4438241", "0.44120333", "0.44120333", "0.43922272", "0.4392029", "0.4386517", "0.43856144", "0.43801534", "0.4370474", "0.43625164", "0.43560135", "0.4354286", "0.43529555", "0.43512282", "0.43415385", "0.43396747", "0.4321504", "0.43027753", "0.4294844", "0.429466", "0.4279527", "0.42749342", "0.42614132", "0.42564845", "0.424861", "0.42427737", "0.4241113", "0.42333424", "0.42256305", "0.42119175", "0.4199721", "0.4197748", "0.41946274", "0.4192719", "0.41918865", "0.4175481", "0.4171039", "0.41664678", "0.41482708", "0.41433164", "0.41393656", "0.4137595", "0.41359895", "0.41300127", "0.4124263", "0.41213804" ]
0.8193799
0
Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest is an autogenerated conversion function.
func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error { return autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn 
autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func (c *Core) VolumePath(id types.VolumeID) (string, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\tv, dv, err := c.getVolumeDriver(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Get volume: %s path\", id.String()))\n\t}\n\n\treturn c.volumePath(v, dv)\n}", "func (m *MockProvisioner) GetVolumePath(volume api.Volume) (string, error) {\n\targs := m.Mock.Called(volume)\n\n\treturn args.String(0), args.Error(1)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to GetVolumePath: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Getting volume %s path using plugin %s\", req.Name, p.Name)\n\n\tresp, err := p.sendRequest(req, hostVirtualPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := 
p.handleErrorResponse(resp, hostVirtualPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpathRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tpathResp := new(volume.PathResponse)\n\tif err := json.Unmarshal(pathRespBytes, pathResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn pathResp.Mountpoint, nil\n}", "func (o *FileInfoCollectionGetParams) WithTarget(target *string) *FileInfoCollectionGetParams {\n\to.SetTarget(target)\n\treturn o\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func getDeviceByIDPath(volumeName string) string {\n\treturn filepath.Join(diskIDPath, fmt.Sprintf(\"%s%s\", diskDOPrefix, volumeName))\n}", "func (t *transpiler) VisitTargetPath(ctx *parser.TargetPathContext) interface{} {\n\tp := ctx.TargetPathHead().Accept(t).(pathSpec)\n\tfor i := range ctx.AllTargetPathSegment() {\n\t\tp.field += ctx.TargetPathSegment(i).Accept(t).(string)\n\t}\n\n\tif ctx.OWMOD() != nil && ctx.OWMOD().GetText() != \"\" {\n\t\tp.field += ctx.OWMOD().GetText()\n\t}\n\n\t// Only one of p.arg and p.index can be filled.\n\tif (p.arg == \"\") == (p.index == \"\") {\n\t\tt.fail(ctx, fmt.Errorf(\"invalid target path - expected arg xor index but got both or neither (arg %s and index %s)\", p.arg, p.index))\n\t}\n\n\treturn p\n}", "func (m *ItemTranslateExchangeIdsPostRequestBody) 
GetTargetIdType()(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat) {\n val, err := m.GetBackingStore().Get(\"targetIdType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat)\n }\n return nil\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is invalid, missing the app id provided as header or Basic auth)\n\t// However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - `http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn 
\"\", \"\"\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func (s *Store) documentationPathIDToFilePath(ctx context.Context, bundleID int, pathID string) (_ *string, err error) {\n\tctx, _, endObservation := s.operations.documentationPathIDToFilePath.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{\n\t\tlog.Int(\"bundleID\", bundleID),\n\t\tlog.String(\"pathID\", pathID),\n\t}})\n\tdefer endObservation(1, observation.Args{})\n\n\treturn s.scanFirstDocumentationFilePath(s.Store.Query(ctx, sqlf.Sprintf(documentationPathIDToFilePathQuery, bundleID, pathID)))\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, 
out)\n}", "func (c *Client) lockIdFromPath(path string) (string, error) {\n\tlist, _, err := c.client.Search(c.Remote, &lockSearchRequest{\n\t\tFilters: []lockFilter{\n\t\t\t{Property: \"path\", Value: path},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch len(list.Locks) {\n\tcase 0:\n\t\treturn \"\", ErrNoMatchingLocks\n\tcase 1:\n\t\treturn list.Locks[0].Id, nil\n\tdefault:\n\t\treturn \"\", ErrLockAmbiguous\n\t}\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *GetContainersUUIDVolumesVolumeUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param volume_uuid\n\tif err := r.SetPathParam(\"volume_uuid\", o.VolumeUUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (e *EndToEndTest) GetVolumePath(repo string, volume string) (string, error) {\n\tv, _, err := e.Client.VolumesApi.GetVolume(context.Background(), repo, volume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn v.Config[\"mountpoint\"].(string), nil\n}", "func 
Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func getIDFromVirtualPath(vpath string) (string, string, error) {\n\tif strings.Contains(vpath, \":\") == false {\n\t\treturn \"\", \"\", errors.New(\"Path missing Virtual Device ID. Given: \" + vpath)\n\t}\n\n\ttmp := strings.Split(vpath, \":\")\n\tvdID := tmp[0]\n\tpathSlice := tmp[1:]\n\tpath := strings.Join(pathSlice, \":\")\n\n\treturn vdID, path, nil\n}", "func brokerIDFromPath(req *http.Request) (string, error) {\n\tpaths := parsePaths(req)\n\tif len(paths) < 2 {\n\t\treturn \"\", errBrokerIDNotProvided\n\t}\n\n\tvar idStr string\n\n\t// If we're calling remove vs get/set, i.e. 
/throttle/remove/123\n\t// vs /throttle/123.\n\tif paths[1] == \"remove\" {\n\t\tif len(paths) < 3 {\n\t\t\treturn \"\", errBrokerIDNotProvided\n\t\t}\n\t\t// Path elements = [throttle, remove, 1230].\n\t\tidStr = paths[2]\n\t} else {\n\t\t// Path elements = [throttle, 1230].\n\t\tidStr = paths[1]\n\t}\n\n\tif idStr == \"all\" {\n\t\treturn idStr, nil\n\t}\n\n\t_, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"broker param must be provided as integer or the string 'all'\")\n\t}\n\n\treturn idStr, nil\n}", "func ValidateIscsiTargetID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseIscsiTargetID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}", "func (driver *Driver) Path(volumeName, volumeID string) (string, error) {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil 
{\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn mounts[0].Mountpoint, nil\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func LoginIscsiTarget(targetName string, isInformationalSession bool, initiatorInstance *string, initiatorPortNumber *uint32, targetPortal *iscsidsc.Portal,\n\tsecurityFlags *iscsidsc.SecurityFlags, loginOptions *iscsidsc.LoginOptions, key *string, isPersistent bool) (*iscsidsc.SessionID, *iscsidsc.ConnectionID, error) {\n\ttargetNamePtr, err := windows.UTF16PtrFromString(targetName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"invalid target name: %q\", targetName)\n\t}\n\n\tinitiatorInstancePtr, initiatorPortNumberValue, err := internal.ConvertInitiatorArgs(initiatorInstance, initiatorPortNumber)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinternalPortal, err := internal.CheckAndConvertPortal(targetPortal)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid portal argument\")\n\t}\n\n\tvar securityFlagsValue iscsidsc.SecurityFlags\n\tif securityFlags != nil {\n\t\tsecurityFlagsValue = *securityFlags\n\t}\n\n\tinternalLoginOptions, userNamePtr, passwordPtr, err := internal.CheckAndConvertLoginOptions(loginOptions)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid loginOptions argument\")\n\t}\n\n\tkeyPtr, keySize, err := internal.CheckAndConvertKey(key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn callProcLoginIScsiTargetW(targetNamePtr, isInformationalSession, initiatorInstancePtr, initiatorPortNumberValue,\n\t\tinternalPortal, securityFlagsValue, internalLoginOptions, uintptr(unsafe.Pointer(userNamePtr)), uintptr(unsafe.Pointer(passwordPtr)),\n\t\tkeyPtr, keySize, isPersistent)\n}", "func (o *Volume) GetVolumeId() string {\n\tif 
o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (o *SnapmirrorResyncRequest) SourceVolume() string {\n\tvar r string\n\tif o.SourceVolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.SourceVolumePtr\n\treturn r\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func GetIDDevicePath(id int) string {\n\tparam0 := strconv.Itoa(id)\n\n\treturn fmt.Sprintf(\"/sources/devices/%s\", param0)\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func getDevicePath(ns *GCENodeServer, volumeID, partition string) (string, error) {\n\t_, volumeKey, err := common.VolumeIDToKey(volumeID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdeviceName, err := common.GetDeviceName(volumeKey)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting device name: %w\", err)\n\t}\n\n\tproxy, ok := ns.Mounter.Interface.(mounter.CSIProxyMounter)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"could not cast to csi proxy class\")\n\t}\n\treturn proxy.GetDiskNumber(deviceName, partition, volumeKey.Name)\n}", "func (r *vdm) Path(volumeName, volumeID string) (string, error) {\n\tfor _, d := range r.drivers {\n\t\tfields := log.Fields{\n\t\t\t\"moduleName\": r.rexray.Context,\n\t\t\t\"driverName\": d.Name(),\n\t\t\t\"volumeName\": volumeName,\n\t\t\t\"volumeID\": volumeID}\n\n\t\tlog.WithFields(fields).Info(\"vdm.Path\")\n\n\t\tif 
!r.pathCache() {\n\t\t\treturn d.Path(volumeName, volumeID)\n\t\t}\n\n\t\tif _, ok := r.mapUsedCount[volumeName]; !ok {\n\t\t\tlog.WithFields(fields).Debug(\"skipping path lookup\")\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\treturn d.Path(volumeName, volumeID)\n\t}\n\treturn \"\", errors.ErrNoVolumesDetected\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (d *MinioDriver) Path(r volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryption() ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *ReplicatedVMManagedDiskTargetDiskEncryption {\n\t\treturn v.TargetDiskEncryption\n\t}).(ReplicatedVMManagedDiskTargetDiskEncryptionPtrOutput)\n}", "func ParseIscsiTargetID(input string) (*IscsiTargetId, error) {\n\tparser := resourceids.NewParserFromResourceIdType(IscsiTargetId{})\n\tparsed, err := parser.Parse(input, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing %q: %+v\", input, err)\n\t}\n\n\tvar ok bool\n\tid := IscsiTargetId{}\n\n\tif id.SubscriptionId, ok = parsed.Parsed[\"subscriptionId\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'subscriptionId' was not found in the resource id %q\", input)\n\t}\n\n\tif id.ResourceGroupName, ok = parsed.Parsed[\"resourceGroupName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'resourceGroupName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.DiskPoolName, ok = parsed.Parsed[\"diskPoolName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'diskPoolName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.IscsiTargetName, ok = parsed.Parsed[\"iscsiTargetName\"]; !ok {\n\t\treturn 
nil, fmt.Errorf(\"the segment 'iscsiTargetName' was not found in the resource id %q\", input)\n\t}\n\n\treturn &id, nil\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o *SnapmirrorCreateRequest) SourceVolume() string {\n\tvar r string\n\tif o.SourceVolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.SourceVolumePtr\n\treturn r\n}", "func (o VolumeV2Output) SourceVolId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VolumeV2) pulumi.StringPtrOutput { return v.SourceVolId }).(pulumi.StringPtrOutput)\n}", "func (d *Dao) Target(c context.Context, id int64) (res *model.Target, err error) {\n\tres = &model.Target{}\n\tif err = d.db.QueryRow(c, _targetSQL, id).Scan(&res.ID, &res.SubEvent, &res.Event, &res.Product, &res.Source, &res.GroupIDs, &res.Threshold, &res.Duration, &res.State, &res.Ctime, &res.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tres = nil\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"d.Target.Scan error(%+v), id(%d)\", err, id)\n\t}\n\tif res.GroupIDs != \"\" {\n\t\tvar gids []int64\n\t\tif gids, err = xstr.SplitInts(res.GroupIDs); err != nil {\n\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, res.GroupIDs)\n\t\t\treturn\n\t\t}\n\t\tif res.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (s *DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func TargetPath(src, dst Part) string {\n\tpath, err := filepath.Rel(filepath.Dir(src.Path()), dst.Path())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn path\n}", "func (o LookupOpenZfsSnapshotResultOutput) VolumeId() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOpenZfsSnapshotResult) string { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func PathToRIDAction(path, query, prefix string) (string, string) {\n\tif len(path) == len(prefix) || !strings.HasPrefix(path, prefix) {\n\t\treturn \"\", \"\"\n\t}\n\n\tpath = path[len(prefix):]\n\n\t// Dot separator not allowed in path\n\tif strings.ContainsRune(path, '.') {\n\t\treturn \"\", \"\"\n\t}\n\n\tif path[0] == '/' {\n\t\tpath = path[1:]\n\t}\n\tparts := strings.Split(path, \"/\")\n\tif len(parts) < 2 {\n\t\treturn \"\", \"\"\n\t}\n\n\tfor i := len(parts) - 1; i >= 0; i-- {\n\t\tpart, err := url.PathUnescape(parts[i])\n\t\tif err != nil {\n\t\t\treturn \"\", \"\"\n\t\t}\n\t\tparts[i] = part\n\t}\n\n\trid := strings.Join(parts[:len(parts)-1], \".\")\n\tif query != \"\" {\n\t\trid += \"?\" + query\n\t}\n\n\treturn rid, parts[len(parts)-1]\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func 
(d *lvm) GetVolumeDiskPath(vol Volume) (string, error) {\n\tif vol.IsVMBlock() || (vol.volType == VolumeTypeCustom && IsContentBlock(vol.contentType)) {\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\treturn volDevPath, nil\n\t}\n\n\treturn \"\", ErrNotSupported\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func getArrayIDFromVolumeContext(s *service, contextVolID string) (string, error) {\n\treturn s.getArrayIDFromVolumeContext(contextVolID)\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *Attribute) SetTargetId(v string) *Attribute {\n\ts.TargetId = &v\n\treturn s\n}", "func (r *CachesIscsiVolume) TargetArn() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetArn\"])\n}", "func (o IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (a *Agent) DirGetId(path string) (dirId string, err error) {\n\tpath = strings.TrimPrefix(path, \"/\")\n\tspec := (&api.DirLocateSpec{}).Init(path)\n\tif err = a.pc.ExecuteApi(spec); err != nil {\n\t\treturn\n\t}\n\tif spec.Result == \"0\" {\n\t\terr = os.ErrNotExist\n\t} else {\n\t\tdirId = spec.Result\n\t}\n\treturn\n}", "func (a *Agent) DirGetId(path string) (directoryId string, err error) {\n\tif strings.HasPrefix(path, \"/\") {\n\t\tpath = path[1:]\n\t}\n\tqs := core.NewQueryString().\n\t\tWithString(\"path\", path)\n\tresult := &types.DirGetIdResult{}\n\terr = a.hc.JsonApi(apiDirGetId, qs, nil, result)\n\tif 
err == nil && result.IsFailed() {\n\t\terr = types.MakeFileError(int(result.ErrorCode), result.Error)\n\t}\n\tif err == nil {\n\t\tif directoryId = string(result.Id); directoryId == \"0\" {\n\t\t\tdirectoryId, err = \"\", errDirNotExist\n\t\t}\n\t}\n\treturn\n}", "func (a *HyperflexApiService) GetHyperflexTargetByMoid(ctx context.Context, moid string) ApiGetHyperflexTargetByMoidRequest {\n\treturn ApiGetHyperflexTargetByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func PathToRID(path, query, prefix string) string {\n\tif len(path) == len(prefix) || !strings.HasPrefix(path, prefix) {\n\t\treturn \"\"\n\t}\n\n\tpath = path[len(prefix):]\n\n\t// Dot separator not allowed in path\n\tif strings.ContainsRune(path, '.') {\n\t\treturn \"\"\n\t}\n\n\tif path[0] == '/' {\n\t\tpath = path[1:]\n\t}\n\tparts := strings.Split(path, \"/\")\n\tfor i := len(parts) - 1; i >= 0; i-- {\n\t\tpart, err := url.PathUnescape(parts[i])\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tparts[i] = part\n\t}\n\n\trid := strings.Join(parts, \".\")\n\tif query != \"\" {\n\t\trid += \"?\" + query\n\t}\n\n\treturn rid\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func delTargetPathInAttachment(attachment *model.VolumeAttachmentSpec, key string, TargetPath string) error {\n\ttargetPathList, exist := attachment.Metadata[key]\n\tif !exist {\n\t\treturn nil\n\t}\n\n\tpaths := strings.Split(targetPathList, \";\")\n\tfor index, path := range paths {\n\t\tif path == TargetPath {\n\t\t\tpaths = append(paths[:index], paths[index+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif 0 == len(paths) {\n\t\tglog.V(5).Info(\"no 
more \" + key)\n\t\tdelete(attachment.Metadata, key)\n\t} else {\n\t\tattachment.Metadata[key] = strings.Join(paths, \";\")\n\t}\n\n\tif KStagingTargetPath == key {\n\t\tvolConnector := connector.NewConnector(attachment.DriverVolumeType)\n\n\t\tif volConnector == nil {\n\t\t\tmsg := fmt.Sprintf(\"unsupport driverVolumeType: %s\", attachment.DriverVolumeType)\n\t\t\tglog.Error(msg)\n\t\t\treturn status.Error(codes.FailedPrecondition, msg)\n\t\t}\n\n\t\terr := volConnector.Detach(attachment.ConnectionData)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"detach failed, %v\", err)\n\t\t\tglog.Error(msg)\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"%s\", msg)\n\t\t}\n\n\t\tattachment.Mountpoint = \"-\"\n\t}\n\n\t_, err := client.UpdateVolumeAttachment(attachment.Id, attachment)\n\tif err != nil {\n\t\tmsg := \"update volume attachment failed\"\n\t\tglog.Error(msg)\n\t\treturn status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\treturn nil\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", "func getQueryParam(c *gin.Context, paramName string) (int64, error) {\n\tp := c.Query(\"target\")\n\n\tid, err := strconv.ParseInt(p, 10, 0)\n\tif err != nil {\n\t\treturn 0, models.ValidationError{\n\t\t\t\"target\": ErrParseError,\n\t\t}\n\t}\n\n\treturn id, nil\n}", "func (r *CachesIscsiVolume) TargetName() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetName\"])\n}", "func getInternalVolumePath(workingMountDir string, vol *smbVolume) string {\n\treturn filepath.Join(getInternalMountPath(workingMountDir, vol), vol.subDir)\n}", "func ParseIscsiTargetIDInsensitively(input string) (*IscsiTargetId, error) {\n\tparser := 
resourceids.NewParserFromResourceIdType(IscsiTargetId{})\n\tparsed, err := parser.Parse(input, true)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing %q: %+v\", input, err)\n\t}\n\n\tvar ok bool\n\tid := IscsiTargetId{}\n\n\tif id.SubscriptionId, ok = parsed.Parsed[\"subscriptionId\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'subscriptionId' was not found in the resource id %q\", input)\n\t}\n\n\tif id.ResourceGroupName, ok = parsed.Parsed[\"resourceGroupName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'resourceGroupName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.DiskPoolName, ok = parsed.Parsed[\"diskPoolName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'diskPoolName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.IscsiTargetName, ok = parsed.Parsed[\"iscsiTargetName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'iscsiTargetName' was not found in the resource id %q\", input)\n\t}\n\n\treturn &id, nil\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func GetPath(mounter volume.Mounter) (string, error) {\n\tpath := mounter.GetPath()\n\tif path == \"\" {\n\t\treturn \"\", fmt.Errorf(\"path is empty %s\", reflect.TypeOf(mounter).String())\n\t}\n\treturn path, nil\n}", "func (o DeliveryPipelineSerialPipelineStageOutput) TargetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStage) *string { return v.TargetId }).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func idToPath(id int64) string {\n\ttwo := 
fmt.Sprintf(\"%02d\", id)\n\treturn path.Join(two[0:1], two[1:2])\n}", "func RequireFromPath(td TrustDomain, path string) ID {\n\tid, err := FromPath(td, path)\n\tpanicOnErr(err)\n\treturn id\n}", "func (c *Client) NewGetIDDeviceRequest(ctx context.Context, path string) (*http.Request, error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tu := url.URL{Host: c.Host, Scheme: scheme, Path: path}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.JWTSigner != nil {\n\t\tif err := c.JWTSigner.Sign(req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn req, nil\n}", "func (Hnd Env) GetPathFileByID(id string) (models.Files, error) {\n\tvar getData interface{}\n\t// Define files struct.\n\tfiles := models.Files{}\n\t// Check valid id.\n\tif bson.IsObjectIdHex(id) {\n\t\tqueryGetData := bson.M{\"_id\": bson.ObjectIdHex(id)}\n\t\terr := Hnd.Mp.GetOne(files.TableName(), queryGetData, &getData)\n\t\terr = files.ToModel(getData, &files)\n\t\tif err != nil {\n\t\t\treturn files, err\n\t\t}\n\t\t// Return files error.\n\t\treturn files, err\n\t}\n\t// Return error.\n\treturn files, errors.New(\"Invalid ID\")\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, 
volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. \\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}" ]
[ "0.77421737", "0.74304825", "0.72372514", "0.65324247", "0.6491574", "0.64577705", "0.5854011", "0.44721726", "0.44675246", "0.4394458", "0.43608132", "0.4273545", "0.42500415", "0.42369637", "0.41847834", "0.41766235", "0.41596717", "0.3979977", "0.39590117", "0.39502946", "0.39199433", "0.39026663", "0.38980693", "0.38855532", "0.3881543", "0.38716173", "0.38662007", "0.38607597", "0.3859746", "0.38358462", "0.38288605", "0.38243976", "0.38237262", "0.37988672", "0.37921545", "0.37890938", "0.37865293", "0.3786463", "0.37846643", "0.37825432", "0.3780003", "0.3764924", "0.37643892", "0.37643892", "0.37643355", "0.37601346", "0.3758797", "0.3753961", "0.37495098", "0.37475482", "0.37458134", "0.37422606", "0.37281117", "0.3727587", "0.37190333", "0.37126887", "0.37040868", "0.36976063", "0.36911476", "0.3690042", "0.36877564", "0.36692333", "0.36677247", "0.36656812", "0.36541262", "0.36484656", "0.3646403", "0.3642417", "0.36412635", "0.36390302", "0.36331838", "0.36329758", "0.36329758", "0.36163926", "0.3613861", "0.36081105", "0.3583609", "0.35821027", "0.35810408", "0.3577368", "0.35610324", "0.35578692", "0.3550235", "0.35493144", "0.3548537", "0.35467502", "0.3545982", "0.3533665", "0.35334447", "0.3532632", "0.35283458", "0.3526659", "0.3522543", "0.35189015", "0.3513763", "0.35072342", "0.3503626", "0.3502731", "0.3500621", "0.3498941" ]
0.87550557
0
Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest is an autogenerated conversion function.
func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error { return autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn 
autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func (c *Core) VolumePath(id types.VolumeID) (string, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\tv, dv, err := c.getVolumeDriver(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, 
fmt.Sprintf(\"Get volume: %s path\", id.String()))\n\t}\n\n\treturn c.volumePath(v, dv)\n}", "func ValidatePath(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := ValidPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn \"\", errors.New(\"Invalid ID. IDs must only contain alpha characters.\")\n\t}\n\treturn m[2], nil\n}", "func (m *MockProvisioner) GetVolumePath(volume api.Volume) (string, error) {\n\targs := m.Mock.Called(volume)\n\n\treturn args.String(0), args.Error(1)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func (o *FileInfoCollectionGetParams) WithTarget(target *string) *FileInfoCollectionGetParams {\n\to.SetTarget(target)\n\treturn o\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (t *transpiler) VisitTargetPath(ctx *parser.TargetPathContext) interface{} {\n\tp := ctx.TargetPathHead().Accept(t).(pathSpec)\n\tfor i := range ctx.AllTargetPathSegment() {\n\t\tp.field += ctx.TargetPathSegment(i).Accept(t).(string)\n\t}\n\n\tif ctx.OWMOD() != nil && ctx.OWMOD().GetText() != \"\" {\n\t\tp.field += ctx.OWMOD().GetText()\n\t}\n\n\t// Only one of p.arg and p.index can be filled.\n\tif (p.arg == \"\") == (p.index == \"\") {\n\t\tt.fail(ctx, fmt.Errorf(\"invalid target path - expected arg xor index but got both or neither (arg %s and index %s)\", p.arg, p.index))\n\t}\n\n\treturn p\n}", "func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to GetVolumePath: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := 
p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Getting volume %s path using plugin %s\", req.Name, p.Name)\n\n\tresp, err := p.sendRequest(req, hostVirtualPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, hostVirtualPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpathRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tpathResp := new(volume.PathResponse)\n\tif err := json.Unmarshal(pathRespBytes, pathResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn pathResp.Mountpoint, nil\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func findTargetIDAndMethod(reqPath string, headers http.Header) (targetID string, method string) {\n\tif appID := headers.Get(daprAppID); appID != \"\" {\n\t\treturn appID, strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t}\n\n\tif auth := headers.Get(\"Authorization\"); strings.HasPrefix(auth, \"Basic \") {\n\t\tif s, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, \"Basic \")); err == nil {\n\t\t\tpair := strings.Split(string(s), \":\")\n\t\t\tif len(pair) == 2 && pair[0] == daprAppID {\n\t\t\t\treturn pair[1], strings.TrimPrefix(path.Clean(reqPath), \"/\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we're here, the handler was probably invoked with /v1.0/invoke/ (or the invocation is invalid, missing the app id provided as header or Basic auth)\n\t// However, we are not relying on wildcardParam because the URL may have been sanitized to remove `//``, so `http://` would have been turned into `http:/`\n\t// First, check to make sure that the path has the prefix\n\tif idx := pathHasPrefix(reqPath, 
apiVersionV1, \"invoke\"); idx > 0 {\n\t\treqPath = reqPath[idx:]\n\n\t\t// Scan to find app ID and method\n\t\t// Matches `<appid>/method/<method>`.\n\t\t// Examples:\n\t\t// - `appid/method/mymethod`\n\t\t// - `http://example.com/method/mymethod`\n\t\t// - `https://example.com/method/mymethod`\n\t\t// - `http%3A%2F%2Fexample.com/method/mymethod`\n\t\tif idx = strings.Index(reqPath, \"/method/\"); idx > 0 {\n\t\t\ttargetID := reqPath[:idx]\n\t\t\tmethod := reqPath[(idx + len(\"/method/\")):]\n\t\t\tif t, _ := url.QueryUnescape(targetID); t != \"\" {\n\t\t\t\ttargetID = t\n\t\t\t}\n\t\t\treturn targetID, method\n\t\t}\n\t}\n\n\treturn \"\", \"\"\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func Convert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in *RecoveryTarget, out *v1alpha2.RecoveryTarget, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_RecoveryTarget_To_v1alpha2_RecoveryTarget(in, out, s)\n}", "func ValidateIscsiTargetID(input interface{}, key string) (warnings []string, errors []error) {\n\tv, ok := input.(string)\n\tif !ok {\n\t\terrors = append(errors, fmt.Errorf(\"expected %q to be a string\", key))\n\t\treturn\n\t}\n\n\tif _, err := ParseIscsiTargetID(v); err != nil {\n\t\terrors = append(errors, err)\n\t}\n\n\treturn\n}", "func brokerIDFromPath(req *http.Request) (string, error) {\n\tpaths := parsePaths(req)\n\tif len(paths) < 2 {\n\t\treturn \"\", errBrokerIDNotProvided\n\t}\n\n\tvar idStr string\n\n\t// If we're calling remove vs get/set, i.e. 
/throttle/remove/123\n\t// vs /throttle/123.\n\tif paths[1] == \"remove\" {\n\t\tif len(paths) < 3 {\n\t\t\treturn \"\", errBrokerIDNotProvided\n\t\t}\n\t\t// Path elements = [throttle, remove, 1230].\n\t\tidStr = paths[2]\n\t} else {\n\t\t// Path elements = [throttle, 1230].\n\t\tidStr = paths[1]\n\t}\n\n\tif idStr == \"all\" {\n\t\treturn idStr, nil\n\t}\n\n\t_, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"broker param must be provided as integer or the string 'all'\")\n\t}\n\n\treturn idStr, nil\n}", "func getDeviceByIDPath(volumeName string) string {\n\treturn filepath.Join(diskIDPath, fmt.Sprintf(\"%s%s\", diskDOPrefix, volumeName))\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func (c *Client) lockIdFromPath(path string) (string, error) {\n\tlist, _, err := c.client.Search(c.Remote, &lockSearchRequest{\n\t\tFilters: []lockFilter{\n\t\t\t{Property: \"path\", Value: path},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch len(list.Locks) {\n\tcase 0:\n\t\treturn \"\", ErrNoMatchingLocks\n\tcase 1:\n\t\treturn list.Locks[0].Id, nil\n\tdefault:\n\t\treturn \"\", ErrLockAmbiguous\n\t}\n}", "func RequireFromPath(td TrustDomain, path string) ID {\n\tid, err := FromPath(td, path)\n\tpanicOnErr(err)\n\treturn id\n}", "func getArrayIDFromVolumeContext(s *service, contextVolID string) (string, error) {\n\treturn s.getArrayIDFromVolumeContext(contextVolID)\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func PathToRIDAction(path, query, prefix string) (string, string) {\n\tif len(path) == len(prefix) || !strings.HasPrefix(path, prefix) {\n\t\treturn \"\", \"\"\n\t}\n\n\tpath 
= path[len(prefix):]\n\n\t// Dot separator not allowed in path\n\tif strings.ContainsRune(path, '.') {\n\t\treturn \"\", \"\"\n\t}\n\n\tif path[0] == '/' {\n\t\tpath = path[1:]\n\t}\n\tparts := strings.Split(path, \"/\")\n\tif len(parts) < 2 {\n\t\treturn \"\", \"\"\n\t}\n\n\tfor i := len(parts) - 1; i >= 0; i-- {\n\t\tpart, err := url.PathUnescape(parts[i])\n\t\tif err != nil {\n\t\t\treturn \"\", \"\"\n\t\t}\n\t\tparts[i] = part\n\t}\n\n\trid := strings.Join(parts[:len(parts)-1], \".\")\n\tif query != \"\" {\n\t\trid += \"?\" + query\n\t}\n\n\treturn rid, parts[len(parts)-1]\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in *impl.PathExistsRequest, out *v2alpha1.PathExistsRequest) error {\n\treturn autoConvert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (o VolumeV2Output) SourceVolId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VolumeV2) pulumi.StringPtrOutput { return v.SourceVolId }).(pulumi.StringPtrOutput)\n}", "func (driver *Driver) Path(volumeName, volumeID string) (string, error) {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := 
driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn mounts[0].Mountpoint, nil\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func PathToAttributePath(p cty.Path) *tftypes.AttributePath {\n\tif p == nil || len(p) < 1 {\n\t\treturn nil\n\t}\n\tap := tftypes.NewAttributePath()\n\tfor _, step := range p {\n\t\tswitch selector := step.(type) {\n\t\tcase cty.GetAttrStep:\n\t\t\tap = ap.WithAttributeName(selector.Name)\n\n\t\tcase cty.IndexStep:\n\t\t\tkey := selector.Key\n\t\t\tswitch key.Type() {\n\t\t\tcase cty.String:\n\t\t\t\tap = ap.WithElementKeyString(key.AsString())\n\t\t\tcase cty.Number:\n\t\t\t\tv, _ := key.AsBigFloat().Int64()\n\t\t\t\tap = ap.WithElementKeyInt(int(v))\n\t\t\tdefault:\n\t\t\t\t// We'll bail early if we encounter anything else, and just\n\t\t\t\t// return the valid prefix.\n\t\t\t\treturn ap\n\t\t\t}\n\t\t}\n\t}\n\treturn ap\n}", "func LoginIscsiTarget(targetName string, isInformationalSession bool, initiatorInstance *string, initiatorPortNumber *uint32, targetPortal *iscsidsc.Portal,\n\tsecurityFlags *iscsidsc.SecurityFlags, loginOptions *iscsidsc.LoginOptions, key *string, isPersistent bool) (*iscsidsc.SessionID, *iscsidsc.ConnectionID, error) {\n\ttargetNamePtr, err 
:= windows.UTF16PtrFromString(targetName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"invalid target name: %q\", targetName)\n\t}\n\n\tinitiatorInstancePtr, initiatorPortNumberValue, err := internal.ConvertInitiatorArgs(initiatorInstance, initiatorPortNumber)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tinternalPortal, err := internal.CheckAndConvertPortal(targetPortal)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid portal argument\")\n\t}\n\n\tvar securityFlagsValue iscsidsc.SecurityFlags\n\tif securityFlags != nil {\n\t\tsecurityFlagsValue = *securityFlags\n\t}\n\n\tinternalLoginOptions, userNamePtr, passwordPtr, err := internal.CheckAndConvertLoginOptions(loginOptions)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"invalid loginOptions argument\")\n\t}\n\n\tkeyPtr, keySize, err := internal.CheckAndConvertKey(key)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn callProcLoginIScsiTargetW(targetNamePtr, isInformationalSession, initiatorInstancePtr, initiatorPortNumberValue,\n\t\tinternalPortal, securityFlagsValue, internalLoginOptions, uintptr(unsafe.Pointer(userNamePtr)), uintptr(unsafe.Pointer(passwordPtr)),\n\t\tkeyPtr, keySize, isPersistent)\n}", "func (o *LunGetSerialNumberRequest) Path() string {\n\tvar r string\n\tif o.PathPtr == nil {\n\t\treturn r\n\t}\n\tr = *o.PathPtr\n\treturn r\n}", "func disectPath(basePath string, newPath string) ([]string, []string, error) {\n\tbaseComps := splitPath(basePath)\n\tnewComps := splitPath(newPath)\n\tif len(baseComps) > len(newComps) {\n\t\terr := errors.New(\"Base Path is longer than requested path!\")\n\t\treturn nil, nil, err\n\t}\n\tfor i, _ := range baseComps {\n\t\tif strings.Compare(newComps[i], baseComps[i]) != 0 {\n\t\t\terr := errors.New(\"Base Path is not a prefix of newPath\")\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\trelPath := newComps[len(baseComps):]\n\tfor i, e := range relPath {\n\t\tif isCommand(e) {\n\t\t\treturn relPath[:i], 
relPath[i:], nil\n\t\t}\n\t}\n\treturn relPath, nil, nil\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}", "func parseID(path string) (string, string, error) {\n\tif parts := strings.SplitN(path, \"/\", 2); len(parts) == 2 {\n\t\tif parts[0] != \"\" && parts[1] != \"\" {\n\t\t\treturn parts[0], parts[1], nil\n\t\t}\n\t}\n\treturn \"\", \"\", errInvalidOrUnspecifiedID\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func GetDerivePath(path string, symbolID uint32, ap *AdditionalDeriveParam) (accounts.DerivationPath, error) {\n\tcount := strings.Count(path, \"%d\")\n\tswitch count {\n\tcase 1:\n\t\treturn accounts.ParseDerivationPath(fmt.Sprintf(path, symbolID))\n\tcase 4:\n\t\tif ap == nil {\n\t\t\treturn nil, errors.Errorf(\"bip44 additional param not provided\")\n\t\t}\n\t\tif ap.AccountIndex < 0 || ap.Index < 0 {\n\t\t\treturn nil, errors.Errorf(\"invalid account index or index\")\n\t\t}\n\t\tif ap.ChangeType != 
ChangeTypeExternal && ap.ChangeType != ChangeTypeInternal {\n\t\t\treturn nil, errors.Errorf(\"invalid change type %d\", ap.ChangeType)\n\t\t}\n\t\treturn accounts.ParseDerivationPath(fmt.Sprintf(path, symbolID, ap.AccountIndex, ap.ChangeType, ap.Index))\n\tdefault:\n\t\treturn nil, errors.Errorf(\"bip44 derive path unknown format: %s\", path)\n\t}\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func delTargetPathInAttachment(attachment *model.VolumeAttachmentSpec, key string, TargetPath string) error {\n\ttargetPathList, exist := attachment.Metadata[key]\n\tif !exist {\n\t\treturn nil\n\t}\n\n\tpaths := strings.Split(targetPathList, \";\")\n\tfor index, path := range paths {\n\t\tif path == TargetPath {\n\t\t\tpaths = append(paths[:index], paths[index+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif 0 == len(paths) {\n\t\tglog.V(5).Info(\"no more \" + key)\n\t\tdelete(attachment.Metadata, key)\n\t} else {\n\t\tattachment.Metadata[key] = strings.Join(paths, \";\")\n\t}\n\n\tif KStagingTargetPath == key {\n\t\tvolConnector := connector.NewConnector(attachment.DriverVolumeType)\n\n\t\tif volConnector == nil {\n\t\t\tmsg := fmt.Sprintf(\"unsupport driverVolumeType: %s\", attachment.DriverVolumeType)\n\t\t\tglog.Error(msg)\n\t\t\treturn status.Error(codes.FailedPrecondition, msg)\n\t\t}\n\n\t\terr := volConnector.Detach(attachment.ConnectionData)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"detach failed, %v\", err)\n\t\t\tglog.Error(msg)\n\t\t\treturn status.Errorf(codes.FailedPrecondition, \"%s\", msg)\n\t\t}\n\n\t\tattachment.Mountpoint = \"-\"\n\t}\n\n\t_, err := client.UpdateVolumeAttachment(attachment.Id, attachment)\n\tif err != nil {\n\t\tmsg := \"update volume attachment failed\"\n\t\tglog.Error(msg)\n\t\treturn 
status.Error(codes.FailedPrecondition, msg)\n\t}\n\n\treturn nil\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o *SnapmirrorResyncRequest) SourceVolume() string {\n\tvar r string\n\tif o.SourceVolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.SourceVolumePtr\n\treturn r\n}", "func GetIDDevicePath(id int) string {\n\tparam0 := strconv.Itoa(id)\n\n\treturn fmt.Sprintf(\"/sources/devices/%s\", param0)\n}", "func (d *MinioDriver) Path(r volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (r *vdm) Path(volumeName, volumeID string) (string, error) {\n\tfor _, d := range r.drivers {\n\t\tfields := log.Fields{\n\t\t\t\"moduleName\": r.rexray.Context,\n\t\t\t\"driverName\": d.Name(),\n\t\t\t\"volumeName\": volumeName,\n\t\t\t\"volumeID\": volumeID}\n\n\t\tlog.WithFields(fields).Info(\"vdm.Path\")\n\n\t\tif !r.pathCache() {\n\t\t\treturn d.Path(volumeName, volumeID)\n\t\t}\n\n\t\tif _, ok := r.mapUsedCount[volumeName]; !ok {\n\t\t\tlog.WithFields(fields).Debug(\"skipping path lookup\")\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\treturn d.Path(volumeName, volumeID)\n\t}\n\treturn \"\", 
errors.ErrNoVolumesDetected\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (a *HyperflexApiService) GetHyperflexTargetByMoid(ctx context.Context, moid string) ApiGetHyperflexTargetByMoidRequest {\n\treturn ApiGetHyperflexTargetByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (fp *BatchGetAuditedResourceDescriptorsRequest_FieldTerminalPath) Get(source *BatchGetAuditedResourceDescriptorsRequest) (values []interface{}) {\n\tif source != nil {\n\t\tswitch fp.selector {\n\t\tcase BatchGetAuditedResourceDescriptorsRequest_FieldPathSelectorNames:\n\t\t\tfor _, value := range source.GetNames() {\n\t\t\t\tvalues = append(values, value)\n\t\t\t}\n\t\tcase BatchGetAuditedResourceDescriptorsRequest_FieldPathSelectorFieldMask:\n\t\t\tif source.FieldMask != nil {\n\t\t\t\tvalues = append(values, source.FieldMask)\n\t\t\t}\n\t\tcase BatchGetAuditedResourceDescriptorsRequest_FieldPathSelectorView:\n\t\t\tvalues = append(values, source.View)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for BatchGetAuditedResourceDescriptorsRequest: %d\", fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func (o *SnapmirrorCreateRequest) SourceVolume() string {\n\tvar r string\n\tif o.SourceVolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.SourceVolumePtr\n\treturn r\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func RequireFromPathf(td TrustDomain, format string, 
args ...interface{}) ID {\n\tid, err := FromPathf(td, format, args...)\n\tpanicOnErr(err)\n\treturn id\n}", "func NewGetDiscoverDeploymentTargetBadRequest() *GetDiscoverDeploymentTargetBadRequest {\n\treturn &GetDiscoverDeploymentTargetBadRequest{}\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func ParseDeploymentPath(parts []string) (DeploymentID, error) {\n\tif len(parts) != 2 {\n\t\treturn DeploymentID{}, ErrInvalidIDPath\n\t}\n\n\towner, err := sdk.AccAddressFromBech32(parts[0])\n\tif err != nil {\n\t\treturn DeploymentID{}, err\n\t}\n\n\tdseq, err := strconv.ParseUint(parts[1], 10, 64)\n\tif err != nil {\n\t\treturn DeploymentID{}, err\n\t}\n\n\treturn DeploymentID{\n\t\tOwner: owner.String(),\n\t\tDSeq: dseq,\n\t}, nil\n}", "func ParseIscsiTargetID(input string) (*IscsiTargetId, error) {\n\tparser := resourceids.NewParserFromResourceIdType(IscsiTargetId{})\n\tparsed, err := parser.Parse(input, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing %q: %+v\", input, err)\n\t}\n\n\tvar ok bool\n\tid := IscsiTargetId{}\n\n\tif id.SubscriptionId, ok = parsed.Parsed[\"subscriptionId\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'subscriptionId' was not found in the resource id %q\", input)\n\t}\n\n\tif id.ResourceGroupName, ok = parsed.Parsed[\"resourceGroupName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'resourceGroupName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.DiskPoolName, ok = parsed.Parsed[\"diskPoolName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'diskPoolName' was not found in the resource id %q\", input)\n\t}\n\n\tif id.IscsiTargetName, ok = parsed.Parsed[\"iscsiTargetName\"]; !ok {\n\t\treturn nil, fmt.Errorf(\"the segment 'iscsiTargetName' was not found in the resource id %q\", input)\n\t}\n\n\treturn 
&id, nil\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func TargetPath(src, dst Part) string {\n\tpath, err := filepath.Rel(filepath.Dir(src.Path()), dst.Path())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn path\n}", "func (fp *ListAuditedResourceDescriptorsRequest_FieldTerminalPath) Get(source *ListAuditedResourceDescriptorsRequest) (values []interface{}) {\n\tif source != nil {\n\t\tswitch fp.selector {\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorPageSize:\n\t\t\tvalues = append(values, source.PageSize)\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorPageToken:\n\t\t\tif source.PageToken != nil {\n\t\t\t\tvalues = append(values, source.PageToken)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorOrderBy:\n\t\t\tif source.OrderBy != nil {\n\t\t\t\tvalues = append(values, source.OrderBy)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorFilter:\n\t\t\tif source.Filter != nil {\n\t\t\t\tvalues = append(values, source.Filter)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorFieldMask:\n\t\t\tif source.FieldMask != nil {\n\t\t\t\tvalues = append(values, source.FieldMask)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorView:\n\t\t\tvalues = append(values, source.View)\n\t\tcase ListAuditedResourceDescriptorsRequest_FieldPathSelectorIncludePagingInfo:\n\t\t\tvalues = append(values, source.IncludePagingInfo)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for ListAuditedResourceDescriptorsRequest: %d\", fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func VendorPath(root string, dgst 
digest.Digest) string {\n\tencoded := dgst.Encoded()\n\treturn filepath.Join(root, dgst.Algorithm().String(), encoded[:2], encoded)\n}", "func Fid2Path(device string, f *lustre.Fid, recno *int64, linkno *int) (string, error) {\n\tvar buffer [4096]C.char\n\tvar clinkno = C.int(*linkno)\n\n\tdevStr := C.CString(device)\n\tdefer C.free(unsafe.Pointer(devStr))\n\tfidStr := C.CString(f.String())\n\tdefer C.free(unsafe.Pointer(fidStr))\n\n\trc, err := C.llapi_fid2path(devStr, fidStr, &buffer[0],\n\t\tC.int(len(buffer)), (*C.longlong)(recno), &clinkno)\n\t*linkno = int(clinkno)\n\tif err := isError(rc, err); err != nil {\n\t\treturn \"\", &FidPathError{f, int(rc), err}\n\t}\n\tp := C.GoString(&buffer[0])\n\n\t// This is a relative path, so make sure it doesn't start with a '/'\n\tif p[0] == '/' {\n\t\tp = p[1:]\n\t}\n\treturn p, err\n}", "func (s *DescribeEffectivePolicyInput) SetTargetId(v string) *DescribeEffectivePolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTargetInput {\n\ts.TargetId = &v\n\treturn s\n}", "func FilesystemEntryFromIDWithPath(ctx context.Context, rep repo.Repository, rootID string, consistentAttributes bool) (fs.Entry, error) {\n\tpathElements := strings.Split(filepath.ToSlash(rootID), \"/\")\n\n\tif len(pathElements) > 1 {\n\t\t// if a path is provided, consistentAttributes is meaningless since descending into nested path is\n\t\t// always unambiguous because parent always has full attributes.\n\t\tconsistentAttributes = false\n\t}\n\n\tvar startingEntry fs.Entry\n\n\tman, err := findSnapshotByRootObjectIDOrManifestID(ctx, rep, pathElements[0], consistentAttributes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif man != nil {\n\t\t// ID was unambiguously resolved to a snapshot, which means we have data about the root directory itself.\n\t\tstartingEntry, err = SnapshotRoot(rep, man)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\toid, err := 
object.ParseID(pathElements[0])\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"can't parse object ID %v\", rootID)\n\t\t}\n\n\t\tstartingEntry = AutoDetectEntryFromObjectID(ctx, rep, oid, \"\")\n\t}\n\n\treturn GetNestedEntry(ctx, startingEntry, pathElements[1:])\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func PathToAttributePath(p cty.Path) *proto.AttributePath {\n\tap := &proto.AttributePath{}\n\tfor _, step := range p {\n\t\tswitch selector := step.(type) {\n\t\tcase cty.GetAttrStep:\n\t\t\tap.Steps = append(ap.Steps, &proto.AttributePath_Step{\n\t\t\t\tSelector: &proto.AttributePath_Step_AttributeName{\n\t\t\t\t\tAttributeName: selector.Name,\n\t\t\t\t},\n\t\t\t})\n\t\tcase cty.IndexStep:\n\t\t\tkey := selector.Key\n\t\t\tswitch key.Type() {\n\t\t\tcase cty.String:\n\t\t\t\tap.Steps = append(ap.Steps, &proto.AttributePath_Step{\n\t\t\t\t\tSelector: &proto.AttributePath_Step_ElementKeyString{\n\t\t\t\t\t\tElementKeyString: key.AsString(),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\tcase cty.Number:\n\t\t\t\tv, _ := key.AsBigFloat().Int64()\n\t\t\t\tap.Steps = append(ap.Steps, &proto.AttributePath_Step{\n\t\t\t\t\tSelector: &proto.AttributePath_Step_ElementKeyInt{\n\t\t\t\t\t\tElementKeyInt: v,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\tdefault:\n\t\t\t\t// We'll bail early if we encounter anything else, and just\n\t\t\t\t// return the valid prefix.\n\t\t\t\treturn ap\n\t\t\t}\n\t\t}\n\t}\n\treturn ap\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func (o HTTP2HealthCheckOutput) RequestPath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.RequestPath }).(pulumi.StringPtrOutput)\n}", "func (m *ItemTranslateExchangeIdsPostRequestBody) GetTargetIdType()(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat) {\n val, err := 
m.GetBackingStore().Get(\"targetIdType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat)\n }\n return nil\n}", "func (o HTTP2HealthCheckResponseOutput) RequestPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.RequestPath }).(pulumi.StringOutput)\n}", "func NewGetInvoicesIdRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/invoices/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func NewGetExpensesIdRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", 
id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/expenses/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. 
\\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func parseTarget(target string) (result Target, err error) {\n\n\tparts := strings.Split(target, \" \")\n\n\tif len(parts) > 2 {\n\t\terr = errors.New(\"TOO_MANY_PARTS_IN_TARGET\")\n\t\treturn\n\t}\n\tvar item string\n\tif len(parts) == 2 {\n\t\t// \"x xyz\"\n\t\tvar quant int\n\t\tquant, err = strconv.Atoi(parts[0])\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresult.Quantity = quant\n\t\titem = parts[1]\n\t} else {\n\t\t// \"xyz\"\n\t\titem = parts[0]\n\t}\n\n\tdotparts := strings.Split(item, \".\")\n\tif len(dotparts) == 1 {\n\t\tif strings.EqualFold(item, \"all\") {\n\t\t\t// \"all\"\n\t\t\tresult.All = true\n\t\t} else {\n\t\t\t// \"foo\"\n\t\t\tresult.Identifier = 0\n\t\t\tresult.Name = item\n\t\t}\n\t} else if len(dotparts) == 2 {\n\t\t// \"x.foo\"\n\t\tif strings.EqualFold(dotparts[0], \"all\") {\n\t\t\t// \"all.foo\"\n\t\t\tresult.All = true\n\t\t\tresult.Name = dotparts[1]\n\t\t} else {\n\t\t\t// \"4.foo\"\n\t\t\tvar identifier int\n\t\t\tidentifier, err = strconv.Atoi(dotparts[0])\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresult.Identifier = identifier\n\t\t\tresult.Name = dotparts[1]\n\t\t}\n\t} else {\n\t\t// \"x.x.x\"\n\t\terr = errors.New(\"TOO_MANY_DOTS\")\n\t}\n\treturn\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (s *EffectivePolicy) SetTargetId(v string) 
*EffectivePolicy {\n\ts.TargetId = &v\n\treturn s\n}", "func (fp *GetAuditedResourceDescriptorRequest_FieldTerminalPath) Get(source *GetAuditedResourceDescriptorRequest) (values []interface{}) {\n\tif source != nil {\n\t\tswitch fp.selector {\n\t\tcase GetAuditedResourceDescriptorRequest_FieldPathSelectorName:\n\t\t\tif source.Name != nil {\n\t\t\t\tvalues = append(values, source.Name)\n\t\t\t}\n\t\tcase GetAuditedResourceDescriptorRequest_FieldPathSelectorFieldMask:\n\t\t\tif source.FieldMask != nil {\n\t\t\t\tvalues = append(values, source.FieldMask)\n\t\t\t}\n\t\tcase GetAuditedResourceDescriptorRequest_FieldPathSelectorView:\n\t\t\tvalues = append(values, source.View)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for GetAuditedResourceDescriptorRequest: %d\", fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func parseTargetPath(target, serverUrl string) string {\n\tif strings.Contains(target, serverUrl) {\n\t\treturn target[len(serverUrl):]\n\t}\n\treturn target\n}", "func parseTargetPath(target, serverUrl string) string {\n\tif strings.Contains(target, serverUrl) {\n\t\treturn target[len(serverUrl):]\n\t}\n\treturn target\n}", "func decodeGetKeyPersonByIDRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*pb.GetKeyPersonByIDRequest)\n\treturn endpoints.GetKeyPersonByIDRequest{ID: req.Id}, nil\n}", "func GetProductPath(id string) string {\n\treturn fmt.Sprintf(\"/api/product/%v\", id)\n}", "func decodeGetByIDRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\tvars := mux.Vars(r)\n\tid, ok := vars[\"id\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"not a valid ID\")\n\t}\n\treq := endpoint.GetByIDRequest{\n\t\tId: id,\n\t}\n\treturn req, nil\n}", "func decodeGetByIDRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\tvars := mux.Vars(r)\n\tid, ok := vars[\"id\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"not a valid ID\")\n\t}\n\treq := endpoint.GetByIDRequest{\n\t\tId: 
id,\n\t}\n\treturn req, nil\n}", "func (s *AttachPolicyInput) SetTargetId(v string) *AttachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}" ]
[ "0.7642856", "0.7575218", "0.7314613", "0.6769844", "0.64868087", "0.62545204", "0.55791044", "0.47349438", "0.46141884", "0.45024619", "0.43004805", "0.4246971", "0.40988505", "0.4076236", "0.40026626", "0.3999551", "0.39716315", "0.39353424", "0.39153662", "0.38932514", "0.38354325", "0.38106507", "0.38040006", "0.3798046", "0.37567556", "0.37518647", "0.37512058", "0.37437233", "0.37135023", "0.36954013", "0.36918366", "0.36829954", "0.36758715", "0.366829", "0.366356", "0.3645583", "0.36334947", "0.36315456", "0.36146116", "0.36022455", "0.3601168", "0.35883707", "0.3582038", "0.35819665", "0.35751683", "0.35720685", "0.35712758", "0.35679832", "0.35644448", "0.35631126", "0.35596073", "0.35544658", "0.3548261", "0.3531811", "0.3527455", "0.35264295", "0.3524415", "0.35232213", "0.3516711", "0.35123843", "0.35120195", "0.34983516", "0.34935802", "0.3483865", "0.34817982", "0.34780416", "0.3473951", "0.34716448", "0.34709874", "0.34682736", "0.3464575", "0.345763", "0.3453485", "0.34512186", "0.3440304", "0.3439723", "0.34373116", "0.34370664", "0.34317726", "0.3429298", "0.34277356", "0.34256867", "0.34203535", "0.34157205", "0.3413049", "0.34016627", "0.33990803", "0.3397659", "0.33966032", "0.3396358", "0.33939683", "0.33934543", "0.3387856", "0.3387841", "0.3387841", "0.3385922", "0.33854493", "0.33838904", "0.33838904", "0.3380241" ]
0.86907834
0
Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse is an autogenerated conversion function.
func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error { return autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn 
autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (c *Core) VolumePath(id types.VolumeID) (string, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\tv, dv, err := c.getVolumeDriver(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Get volume: %s path\", id.String()))\n\t}\n\n\treturn c.volumePath(v, dv)\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (m *MockProvisioner) GetVolumePath(volume api.Volume) (string, error) {\n\targs := m.Mock.Called(volume)\n\n\treturn args.String(0), args.Error(1)\n}", "func (o LookupOpenZfsSnapshotResultOutput) VolumeId() pulumi.StringOutput {\n\treturn 
o.ApplyT(func(v LookupOpenZfsSnapshotResult) string { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && !(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to GetVolumePath: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil 
{\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Getting volume %s path using plugin %s\", req.Name, p.Name)\n\n\tresp, err := p.sendRequest(req, hostVirtualPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, hostVirtualPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpathRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tpathResp := new(volume.PathResponse)\n\tif err := json.Unmarshal(pathRespBytes, pathResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn pathResp.Mountpoint, nil\n}", "func getDeviceByIDPath(volumeName string) string {\n\treturn filepath.Join(diskIDPath, fmt.Sprintf(\"%s%s\", diskDOPrefix, volumeName))\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, 
error) {\n\tresult := KeyVaultClientDecryptResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (c Client) decodeResponse(endpoint, verb string, params *url.Values, target interface{}) (err error) {\n\tfullURL, err := c.api(endpoint, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc.l.WithFields(log.Fields{\n\t\t\"url\": fullURL.String(), // TODO: remove sensitive data\n\t\t\"HTTPverb\": verb,\n\t}).Debug(\"hitting API\")\n\n\tvar resp = &http.Response{}\n\tswitch verb {\n\tcase \"GET\":\n\t\tresp, err = c.httpclient.Get(fullURL.String())\n\tcase \"POST\":\n\t\tresp, err = c.httpclient.Post(fullURL.String(), \"application/x-www-form-urlencoded\", nil)\n\tcase \"DELETE\":\n\t\treq, _ := http.NewRequest(\"DELETE\", fullURL.String(), nil)\n\t\tresp, err = c.httpclient.Do(req)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(target)\n}", "func (o FioSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func 
Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (a *Agent) DirGetId(path string) (directoryId string, err error) {\n\tif strings.HasPrefix(path, \"/\") {\n\t\tpath = path[1:]\n\t}\n\tqs := core.NewQueryString().\n\t\tWithString(\"path\", path)\n\tresult := &types.DirGetIdResult{}\n\terr = a.hc.JsonApi(apiDirGetId, qs, nil, result)\n\tif err == nil && result.IsFailed() {\n\t\terr = types.MakeFileError(int(result.ErrorCode), result.Error)\n\t}\n\tif err == nil {\n\t\tif directoryId = string(result.Id); directoryId == \"0\" {\n\t\t\tdirectoryId, err = \"\", errDirNotExist\n\t\t}\n\t}\n\treturn\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func getIDFromVirtualPath(vpath string) (string, string, error) {\n\tif strings.Contains(vpath, \":\") == false {\n\t\treturn \"\", \"\", errors.New(\"Path missing Virtual Device ID. 
Given: \" + vpath)\n\t}\n\n\ttmp := strings.Split(vpath, \":\")\n\tvdID := tmp[0]\n\tpathSlice := tmp[1:]\n\tpath := strings.Join(pathSlice, \":\")\n\n\treturn vdID, path, nil\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func (o *Volume) GetVolumeId() string {\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.VolumeId\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func (o IopingSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (s *Store) documentationPathIDToFilePath(ctx context.Context, bundleID int, pathID string) (_ *string, err error) {\n\tctx, _, endObservation := s.operations.documentationPathIDToFilePath.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{\n\t\tlog.Int(\"bundleID\", bundleID),\n\t\tlog.String(\"pathID\", pathID),\n\t}})\n\tdefer endObservation(1, observation.Args{})\n\n\treturn s.scanFirstDocumentationFilePath(s.Store.Query(ctx, sqlf.Sprintf(documentationPathIDToFilePathQuery, bundleID, pathID)))\n}", "func ParseGetInvoicesIdResponse(rsp *http.Response) (*GetInvoicesIdResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetInvoicesIdResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest Invoice\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (a *Agent) DirGetId(path string) (dirId string, err error) {\n\tpath = strings.TrimPrefix(path, \"/\")\n\tspec := (&api.DirLocateSpec{}).Init(path)\n\tif err = a.pc.ExecuteApi(spec); err != nil {\n\t\treturn\n\t}\n\tif spec.Result == \"0\" {\n\t\terr = os.ErrNotExist\n\t} else {\n\t\tdirId = spec.Result\n\t}\n\treturn\n}", "func (f *FileLocation) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (d *MinioDriver) Path(r volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecVolumeVolumeSourceCinderOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCinder) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func getArrayIDFromVolumeContext(s *service, contextVolID string) (string, error) {\n\treturn s.getArrayIDFromVolumeContext(contextVolID)\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func (driver *Driver) Path(volumeName, volumeID string) (string, error) {\n\tif volumeName == \"\" 
&& volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn mounts[0].Mountpoint, nil\n}", "func ParseDeleteInvoicesIdResponse(rsp *http.Response) (*DeleteInvoicesIdResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteInvoicesIdResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i 
:= range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. \\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func (e *EndToEndTest) GetVolumePath(repo string, volume string) (string, error) {\n\tv, _, err := e.Client.VolumesApi.GetVolume(context.Background(), repo, volume)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn v.Config[\"mountpoint\"].(string), nil\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientGetDeletedKeyResponse, error) {\n\tresult := KeyVaultClientGetDeletedKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil {\n\t\treturn KeyVaultClientGetDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IscsiTargetsClient) getHandleResponse(resp *http.Response) (IscsiTargetsClientGetResponse, error) {\n\tresult := IscsiTargetsClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.IscsiTarget); 
err != nil {\n\t\treturn IscsiTargetsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp *http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func (r *vdm) Path(volumeName, volumeID string) (string, error) {\n\tfor _, d := range r.drivers {\n\t\tfields := log.Fields{\n\t\t\t\"moduleName\": r.rexray.Context,\n\t\t\t\"driverName\": d.Name(),\n\t\t\t\"volumeName\": volumeName,\n\t\t\t\"volumeID\": volumeID}\n\n\t\tlog.WithFields(fields).Info(\"vdm.Path\")\n\n\t\tif !r.pathCache() {\n\t\t\treturn d.Path(volumeName, volumeID)\n\t\t}\n\n\t\tif _, ok := r.mapUsedCount[volumeName]; !ok {\n\t\t\tlog.WithFields(fields).Debug(\"skipping path lookup\")\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\treturn d.Path(volumeName, volumeID)\n\t}\n\treturn \"\", errors.ErrNoVolumesDetected\n}", "func (proxy *remoteDriverProxy) Path(name string) (string, error) {\n\tvar req = remoteVolumePathReq{\n\t\tName: name,\n\t}\n\n\tvar resp remoteVolumePathResp\n\n\tif err := proxy.client.CallService(remoteVolumePathService, &req, &resp, true); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn \"\", errors.New(resp.Err)\n\t}\n\n\treturn resp.Mountpoint, nil\n}", "func (s *Stack) GetVolume(id string) (*resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif id == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"id\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", id), 
true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tr := volumesv2.Get(s.VolumeClient, id)\n\tvolume, err := r.Extract()\n\tif err != nil {\n\t\tif _, ok := err.(gc.ErrDefault404); ok {\n\t\t\treturn nil, resources.ResourceNotFoundError(\"volume\", id)\n\t\t}\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error getting volume: %s\", ProviderErrorToString(err)))\n\t}\n\n\tav := resources.Volume{\n\t\tID: volume.ID,\n\t\tName: volume.Name,\n\t\tSize: volume.Size,\n\t\tSpeed: s.getVolumeSpeed(volume.VolumeType),\n\t\tState: toVolumeState(volume.Status),\n\t}\n\treturn &av, nil\n}", "func GetVolumeInfo(awsSession *session.Session, targets []string) (info []VolInfo, err error) {\n\tclient := ec2.New(awsSession)\n\tinfo = make([]VolInfo, 0)\n\n\tfilters := make([]*ec2.Filter, 0)\n\n\tparams := &ec2.DescribeVolumesInput{}\n\n\t// process targets and massage them into aws type variables\n\tif targets != nil {\n\t\tawsnames := make([]*string, 0)\n\n\t\tfor _, name := range targets {\n\t\t\tawsnames = append(awsnames, aws.String(name))\n\t\t}\n\n\t\tnameFilter := ec2.Filter{\n\t\t\tName: aws.String(\"attachment.instance-id\"),\n\t\t\tValues: awsnames,\n\t\t}\n\n\t\tfilters = append(filters, &nameFilter)\n\t}\n\n\t// add the filters if they exist\n\tif len(filters) > 0 {\n\t\tparams.Filters = filters\n\t}\n\n\t// actually call aws for volume information\n\tresult, err := client.DescribeVolumes(params)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\terr = errors.Wrapf(aerr, \"error searching volumes\")\n\t\t\t\treturn info, err\n\t\t\t}\n\t\t} else {\n\t\t\terr = errors.Wrapf(err, \"error searching volumes\")\n\t\t\treturn info, err\n\t\t}\n\t}\n\n\t// loop through the resulting info, and set up the info we need\n\tfor _, vol := range result.Volumes {\n\t\tinstanceId := *vol.Attachments[0].InstanceId\n\t\tdeviceName := *vol.Attachments[0].Device\n\n\t\ti := VolInfo{\n\t\t\tInstanceId: 
instanceId,\n\t\t\tDeviceName: deviceName,\n\t\t\tVolumeId: *vol.VolumeId,\n\t\t}\n\n\t\tinfo = append(info, i)\n\t}\n\n\treturn info, err\n}", "func (client *Client) GetVolume(id string) (*api.Volume, error) {\n\tvol, err := volumes.Get(client.Volume, id).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting volume: %s\", ProviderErrorToString(err))\n\t}\n\tav := api.Volume{\n\t\tID: vol.ID,\n\t\tName: vol.Name,\n\t\tSize: vol.Size,\n\t\tSpeed: client.getVolumeSpeed(vol.VolumeType),\n\t\tState: toVolumeState(vol.Status),\n\t}\n\treturn &av, nil\n}", "func (client *KeyVaultClient) getDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientGetDeletedKeyResponse, error) {\n\tresult := KeyVaultClientGetDeletedKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil {\n\t\treturn KeyVaultClientGetDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getKeyVersionsHandleResponse(resp *http.Response) (KeyVaultClientGetKeyVersionsResponse, error) {\n\tresult := KeyVaultClientGetKeyVersionsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyListResult); err != nil {\n\t\treturn KeyVaultClientGetKeyVersionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client StorageTargetsClient) GetResponder(resp *http.Response) (result StorageTarget, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client *KeyVaultClient) recoverDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientRecoverDeletedKeyResponse, error) {\n\tresult := KeyVaultClientRecoverDeletedKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientRecoverDeletedKeyResponse{}, err\n\t}\n\treturn result, 
nil\n}", "func (r *CachesIscsiVolume) TargetArn() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"targetArn\"])\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func (o *Volume) SetVolumeId(v string) {\n\to.VolumeId = &v\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp *http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o CachesIscsiVolumeOutput) TargetArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.TargetArn }).(pulumi.StringOutput)\n}", "func (o FioSpecVolumeVolumeSourceCinderPtrOutput) VolumeID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCinder) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumeID\n\t}).(pulumi.StringPtrOutput)\n}", "func (d *VolumeDriver) Path(r volume.Request) volume.Response {\n\treturn volume.Response{Mountpoint: getMountPoint(r.Name)}\n}", "func GetIDDevicePath(id int) string 
{\n\tparam0 := strconv.Itoa(id)\n\n\treturn fmt.Sprintf(\"/sources/devices/%s\", param0)\n}", "func (o *AnalyzeRecipeInstructions200ResponseParsedInstructionsInnerStepsInnerIngredientsInner) GetIdOk() (*float32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Id, true\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (client BaseClient) GetEventsByPathResponder(resp *http.Response) (result ListEvent, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (client *KeyVaultClient) recoverDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientRecoverDeletedKeyResponse, error) {\n\tresult := KeyVaultClientRecoverDeletedKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientRecoverDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (a *BackendOptionsApiService) GetLogTarget(ctx _context.Context, id int32, parentName string, parentType string, localVarOptionals *GetLogTargetOpts) (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\t// create path and map 
variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/services/haproxy/configuration/log_targets/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", id)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tlocalVarQueryParams.Add(\"parent_name\", parameterToString(parentName, \"\"))\n\tlocalVarQueryParams.Add(\"parent_type\", parameterToString(parentType, \"\"))\n\tif localVarOptionals != nil && localVarOptionals.TransactionId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"transaction_id\", parameterToString(localVarOptionals.TransactionId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif 
localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v ModelError\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (client ListManagementImageClient) GetAllImageIdsResponder(resp *http.Response) (result ImageIds, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = 
autorest.Response{Response: resp}\n\treturn\n}", "func (in *TargetVolumeSpec) DeepCopy() *TargetVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TargetVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (o *FileInfoCollectionGetParams) SetTarget(target *string) {\n\to.Target = target\n}", "func (Hnd Env) GetPathFileByID(id string) (models.Files, error) {\n\tvar getData interface{}\n\t// Define files struct.\n\tfiles := models.Files{}\n\t// Check valid id.\n\tif bson.IsObjectIdHex(id) {\n\t\tqueryGetData := bson.M{\"_id\": bson.ObjectIdHex(id)}\n\t\terr := Hnd.Mp.GetOne(files.TableName(), queryGetData, &getData)\n\t\terr = files.ToModel(getData, &files)\n\t\tif err != nil {\n\t\t\treturn files, err\n\t\t}\n\t\t// Return files error.\n\t\treturn files, err\n\t}\n\t// Return error.\n\treturn files, errors.New(\"Invalid ID\")\n}", "func (d *Dao) Target(c context.Context, id int64) (res *model.Target, err error) {\n\tres = &model.Target{}\n\tif err = d.db.QueryRow(c, _targetSQL, id).Scan(&res.ID, &res.SubEvent, &res.Event, &res.Product, &res.Source, &res.GroupIDs, &res.Threshold, &res.Duration, &res.State, &res.Ctime, &res.Mtime); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tres = nil\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"d.Target.Scan error(%+v), id(%d)\", err, id)\n\t}\n\tif res.GroupIDs != \"\" {\n\t\tvar gids []int64\n\t\tif gids, err = xstr.SplitInts(res.GroupIDs); err != nil {\n\t\t\tlog.Error(\"d.Product.SplitInts error(%+v), group ids(%s)\", err, res.GroupIDs)\n\t\t\treturn\n\t\t}\n\t\tif res.Groups, err = d.Groups(c, gids); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {\n\ttitle := 
\"hcsshim.GetLayerVhdMountPath\"\n\tctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck\n\tdefer span.End()\n\tdefer func() { oc.SetSpanStatus(span, err) }()\n\n\tvar mountPath *uint16\n\terr = hcsGetLayerVhdMountPath(vhdHandle, &mountPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get vhd mount path\")\n\t}\n\tpath = interop.ConvertAndFreeCoTaskMemString(mountPath)\n\treturn path, nil\n}", "func (o *GetContainersUUIDVolumesVolumeUUIDParams) WithTimeout(timeout time.Duration) *GetContainersUUIDVolumesVolumeUUIDParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (m *ItemTranslateExchangeIdsPostRequestBody) GetTargetIdType()(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat) {\n val, err := m.GetBackingStore().Get(\"targetIdType\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.ExchangeIdFormat)\n }\n return nil\n}", "func (s *DetachPolicyInput) SetTargetId(v string) *DetachPolicyInput {\n\ts.TargetId = &v\n\treturn s\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (client SmartGroupsClient) GetByIDResponder(resp *http.Response) (result SmartGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
[ "0.7863537", "0.77346945", "0.75235456", "0.68859404", "0.66203177", "0.6234967", "0.5369284", "0.48230836", "0.47107923", "0.46666646", "0.4567488", "0.45483878", "0.45163965", "0.45079204", "0.4500821", "0.45008132", "0.4459958", "0.4442204", "0.44300577", "0.44058344", "0.43898615", "0.436969", "0.43562058", "0.43354926", "0.42949593", "0.42948705", "0.4289998", "0.42623392", "0.42431438", "0.42423293", "0.4229545", "0.4221692", "0.42191148", "0.4212846", "0.4206597", "0.42040047", "0.42008626", "0.41703156", "0.41675657", "0.41576713", "0.41576713", "0.41404116", "0.4126726", "0.4111338", "0.4110571", "0.40998358", "0.4097182", "0.40873963", "0.4080494", "0.4073489", "0.406988", "0.40627557", "0.40492526", "0.40465134", "0.40349388", "0.40202466", "0.40199748", "0.40025207", "0.3993242", "0.398053", "0.3976234", "0.39723635", "0.3970515", "0.39695737", "0.39672107", "0.39663368", "0.39577937", "0.39493027", "0.39384195", "0.39321706", "0.3932028", "0.39290422", "0.39224863", "0.39192995", "0.39192995", "0.39172748", "0.39165372", "0.39045334", "0.38979527", "0.38958976", "0.38899025", "0.3889561", "0.38894296", "0.38887876", "0.38876176", "0.3883312", "0.38808036", "0.38705343", "0.3858022", "0.38544118", "0.3852408", "0.38510627", "0.38468057", "0.38464645", "0.38402015", "0.38399398", "0.38352674", "0.38326633", "0.38295332", "0.38292244" ]
0.88067997
0
Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse is an autogenerated conversion function.
func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error { return autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn 
autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o VolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (o GetVolumeGroupSapHanaVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (c *Core) VolumePath(id types.VolumeID) (string, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\tv, dv, err := c.getVolumeDriver(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Get volume: %s path\", id.String()))\n\t}\n\n\treturn c.volumePath(v, dv)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func (m *MockProvisioner) GetVolumePath(volume api.Volume) (string, error) {\n\targs := m.Mock.Called(volume)\n\n\treturn args.String(0), args.Error(1)\n}", "func (o *Volume) GetVolumeIdOk() (*string, bool) {\n\tif o == nil || o.VolumeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.VolumeId, true\n}", "func (o LookupOpenZfsSnapshotResultOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOpenZfsSnapshotResult) string { return v.VolumeId }).(pulumi.StringOutput)\n}", "func Convert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in *impl.PathExistsResponse, out *v2alpha1.PathExistsResponse) error {\n\treturn autoConvert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in, out)\n}", "func (o *Volume) GetVolumeIdOk() (string, bool) 
{\n\tif o == nil || o.VolumeId == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.VolumeId, true\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (o ReplicatedVMManagedDiskOutput) TargetDiskEncryptionSetId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ReplicatedVMManagedDisk) *string { return v.TargetDiskEncryptionSetId }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to GetVolumePath: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Getting volume %s path using plugin %s\", req.Name, p.Name)\n\n\tresp, err := p.sendRequest(req, hostVirtualPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, hostVirtualPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpathRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tpathResp := new(volume.PathResponse)\n\tif err := json.Unmarshal(pathRespBytes, pathResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn pathResp.Mountpoint, nil\n}", "func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {\n\tif (strings.HasPrefix(volumeID, \"vol-\")) && 
!(strings.HasPrefix(volumeID, \"aws://\")) {\n\t\treturn volumeID, nil\n\t}\n\tidx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1\n\tif idx == 0 {\n\t\treturn \"\", fmt.Errorf(\"malformed EBS volume id %q\", volumeID)\n\t}\n\treturn volumeID[idx:], nil\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumeOutput) VolumePath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceVsphereVolume) string { return v.VolumePath }).(pulumi.StringOutput)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func getArrayIDFromVolumeContext(s *service, contextVolID string) (string, error) {\n\treturn s.getArrayIDFromVolumeContext(contextVolID)\n}", "func (o CachesIscsiVolumeOutput) VolumeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.VolumeId }).(pulumi.StringOutput)\n}", "func ValidatePath(w http.ResponseWriter, r *http.Request) (string, error) {\n\tm := ValidPath.FindStringSubmatch(r.URL.Path)\n\tif m == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn \"\", errors.New(\"Invalid ID. 
IDs must only contain alpha characters.\")\n\t}\n\treturn m[2], nil\n}", "func getDeviceByIDPath(volumeName string) string {\n\treturn filepath.Join(diskIDPath, fmt.Sprintf(\"%s%s\", diskDOPrefix, volumeName))\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (r *CachesIscsiVolume) VolumeId() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"volumeId\"])\n}", "func (client BaseClient) GetEventsByPathResponder(resp *http.Response) (result ListEvent, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o LookupAliasResultOutput) TargetKeyId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAliasResult) string { return v.TargetKeyId }).(pulumi.StringOutput)\n}", "func (d *MinioDriver) Path(r volume.Request) volume.Response {\n\td.m.RLock()\n\tdefer d.m.RUnlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (driver *Driver) Path(volumeName, volumeID string) (string, error) {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the 
storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\treturn mounts[0].Mountpoint, nil\n}", "func ParseDeploymentPath(parts []string) (DeploymentID, error) {\n\tif len(parts) != 2 {\n\t\treturn DeploymentID{}, ErrInvalidIDPath\n\t}\n\n\towner, err := sdk.AccAddressFromBech32(parts[0])\n\tif err != nil {\n\t\treturn DeploymentID{}, err\n\t}\n\n\tdseq, err := strconv.ParseUint(parts[1], 10, 64)\n\tif err != nil {\n\t\treturn DeploymentID{}, err\n\t}\n\n\treturn DeploymentID{\n\t\tOwner: owner.String(),\n\t\tDSeq: dseq,\n\t}, nil\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp *http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func GetLunsFromVolumeUUIDV2(uuid string) (LunV2, error) {\n\tquery := \"/api/datacenter/storage/luns?volume.uuid=\" + uuid\n\treturn getLunsInfoV2(query)\n}", "func (ms *MachinePlugin) GetVolumeIDs(ctx context.Context, req *cmi.GetVolumeIDsRequest) (*cmi.GetVolumeIDsResponse, error) {\n\tvar (\n\t\tvolumeIDs []string\n\t\tvolumeSpecs []*corev1.PersistentVolumeSpec\n\t)\n\n\t// Log messages to track start and end of 
request\n\tglog.V(2).Infof(\"GetVolumeIDs request has been recieved for %q\", req.PVSpecList)\n\n\terr := json.Unmarshal(req.PVSpecList, &volumeSpecs)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tfor i := range volumeSpecs {\n\t\tspec := volumeSpecs[i]\n\t\tif spec.AWSElasticBlockStore == nil {\n\t\t\t// Not an aws volume\n\t\t\tcontinue\n\t\t}\n\t\tvolumeID := spec.AWSElasticBlockStore.VolumeID\n\t\tvolumeIDs = append(volumeIDs, volumeID)\n\t}\n\n\tglog.V(2).Infof(\"GetVolumeIDs machines request has been processed successfully. \\nList: %v\", volumeIDs)\n\n\tResp := &cmi.GetVolumeIDsResponse{\n\t\tVolumeIDs: volumeIDs,\n\t}\n\treturn Resp, nil\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) decryptHandleResponse(resp *http.Response) (KeyVaultClientDecryptResponse, error) {\n\tresult := KeyVaultClientDecryptResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientDecryptResponse{}, err\n\t}\n\treturn result, nil\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func (client *KeyVaultClient) getKeyVersionsHandleResponse(resp *http.Response) (KeyVaultClientGetKeyVersionsResponse, error) {\n\tresult := KeyVaultClientGetKeyVersionsResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyListResult); err != nil {\n\t\treturn KeyVaultClientGetKeyVersionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) getKeyHandleResponse(resp 
*http.Response) (KeyVaultClientGetKeyResponse, error) {\n\tresult := KeyVaultClientGetKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientGetKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (a *Agent) DirGetId(path string) (directoryId string, err error) {\n\tif strings.HasPrefix(path, \"/\") {\n\t\tpath = path[1:]\n\t}\n\tqs := core.NewQueryString().\n\t\tWithString(\"path\", path)\n\tresult := &types.DirGetIdResult{}\n\terr = a.hc.JsonApi(apiDirGetId, qs, nil, result)\n\tif err == nil && result.IsFailed() {\n\t\terr = types.MakeFileError(int(result.ErrorCode), result.Error)\n\t}\n\tif err == nil {\n\t\tif directoryId = string(result.Id); directoryId == \"0\" {\n\t\t\tdirectoryId, err = \"\", errDirNotExist\n\t\t}\n\t}\n\treturn\n}", "func (d *VolumeDriver) Path(r volume.Request) volume.Response {\n\treturn volume.Response{Mountpoint: getMountPoint(r.Name)}\n}", "func (o IopingSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func (o InstanceGroupManagerStatusResponseOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerStatusResponse) InstanceGroupManagerStatusVersionTargetResponse {\n\t\treturn v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponseOutput)\n}", "func (fp *BatchGetAuditedResourceDescriptorsResponse_FieldTerminalPath) Get(source *BatchGetAuditedResourceDescriptorsResponse) (values []interface{}) {\n\tif source != nil 
{\n\t\tswitch fp.selector {\n\t\tcase BatchGetAuditedResourceDescriptorsResponse_FieldPathSelectorAuditedResourceDescriptors:\n\t\t\tfor _, value := range source.GetAuditedResourceDescriptors() {\n\t\t\t\tvalues = append(values, value)\n\t\t\t}\n\t\tcase BatchGetAuditedResourceDescriptorsResponse_FieldPathSelectorMissing:\n\t\t\tfor _, value := range source.GetMissing() {\n\t\t\t\tvalues = append(values, value)\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for BatchGetAuditedResourceDescriptorsResponse: %d\", fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func (a *BackendOptionsApiService) GetLogTarget(ctx _context.Context, id int32, parentName string, parentType string, localVarOptionals *GetLogTargetOpts) (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/services/haproxy/configuration/log_targets/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(fmt.Sprintf(\"%v\", id)), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tlocalVarQueryParams.Add(\"parent_name\", parameterToString(parentName, \"\"))\n\tlocalVarQueryParams.Add(\"parent_type\", parameterToString(parentType, \"\"))\n\tif localVarOptionals != nil && localVarOptionals.TransactionId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"transaction_id\", parameterToString(localVarOptionals.TransactionId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" 
{\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t\t}\n\t\tvar v ModelError\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o *AnalyzeRecipeInstructions200ResponseParsedInstructionsInnerStepsInnerIngredientsInner) GetIdOk() (*float32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Id, true\n}", "func (c Client) decodeResponse(endpoint, verb string, params *url.Values, target interface{}) (err error) {\n\tfullURL, err := c.api(endpoint, params)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tc.l.WithFields(log.Fields{\n\t\t\"url\": fullURL.String(), // TODO: remove sensitive data\n\t\t\"HTTPverb\": verb,\n\t}).Debug(\"hitting API\")\n\n\tvar resp = &http.Response{}\n\tswitch verb {\n\tcase \"GET\":\n\t\tresp, err = c.httpclient.Get(fullURL.String())\n\tcase \"POST\":\n\t\tresp, err = c.httpclient.Post(fullURL.String(), \"application/x-www-form-urlencoded\", nil)\n\tcase \"DELETE\":\n\t\treq, _ := http.NewRequest(\"DELETE\", fullURL.String(), nil)\n\t\tresp, err = c.httpclient.Do(req)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\treturn json.NewDecoder(resp.Body).Decode(target)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func 
(client StorageTargetsClient) GetResponder(resp *http.Response) (result StorageTarget, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (r *vdm) Path(volumeName, volumeID string) (string, error) {\n\tfor _, d := range r.drivers {\n\t\tfields := log.Fields{\n\t\t\t\"moduleName\": r.rexray.Context,\n\t\t\t\"driverName\": d.Name(),\n\t\t\t\"volumeName\": volumeName,\n\t\t\t\"volumeID\": volumeID}\n\n\t\tlog.WithFields(fields).Info(\"vdm.Path\")\n\n\t\tif !r.pathCache() {\n\t\t\treturn d.Path(volumeName, volumeID)\n\t\t}\n\n\t\tif _, ok := r.mapUsedCount[volumeName]; !ok {\n\t\t\tlog.WithFields(fields).Debug(\"skipping path lookup\")\n\t\t\treturn \"\", nil\n\t\t}\n\n\t\treturn d.Path(volumeName, volumeID)\n\t}\n\treturn \"\", errors.ErrNoVolumesDetected\n}", "func (o FioSpecVolumeVolumeSourceVsphereVolumePtrOutput) VolumePath() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceVsphereVolume) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VolumePath\n\t}).(pulumi.StringPtrOutput)\n}", "func (client *KeyVaultClient) getDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientGetDeletedKeyResponse, error) {\n\tresult := KeyVaultClientGetDeletedKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil {\n\t\treturn KeyVaultClientGetDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) recoverDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientRecoverDeletedKeyResponse, error) {\n\tresult := KeyVaultClientRecoverDeletedKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientRecoverDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (fp 
*ListAuditedResourceDescriptorsResponse_FieldTerminalPath) Get(source *ListAuditedResourceDescriptorsResponse) (values []interface{}) {\n\tif source != nil {\n\t\tswitch fp.selector {\n\t\tcase ListAuditedResourceDescriptorsResponse_FieldPathSelectorAuditedResourceDescriptors:\n\t\t\tfor _, value := range source.GetAuditedResourceDescriptors() {\n\t\t\t\tvalues = append(values, value)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsResponse_FieldPathSelectorPrevPageToken:\n\t\t\tif source.PrevPageToken != nil {\n\t\t\t\tvalues = append(values, source.PrevPageToken)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsResponse_FieldPathSelectorNextPageToken:\n\t\t\tif source.NextPageToken != nil {\n\t\t\t\tvalues = append(values, source.NextPageToken)\n\t\t\t}\n\t\tcase ListAuditedResourceDescriptorsResponse_FieldPathSelectorCurrentOffset:\n\t\t\tvalues = append(values, source.CurrentOffset)\n\t\tcase ListAuditedResourceDescriptorsResponse_FieldPathSelectorTotalResultsCount:\n\t\t\tvalues = append(values, source.TotalResultsCount)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for ListAuditedResourceDescriptorsResponse: %d\", fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func Get(c *golangsdk.ServiceClient, server_id string, volume_id string) (r GetResult) {\n\t_, r.Err = c.Get(getURL(c, server_id, volume_id), &r.Body, nil)\n\treturn\n}", "func (proxy *remoteDriverProxy) Path(name string) (string, error) {\n\tvar req = remoteVolumePathReq{\n\t\tName: name,\n\t}\n\n\tvar resp remoteVolumePathResp\n\n\tif err := proxy.client.CallService(remoteVolumePathService, &req, &resp, true); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn \"\", errors.New(resp.Err)\n\t}\n\n\treturn resp.Mountpoint, nil\n}", "func (client *KeyVaultClient) importKeyHandleResponse(resp *http.Response) (KeyVaultClientImportKeyResponse, error) {\n\tresult := KeyVaultClientImportKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, 
&result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientImportKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func GetVolumeInfo(awsSession *session.Session, targets []string) (info []VolInfo, err error) {\n\tclient := ec2.New(awsSession)\n\tinfo = make([]VolInfo, 0)\n\n\tfilters := make([]*ec2.Filter, 0)\n\n\tparams := &ec2.DescribeVolumesInput{}\n\n\t// process targets and massage them into aws type variables\n\tif targets != nil {\n\t\tawsnames := make([]*string, 0)\n\n\t\tfor _, name := range targets {\n\t\t\tawsnames = append(awsnames, aws.String(name))\n\t\t}\n\n\t\tnameFilter := ec2.Filter{\n\t\t\tName: aws.String(\"attachment.instance-id\"),\n\t\t\tValues: awsnames,\n\t\t}\n\n\t\tfilters = append(filters, &nameFilter)\n\t}\n\n\t// add the filters if they exist\n\tif len(filters) > 0 {\n\t\tparams.Filters = filters\n\t}\n\n\t// actually call aws for volume information\n\tresult, err := client.DescribeVolumes(params)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tdefault:\n\t\t\t\terr = errors.Wrapf(aerr, \"error searching volumes\")\n\t\t\t\treturn info, err\n\t\t\t}\n\t\t} else {\n\t\t\terr = errors.Wrapf(err, \"error searching volumes\")\n\t\t\treturn info, err\n\t\t}\n\t}\n\n\t// loop through the resulting info, and set up the info we need\n\tfor _, vol := range result.Volumes {\n\t\tinstanceId := *vol.Attachments[0].InstanceId\n\t\tdeviceName := *vol.Attachments[0].Device\n\n\t\ti := VolInfo{\n\t\t\tInstanceId: instanceId,\n\t\t\tDeviceName: deviceName,\n\t\t\tVolumeId: *vol.VolumeId,\n\t\t}\n\n\t\tinfo = append(info, i)\n\t}\n\n\treturn info, err\n}", "func (client MultipleResponsesClient) Get200ModelA400ValidResponder(resp *http.Response) (result A, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n 
return\n}", "func (o FioSpecVolumeVolumeSourcePortworxVolumeOutput) VolumeID() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourcePortworxVolume) string { return v.VolumeID }).(pulumi.StringOutput)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func GetIDDevicePath(id int) string {\n\tparam0 := strconv.Itoa(id)\n\n\treturn fmt.Sprintf(\"/sources/devices/%s\", param0)\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (client *KeyVaultClient) getDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientGetDeletedKeyResponse, error) {\n\tresult := KeyVaultClientGetDeletedKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyBundle); err != nil {\n\t\treturn KeyVaultClientGetDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) recoverDeletedKeyHandleResponse(resp *http.Response) (KeyVaultClientRecoverDeletedKeyResponse, error) {\n\tresult := KeyVaultClientRecoverDeletedKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientRecoverDeletedKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client Client) GetListResponder(resp *http.Response) (result VolumeInstanceListResponse, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client ListManagementImageClient) GetAllImageIdsResponder(resp *http.Response) (result ImageIds, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (f *FileLocationUnavailable) GetVolumeID() (value int64) {\n\treturn f.VolumeID\n}", "func (d *DirDriver) Path(req *volume.PathRequest) (*volume.PathResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Path() endpoint\")\n\n\t// TODO: Should we return error if not mounted?\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\treturn &volume.PathResponse{\n\t\tMountpoint: vol.path,\n\t}, nil\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (client *KeyVaultClient) importKeyHandleResponse(resp *http.Response) (KeyVaultClientImportKeyResponse, error) {\n\tresult := KeyVaultClientImportKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil {\n\t\treturn KeyVaultClientImportKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *IscsiTargetsClient) getHandleResponse(resp *http.Response) (IscsiTargetsClientGetResponse, error) {\n\tresult := IscsiTargetsClientGetResponse{}\n\tif err := 
runtime.UnmarshalAsJSON(resp, &result.IscsiTarget); err != nil {\n\t\treturn IscsiTargetsClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o InstanceGroupManagerStatusResponsePtrOutput) VersionTarget() InstanceGroupManagerStatusVersionTargetResponsePtrOutput {\n\treturn o.ApplyT(func(v *InstanceGroupManagerStatusResponse) *InstanceGroupManagerStatusVersionTargetResponse {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.VersionTarget\n\t}).(InstanceGroupManagerStatusVersionTargetResponsePtrOutput)\n}", "func GetProductPath(id string) string {\n\treturn fmt.Sprintf(\"/api/product/%v\", id)\n}", "func (s *Store) documentationPathIDToFilePath(ctx context.Context, bundleID int, pathID string) (_ *string, err error) {\n\tctx, _, endObservation := s.operations.documentationPathIDToFilePath.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{\n\t\tlog.Int(\"bundleID\", bundleID),\n\t\tlog.String(\"pathID\", pathID),\n\t}})\n\tdefer endObservation(1, observation.Args{})\n\n\treturn s.scanFirstDocumentationFilePath(s.Store.Query(ctx, sqlf.Sprintf(documentationPathIDToFilePathQuery, bundleID, pathID)))\n}", "func GetVolumeV2(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *VolumeV2State, opts ...pulumi.ResourceOption) (*VolumeV2, error) {\n\tvar resource VolumeV2\n\terr := ctx.ReadResource(\"openstack:blockstorage/volumeV2:VolumeV2\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (fs *FS) TargetIPLUNToDevicePath(ctx context.Context, targetIP string, lunID int) (map[string]string, error) {\n\treturn 
fs.targetIPLUNToDevicePath(ctx, targetIP, lunID)\n}", "func ParseGetInvoicesIdResponse(rsp *http.Response) (*GetInvoicesIdResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetInvoicesIdResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest Invoice\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (o IopingSpecVolumeVolumeSourceDownwardAPIItemsOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceDownwardAPIItems) string { return v.Path }).(pulumi.StringOutput)\n}", "func (a *Agent) DirGetId(path string) (dirId string, err error) {\n\tpath = strings.TrimPrefix(path, \"/\")\n\tspec := (&api.DirLocateSpec{}).Init(path)\n\tif err = a.pc.ExecuteApi(spec); err != nil {\n\t\treturn\n\t}\n\tif spec.Result == \"0\" {\n\t\terr = os.ErrNotExist\n\t} else {\n\t\tdirId = spec.Result\n\t}\n\treturn\n}", "func (o ControlPolicyAttachmentOutput) TargetId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ControlPolicyAttachment) pulumi.StringOutput { return v.TargetId }).(pulumi.StringOutput)\n}", "func (client *KeyVaultClient) unwrapKeyHandleResponse(resp *http.Response) (KeyVaultClientUnwrapKeyResponse, error) {\n\tresult := KeyVaultClientUnwrapKeyResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil {\n\t\treturn KeyVaultClientUnwrapKeyResponse{}, err\n\t}\n\treturn result, nil\n}", "func (o *GetMenuItemInformation200Response) GetIdOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Id, true\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn 
autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func (client Client) GetDetailResponder(resp *http.Response) (result VolumeInstanceDetailResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o CachesIscsiVolumeOutput) TargetArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *CachesIscsiVolume) pulumi.StringOutput { return v.TargetArn }).(pulumi.StringOutput)\n}", "func (fp *WatchAuditedResourceDescriptorsResponse_FieldTerminalPath) Get(source *WatchAuditedResourceDescriptorsResponse) (values []interface{}) {\n\tif source != nil {\n\t\tswitch fp.selector {\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorAuditedResourceDescriptorChanges:\n\t\t\tfor _, value := range source.GetAuditedResourceDescriptorChanges() {\n\t\t\t\tvalues = append(values, value)\n\t\t\t}\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorIsCurrent:\n\t\t\tvalues = append(values, source.IsCurrent)\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorPageTokenChange:\n\t\t\tif source.PageTokenChange != nil {\n\t\t\t\tvalues = append(values, source.PageTokenChange)\n\t\t\t}\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorResumeToken:\n\t\t\tvalues = append(values, source.ResumeToken)\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorSnapshotSize:\n\t\t\tvalues = append(values, source.SnapshotSize)\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorIsSoftReset:\n\t\t\tvalues = append(values, source.IsSoftReset)\n\t\tcase WatchAuditedResourceDescriptorsResponse_FieldPathSelectorIsHardReset:\n\t\t\tvalues = append(values, source.IsHardReset)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Invalid selector for WatchAuditedResourceDescriptorsResponse: %d\", 
fp.selector))\n\t\t}\n\t}\n\treturn\n}", "func (client SmartGroupsClient) GetByIDResponder(resp *http.Response) (result SmartGroup, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
[ "0.7914075", "0.7818267", "0.7605349", "0.69360304", "0.67227316", "0.64546144", "0.55662817", "0.501362", "0.49515504", "0.481254", "0.47631708", "0.4599975", "0.45420316", "0.44987702", "0.44666418", "0.43836895", "0.4374087", "0.42389584", "0.42324293", "0.4218155", "0.41752484", "0.41747043", "0.41603392", "0.41360754", "0.4135206", "0.4134939", "0.4132842", "0.4119672", "0.41134948", "0.40973976", "0.40809965", "0.4069349", "0.40591145", "0.4054494", "0.40342477", "0.40319723", "0.400652", "0.39583436", "0.39556956", "0.3944406", "0.39408553", "0.39329883", "0.39273053", "0.39263314", "0.39142", "0.3908815", "0.38996047", "0.3888147", "0.38829267", "0.3877381", "0.38773412", "0.38656324", "0.38651258", "0.38647974", "0.38601348", "0.38595435", "0.38467616", "0.38465783", "0.3841073", "0.38262868", "0.38231993", "0.38194302", "0.38026005", "0.37916586", "0.3790803", "0.3788979", "0.37888277", "0.3781359", "0.37804228", "0.3777557", "0.3776697", "0.37758172", "0.37756902", "0.377182", "0.3770149", "0.37614176", "0.37594652", "0.375918", "0.37578326", "0.37550887", "0.37501308", "0.37392834", "0.37381294", "0.37291193", "0.37274346", "0.37257925", "0.37257817", "0.37254992", "0.37248883", "0.3723912", "0.37228513", "0.37162164", "0.37108734", "0.37108475", "0.37058693", "0.36991528", "0.36967614", "0.36939186", "0.36926368", "0.36865735" ]
0.86672944
0
Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest is an autogenerated conversion function.
func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error { return autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func 
Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (d *VolumeDriver) Get(r volume.Request) volume.Response {\n\tlog.Errorf(\"VolumeDriver Get to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", 
req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Get() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Did not find volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tlogrus.Debugf(\"Found volume %s\", req.Name)\n\n\tresp := new(volume.GetResponse)\n\tresp.Volume = 
new(volume.Volume)\n\tresp.Volume.Name = vol.name\n\tresp.Volume.Mountpoint = vol.path\n\tresp.Volume.CreatedAt = vol.createTime.String()\n\n\treturn resp, nil\n}", "func Convert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in *v1alpha1.GetServiceRequest, out *internal.GetServiceRequest) error {\n\treturn autoConvert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in, out)\n}", "func (d *VolumeDriver) GetVolume(name string) (map[string]interface{}, error) {\n\tvar statusMap map[string]interface{}\n\tstatusMap = make(map[string]interface{})\n\tlog.Errorf(\"VolumeDriver GetVolume to be implemented\")\n\treturn statusMap, nil\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, &cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func (d *MinioDriver) Get(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func NewGetMonitoringReportRequest() *GetMonitoringReportRequest {\n\treturn &GetMonitoringReportRequest{}\n}", "func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"must provide non-nil request to GetVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogrus.Infof(\"Getting volume %s using plugin %s\", req.Name, p.Name)\n\n\tresp, err := p.sendRequest(req, getPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer 
resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, getPath, req.Name); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgetRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tgetResp := new(volume.GetResponse)\n\tif err := json.Unmarshal(getRespBytes, getResp); err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling volume plugin %s get response: %w\", p.Name, err)\n\t}\n\n\treturn getResp.Volume, nil\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func GetSystemRequestRPC(req systemsproto.GetSystemsRequest) (*systemsproto.SystemsResponse, error) {\n\tconn, err := services.ODIMService.Client(services.Systems)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create client connection: %v\", err)\n\t}\n\tdefer conn.Close()\n\tasService := systemsproto.NewSystemsClient(conn)\n\tresp, err := asService.GetSystems(context.TODO(), &req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error: RPC error: %v\", err)\n\t}\n\treturn resp, nil\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func (adminport *Adminport) 
doGetStatisticsRequest(request *http.Request) (*ap.Response, error) {\n\tlogger_ap.Debugf(\"doGetStatisticsRequest\\n\")\n\n\tresponse, err := authWebCreds(request, base.PermissionXDCRInternalRead)\n\tif response != nil || err != nil {\n\t\treturn response, err\n\t}\n\n\t//pass the request to get the bucket name\n\tbucket, err := DecodeDynamicParamInURL(request, StatisticsPrefix, \"Bucket Name\")\n\tif err != nil {\n\t\treturn EncodeReplicationValidationErrorIntoResponse(err)\n\t}\n\n\tstatsMap, err := GetStatistics(bucket)\n\tif err == nil {\n\t\tif statsMap == nil {\n\t\t\treturn NewEmptyArrayResponse()\n\t\t}\n\t\treturn EncodeByteArrayIntoResponse([]byte(statsMap.String()))\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func DecodeHTTPGetStatRequest(_ context.Context, r *http.Request) (interface{}, error) {\n\tvar req GetStatRequest\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\tif err != nil {\n\t\treturn nil, RequestError\n\t}\n\treturn req, err\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Request, error) {\n\treq, err := newTestRequest(cmd.apiMethod(), \"/?service\", 0, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set body\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\t// minioAdminOpHeader is to identify the request as a\n\t// management REST API request.\n\treq.Header.Set(minioAdminOpHeader, cmd.String())\n\treq.Header.Set(\"X-Amz-Content-Sha256\", getSHA256Hash(body))\n\n\t// management REST API uses signature V4 for authentication.\n\terr = signRequestV4(req, cred.AccessKey, cred.SecretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (mr *MockNuvoVMMockRecorder) GetVolumeStats(arg0, arg1 interface{}) *gomock.Call 
{\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetVolumeStats\", reflect.TypeOf((*MockNuvoVM)(nil).GetVolumeStats), arg0, arg1)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (or *operationRequestStore) GetRequestDetails(\n\tctx context.Context,\n\tname string,\n) (*VolumeOperationRequestDetails, error) {\n\tlog := logger.GetLogger(ctx)\n\tinstanceKey := client.ObjectKey{Name: name, Namespace: csiNamespace}\n\tlog.Debugf(\"Getting CnsVolumeOperationRequest instance with name %s/%s\", instanceKey.Namespace, instanceKey.Name)\n\n\tinstance := &cnsvolumeoprequestv1alpha1.CnsVolumeOperationRequest{}\n\terr := or.k8sclient.Get(ctx, instanceKey, instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Debugf(\"Found CnsVolumeOperationRequest instance %v\", spew.Sdump(instance))\n\n\tif len(instance.Status.LatestOperationDetails) == 0 {\n\t\treturn nil, fmt.Errorf(\"length of LatestOperationDetails expected to be greater than 1 if the instance exists\")\n\t}\n\n\t// Callers only need to know about the last operation that was invoked on a volume.\n\toperationDetailsToReturn := instance.Status.LatestOperationDetails[len(instance.Status.LatestOperationDetails)-1]\n\n\treturn CreateVolumeOperationRequestDetails(instance.Spec.Name, instance.Status.VolumeID, instance.Status.SnapshotID,\n\t\t\tinstance.Status.Capacity, operationDetailsToReturn.TaskInvocationTimestamp, operationDetailsToReturn.TaskID,\n\t\t\toperationDetailsToReturn.VCenterServer, operationDetailsToReturn.OpID, operationDetailsToReturn.TaskStatus,\n\t\t\toperationDetailsToReturn.Error),\n\t\tnil\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup 
request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func (*GetSystemStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_system_service_proto_rawDescGZIP(), []int{2}\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func (*CBroadcast_GetBroadcastViewerStats_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{62}\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and 
common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif req.VolumeId == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume ID must be provided\")\n\t}\n\n\tvolumePath := req.VolumePath\n\tif volumePath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume Path must be provided\")\n\t}\n\n\tlog := d.log.WithFields(logrus.Fields{\n\t\t\"volume_id\": req.VolumeId,\n\t\t\"volume_path\": req.VolumePath,\n\t\t\"method\": \"node_get_volume_stats\",\n\t})\n\tlog.Info(\"node get volume stats called\")\n\n\tmounted, err := d.mounter.IsMounted(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check if volume path %q is mounted: %s\", volumePath, err)\n\t}\n\n\tif !mounted {\n\t\treturn nil, status.Errorf(codes.NotFound, \"volume path %q is not mounted\", volumePath)\n\t}\n\n\tisBlock, err := d.mounter.IsBlockDevice(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to determine if %q is block device: %s\", volumePath, err)\n\t}\n\n\tstats, err := d.mounter.GetStatistics(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to retrieve capacity statistics for 
volume path %q: %s\", volumePath, err)\n\t}\n\n\t// only can retrieve total capacity for a block device\n\tif isBlock {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"volume_mode\": volumeModeBlock,\n\t\t\t\"bytes_total\": stats.totalBytes,\n\t\t}).Info(\"node capacity statistics retrieved\")\n\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"volume_mode\": volumeModeFilesystem,\n\t\t\"bytes_available\": stats.availableBytes,\n\t\t\"bytes_total\": stats.totalBytes,\n\t\t\"bytes_used\": stats.usedBytes,\n\t\t\"inodes_available\": stats.availableInodes,\n\t\t\"inodes_total\": stats.totalInodes,\n\t\t\"inodes_used\": stats.usedInodes,\n\t}).Info(\"node capacity statistics retrieved\")\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableBytes,\n\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\tUsed: stats.usedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t},\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableInodes,\n\t\t\t\tTotal: stats.totalInodes,\n\t\t\t\tUsed: stats.usedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func GetVolumeStats(address string, obj interface{}) (error, int) {\n\tcontroller, err := NewControllerClient(address)\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\turl := controller.address + \"/stats\"\n\tresp, err := controller.httpClient.Get(url)\n\tif resp != nil {\n\t\tif resp.StatusCode == 500 {\n\t\t\treturn err, 500\n\t\t} else if resp.StatusCode == 503 {\n\t\t\treturn err, 503\n\t\t}\n\t} else {\n\t\treturn err, -1\n\t}\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\tdefer resp.Body.Close()\n\trc := json.NewDecoder(resp.Body).Decode(obj)\n\treturn rc, 0\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx 
context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (s *Service) Stats(ctx context.Context, in *pb.Request) (*pb.Stats, error) {\n\ts.Entry.Debugf(\"Received new stats request: %v\", in)\n\n\ts.Entry.Debugf(\"Contex is: %q\", ctx)\n\n\tstats, err := s.Slack.GetStatsPb()\n\n\tif err != nil {\n\t\ts.Entry.Errorf(\"Failed to retreive stats from slack: %s\", err)\n\t}\n\n\treturn stats, err\n}", "func NewGetReportRequest() *GetReportRequest {\n\treturn &GetReportRequest{}\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tisBlock, err := hostutil.NewHostUtil().PathIsDevice(req.VolumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to 
determine whether %s is block device: %v\", req.VolumePath, err)\n\t}\n\tif isBlock {\n\t\tbcap, err := d.mounter.GetStatistics(req.GetVolumePath())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to get block capacity on path %s: %v\", req.VolumePath, err)\n\t\t}\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: bcap.TotalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: 
csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewLunGetSerialNumberRequest() *LunGetSerialNumberRequest {\n\treturn &LunGetSerialNumberRequest{}\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func (vk *VK) StatsGet(params Params) (response StatsGetResponse, err error) {\n\terr = vk.RequestUnmarshal(\"stats.get\", &response, params)\n\treturn\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil 
{\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func (me *XsdGoPkgHasElem_GetRequesterStatisticRequestsequenceTxsdRequestRequestschema_GetRequesterStatisticRequest_TGetRequesterStatisticRequest_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_GetRequesterStatisticRequestsequenceTxsdRequestRequestschema_GetRequesterStatisticRequest_TGetRequesterStatisticRequest_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.GetRequesterStatisticRequest.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func GetStat() map[string]interface{} {\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\t// For info on each, 
see: https://golang.org/pkg/runtime/#MemStats\n\talloc := m.Alloc / 1024 / 1024\n\ttotalAlloc := m.TotalAlloc / 1024 / 1024\n\tsys := m.Sys / 1024 / 1024\n\tday, hour, min, sec := reqcounter.GetCounters()\n\treturn map[string]interface{}{\n\t\t\"alloc\": alloc,\n\t\t\"total_alloc\": totalAlloc,\n\t\t\"sys\": sys,\n\t\t\"requests_per_day\": day,\n\t\t\"requests_per_hour\": hour,\n\t\t\"requests_per_minute\": min,\n\t\t\"requests_per_second\": sec,\n\t}\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func hnsEndpointStatsRequest(id string) (*EndpointStats, error) {\n\tvar stats EndpointStats\n\terr := hnsCall(\"GET\", \"/endpointstats/\"+id, \"\", &stats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &stats, nil\n}", "func NewGetOSListRequest(page int, size int) *GetOSListRequest {\n\treturn &GetOSListRequest{\n\t\tPage: page,\n\t\tSize: size,\n\t}\n}", "func (client VolumesClient) Get(ctx context.Context, location string, storageSubSystem string, storagePool string, volume string) (result Volume, err error) {\n\treq, err := client.GetPreparer(ctx, location, storageSubSystem, storagePool, volume)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", resp, \"Failure sending 
request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func newGetScoreRequest(name string) *http.Request {\n\treq, _ := http.NewRequest(http.MethodGet, fmt.Sprintf(\"/players/%s\", name), nil)\n\treturn req\n}", "func (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\tif _, err := os.Lstat(req.VolumePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get metrics: %v\", err)\n\t}\n\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes 
free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func (v *VolumesServiceMock) Get(podUID string, name string) (vol *api.Volume, err error) {\n\targs := v.Called(podUID, name)\n\tx := args.Get(0)\n\tif x != nil {\n\t\tvol = x.(*api.Volume)\n\t}\n\terr = args.Error(1)\n\treturn\n}", "func GetVolumeStatus(hostName, volumeName string) (map[string]string, error) {\n\tformatStr1 := \" --format '{{index .Status.access}} {{index .Status \\\"attach-as\\\"}} {{index .Status.capacity.allocated}} {{index .Status.capacity.size}} {{index .Status \\\"clone-from\\\"}}\"\n\tformatStr2 := \" {{index .Status \\\"created by VM\\\"}} {{index .Status.datastore}} {{index .Status.diskformat}} {{index .Status.fstype}} {{index .Status.status}} {{index .Status \\\"attached to VM\\\"}}'\"\n\n\tcmd := dockercli.InspectVolume + volumeName + formatStr1 + formatStr2\n\tout, err := ssh.InvokeCommand(hostName, cmd)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatus := 
make(map[string]string)\n\tval := strings.Fields(out)\n\n\tfor i := 0; i < len(dockercli.VolumeStatusFields); i++ {\n\t\tstatus[dockercli.VolumeStatusFields[i]] = val[i]\n\t}\n\treturn status, nil\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (o *VersionedFlowUpdateRequestEntity) GetRequest() VersionedFlowUpdateRequestDTO {\n\tif o == nil || o.Request == nil {\n\t\tvar ret VersionedFlowUpdateRequestDTO\n\t\treturn ret\n\t}\n\treturn *o.Request\n}", "func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tvolumePath := req.GetVolumePath()\n\tif volumePath == \"\" {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"volumePath %v is empty\", volumePath)\n\t}\n\n\texists, err := utilpath.Exists(utilpath.CheckFollowSymlink, volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check whether volumePath exists: %s\", err)\n\t}\n\tif !exists {\n\t\treturn nil, status.Errorf(codes.NotFound, \"target: %s not found\", volumePath)\n\t}\n\n\tstats, err := util.GetDeviceStats(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get stats by path: %s\", err)\n\t}\n\n\tklog.V(5).Infof(util.Log(ctx, \"get volumePath %q stats: %+v\"), volumePath, stats)\n\n\tif stats.Block {\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tTotal: stats.TotalBytes,\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tTotal: stats.TotalBytes,\n\t\t\t\tAvailable: stats.AvailableBytes,\n\t\t\t\tUsed: stats.UsedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t}, {\n\t\t\t\tTotal: 
stats.TotalInodes,\n\t\t\t\tAvailable: stats.AvailableInodes,\n\t\t\t\tUsed: stats.UsedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,\n\tquerySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {\n\tlog := logger.GetLogger(ctx)\n\tvar queryAsyncNotSupported bool\n\tvar queryResult *cnstypes.CnsQueryResult\n\tvar err error\n\tif useQueryVolumeAsync {\n\t\t// AsyncQueryVolume feature switch is enabled.\n\t\tqueryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)\n\t\tif err != nil {\n\t\t\tif err.Error() == cnsvsphere.ErrNotSupported.Error() {\n\t\t\t\tlog.Warn(\"QueryVolumeAsync is not supported. Invoking QueryVolume API\")\n\t\t\t\tqueryAsyncNotSupported = true\n\t\t\t} else { // Return for any other failures.\n\t\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\t\"queryVolumeAsync failed for queryFilter: %v. Err=%+v\", queryFilter, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif !useQueryVolumeAsync || queryAsyncNotSupported {\n\t\tqueryResult, err = m.QueryVolume(ctx, queryFilter)\n\t\tif err != nil {\n\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\"queryVolume failed for queryFilter: %+v. 
Err=%+v\", queryFilter, err.Error())\n\t\t}\n\t}\n\treturn queryResult, nil\n}", "func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"GetVolInfo failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1, nil\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpGetVolInfoReq := &vp.GetVolInfoReq{\n\t\tUUID: name,\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tack, err := vc.GetVolInfo(ctx, pGetVolInfoReq)\n\tif err != nil || ack.Ret != 0 {\n\t\treturn -1, &vp.GetVolInfoAck{}\n\t}\n\treturn 0, ack\n}", "func (d *NodeDriver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"NodeGetVolumeStats is not supported yet\")\n}", "func DecodeGrpcReqMetricsQuerySpec(ctx context.Context, request interface{}) (interface{}, error) {\n\treq := request.(*MetricsQuerySpec)\n\treturn req, nil\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (a *RequestServiceApiService) GetStatus(ctx _context.Context) ApiGetStatusRequest {\n\treturn ApiGetStatusRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func 
Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MockNuvoVM) GetVolumeStats(arg0 bool, arg1 string) (*nuvoapi.StatsCombinedVolume, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetVolumeStats\", arg0, arg1)\n\tret0, _ := ret[0].(*nuvoapi.StatsCombinedVolume)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func NewGetVMVolumeMetricsOK() *GetVMVolumeMetricsOK {\n\treturn &GetVMVolumeMetricsOK{}\n}", "func (state *State) GetVolumeMetrics() []*metrics.ZMetricVolume {\n\treturn state.deviceInfo.VolumeMetrics\n}", "func (me *PROTECTEDSOURCEVOLUMEINFO_IMPL) GetProtectedSourceVolumeInfo(\n\tSourceInfo *models.GetSourceVolumeInfoParams) (*models.ProtectedSourceVolumeInfo, error) {\n\t//the endpoint path uri\n\t_pathUrl := \"/protectedSourceVolumeInfo/{sourceId}\"\n\n\t//variable to hold errors\n\tvar err error = nil\n\t//process optional template parameters\n\t_pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{}{\n\t\t\"sourceId\": 
SourceInfo.SourceId,\n\t})\n\tif err != nil {\n\t\t//error in template param handling\n\t\treturn nil, err\n\t}\n\n\t//the base uri for api requests\n\t_queryBuilder := configuration.GetBaseURI(configuration.ENUM_DEFAULT, me.config)\n\n\t//prepare query string for API call\n\t_queryBuilder = _queryBuilder + _pathUrl\n\n\t//validate and preprocess url\n\t_queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\n\tif err != nil {\n\t\t//error in url validation or cleaning\n\t\treturn nil, err\n\t}\n\t//prepare headers for the outgoing request\n\theaders := map[string]interface{}{\n\t\t\"user-agent\": \"app-Go-sdk-1.1.1\",\n\t\t\"accept\": \"application/json\",\n\t\t\"Authorization\": fmt.Sprintf(\"Bearer %s\", me.config.OAuthAccessToken()),\n\t}\n\n\t//prepare API request\n\t_request := unirest.Get(_queryBuilder, headers)\n\t//and invoke the API call request to fetch the response\n\t_response, err := unirest.AsString(_request, me.config.SkipSSL())\n\tif err != nil {\n\t\t//error in API invocation\n\t\treturn nil, err\n\t}\n\n\t//error handling using HTTP status codes\n\tif _response.Code == 401 {\n\t\terr = apihelper.NewAPIError(\"Unauthorized\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 404 {\n\t\terr = apihelper.NewAPIError(\"Snapshot does not exist.\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 500 {\n\t\terr = apihelper.NewAPIError(\"Unexpected error\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 502 {\n\t\terr = apihelper.NewAPIError(\"Bad Gateway.\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 504 {\n\t\terr = apihelper.NewAPIError(\"Gateway Timeout.\", _response.Code, _response.RawBody)\n\t} else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\n\t\terr = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\n\t}\n\tif err != nil {\n\t\t//error detected in status code validation\n\t\treturn nil, 
err\n\t}\n\n\t//returning the response\n\tvar retVal *models.ProtectedSourceVolumeInfo = &models.ProtectedSourceVolumeInfo{}\n\terr = json.Unmarshal(_response.RawBody, &retVal)\n\n\tif err != nil {\n\t\t//error in parsing\n\t\treturn nil, err\n\t}\n\treturn retVal, nil\n\n}", "func (a *RequestServiceApiService) GetRequest(ctx _context.Context, uuid string) ApiGetRequestRequest {\n\treturn ApiGetRequestRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuuid: uuid,\n\t}\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func (c *UFSClient) NewDescribeUFSVolumePriceRequest() *DescribeUFSVolumePriceRequest {\n\treq := &DescribeUFSVolumePriceRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (me *XsdGoPkgHasElem_RequestsequenceGetRequesterStatisticschema_Request_TGetRequesterStatisticRequest_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_RequestsequenceGetRequesterStatisticschema_Request_TGetRequesterStatisticRequest_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.Request.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (c *UPHostClient) NewGetPhostDiskUpgradePriceRequest() *GetPhostDiskUpgradePriceRequest {\n\treq := &GetPhostDiskUpgradePriceRequest{}\n\n\t// setup request with client 
config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (*MemcacheStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_memcache_service_proto_rawDescGZIP(), []int{15}\n}", "func (_m *Route53API) GetHealthCheckStatusRequest(_a0 *route53.GetHealthCheckStatusInput) route53.GetHealthCheckStatusRequest {\n\tret := _m.Called(_a0)\n\n\tvar r0 route53.GetHealthCheckStatusRequest\n\tif rf, ok := ret.Get(0).(func(*route53.GetHealthCheckStatusInput) route53.GetHealthCheckStatusRequest); ok {\n\t\tr0 = rf(_a0)\n\t} else {\n\t\tr0 = ret.Get(0).(route53.GetHealthCheckStatusRequest)\n\t}\n\n\treturn r0\n}", "func (me *XsdGoPkgHasElems_GetRequesterStatisticRequestsequenceTxsdRequestRequestschema_GetRequesterStatisticRequest_TGetRequesterStatisticRequest_) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElems_GetRequesterStatisticRequestsequenceTxsdRequestRequestschema_GetRequesterStatisticRequest_TGetRequesterStatisticRequest_; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfor _, x := range me.GetRequesterStatisticRequests {\n\t\t\tif err = x.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func NewGetVMVolumeDefault(code int) *GetVMVolumeDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetVMVolumeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (r Virtual_Guest) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getUpgradeRequest\", nil, &r.Options, &resp)\n\treturn\n}", "func 
NewRequestStats(method, path string) *RequestStats {\n\treturn &RequestStats{\n\t\tmethod: method,\n\t\tpath: path,\n\t\tRespAvg: NewSampleCollector(),\n\t}\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (proxy *remoteDriverProxy) Get(name string) (*remoteVolumeDesc, error) {\n\tvar req = remoteVolumeGetReq{\n\t\tName: name,\n\t}\n\n\tvar resp remoteVolumeGetResp\n\n\tif err := proxy.client.CallService(remoteVolumeGetService, &req, &resp, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn nil, errors.New(resp.Err)\n\t}\n\n\treturn resp.Volume, nil\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}" ]
[ "0.69365644", "0.58307946", "0.57160366", "0.54200524", "0.5203708", "0.51507676", "0.5114978", "0.50979364", "0.493653", "0.49263847", "0.4827913", "0.4814798", "0.4759872", "0.47529024", "0.4683138", "0.46795532", "0.4608202", "0.46007058", "0.4578901", "0.4578121", "0.45747557", "0.4535788", "0.45308223", "0.45277813", "0.45214775", "0.4494844", "0.44851208", "0.4477937", "0.44708863", "0.44689295", "0.44170037", "0.44038177", "0.44018453", "0.43975925", "0.43947083", "0.43473527", "0.43130723", "0.42895114", "0.42829633", "0.4282384", "0.4274504", "0.427261", "0.42716423", "0.42716423", "0.426225", "0.4244514", "0.42430088", "0.4231421", "0.42146075", "0.42005715", "0.41899687", "0.41884306", "0.41686007", "0.41636318", "0.41519526", "0.41258803", "0.41118142", "0.41093606", "0.4109221", "0.41004512", "0.4097198", "0.40736464", "0.40343463", "0.40321115", "0.40286922", "0.4024406", "0.40213668", "0.4018634", "0.401039", "0.4005", "0.40028882", "0.39980942", "0.39974093", "0.39928064", "0.3986276", "0.39837983", "0.39832857", "0.39696553", "0.39641064", "0.39631718", "0.39607757", "0.39598462", "0.39582488", "0.39542624", "0.39486265", "0.39429733", "0.39395976", "0.39386332", "0.39289722", "0.39236292", "0.39181912", "0.39134544", "0.39099914", "0.39098877", "0.39097258", "0.3906381", "0.39052713", "0.39031968", "0.38897452", "0.38854906" ]
0.8682149
0
Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest is an autogenerated conversion function.
func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error { return autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func 
Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func NewGetMonitoringReportRequest() *GetMonitoringReportRequest {\n\treturn &GetMonitoringReportRequest{}\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn 
autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func 
Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (r *RequestAPI) ListRequestV1(ctx context.Context, req *desc.ListRequestsV1Request) (*desc.ListRequestsV1Response, error) {\n\tlog.Printf(\"Got list request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"ListRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\trequests []models.Request\n\t\terr error\n\t)\n\n\tif req.SearchQuery != \"\" { // ideally would move search to a separate endpoint, so it's easier to extend\n\t\trequests, err = r.searcher.Search(ctx, req.SearchQuery, req.Limit, req.Offset)\n\t} else {\n\t\trequests, err = r.repo.List(ctx, req.Limit, req.Offset)\n\t}\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tStr(\"endpoint\", \"ListRequestV1\").\n\t\t\tUint64(\"limit\", req.Limit).\n\t\t\tUint64(\"offset\", req.Offset).\n\t\t\tMsgf(\"Failed to list requests\")\n\t\tr.producer.Send(producer.NewEvent(ctx, 0, producer.ReadEvent, err))\n\t\treturn nil, err\n\t}\n\n\tret := make([]*desc.Request, 0, len(requests))\n\teventMsgs := make([]producer.EventMsg, 0, len(requests))\n\n\tfor _, req := range requests {\n\t\tret = append(ret, &desc.Request{\n\t\t\tId: req.Id,\n\t\t\tUserId: req.UserId,\n\t\t\tType: req.Type,\n\t\t\tText: req.Text,\n\t\t})\n\t\teventMsgs = append(eventMsgs, producer.NewEvent(ctx, req.Id, producer.ReadEvent, nil))\n\t\tr.producer.Send(eventMsgs...)\n\n\t}\n\tr.metrics.IncList(1, \"ListRequestV1\")\n\treturn &desc.ListRequestsV1Response{\n\t\tRequests: ret,\n\t}, nil\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn 
autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func NewGetReportRequest() *GetReportRequest {\n\treturn &GetReportRequest{}\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func NewLunGetSerialNumberRequest() *LunGetSerialNumberRequest {\n\treturn &LunGetSerialNumberRequest{}\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func DecodeHTTPGetStatRequest(_ context.Context, r *http.Request) (interface{}, error) {\n\tvar req GetStatRequest\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\tif err != nil {\n\t\treturn nil, RequestError\n\t}\n\treturn req, err\n}", "func NewRequestStats(method, path string) *RequestStats {\n\treturn &RequestStats{\n\t\tmethod: method,\n\t\tpath: path,\n\t\tRespAvg: NewSampleCollector(),\n\t}\n}", "func (*CBroadcast_GetBroadcastViewerStats_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{62}\n}", "func (a *RequestServiceApiService) GetRequest(ctx _context.Context, uuid string) ApiGetRequestRequest {\n\treturn ApiGetRequestRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuuid: uuid,\n\t}\n}", "func (client *KeyVaultClient) GetKeyVersionsCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientGetKeyVersionsOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/versions\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", 
url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func newGetScoreRequest(name string) *http.Request {\n\treq, _ := http.NewRequest(http.MethodGet, fmt.Sprintf(\"/players/%s\", name), nil)\n\treturn req\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in *internal.StopServiceRequest, out *v1alpha1.StopServiceRequest) error {\n\treturn autoConvert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in, out)\n}", "func Convert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in *v1alpha1.GetServiceRequest, out *internal.GetServiceRequest) error {\n\treturn autoConvert_v1alpha1_GetServiceRequest_To_internal_GetServiceRequest(in, out)\n}", "func (r *RequestAPI) DescribeRequestV1(ctx context.Context, req *desc.DescribeRequestV1Request) (*desc.DescribeRequestV1Response, error) {\n\tlog.Printf(\"Got describe request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"DescribeRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := r.repo.Describe(ctx, req.RequestId)\n\n\tif errors.Is(err, repository.NotFound) 
{\n\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tStr(\"endpoint\", \"DescribeRequestV1\").\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to read request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.ReadEvent, err))\n\tr.metrics.IncRead(1, \"DescribeRequestV1\")\n\n\treturn &desc.DescribeRequestV1Response{\n\t\tRequest: &desc.Request{\n\t\t\tId: ret.Id,\n\t\t\tUserId: ret.UserId,\n\t\t\tType: ret.Type,\n\t\t\tText: ret.Text,\n\t\t},\n\t}, nil\n\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (a *RequestServiceApiService) GetStatus(ctx _context.Context) ApiGetStatusRequest {\n\treturn ApiGetStatusRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif req.VolumeId == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume ID must be provided\")\n\t}\n\n\tvolumePath := req.VolumePath\n\tif volumePath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume Path must be provided\")\n\t}\n\n\tlog := d.log.WithFields(logrus.Fields{\n\t\t\"volume_id\": req.VolumeId,\n\t\t\"volume_path\": req.VolumePath,\n\t\t\"method\": \"node_get_volume_stats\",\n\t})\n\tlog.Info(\"node get volume stats called\")\n\n\tmounted, err := d.mounter.IsMounted(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check if volume path %q is mounted: %s\", volumePath, err)\n\t}\n\n\tif !mounted {\n\t\treturn nil, status.Errorf(codes.NotFound, \"volume path %q is not mounted\", 
volumePath)\n\t}\n\n\tisBlock, err := d.mounter.IsBlockDevice(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to determine if %q is block device: %s\", volumePath, err)\n\t}\n\n\tstats, err := d.mounter.GetStatistics(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to retrieve capacity statistics for volume path %q: %s\", volumePath, err)\n\t}\n\n\t// only can retrieve total capacity for a block device\n\tif isBlock {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"volume_mode\": volumeModeBlock,\n\t\t\t\"bytes_total\": stats.totalBytes,\n\t\t}).Info(\"node capacity statistics retrieved\")\n\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"volume_mode\": volumeModeFilesystem,\n\t\t\"bytes_available\": stats.availableBytes,\n\t\t\"bytes_total\": stats.totalBytes,\n\t\t\"bytes_used\": stats.usedBytes,\n\t\t\"inodes_available\": stats.availableInodes,\n\t\t\"inodes_total\": stats.totalInodes,\n\t\t\"inodes_used\": stats.usedInodes,\n\t}).Info(\"node capacity statistics retrieved\")\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableBytes,\n\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\tUsed: stats.usedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t},\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableInodes,\n\t\t\t\tTotal: stats.totalInodes,\n\t\t\t\tUsed: stats.usedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (adminport *Adminport) doGetStatisticsRequest(request *http.Request) (*ap.Response, error) {\n\tlogger_ap.Debugf(\"doGetStatisticsRequest\\n\")\n\n\tresponse, err := authWebCreds(request, base.PermissionXDCRInternalRead)\n\tif response != nil || err != nil 
{\n\t\treturn response, err\n\t}\n\n\t//pass the request to get the bucket name\n\tbucket, err := DecodeDynamicParamInURL(request, StatisticsPrefix, \"Bucket Name\")\n\tif err != nil {\n\t\treturn EncodeReplicationValidationErrorIntoResponse(err)\n\t}\n\n\tstatsMap, err := GetStatistics(bucket)\n\tif err == nil {\n\t\tif statsMap == nil {\n\t\t\treturn NewEmptyArrayResponse()\n\t\t}\n\t\treturn EncodeByteArrayIntoResponse([]byte(statsMap.String()))\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func (or *operationRequestStore) GetRequestDetails(\n\tctx context.Context,\n\tname string,\n) (*VolumeOperationRequestDetails, error) {\n\tlog := logger.GetLogger(ctx)\n\tinstanceKey := client.ObjectKey{Name: name, Namespace: csiNamespace}\n\tlog.Debugf(\"Getting CnsVolumeOperationRequest instance with name %s/%s\", instanceKey.Namespace, instanceKey.Name)\n\n\tinstance := &cnsvolumeoprequestv1alpha1.CnsVolumeOperationRequest{}\n\terr := or.k8sclient.Get(ctx, instanceKey, instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Debugf(\"Found CnsVolumeOperationRequest instance %v\", spew.Sdump(instance))\n\n\tif len(instance.Status.LatestOperationDetails) == 0 {\n\t\treturn nil, fmt.Errorf(\"length of LatestOperationDetails expected to be greater than 1 if the instance exists\")\n\t}\n\n\t// Callers only need to know about the last operation that was invoked on a volume.\n\toperationDetailsToReturn := instance.Status.LatestOperationDetails[len(instance.Status.LatestOperationDetails)-1]\n\n\treturn CreateVolumeOperationRequestDetails(instance.Spec.Name, instance.Status.VolumeID, instance.Status.SnapshotID,\n\t\t\tinstance.Status.Capacity, operationDetailsToReturn.TaskInvocationTimestamp, operationDetailsToReturn.TaskID,\n\t\t\toperationDetailsToReturn.VCenterServer, operationDetailsToReturn.OpID, operationDetailsToReturn.TaskStatus,\n\t\t\toperationDetailsToReturn.Error),\n\t\tnil\n}", "func (*CachingApiV1) NewGetCacheLevelOptions() *GetCacheLevelOptions {\n\treturn 
&GetCacheLevelOptions{}\n}", "func (g *Group) GetWhitelistEntryRequest(in string) (req *request.Request, out *GetWhitelistEntryOutput) {\n op := &request.Operation{\n Name: \"GetWhitelistEntry\",\n HTTPMethod: \"GET\",\n HTTPPath: \"/groups/\" + g.GroupID + \"/whitelist/\" + url.QueryEscape(in),\n }\n\n out = &GetWhitelistEntryOutput{}\n\n handlers := &request.Handlers{\n ResponseHandler: request.ResponseHandler,\n }\n\n req = g.newRequest(op, nil, out, handlers)\n\n return req, out\n}", "func (c *APIGateway) GetApiKeysRequest(input *GetApiKeysInput) (req *request.Request, output *GetApiKeysOutput) {\n\top := &request.Operation{\n\t\tName: opGetApiKeys,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/apikeys\",\n\t\tPaginator: &request.Paginator{\n\t\t\tInputTokens: []string{\"position\"},\n\t\t\tOutputTokens: []string{\"position\"},\n\t\t\tLimitToken: \"limit\",\n\t\t\tTruncationToken: \"\",\n\t\t},\n\t}\n\n\tif input == nil {\n\t\tinput = &GetApiKeysInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetApiKeysOutput{}\n\treq.Data = output\n\treturn\n}", "func (*GetSystemStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_system_service_proto_rawDescGZIP(), []int{2}\n}", "func (*GetCheckerStatusV1Request) Descriptor() ([]byte, []int) {\n\treturn file_checker_v1_proto_rawDescGZIP(), []int{12}\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn 
status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (m *ListDocsV1Request) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Limit\n\n\t// no validation rules for Offset\n\n\treturn nil\n}", "func (client *KeyVaultClient) getKeyVersionsCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, options *KeyVaultClientGetKeyVersionsOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/versions\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Maxresults != nil {\n\t\treqQP.Set(\"maxresults\", strconv.FormatInt(int64(*options.Maxresults), 10))\n\t}\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", 
\"application/json\")\n\treturn req, nil\n}", "func (c *UFSClient) NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r Virtual_Guest) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getUpgradeRequest\", nil, &r.Options, &resp)\n\treturn\n}", "func NewGetL2OrderBookRequest(server string, symbol 
SymbolParam) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParamWithLocation(\"simple\", false, \"symbol\", runtime.ParamLocationPath, symbol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverURL, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toperationPath := fmt.Sprintf(\"/l2/%s\", pathParam0)\n\tif operationPath[0] == '/' {\n\t\toperationPath = operationPath[1:]\n\t}\n\toperationURL := url.URL{\n\t\tPath: operationPath,\n\t}\n\n\tqueryURL := serverURL.ResolveReference(&operationURL)\n\n\treq, err := http.NewRequest(\"GET\", queryURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (*CBroadcast_GetBroadcastStatus_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{11}\n}", "func (client *CassandraClustersClient) statusCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientStatusOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/status\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func GetSystemRequestRPC(req systemsproto.GetSystemsRequest) (*systemsproto.SystemsResponse, error) {\n\tconn, err := services.ODIMService.Client(services.Systems)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create client connection: %v\", err)\n\t}\n\tdefer conn.Close()\n\tasService := systemsproto.NewSystemsClient(conn)\n\tresp, err := asService.GetSystems(context.TODO(), &req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error: RPC error: %v\", err)\n\t}\n\treturn resp, nil\n}", "func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Get() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Did not find volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tlogrus.Debugf(\"Found volume %s\", req.Name)\n\n\tresp := new(volume.GetResponse)\n\tresp.Volume = new(volume.Volume)\n\tresp.Volume.Name = vol.name\n\tresp.Volume.Mountpoint = vol.path\n\tresp.Volume.CreatedAt = vol.createTime.String()\n\n\treturn resp, nil\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, 
status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tisBlock, err := hostutil.NewHostUtil().PathIsDevice(req.VolumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to determine whether %s is block device: %v\", req.VolumePath, err)\n\t}\n\tif isBlock {\n\t\tbcap, err := d.mounter.GetStatistics(req.GetVolumePath())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to get block capacity on path %s: %v\", req.VolumePath, err)\n\t\t}\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: bcap.TotalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn 
&csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (g *Group) GetWhitelistRequest() (req *request.Request, out *GetWhitelistOutput) {\n op := &request.Operation{\n Name: \"GetWhitelist\",\n HTTPMethod: \"GET\",\n HTTPPath: \"/groups/\" + g.GroupID + \"/whitelist\",\n }\n\n out = &GetWhitelistOutput{}\n\n handlers := &request.Handlers {\n ResponseHandler: request.ListResponseHandler,\n }\n\n req = g.newRequest(op, nil, out, handlers)\n return req, out\n}", "func (mr *MockNuvoVMMockRecorder) GetVolumeStats(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetVolumeStats\", reflect.TypeOf((*MockNuvoVM)(nil).GetVolumeStats), arg0, arg1)\n}", "func (a *ClusterControllerApiService) GetServerGroupsUsingGET1(ctx _context.Context, account string, application string, clusterName string) apiGetServerGroupsUsingGET1Request {\n\treturn apiGetServerGroupsUsingGET1Request{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\taccount: account,\n\t\tapplication: application,\n\t\tclusterName: clusterName,\n\t}\n}", "func parseStatsRequest(r *http.Request) (statsRequest, error) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\treturn statsRequest{}, errors.WithStack(err)\n\t}\n\treturn statsRequest{}, nil\n}", "func (client *AvailabilityGroupListenersClient) getCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, availabilityGroupListenerName string, options *AvailabilityGroupListenersClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (m *LoadStatsRequest) Validate() error {\n\treturn 
m.validate(false)\n}", "func (a *Client) V1GetConfig(params *V1GetConfigParams) (*V1GetConfigOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1GetConfigParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1GetConfig\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/all\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1GetConfigReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V1GetConfigOK), nil\n\n}", "func (m *LivenessRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (a *api) GetTeamV1(\n\tctx context.Context,\n\treq *desc.GetTeamV1Request) (*desc.GetTeamV1Response, error) {\n\tmetrics.IncTotalRequestsCounter()\n\tif err := req.Validate(); err != nil 
{\n\t\tmetrics.IncInvalidRequestsCounter()\n\t\tlog.Error().Err(err).Msg(\"invalid argument\")\n\t\treturn nil, status.Error(codes.InvalidArgument, err.Error())\n\t}\n\tlog.Debug().Msgf(\"GetTeamV1() was called (id=%d)\", req.Id)\n\n\ttracer := opentracing.GlobalTracer()\n\tspan := tracer.StartSpan(\"GetTeamV1\")\n\tdefer span.Finish()\n\n\tteam, err := a.repo.GetTeam(ctx, req.Id)\n\n\tif err != nil {\n\t\tlog.Error().Err(err)\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\tresponse := &desc.GetTeamV1Response{Team: converter.TeamToDTO(team)}\n\n\treturn response, nil\n}", "func (c *APIGateway) GetExportRequest(input *GetExportInput) (req *request.Request, output *GetExportOutput) {\n\top := &request.Operation{\n\t\tName: opGetExport,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/stages/{stage_name}/exports/{export_type}\",\n\t}\n\n\tif input == nil {\n\t\tinput = &GetExportInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetExportOutput{}\n\treq.Data = output\n\treturn\n}", "func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeBlockVerboseData bool) *GetBlocksRequestMessage {\n\treturn &GetBlocksRequestMessage{\n\t\tLowHash: lowHash,\n\t\tIncludeBlockHexes: includeBlockHexes,\n\t\tIncludeBlockVerboseData: includeBlockVerboseData,\n\t}\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func getServiceCmdRequest(cmd cmdType, cred credential, body []byte) (*http.Request, error) {\n\treq, err := newTestRequest(cmd.apiMethod(), \"/?service\", 0, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set body\n\treq.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\t// minioAdminOpHeader is to identify the request as a\n\t// management REST API request.\n\treq.Header.Set(minioAdminOpHeader, 
cmd.String())\n\treq.Header.Set(\"X-Amz-Content-Sha256\", getSHA256Hash(body))\n\n\t// management REST API uses signature V4 for authentication.\n\terr = signRequestV4(req, cred.AccessKey, cred.SecretKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn req, nil\n}", "func (a *ServiceRequestNamingApiService) Get(ctx _context.Context) ApiGetRequest {\n\treturn ApiGetRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func Convert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in *FakeRequest, out *v1alpha2.FakeRequest, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in, out, s)\n}", "func (m *ListTenantsV1Request) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Limit\n\n\t// no validation rules for Offset\n\n\treturn nil\n}", "func (d *MinioDriver) Get(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (o *GetV1LoansParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {\n\tvar res []error\n\n\to.HTTPRequest = r\n\n\tqs := runtime.Values(r.URL.Query())\n\n\tqAsc, qhkAsc, _ := qs.GetOK(\"asc\")\n\tif err := o.bindAsc(qAsc, qhkAsc, route.Formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tqOrder, qhkOrder, _ := qs.GetOK(\"order\")\n\tif err := o.bindOrder(qOrder, qhkOrder, route.Formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tqPage, qhkPage, _ := qs.GetOK(\"page\")\n\tif err := o.bindPage(qPage, qhkPage, route.Formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tqPerPage, qhkPerPage, _ := qs.GetOK(\"per_page\")\n\tif err := o.bindPerPage(qPerPage, qhkPerPage, route.Formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func NewGetOSListRequest(page int, size int) *GetOSListRequest {\n\treturn &GetOSListRequest{\n\t\tPage: page,\n\t\tSize: size,\n\t}\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func (vk *VK) StatsGet(params Params) (response StatsGetResponse, err error) {\n\terr = vk.RequestUnmarshal(\"stats.get\", &response, params)\n\treturn\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *APIGateway) GetStagesRequest(input *GetStagesInput) (req *request.Request, output *GetStagesOutput) {\n\top := &request.Operation{\n\t\tName: opGetStages,\n\t\tHTTPMethod: \"GET\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/stages\",\n\t}\n\n\tif input == nil {\n\t\tinput = 
&GetStagesInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\toutput = &GetStagesOutput{}\n\treq.Data = output\n\treturn\n}", "func Convert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in *impl.CreateSymlinkRequest, out *v2alpha1.CreateSymlinkRequest) error {\n\treturn autoConvert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in, out)\n}", "func FromRequest(ctx context.Context, r *http.Request) (p *Passenger, err error) {\n\tauth := \"\"\n\tif auth = r.Header.Get(\"Authorization\"); auth == \"\" {\n\t\treturn nil, ErrNoAuthHeader\n\t}\n\n\tif strings.HasPrefix(auth, \"Token \") {\n\t\treturn FromAccessToken(ctx, auth[6:])\n\t}\n\n\tusername, password, ok := \"\", \"\", false\n\tif username, password, ok = r.BasicAuth(); !ok {\n\t\treturn nil, ErrUnkAuthHeader\n\t}\n\n\tp, err = FromBasicAuth(ctx, username, password)\n\treturn\n}", "func (client *MetricAlertsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *MetricAlertsClientListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-03-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn 
req, nil\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_GetBIOSSerialNumberRequest_To_v1alpha1_GetBIOSSerialNumberRequest(in *internal.GetBIOSSerialNumberRequest, out *v1alpha1.GetBIOSSerialNumberRequest) error {\n\treturn autoConvert_internal_GetBIOSSerialNumberRequest_To_v1alpha1_GetBIOSSerialNumberRequest(in, out)\n}", "func versionRequest() {\n\t// if there are no other arguments display the version for sparts.\n\tif len(os.Args[1:]) == 1 {\n\t\t// Display version\n\t\tfmt.Printf(\" %s version: %s\\n\", filepath.Base(os.Args[0]), _VERSION)\n\t\treturn\n\t}\n\t// there is an additional argument\n\tswitch os.Args[2] {\n\tcase \"--help\", \"-help\", \"-h\":\n\t\t// Display help\n\t\tfmt.Println(_VERSION_HELP_CONTENT)\n\tcase \"--all\", \"-a\":\n\t\tfmt.Printf(\"%s version: %s data model: %s\\n\", filepath.Base(os.Args[0]), _VERSION, _DB_Model)\n\tdefault:\n\t\tfmt.Printf(\" '%s' is not a valid version option. Try --help\\n\", os.Args[2])\n\t}\n\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}" ]
[ "0.7061536", "0.6564887", "0.6469063", "0.6229472", "0.58826023", "0.5766348", "0.56705314", "0.56251806", "0.5619288", "0.54504144", "0.53813803", "0.5162646", "0.5073106", "0.4988676", "0.49808604", "0.49403647", "0.4837975", "0.47544888", "0.47227532", "0.46668306", "0.46139675", "0.46133193", "0.45976332", "0.45866573", "0.4575442", "0.45744553", "0.45203784", "0.44636178", "0.44201475", "0.43462685", "0.43383002", "0.42983118", "0.42634168", "0.42602405", "0.4251939", "0.42464387", "0.42053637", "0.4204255", "0.420285", "0.41871822", "0.4178524", "0.41716293", "0.41482675", "0.41408062", "0.41399834", "0.41366443", "0.413597", "0.41319633", "0.4129245", "0.412095", "0.41178104", "0.41103953", "0.41089296", "0.410661", "0.41034818", "0.41002426", "0.40970665", "0.40969595", "0.40949878", "0.40861756", "0.4083108", "0.40773293", "0.40660623", "0.4059036", "0.40452063", "0.40328118", "0.40293223", "0.4008062", "0.40039292", "0.4002448", "0.4001842", "0.39985892", "0.39952955", "0.39951938", "0.39882138", "0.39843944", "0.397992", "0.39789554", "0.39763975", "0.39736798", "0.3969962", "0.39657858", "0.39641058", "0.39583385", "0.39557156", "0.39508897", "0.39490113", "0.39454657", "0.39434886", "0.3941101", "0.39371654", "0.39282984", "0.39278916", "0.39275905", "0.39266026", "0.39248583", "0.39198023", "0.39197263", "0.39163652", "0.39118674" ]
0.8569152
0
Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse is an autogenerated conversion function.
func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error { return autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in *v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn 
autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func GetVolumeStats(address string, obj interface{}) (error, int) {\n\tcontroller, err := NewControllerClient(address)\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\turl := controller.address + \"/stats\"\n\tresp, err := controller.httpClient.Get(url)\n\tif resp != nil {\n\t\tif resp.StatusCode == 500 {\n\t\t\treturn err, 500\n\t\t} else if resp.StatusCode == 503 {\n\t\t\treturn err, 503\n\t\t}\n\t} else {\n\t\treturn err, -1\n\t}\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\tdefer resp.Body.Close()\n\trc := json.NewDecoder(resp.Body).Decode(obj)\n\treturn rc, 0\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in 
*internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func DecodeHTTPGetStatResponse(_ context.Context, r *http.Response) (interface{}, error) {\n\tif r.StatusCode != http.StatusOK {\n\t\treturn nil, errorDecoder(r)\n\t}\n\tvar resp GetStatResponse\n\terr := json.NewDecoder(r.Body).Decode(&resp)\n\treturn resp, err\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = 
settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (client IotHubResourceClient) GetStatsResponder(resp *http.Response) (result RegistryStatistics, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, 
out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, &cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn 
autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func (o *GetVMVolumeMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVMVolumeMetricsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetVMVolumeMetricsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetVMVolumeMetricsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewGetVMVolumeMetricsInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (m *MockNuvoVM) GetVolumeStats(arg0 bool, arg1 string) (*nuvoapi.StatsCombinedVolume, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetVolumeStats\", arg0, arg1)\n\tret0, _ := ret[0].(*nuvoapi.StatsCombinedVolume)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error 
{\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (d *VolumeDriver) GetVolume(name string) (map[string]interface{}, error) {\n\tvar statusMap map[string]interface{}\n\tstatusMap = make(map[string]interface{})\n\tlog.Errorf(\"VolumeDriver GetVolume to be implemented\")\n\treturn statusMap, nil\n}", "func respStat(file *schema.File) *protocol.StatResp {\n\treturn &protocol.StatResp{\n\t\tDataFileID: file.ID,\n\t\tName: file.Name,\n\t\tDataDirs: file.DataDirs,\n\t\tChecksum: file.Checksum,\n\t\tSize: file.Size,\n\t\tBirthtime: file.Birthtime,\n\t\tMTime: file.MTime,\n\t}\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"GetVolInfo failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1, nil\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpGetVolInfoReq := &vp.GetVolInfoReq{\n\t\tUUID: name,\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tack, err := vc.GetVolInfo(ctx, pGetVolInfoReq)\n\tif err != nil || ack.Ret != 0 {\n\t\treturn -1, &vp.GetVolInfoAck{}\n\t}\n\treturn 0, ack\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (*CBroadcast_GetBroadcastViewerStats_Response) Descriptor() ([]byte, []int) {\n\treturn 
file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{63}\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func (vk VK) StreamingGetStats(params map[string]string) (response StreamingGetStatsResponse, vkErr Error) 
{\n\trawResponse, vkErr := vk.Request(\"streaming.getStats\", params)\n\tif vkErr.Code != 0 {\n\t\treturn\n\t}\n\n\terr := json.Unmarshal(rawResponse, &response)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (*GetSystemStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_system_service_proto_rawDescGZIP(), []int{3}\n}", "func GetVolumeStatus(hostName, volumeName string) (map[string]string, error) {\n\tformatStr1 := \" --format '{{index .Status.access}} {{index .Status \\\"attach-as\\\"}} {{index .Status.capacity.allocated}} {{index .Status.capacity.size}} {{index .Status \\\"clone-from\\\"}}\"\n\tformatStr2 := \" {{index .Status \\\"created by VM\\\"}} {{index .Status.datastore}} {{index .Status.diskformat}} {{index .Status.fstype}} {{index .Status.status}} {{index .Status \\\"attached to VM\\\"}}'\"\n\n\tcmd := dockercli.InspectVolume + volumeName + formatStr1 + formatStr2\n\tout, err := ssh.InvokeCommand(hostName, cmd)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatus := make(map[string]string)\n\tval := strings.Fields(out)\n\n\tfor i := 0; i < len(dockercli.VolumeStatusFields); i++ {\n\t\tstatus[dockercli.VolumeStatusFields[i]] = val[i]\n\t}\n\treturn status, nil\n}", "func (sss StorageServiceStats) Response() *http.Response {\n\treturn sss.rawResponse\n}", "func (vk *VK) StatsGet(params Params) (response StatsGetResponse, err error) {\n\terr = vk.RequestUnmarshal(\"stats.get\", &response, params)\n\treturn\n}", "func ParseGetSymbolsResponse(rsp *http.Response) (*GetSymbolsResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetSymbolsResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase 
strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest struct {\n\t\t\tAdditionalProperties map[string]SymbolStatus `json:\"-\"`\n\t\t}\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (d *VolumeDriver) Get(r volume.Request) volume.Response {\n\tlog.Errorf(\"VolumeDriver Get to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (c *ClientWithResponses) GetSymbolsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetSymbolsResponse, error) {\n\trsp, err := c.GetSymbols(ctx, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetSymbolsResponse(rsp)\n}", "func (proxy *remoteDriverProxy) Get(name string) (*remoteVolumeDesc, error) {\n\tvar req = remoteVolumeGetReq{\n\t\tName: name,\n\t}\n\n\tvar resp remoteVolumeGetResp\n\n\tif err := proxy.client.CallService(remoteVolumeGetService, &req, &resp, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn nil, errors.New(resp.Err)\n\t}\n\n\treturn resp.Volume, nil\n}", "func GetAllVolumeInfos() (int32, []*vp.Volume) {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"GetAllDatanode failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1, nil\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpVolumeInfosReq := &vp.VolumeInfosReq{}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tpVolumeInfosAck, err := vc.VolumeInfos(ctx, pVolumeInfosReq)\n\tif err != nil {\n\t\tlogger.Error(\"GetAllVolumeInfos failed,grpc func err :%v\", err)\n\t\treturn 
-1, nil\n\t}\n\tif pVolumeInfosAck.Ret != 0 {\n\t\tlogger.Error(\"GetAllVolumeInfos failed,grpc func ret :%v\", pVolumeInfosAck.Ret)\n\t\treturn -1, nil\n\t}\n\treturn 0, pVolumeInfosAck.Volumes\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tvolumePath := req.GetVolumePath()\n\tif volumePath == \"\" {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"volumePath %v is empty\", volumePath)\n\t}\n\n\texists, err := utilpath.Exists(utilpath.CheckFollowSymlink, volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check whether volumePath exists: %s\", err)\n\t}\n\tif !exists {\n\t\treturn nil, status.Errorf(codes.NotFound, \"target: %s not found\", volumePath)\n\t}\n\n\tstats, err := util.GetDeviceStats(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get stats by path: %s\", err)\n\t}\n\n\tklog.V(5).Infof(util.Log(ctx, \"get volumePath %q stats: %+v\"), volumePath, stats)\n\n\tif stats.Block {\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tTotal: stats.TotalBytes,\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tTotal: stats.TotalBytes,\n\t\t\t\tAvailable: stats.AvailableBytes,\n\t\t\t\tUsed: stats.UsedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t}, {\n\t\t\t\tTotal: stats.TotalInodes,\n\t\t\t\tAvailable: 
stats.AvailableInodes,\n\t\t\t\tUsed: stats.UsedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func (state *State) GetVolumeMetrics() []*metrics.ZMetricVolume {\n\treturn state.deviceInfo.VolumeMetrics\n}", "func (api *API) GetVolume() (*Volume, error) {\n\tvar resp Volume\n\terr := api.call(\"market_history\", \"get_volume\", EmptyParams, &resp)\n\treturn &resp, err\n}", "func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {\n\tstatus := &DriverStatus{\n\t\tVolume: v1.PersistentVolume{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\tLabels: map[string]string{},\n\t\t\t}}}\n\tif err := json.Unmarshal(output, status); err != nil {\n\t\tglog.Errorf(\"Failed to unmarshal output for command: %s, output: %q, error: %s\", cmd, string(output), err.Error())\n\t\treturn nil, err\n\t} else if status.Status == StatusNotSupported {\n\t\tglog.V(5).Infof(\"%s command is not supported by the driver\", cmd)\n\t\treturn nil, errors.New(status.Status)\n\t} else if status.Status != StatusSuccess {\n\t\terrMsg := fmt.Sprintf(\"%s command failed, status: %s, reason: %s\", cmd, status.Status, status.Message)\n\t\tglog.Errorf(errMsg)\n\t\treturn nil, fmt.Errorf(\"%s\", errMsg)\n\t}\n\n\treturn status, nil\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out 
*v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func (*VodGetMediaInfosResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{9}\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (d *MinioDriver) Get(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (h *Handlers) GetStats(w http.ResponseWriter, r *http.Request) {\n\tdataJSON, err := h.pkgManager.GetStatsJSON(r.Context())\n\tif err != nil {\n\t\th.logger.Error().Err(err).Str(\"method\", \"GetStats\").Send()\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\thelpers.RenderJSON(w, dataJSON, helpers.DefaultAPICacheMaxAge)\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, 
status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tisBlock, err := hostutil.NewHostUtil().PathIsDevice(req.VolumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to determine whether %s is block device: %v\", req.VolumePath, err)\n\t}\n\tif isBlock {\n\t\tbcap, err := d.mounter.GetStatistics(req.GetVolumePath())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to get block capacity on path %s: %v\", req.VolumePath, err)\n\t\t}\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: bcap.TotalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, 
status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (client GroupClient) GetTableStatisticResponder(resp *http.Response) (result USQLTableStatistics, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (tn *ListNamespaceDescriptors) NewResponse() proto.Message {\n\treturn &pb.ListNamespaceDescriptorsResponse{}\n}", "func (d DobsClient) GetVolume(ctx Context, name string) (*APIVolume, error) {\n\n\tapiVolume, err := d.getVolumeByName(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvol := &APIVolume{\n\t\tName: apiVolume.Name,\n\t\tID: apiVolume.ID,\n\t\t// DropletID: apiVolume.DropletIDs[0],\n\t}\n\n\treturn vol, nil\n}", "func (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\tif _, err := os.Lstat(req.VolumePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, 
err)\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get metrics: %v\", err)\n\t}\n\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func 
Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func (*MStatisticsResponse) Descriptor() ([]byte, []int) {\n\treturn file_s_stats_proto_rawDescGZIP(), []int{1}\n}", "func (d *DirDriver) List() (*volume.ListResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit List() endpoint\")\n\n\tvols := new(volume.ListResponse)\n\tvols.Volumes = []*volume.Volume{}\n\n\tfor _, vol := range d.volumes {\n\t\tnewVol := new(volume.Volume)\n\t\tnewVol.Name = vol.name\n\t\tnewVol.Mountpoint = vol.path\n\t\tnewVol.CreatedAt = vol.createTime.String()\n\t\tvols.Volumes = append(vols.Volumes, newVol)\n\t\tlogrus.Debugf(\"Adding volume %s to list response\", newVol.Name)\n\t}\n\n\treturn vols, nil\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif req.VolumeId == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume ID must be provided\")\n\t}\n\n\tvolumePath := req.VolumePath\n\tif volumePath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume Path must be provided\")\n\t}\n\n\tlog := d.log.WithFields(logrus.Fields{\n\t\t\"volume_id\": req.VolumeId,\n\t\t\"volume_path\": req.VolumePath,\n\t\t\"method\": \"node_get_volume_stats\",\n\t})\n\tlog.Info(\"node get volume stats called\")\n\n\tmounted, err := d.mounter.IsMounted(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check if volume path %q is mounted: %s\", volumePath, err)\n\t}\n\n\tif !mounted {\n\t\treturn nil, status.Errorf(codes.NotFound, \"volume path %q is not mounted\", volumePath)\n\t}\n\n\tisBlock, err := d.mounter.IsBlockDevice(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to determine if %q is block 
device: %s\", volumePath, err)\n\t}\n\n\tstats, err := d.mounter.GetStatistics(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to retrieve capacity statistics for volume path %q: %s\", volumePath, err)\n\t}\n\n\t// only can retrieve total capacity for a block device\n\tif isBlock {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"volume_mode\": volumeModeBlock,\n\t\t\t\"bytes_total\": stats.totalBytes,\n\t\t}).Info(\"node capacity statistics retrieved\")\n\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"volume_mode\": volumeModeFilesystem,\n\t\t\"bytes_available\": stats.availableBytes,\n\t\t\"bytes_total\": stats.totalBytes,\n\t\t\"bytes_used\": stats.usedBytes,\n\t\t\"inodes_available\": stats.availableInodes,\n\t\t\"inodes_total\": stats.totalInodes,\n\t\t\"inodes_used\": stats.usedInodes,\n\t}).Info(\"node capacity statistics retrieved\")\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableBytes,\n\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\tUsed: stats.usedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t},\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableInodes,\n\t\t\t\tTotal: stats.totalInodes,\n\t\t\t\tUsed: stats.usedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (s *LifecyclerRPCServer) Volumes(opts HostOpts, resp *[]string) (err error) {\n\t*resp, err = s.Plugin.Volumes(opts.Version, opts.FlagValues)\n\treturn err\n}", "func NewVolumeModifyIterAsyncResponse() *VolumeModifyIterAsyncResponse {\n\treturn &VolumeModifyIterAsyncResponse{}\n}", "func (c *ClientWithResponses) ListSnapshotsWithResponse(ctx context.Context) (*ListSnapshotsResponse, error) {\n\trsp, err := c.ListSnapshots(ctx)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseListSnapshotsResponse(rsp)\n}", "func GetStats(w http.ResponseWriter, r *http.Request) {\n\tstats, err := service.GetStats()\n\n\tif err != nil {\n\t\terrors.WriteError(w, r, errors.InternalError(err.Error(), \"Could not get RSVP service statistics.\"))\n\t\treturn\n\t}\n\n\tjson.NewEncoder(w).Encode(stats)\n}", "func (m *ApiEndpointsStatsRsp) ToJSON() (string, error) {\n\treturn codec.ToJSON(m)\n}", "func (v *VolumesServiceMock) Get(podUID string, name string) (vol *api.Volume, err error) {\n\targs := v.Called(podUID, name)\n\tx := args.Get(0)\n\tif x != nil {\n\t\tvol = x.(*api.Volume)\n\t}\n\terr = args.Error(1)\n\treturn\n}", "func (c *ClientWithResponses) ListPrivateNetworksWithResponse(ctx context.Context) (*ListPrivateNetworksResponse, error) {\n\trsp, err := c.ListPrivateNetworks(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseListPrivateNetworksResponse(rsp)\n}", "func (o *PcloudVolumegroupsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudVolumegroupsGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudVolumegroupsGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudVolumegroupsGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudVolumegroupsGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudVolumegroupsGetInternalServerError()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func GetVolDetails(volName string, obj interface{}) error {\n\taddr := os.Getenv(\"MAPI_ADDR\")\n\tif addr == \"\" {\n\t\terr := util.MAPIADDRNotSet\n\t\tfmt.Printf(\"error getting env variable: %v\", err)\n\t\treturn err\n\t}\n\n\turl := addr + \"/latest/volumes/info/\" + volName\n\tclient := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := client.Get(url)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Could not get response, found error: %v\", err)\n\t\treturn err\n\t}\n\n\tif resp != nil {\n\t\tif resp.StatusCode == 500 {\n\t\t\tfmt.Printf(\"Volume: %s not found at M_API server\\n\", volName)\n\t\t\treturn errors.New(\"Internal Server Error\")\n\t\t} else if resp.StatusCode == 503 {\n\t\t\tfmt.Println(\"M_API server not reachable\")\n\t\t\treturn errors.New(\"Service Unavailable\")\n\t\t} else if resp.StatusCode == 404 {\n\t\t\tfmt.Printf(\"Volume: %s not found at M_API server\\n\", volName)\n\t\t\treturn errors.New(\"Page Not Found\")\n\t\t}\n\n\t} else {\n\t\tfmt.Println(\"M_API server not reachable\")\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\treturn json.NewDecoder(resp.Body).Decode(obj)\n}", "func (o *GetAPI24RemoteVolumeSnapshotsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetApi24RemoteVolumeSnapshotsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetApi24RemoteVolumeSnapshotsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (m *ServicePolicyHitsResponse) ToJSON() (string, error) {\n\treturn codec.ToJSON(m)\n}", "func (s *Server) GetStats(w rest.ResponseWriter, r *rest.Request) {\n\tresponse := models.BaseResponse{}\n\tresponse.Init(w)\n\n\tcurrentUser, err := s.LoginProcess(response, r)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tuserID, err := s.GetUserIDFromParams(r)\n\n\tif err != nil {\n\t\tresponse.SendError(err.Error())\n\t\treturn\n\t}\n\n\tif userID == 0 {\n\t\tuserID = currentUser.ID\n\t}\n\n\tb := new(badgecontroller.Badge)\n\n\tstats, err := b.List(s.Db, userID)\n\n\tif err != nil {\n\t\tresponse.SendError(err.Error())\n\t\treturn\n\t}\n\n\tresponse.SendSuccess(stats)\n}", "func NewGetMenuItemInformation200ResponseWithDefaults() *GetMenuItemInformation200Response {\n\tthis := GetMenuItemInformation200Response{}\n\treturn &this\n}", "func (d *DirDriver) Get(req *volume.GetRequest) (*volume.GetResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Get() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Did not find volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tlogrus.Debugf(\"Found volume %s\", req.Name)\n\n\tresp := new(volume.GetResponse)\n\tresp.Volume = new(volume.Volume)\n\tresp.Volume.Name = vol.name\n\tresp.Volume.Mountpoint = vol.path\n\tresp.Volume.CreatedAt = vol.createTime.String()\n\n\treturn resp, nil\n}", "func CreateGetSnapshotSettingsResponse() (response *GetSnapshotSettingsResponse) {\n\tresponse = &GetSnapshotSettingsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (*MClusterStatisticsResponse) Descriptor() ([]byte, []int) {\n\treturn file_s_stats_proto_rawDescGZIP(), []int{3}\n}", "func (o TableOutput) Stats() TableStatsResponseOutput {\n\treturn 
o.ApplyT(func(v *Table) TableStatsResponseOutput { return v.Stats }).(TableStatsResponseOutput)\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (a *Client) GetTableStats(params *GetTableStatsParams) (*GetTableStatsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetTableStatsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getTableStats\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/tables/{tableName}/stats\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetTableStatsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetTableStatsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a 
default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getTableStats: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (v *varnishClient) GetStats() (*Stats, error) {\n\tcommand, argList := v.BuildCommand()\n\n\toutput, err := v.exec.Execute(command, argList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseStats(output)\n}", "func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}", "func (v *VolumeService) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(volumeID) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound\n\tif strings.Contains(volumeID, \"notfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// check if the volume is not-found\n\tif strings.Contains(volumeID, \"not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: \"local\",\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: volumeID,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}" ]
[ "0.6955535", "0.6266805", "0.62136406", "0.62044823", "0.5852595", "0.5684157", "0.56427455", "0.56361014", "0.55806714", "0.5427179", "0.54269564", "0.5313576", "0.52097315", "0.51222646", "0.5119095", "0.50108427", "0.49995625", "0.49526092", "0.48958632", "0.4879696", "0.47260126", "0.47213426", "0.471893", "0.47122595", "0.47016922", "0.46343428", "0.4620165", "0.46115184", "0.46056503", "0.4587654", "0.4586654", "0.45586744", "0.45440403", "0.4535652", "0.45234534", "0.45021117", "0.44835776", "0.44777784", "0.44720536", "0.44643593", "0.44640982", "0.44590905", "0.4455373", "0.4441919", "0.44196528", "0.4416254", "0.44127014", "0.44028777", "0.4393893", "0.43894553", "0.43864268", "0.4383341", "0.43797073", "0.43786496", "0.43708685", "0.43656033", "0.43647954", "0.43570855", "0.43486547", "0.43480834", "0.43387404", "0.43241096", "0.43164638", "0.43130928", "0.4312925", "0.43058902", "0.42977053", "0.42868775", "0.42747065", "0.4261092", "0.4259815", "0.42446825", "0.4240464", "0.42362463", "0.42319208", "0.42295474", "0.4223925", "0.4223571", "0.42160812", "0.4205555", "0.42028433", "0.4200383", "0.4188124", "0.41869193", "0.41835153", "0.4177773", "0.41702232", "0.4170163", "0.41595656", "0.41561654", "0.41527358", "0.41510627", "0.41491812", "0.4147041", "0.41362917", "0.41339898", "0.41322786", "0.41292945", "0.4125817", "0.41237995" ]
0.82052696
0
Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse is an autogenerated conversion function.
func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error { return autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn 
autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in *v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func GetVolumeStats(address string, obj interface{}) (error, int) {\n\tcontroller, err := NewControllerClient(address)\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\turl := controller.address + \"/stats\"\n\tresp, err 
:= controller.httpClient.Get(url)\n\tif resp != nil {\n\t\tif resp.StatusCode == 500 {\n\t\t\treturn err, 500\n\t\t} else if resp.StatusCode == 503 {\n\t\t\treturn err, 503\n\t\t}\n\t} else {\n\t\treturn err, -1\n\t}\n\tif err != nil {\n\t\treturn err, -1\n\t}\n\tdefer resp.Body.Close()\n\trc := json.NewDecoder(resp.Body).Decode(obj)\n\treturn rc, 0\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, 
err\n\t}\n\treturn volumes, nil\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (c *restClient) GetVolume(ctx context.Context, req *netapppb.GetVolumeRequest, opts ...gax.CallOption) (*netapppb.Volume, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, 
hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetVolume[0:len((*c.CallOptions).GetVolume):len((*c.CallOptions).GetVolume)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &netapppb.Volume{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in *impl.IsSymlinkResponse, out *v2alpha1.IsSymlinkResponse) error {\n\treturn autoConvert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in, out)\n}", "func 
Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func NewGetAvailableStates_Response() *GetAvailableStates_Response {\n\tself := GetAvailableStates_Response{}\n\tself.SetDefaults()\n\treturn &self\n}", "func Convert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in *impl.CreateSymlinkResponse, out *v2alpha1.CreateSymlinkResponse) error {\n\treturn autoConvert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in, out)\n}", "func (r *Response) AsV2() *CheckResponseV2 {\n\tconvertHeaders := func(h http.Header) []*envoy_api_v2_core.HeaderValueOption {\n\t\tvar headers []*envoy_api_v2_core.HeaderValueOption\n\n\t\tfor k, v := range h {\n\t\t\theaders = append(headers,\n\t\t\t\t&envoy_api_v2_core.HeaderValueOption{\n\t\t\t\t\tHeader: &envoy_api_v2_core.HeaderValue{Key: k, Value: v[0]},\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\treturn headers\n\t}\n\n\tif r.Allow {\n\t\treturn &CheckResponseV2{\n\t\t\tStatus: &status.Status{Code: int32(codes.OK)},\n\t\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_OkResponse{\n\t\t\t\tOkResponse: &envoy_service_auth_v2.OkHttpResponse{\n\t\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &CheckResponseV2{\n\t\tStatus: &status.Status{Code: int32(codes.PermissionDenied)},\n\t\tHttpResponse: &envoy_service_auth_v2.CheckResponse_DeniedResponse{\n\t\t\tDeniedResponse: &envoy_service_auth_v2.DeniedHttpResponse{\n\t\t\t\tHeaders: convertHeaders(r.Response.Header),\n\t\t\t\tStatus: &envoy_type.HttpStatus{\n\t\t\t\t\tCode: envoy_type.StatusCode(r.Response.StatusCode),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func DecodeHTTPGetStatResponse(_ context.Context, r *http.Response) (interface{}, error) {\n\tif r.StatusCode != http.StatusOK {\n\t\treturn nil, 
errorDecoder(r)\n\t}\n\tvar resp GetStatResponse\n\terr := json.NewDecoder(r.Body).Decode(&resp)\n\treturn resp, err\n}", "func GetVolumeStatus(hostName, volumeName string) (map[string]string, error) {\n\tformatStr1 := \" --format '{{index .Status.access}} {{index .Status \\\"attach-as\\\"}} {{index .Status.capacity.allocated}} {{index .Status.capacity.size}} {{index .Status \\\"clone-from\\\"}}\"\n\tformatStr2 := \" {{index .Status \\\"created by VM\\\"}} {{index .Status.datastore}} {{index .Status.diskformat}} {{index .Status.fstype}} {{index .Status.status}} {{index .Status \\\"attached to VM\\\"}}'\"\n\n\tcmd := dockercli.InspectVolume + volumeName + formatStr1 + formatStr2\n\tout, err := ssh.InvokeCommand(hostName, cmd)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatus := make(map[string]string)\n\tval := strings.Fields(out)\n\n\tfor i := 0; i < len(dockercli.VolumeStatusFields); i++ {\n\t\tstatus[dockercli.VolumeStatusFields[i]] = val[i]\n\t}\n\treturn status, nil\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (m *MockNuvoVM) GetVolumeStats(arg0 bool, arg1 string) (*nuvoapi.StatsCombinedVolume, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetVolumeStats\", arg0, arg1)\n\tret0, _ := ret[0].(*nuvoapi.StatsCombinedVolume)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (o *GetVMVolumeMetricsReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVMVolumeMetricsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetVMVolumeMetricsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetVMVolumeMetricsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewGetVMVolumeMetricsInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func Convert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in *internal.StopServiceResponse, out *v1alpha1.StopServiceResponse) error {\n\treturn autoConvert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (tn *ListNamespaceDescriptors) NewResponse() proto.Message {\n\treturn &pb.ListNamespaceDescriptorsResponse{}\n}", "func (*CBroadcast_GetBroadcastViewerStats_Response) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_broadcast_steamclient_proto_rawDescGZIP(), []int{63}\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func ParseGetSymbolsResponse(rsp 
*http.Response) (*GetSymbolsResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetSymbolsResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest struct {\n\t\t\tAdditionalProperties map[string]SymbolStatus `json:\"-\"`\n\t\t}\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func Convert_internal_GetServiceResponse_To_v1alpha1_GetServiceResponse(in *internal.GetServiceResponse, out *v1alpha1.GetServiceResponse) error {\n\treturn autoConvert_internal_GetServiceResponse_To_v1alpha1_GetServiceResponse(in, out)\n}", "func GetVolumeV2(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *VolumeV2State, opts ...pulumi.ResourceOption) (*VolumeV2, error) {\n\tvar resource VolumeV2\n\terr := ctx.ReadResource(\"openstack:blockstorage/volumeV2:VolumeV2\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *Server) GetStatusResponse() *healthModels.HealthStatusResponse {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\tvar name string\n\t// Check if localStatus is populated already. 
If not, the name is empty\n\tif s.localStatus != nil {\n\t\tname = s.localStatus.Name\n\t}\n\n\treturn &healthModels.HealthStatusResponse{\n\t\tLocal: &healthModels.SelfStatus{\n\t\t\tName: name,\n\t\t},\n\t\tNodes: s.connectivity.nodes,\n\t\tTimestamp: s.connectivity.startTime.Format(time.RFC3339),\n\t}\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (vk VK) StreamingGetStats(params map[string]string) (response StreamingGetStatsResponse, vkErr Error) {\n\trawResponse, vkErr := vk.Request(\"streaming.getStats\", params)\n\tif vkErr.Code != 0 {\n\t\treturn\n\t}\n\n\terr := json.Unmarshal(rawResponse, &response)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn\n}", "func NewGetMenuItemInformation200ResponseWithDefaults() *GetMenuItemInformation200Response {\n\tthis := GetMenuItemInformation200Response{}\n\treturn &this\n}", "func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tvolumePath := req.GetVolumePath()\n\tif volumePath == \"\" {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"volumePath %v is empty\", volumePath)\n\t}\n\n\texists, err := utilpath.Exists(utilpath.CheckFollowSymlink, volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check whether volumePath exists: %s\", err)\n\t}\n\tif !exists {\n\t\treturn nil, status.Errorf(codes.NotFound, \"target: %s not found\", volumePath)\n\t}\n\n\tstats, err := util.GetDeviceStats(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get stats by path: %s\", err)\n\t}\n\n\tklog.V(5).Infof(util.Log(ctx, \"get volumePath %q stats: %+v\"), volumePath, stats)\n\n\tif stats.Block {\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tTotal: 
stats.TotalBytes,\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tTotal: stats.TotalBytes,\n\t\t\t\tAvailable: stats.AvailableBytes,\n\t\t\t\tUsed: stats.UsedBytes,\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t}, {\n\t\t\t\tTotal: stats.TotalInodes,\n\t\t\t\tAvailable: stats.AvailableInodes,\n\t\t\t\tUsed: stats.UsedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\t_, err := os.Stat(req.VolumePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tisBlock, err := hostutil.NewHostUtil().PathIsDevice(req.VolumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to determine whether %s is block device: %v\", req.VolumePath, err)\n\t}\n\tif isBlock {\n\t\tbcap, err := d.mounter.GetStatistics(req.GetVolumePath())\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"failed to get block capacity on path %s: %v\", req.VolumePath, err)\n\t\t}\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: bcap.TotalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (d *Driver) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif req.VolumeId == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume ID must be provided\")\n\t}\n\n\tvolumePath := req.VolumePath\n\tif volumePath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats Volume Path must be provided\")\n\t}\n\n\tlog := 
d.log.WithFields(logrus.Fields{\n\t\t\"volume_id\": req.VolumeId,\n\t\t\"volume_path\": req.VolumePath,\n\t\t\"method\": \"node_get_volume_stats\",\n\t})\n\tlog.Info(\"node get volume stats called\")\n\n\tmounted, err := d.mounter.IsMounted(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to check if volume path %q is mounted: %s\", volumePath, err)\n\t}\n\n\tif !mounted {\n\t\treturn nil, status.Errorf(codes.NotFound, \"volume path %q is not mounted\", volumePath)\n\t}\n\n\tisBlock, err := d.mounter.IsBlockDevice(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to determine if %q is block device: %s\", volumePath, err)\n\t}\n\n\tstats, err := d.mounter.GetStatistics(volumePath)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to retrieve capacity statistics for volume path %q: %s\", volumePath, err)\n\t}\n\n\t// only can retrieve total capacity for a block device\n\tif isBlock {\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"volume_mode\": volumeModeBlock,\n\t\t\t\"bytes_total\": stats.totalBytes,\n\t\t}).Info(\"node capacity statistics retrieved\")\n\n\t\treturn &csi.NodeGetVolumeStatsResponse{\n\t\t\tUsage: []*csi.VolumeUsage{\n\t\t\t\t{\n\t\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"volume_mode\": volumeModeFilesystem,\n\t\t\"bytes_available\": stats.availableBytes,\n\t\t\"bytes_total\": stats.totalBytes,\n\t\t\"bytes_used\": stats.usedBytes,\n\t\t\"inodes_available\": stats.availableInodes,\n\t\t\"inodes_total\": stats.totalInodes,\n\t\t\"inodes_used\": stats.usedInodes,\n\t}).Info(\"node capacity statistics retrieved\")\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableBytes,\n\t\t\t\tTotal: stats.totalBytes,\n\t\t\t\tUsed: stats.usedBytes,\n\t\t\t\tUnit: 
csi.VolumeUsage_BYTES,\n\t\t\t},\n\t\t\t&csi.VolumeUsage{\n\t\t\t\tAvailable: stats.availableInodes,\n\t\t\t\tTotal: stats.totalInodes,\n\t\t\t\tUsed: stats.usedInodes,\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (*GetSystemStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_system_service_proto_rawDescGZIP(), []int{3}\n}", "func ParseGetDNSZoneVersionDiffResponse(rsp *http.Response) (*GetDNSZoneVersionDiffResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetDNSZoneVersionDiffResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest ScalewayDomainV2alpha2GetDNSZoneVersionDiffResponse\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func Convert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in *impl.PathExistsResponse, out *v2alpha1.PathExistsResponse) error {\n\treturn autoConvert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in, out)\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (*MClusterStatisticsResponse) Descriptor() ([]byte, []int) {\n\treturn file_s_stats_proto_rawDescGZIP(), []int{3}\n}", "func (*VodGetHlsDecryptionKeyResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{3}\n}", "func NewLunGetSerialNumberResponse() *LunGetSerialNumberResponse {\n\treturn &LunGetSerialNumberResponse{}\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn 
autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func (c *clustermgrClient) GetVolumeInfo(ctx context.Context, vid proto.Vid) (*VolumeInfoSimple, error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tinfo, err := c.client.GetVolumeInfo(ctx, &cmapi.GetVolumeArgs{Vid: vid})\n\tif err != nil {\n\t\tspan.Errorf(\"get volume info failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tret := &VolumeInfoSimple{}\n\tret.set(info)\n\treturn ret, nil\n}", "func (*ListAuditedEventsResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_audit_v1alpha1_service_proto_rawDescGZIP(), []int{1}\n}", "func (vk *VK) StatsGet(params Params) (response StatsGetResponse, err error) {\n\terr = vk.RequestUnmarshal(\"stats.get\", &response, params)\n\treturn\n}", "func (*MStatisticsResponse) Descriptor() ([]byte, []int) {\n\treturn file_s_stats_proto_rawDescGZIP(), []int{1}\n}", "func (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {\n\tif len(req.VolumeId) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume ID was empty\")\n\t}\n\tif len(req.VolumePath) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"NodeGetVolumeStats volume path was empty\")\n\t}\n\n\tif _, err := os.Lstat(req.VolumePath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, status.Errorf(codes.NotFound, \"path %s does not exist\", req.VolumePath)\n\t\t}\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to stat file %s: %v\", req.VolumePath, err)\n\t}\n\n\tvolumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to get metrics: %v\", err)\n\t}\n\n\tavailable, ok := volumeMetrics.Available.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume 
available size(%v)\", volumeMetrics.Available)\n\t}\n\tcapacity, ok := volumeMetrics.Capacity.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume capacity size(%v)\", volumeMetrics.Capacity)\n\t}\n\tused, ok := volumeMetrics.Used.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform volume used size(%v)\", volumeMetrics.Used)\n\t}\n\n\tinodesFree, ok := volumeMetrics.InodesFree.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes free(%v)\", volumeMetrics.InodesFree)\n\t}\n\tinodes, ok := volumeMetrics.Inodes.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes(%v)\", volumeMetrics.Inodes)\n\t}\n\tinodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to transform disk inodes used(%v)\", volumeMetrics.InodesUsed)\n\t}\n\n\treturn &csi.NodeGetVolumeStatsResponse{\n\t\tUsage: []*csi.VolumeUsage{\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_BYTES,\n\t\t\t\tAvailable: available,\n\t\t\t\tTotal: capacity,\n\t\t\t\tUsed: used,\n\t\t\t},\n\t\t\t{\n\t\t\t\tUnit: csi.VolumeUsage_INODES,\n\t\t\t\tAvailable: inodesFree,\n\t\t\t\tTotal: inodes,\n\t\t\t\tUsed: inodesUsed,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (o *GetAPI24RemoteVolumeSnapshotsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetApi24RemoteVolumeSnapshotsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetApi24RemoteVolumeSnapshotsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", 
response, response.Code())\n\t}\n}", "func Convert_internal_GetBIOSSerialNumberResponse_To_v1alpha1_GetBIOSSerialNumberResponse(in *internal.GetBIOSSerialNumberResponse, out *v1alpha1.GetBIOSSerialNumberResponse) error {\n\treturn autoConvert_internal_GetBIOSSerialNumberResponse_To_v1alpha1_GetBIOSSerialNumberResponse(in, out)\n}", "func GetAllVolumeInfos() (int32, []*vp.Volume) {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"GetAllDatanode failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1, nil\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpVolumeInfosReq := &vp.VolumeInfosReq{}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tpVolumeInfosAck, err := vc.VolumeInfos(ctx, pVolumeInfosReq)\n\tif err != nil {\n\t\tlogger.Error(\"GetAllVolumeInfos failed,grpc func err :%v\", err)\n\t\treturn -1, nil\n\t}\n\tif pVolumeInfosAck.Ret != 0 {\n\t\tlogger.Error(\"GetAllVolumeInfos failed,grpc func ret :%v\", pVolumeInfosAck.Ret)\n\t\treturn -1, nil\n\t}\n\treturn 0, pVolumeInfosAck.Volumes\n}", "func (*VodGetMediaInfosResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{9}\n}", "func (m *LoadStatsResponse) Validate() error {\n\treturn m.validate(false)\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *PcloudVolumegroupsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudVolumegroupsGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudVolumegroupsGetBadRequest()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudVolumegroupsGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudVolumegroupsGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudVolumegroupsGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func NewGetMonitoringReportResponse(status types.GenericDeviceModelStatus) *GetMonitoringReportResponse {\n\treturn &GetMonitoringReportResponse{Status: status}\n}", "func (client IotHubResourceClient) GetStatsResponder(resp *http.Response) (result RegistryStatistics, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}", "func GetVolInfo(name string) (int32, *vp.GetVolInfoAck) {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil 
{\n\t\tlogger.Error(\"GetVolInfo failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1, nil\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpGetVolInfoReq := &vp.GetVolInfoReq{\n\t\tUUID: name,\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tack, err := vc.GetVolInfo(ctx, pGetVolInfoReq)\n\tif err != nil || ack.Ret != 0 {\n\t\treturn -1, &vp.GetVolInfoAck{}\n\t}\n\treturn 0, ack\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (*ListDashboardsResponse) Descriptor() ([]byte, []int) {\n\treturn file_data_proto_rawDescGZIP(), []int{1}\n}", "func (m *CatalogInfoResponse) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateErrors(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLimits(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStandardUnitDescriptionGroup(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (*GetTeamByShortName_Response) Descriptor() ([]byte, []int) {\n\treturn file_uac_Team_proto_rawDescGZIP(), []int{3, 0}\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (a *Client) GetTableStats(params *GetTableStatsParams) (*GetTableStatsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetTableStatsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getTableStats\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/tables/{tableName}/stats\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetTableStatsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetTableStatsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getTableStats: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (*CMsgGCToClientPlayerStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{144}\n}", "func (s *Server) GetStats(w rest.ResponseWriter, r *rest.Request) {\n\tresponse := models.BaseResponse{}\n\tresponse.Init(w)\n\n\tcurrentUser, err := s.LoginProcess(response, r)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tuserID, err := s.GetUserIDFromParams(r)\n\n\tif err != nil {\n\t\tresponse.SendError(err.Error())\n\t\treturn\n\t}\n\n\tif userID == 0 {\n\t\tuserID = currentUser.ID\n\t}\n\n\tb := new(badgecontroller.Badge)\n\n\tstats, err := b.List(s.Db, userID)\n\n\tif err != nil {\n\t\tresponse.SendError(err.Error())\n\t\treturn\n\t}\n\n\tresponse.SendSuccess(stats)\n}", "func (*StatisticsResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{129}\n}", "func CreateGetSnapshotSettingsResponse() (response *GetSnapshotSettingsResponse) {\n\tresponse = &GetSnapshotSettingsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func GetLunsFromVolumeNameV2(name string) (LunV2, error) {\n\tquery := \"/api/datacenter/storage/luns?volume.name=\" + name\n\treturn 
getLunsInfoV2(query)\n}", "func (api *API) GetVolume() (*Volume, error) {\n\tvar resp Volume\n\terr := api.call(\"market_history\", \"get_volume\", EmptyParams, &resp)\n\treturn &resp, err\n}", "func (r *CustomResource) ToV1Alpha1() map[string]interface{} {\n\tresult := map[string]interface{}{}\n\tresult[\"name\"] = r.Name()\n\tresult[\"kind\"] = r.Kind()\n\tfor k, v := range r.Spec {\n\t\tresult[k] = v\n\t}\n\treturn result\n}", "func (client *KeyVaultClient) GetKeyVersionsHandleResponse(resp *http.Response) (KeyVaultClientGetKeyVersionsResponse, error) {\n\tresult := KeyVaultClientGetKeyVersionsResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.KeyListResult); err != nil {\n\t\treturn KeyVaultClientGetKeyVersionsResponse{}, err\n\t}\n\treturn result, nil\n}", "func CreateListServerGroupsResponse() (response *ListServerGroupsResponse) {\n\tresponse = &ListServerGroupsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetHealthMonitorLogsResponse() (response *GetHealthMonitorLogsResponse) {\n\tresponse = &GetHealthMonitorLogsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client GroupClient) GetTableStatisticResponder(resp *http.Response) (result USQLTableStatistics, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
[ "0.6934598", "0.690916", "0.6702717", "0.61582994", "0.61535895", "0.59885436", "0.5874269", "0.580857", "0.5731421", "0.5625578", "0.5508169", "0.5225498", "0.5189987", "0.51881635", "0.51695865", "0.5075374", "0.5066856", "0.5047773", "0.49688366", "0.49535075", "0.48126945", "0.47821933", "0.47543144", "0.47495538", "0.47374898", "0.47167352", "0.4679219", "0.4656852", "0.45705843", "0.45507634", "0.44996214", "0.44627512", "0.44247252", "0.4421941", "0.43979368", "0.43947566", "0.4378216", "0.43746185", "0.4373069", "0.43465236", "0.4342931", "0.4335609", "0.43316558", "0.43109444", "0.43107948", "0.4294234", "0.42832398", "0.42593724", "0.42593345", "0.42562175", "0.4246997", "0.4242713", "0.42425638", "0.42419147", "0.42296842", "0.42287317", "0.4219544", "0.42192158", "0.42180622", "0.42146355", "0.41957796", "0.419305", "0.41821313", "0.41660422", "0.41647077", "0.41620743", "0.41526604", "0.4151273", "0.41411236", "0.413644", "0.41310906", "0.41294193", "0.4126302", "0.41230363", "0.41220173", "0.41156605", "0.41090256", "0.41067335", "0.4103281", "0.40985662", "0.4093123", "0.40891954", "0.4087253", "0.40795887", "0.40777245", "0.40712944", "0.40710843", "0.40710288", "0.40706387", "0.4062939", "0.40600613", "0.40510595", "0.40508214", "0.40498367", "0.40489683", "0.40475586", "0.40430227", "0.40424573", "0.4042264", "0.40380833" ]
0.82592857
0
Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest is an autogenerated conversion function.
func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error { return autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error 
{\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_IsSymlinkRequest_To_impl_IsSymlinkRequest(in *v2alpha1.IsSymlinkRequest, out *impl.IsSymlinkRequest) error {\n\treturn 
autoConvert_v2alpha1_IsSymlinkRequest_To_impl_IsSymlinkRequest(in, out)\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. 
Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func IsVolumeNameValid(name string) (bool, error) {\n\treturn true, nil\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action 
and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *WhatsAppNameWhatsAppApiContent) SetFormattedName(v string) {\n\to.FormattedName = v\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetFormattedNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FormattedName, true\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func isRequestSigned(isPresign bool, query url.Values, header http.Header) bool {\n\tif query.Get(v4Internal.AmzSignatureKey) != \"\" {\n\t\treturn true\n\t}\n\n\tif header.Get(\"Authorization\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsFilterRequest(url string) bool {\n\t//read all the pattern and check if match\n\tif apiConfigParsedData.RequestFilterEnabled {\n\t\tisFiltered, _ := regexp.MatchString(\".*\\\\.(\"+apiConfig.Data.RequestFilterType+\")\", url)\n\t\treturn isFiltered\n\t}\n\treturn false\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, 
out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func IsInternalRequest(ctx context.Context) bool {\n\tif v := ctx.Value(internalRequestKey); v != nil {\n\t\treturn v.(bool)\n\t}\n\n\treturn false\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (o *ObjectInfo) IsEncryptedMultipart() bool {\n\t_, ok := o.UserDefined[ReservedMetadataPrefix+\"Encrypted-Multipart\"]\n\treturn ok\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn 
status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func CfnVolume_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnVolume\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (p Provider) IsRequestValid(ratesRequest model.RatesRequest) (bool, error) {\n\treturn p.BaseProvider.IsRequestValid(p, ratesRequest)\n}", "func NewVolumeModifyIterAsyncRequest() *VolumeModifyIterAsyncRequest {\n\treturn &VolumeModifyIterAsyncRequest{}\n}", "func (o *V1VirusDatasetRequest) HasFormat() bool {\n\tif o != nil && o.Format != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn 
autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func (options *GetWorkspaceReadmeOptions) SetFormatted(formatted string) *GetWorkspaceReadmeOptions {\n\toptions.Formatted = core.StringPtr(formatted)\n\treturn options\n}", "func (v Volume) IsLocal() bool {\n\treturn v.Type == VolumeTypeLocal\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func IsSupported(reqHeader http.Header) bool {\n\th, err := accept.ParseHeader(reqHeader.Get(\"Accept\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn h.Quality(ContentType, map[string]string{\"charset\": \"utf-8\"}) > 0\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func isVolumeSupported(volume *v1.PersistentVolumeClaim) bool {\n\tfor k, v := range volume.Annotations {\n\t\tif (k == \"volume.beta.kubernetes.io/storage-provisioner\" && v == \"kubernetes.io/aws-ebs\") ||\n\t\t\t(k == \"pv.kubernetes.io/provisioned-by\" && strings.Contains(v, \"ebs.csi.aws.com\")) ||\n\t\t\t(k == \"volume.beta.kubernetes.io/storage-provisioner\" && 
strings.Contains(v, \"ebs.csi.aws.com\")) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func CfnVolume_IsCfnResource(construct constructs.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnVolume\",\n\t\t\"isCfnResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (t *osCinderCSITranslator) CanSupportInline(volume *v1.Volume) bool {\n\treturn volume != nil && volume.Cinder != nil\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn 
status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func ValidateVolumeSpec(volspec *api.VolumeSpecUpdate) error {\n\t// case of checking possible halevel flag combination\n\tif volspec.GetHaLevel() > 0 {\n\t\tif volspec.GetSize() > 0 || volspec.GetShared() || volspec.GetSticky() {\n\t\t\t// Please have unique msgs for each case so it's easy for use to identity the\n\t\t\t// flags mismatch combination.\n\t\t\treturn fmt.Errorf(\"Invalid halevel flag combination. 
Size, Shared or Sticky flag not supported \" +\n\t\t\t\t\"with halevel flag\")\n\t\t}\n\t}\n\treturn nil\n}", "func shouldFormatDisk(diskConfig *kurmaDiskConfiguration, currentfstype string) bool {\n\t// if no configured fstype is given, then no\n\tif diskConfig.FsType == \"\" {\n\t\treturn false\n\t}\n\n\t// if format is set to false\n\tif diskConfig.Format != nil && *diskConfig.Format == false {\n\t\treturn false\n\t}\n\n\t// if the current fstype matches the configured fstype\n\tif currentfstype == diskConfig.FsType {\n\t\treturn false\n\t}\n\n\t// if here, then yes\n\treturn true\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != 
\"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func NewVolumeFormat(source string, quiet bool) Format {\n\tswitch source {\n\tcase TableFormatKey:\n\t\tif quiet {\n\t\t\treturn defaultVolumeQuietFormat\n\t\t}\n\t\treturn defaultVolumeTableFormat\n\tcase RawFormatKey:\n\t\tif quiet {\n\t\t\treturn `name: {{.Name}}`\n\t\t}\n\t\treturn `name: {{.Name}}\\ndriver: {{.Driver}}\\n`\n\t}\n\treturn Format(source)\n}", "func ValidateRequest(request Request) (bool, error) {\n\tvar validationError ValidationError\n\t// Check there are units in the group.\n\tif len(request.Units) == 0 {\n\t\tvalidationError.Add(noUnitsInGroupError)\n\t}\n\n\t// Check that there are not any @ symbols in the group name.\n\tif strings.Contains(request.Group, \"@\") {\n\t\tvalidationError.Add(atInGroupNameError)\n\t}\n\n\tunitNames := []string{}\n\tfor _, unit := range request.Units {\n\t\tunitNames = append(unitNames, unit.Name)\n\t}\n\n\t// Check that 
we're not mixing units with @ and units without @.\n\tif !StringsHaveOrNot(unitNames, \"@.\") {\n\t\tvalidationError.Add(mixedSliceInstanceError)\n\t}\n\n\t// Check that all unit names are prefixed by the group name.\n\tif !StringsHasPrefix(unitNames, request.Group) {\n\t\tvalidationError.Add(badUnitPrefixError)\n\t}\n\n\t// Check that @ only occurrences at most once per unit name.\n\tif StringsCountMoreThan(unitNames, \"@\", 1) {\n\t\tvalidationError.Add(multipleAtInUnitNameError)\n\t}\n\n\t// Check that all unit names are unique.\n\tif !StringsUnique(unitNames) {\n\t\tvalidationError.Add(unitsSameNameError)\n\t}\n\n\tif len(validationError.CausingErrors) != 0 {\n\t\treturn false, validationError\n\t}\n\treturn true, nil\n}", "func IsUserRequestCtx(ctx context.Context) bool {\n\treturn ctxutils.IsAPIGwCtx(ctx)\n}", "func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func QueryVolumeUtil(ctx context.Context, m cnsvolume.Manager, queryFilter cnstypes.CnsQueryFilter,\n\tquerySelection *cnstypes.CnsQuerySelection, useQueryVolumeAsync bool) (*cnstypes.CnsQueryResult, error) {\n\tlog := logger.GetLogger(ctx)\n\tvar queryAsyncNotSupported bool\n\tvar queryResult *cnstypes.CnsQueryResult\n\tvar err error\n\tif useQueryVolumeAsync {\n\t\t// AsyncQueryVolume feature switch is enabled.\n\t\tqueryResult, err = m.QueryVolumeAsync(ctx, queryFilter, querySelection)\n\t\tif err != nil {\n\t\t\tif err.Error() == cnsvsphere.ErrNotSupported.Error() {\n\t\t\t\tlog.Warn(\"QueryVolumeAsync is not supported. Invoking QueryVolume API\")\n\t\t\t\tqueryAsyncNotSupported = true\n\t\t\t} else { // Return for any other failures.\n\t\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\t\"queryVolumeAsync failed for queryFilter: %v. Err=%+v\", queryFilter, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif !useQueryVolumeAsync || queryAsyncNotSupported {\n\t\tqueryResult, err = m.QueryVolume(ctx, queryFilter)\n\t\tif err != nil {\n\t\t\treturn nil, logger.LogNewErrorCodef(log, codes.Internal,\n\t\t\t\t\"queryVolume failed for queryFilter: %+v. 
Err=%+v\", queryFilter, err.Error())\n\t\t}\n\t}\n\treturn queryResult, nil\n}", "func Convert_v2alpha1_PathExistsRequest_To_impl_PathExistsRequest(in *v2alpha1.PathExistsRequest, out *impl.PathExistsRequest) error {\n\treturn autoConvert_v2alpha1_PathExistsRequest_To_impl_PathExistsRequest(in, out)\n}", "func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error {\n\tif len(volCaps) == 0 {\n\t\treturn fmt.Errorf(\"volume capabilities missing in request\")\n\t}\n\tfor _, c := range volCaps {\n\t\tif c.GetBlock() != nil {\n\t\t\treturn fmt.Errorf(\"block volume capability not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func (p *xmlProvider) IsReadable(r *http.Request, v interface{}) bool {\n\treturn true\n}", "func VolumeSpec(vspec *api.VolumeSpec) corev1.PersistentVolumeClaimSpec {\n\treturn corev1.PersistentVolumeClaimSpec{\n\t\tStorageClassName: vspec.StorageClass,\n\t\tAccessModes: vspec.AccessModes,\n\t\tResources: corev1.ResourceRequirements{\n\t\t\tRequests: corev1.ResourceList{\n\t\t\t\tcorev1.ResourceStorage: vspec.SizeParsed,\n\t\t\t},\n\t\t},\n\t}\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func (f FormatHeader) Valid() bool {\n\treturn f.ID == 0x20746d66 && f.Size == 0x10 && f.AudioFormat == 1\n}", "func ParamSpecInternal(paramType Type, name string, nick string, blurb string, flags ParamFlags) ParamSpec {\n\tc_param_type := (C.GType)(paramType)\n\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tc_nick := C.CString(nick)\n\tdefer C.free(unsafe.Pointer(c_nick))\n\n\tc_blurb := C.CString(blurb)\n\tdefer 
C.free(unsafe.Pointer(c_blurb))\n\n\tc_flags := (C.GParamFlags)(flags)\n\n\tretC := C.g_param_spec_internal(c_param_type, c_name, c_nick, c_blurb, c_flags)\n\tretGo := *ParamSpecNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func (s SCIONDMsg) NewIfInfoRequest() (IFInfoRequest, error) {\n\ts.Struct.SetUint16(8, 6)\n\tss, err := NewIFInfoRequest(s.Struct.Segment())\n\tif err != nil {\n\t\treturn IFInfoRequest{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}", "func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) {\n\t// Note: we purposefully do not call cancel manually. The reason is\n\t// when canceled, the request.Body will EOF when reading due to the way\n\t// it streams data in. Cancel will still be run when the timeout is\n\t// hit, so this doesn't really harm anything.\n\tctx, _ = c.withConfiguredTimeout(ctx)\n\treturn c.rawRequestWithContext(ctx, r)\n}", "func IsMultipart(str []byte) (isMulti bool, mediaType, boundary string, body []byte) {\n\tisMulti = false\n\tmediaType = \"\"\n\tboundary = \"\"\n\tbody = ([]byte)(\"\")\n\tvar pars map[string]string\n\tvar err error\n\tif bytes.HasPrefix(str, MIMEVersion1B) {\n\t\tcri := bytes.IndexByte(str, '\\n')\n\t\tif cri < 0 { // shouldn't happen\n\t\t\treturn\n\t\t}\n\t\tctln := str[cri+1:]\n\t\tif bytes.HasPrefix(ctln, ContentTypeB) { // should\n\t\t\tcri2 := bytes.IndexByte(ctln, '\\n')\n\t\t\tif cri2 < 0 { // shouldn't happen\n\t\t\t\treturn\n\t\t\t}\n\t\t\thdr := ctln[len(ContentTypeB)+1 : cri2]\n\t\t\tmediaType, pars, err = mime.ParseMediaType(string(hdr))\n\t\t\tif err != nil { // shouldn't happen\n\t\t\t\tlog.Printf(\"mimedata.IsMultipart: malformed MIME header: %v\\n\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif strings.HasPrefix(mediaType, \"multipart/\") {\n\t\t\t\tisMulti = true\n\t\t\t\tbody = str[cri2+1:]\n\t\t\t\tboundary = pars[\"boundary\"]\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (me 
TxsdPresentationAttributesFontSpecificationFontVariant) IsSmallCaps() bool {\n\treturn me.String() == \"small-caps\"\n}", "func (v *Plugin_Handshake_Args) IsSetRequest() bool {\n\treturn v != nil && v.Request != nil\n}", "func isVolumePublished(canAtMultiNode bool, attachReq *model.VolumeAttachmentSpec,\n\tmetadata map[string]string) (*model.VolumeAttachmentSpec, error) {\n\tglog.V(5).Infof(\"start to isVolumePublished, canAtMultiNode = %v, attachReq = %v\",\n\t\tcanAtMultiNode, attachReq)\n\n\tattachments, err := Client.ListVolumeAttachments()\n\tif err != nil {\n\t\tglog.V(5).Info(\"ListVolumeAttachments failed: \" + err.Error())\n\t\treturn nil, status.Error(codes.FailedPrecondition, err.Error())\n\t}\n\n\tfor _, attachSpec := range attachments {\n\t\tif attachSpec.VolumeId == attachReq.VolumeId {\n\t\t\tif attachSpec.Host != attachReq.Host {\n\t\t\t\tif !canAtMultiNode {\n\t\t\t\t\tmsg := fmt.Sprintf(\"the volume %s has been published to another node and does not have MULTI_NODE volume capability\",\n\t\t\t\t\t\tattachReq.VolumeId)\n\t\t\t\t\treturn nil, status.Error(codes.FailedPrecondition, msg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Opensds does not have volume_capability and readonly parameters,\n\t\t\t\t// but needs to check other parameters to determine compatibility?\n\t\t\t\tif attachSpec.Platform == attachReq.Platform &&\n\t\t\t\t\tattachSpec.OsType == attachReq.OsType &&\n\t\t\t\t\tattachSpec.Initiator == attachReq.Initiator &&\n\t\t\t\t\tisStringMapEqual(attachSpec.Metadata, metadata) &&\n\t\t\t\t\tattachSpec.AccessProtocol == attachReq.AccessProtocol {\n\t\t\t\t\tglog.V(5).Info(\"Volume published and is compatible\")\n\n\t\t\t\t\treturn attachSpec, nil\n\t\t\t\t}\n\n\t\t\t\tglog.Error(\"Volume published but is incompatible, incompatible attachement Id = \" + attachSpec.Id)\n\t\t\t\treturn nil, status.Error(codes.AlreadyExists, \"Volume published but is incompatible\")\n\t\t\t}\n\t\t}\n\t}\n\n\tglog.V(5).Info(\"Need to create a new 
attachment\")\n\treturn nil, nil\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func isCveFormat(fl FieldLevel) bool {\n\tcveString := fl.Field().String()\n\n\treturn cveRegex.MatchString(cveString)\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func marshalUpdatePrivateCloudUpdatePrivateCloudRequest(c *Client, m map[string]interface{}) ([]byte, error) {\n\n\treturn json.Marshal(m)\n}", "func (v *ServiceGenerator_Generate_Args) IsSetRequest() bool {\n\treturn v != nil && v.Request != nil\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func (c *UFSClient) NewRemoveUFSVolumeRequest() *RemoveUFSVolumeRequest {\n\treq := &RemoveUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy 
(retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *Client) FormatSubscribeRequest() interface{} {\n\treturn &SubscribeRequest{\n\t\tEvent: \"subscribe\",\n\t\tPair: c.Pairs,\n\t\tSubscription: struct {\n\t\t\tName string `json:\"name\"`\n\t\t}{Name: \"spread\"},\n\t}\n}", "func (vol *VolumeInfoSimple) IsActive() bool {\n\treturn vol.Status == proto.VolumeStatusActive\n}", "func EncodeInitStatusRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treturn &pb.InitStatusRequest{}, nil\n}", "func isIsoBicFormat(fl FieldLevel) bool {\n\tbicString := fl.Field().String()\n\n\treturn bicRegex.MatchString(bicString)\n}", "func IsGS1CompPartEncodable(s string) bool {\n\tfor i := range s {\n\t\t// null may only be followed by null\n\t\tif s[i] == nullASCII {\n\t\t\tfor i++; i < len(s); i++ {\n\t\t\t\tif s[i] != nullASCII {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t} else if !(s[i] <= 127 && gs1AICPCharSet[s[i]&0x7F] == 1) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}" ]
[ "0.7798853", "0.7311026", "0.7175302", "0.63214546", "0.6180906", "0.5922275", "0.5618228", "0.5379867", "0.5366598", "0.5078055", "0.47575882", "0.47379783", "0.4697924", "0.4661607", "0.46237588", "0.4479994", "0.44749045", "0.44351402", "0.42801523", "0.42551017", "0.4218258", "0.4211704", "0.4166768", "0.4166768", "0.41151252", "0.40809372", "0.40539607", "0.40011135", "0.3989117", "0.39740607", "0.396967", "0.39611617", "0.39586964", "0.39031976", "0.38561454", "0.38215968", "0.38160923", "0.3814548", "0.3804608", "0.38034377", "0.3798899", "0.37946007", "0.37937194", "0.375767", "0.37558553", "0.3749468", "0.3734534", "0.37234464", "0.37207538", "0.3705173", "0.36921832", "0.3688528", "0.36880186", "0.36865765", "0.36832064", "0.36826292", "0.3674768", "0.36737865", "0.3658269", "0.36467412", "0.36332667", "0.36321494", "0.3624831", "0.36181805", "0.36108375", "0.36000857", "0.35789552", "0.3572848", "0.35696298", "0.35514143", "0.35452983", "0.35445762", "0.35434744", "0.35426605", "0.35384825", "0.35290024", "0.3521835", "0.35213694", "0.35188434", "0.35105166", "0.34976852", "0.34937072", "0.3491094", "0.3490001", "0.3486793", "0.3480323", "0.3466081", "0.34653977", "0.34636265", "0.3462151", "0.34618935", "0.34615338", "0.346105", "0.34585935", "0.34543574", "0.34490755", "0.3445653", "0.34452936", "0.34437543", "0.34305224" ]
0.87529594
0
Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest is an autogenerated conversion function.
func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error { return autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn 
autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func 
Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn 
autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func (o *WhatsAppNameWhatsAppApiContent) SetFormattedName(v string) {\n\to.FormattedName = v\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in *impl.PathExistsRequest, out *v2alpha1.PathExistsRequest) error {\n\treturn autoConvert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in, out)\n}", "func IsVolumeNameValid(name string) (bool, error) {\n\treturn true, nil\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (cs *ControllerServer) 
validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the 
operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func Convert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in *internal.StopServiceRequest, out *v1alpha1.StopServiceRequest) error {\n\treturn autoConvert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetFormattedNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FormattedName, true\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err 
error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}", "func ValidateRequest(fromCurrency *models.CurrencyExchangeRequest) []string {\n\tvar validations []string\n\n\tif fromCurrency.FromCurrency == \"\" {\n\t\tvalidations = append(validations, EmptyFromCurrency)\n\t}\n\tif fromCurrency.ToCurrency == \"\" {\n\t\tvalidations = append(validations, EmptyToCurrency)\n\t}\n\treturn validations\n}", "func ValidateRequest(request Request) (bool, error) {\n\tvar validationError ValidationError\n\t// Check there are units in the group.\n\tif len(request.Units) == 0 {\n\t\tvalidationError.Add(noUnitsInGroupError)\n\t}\n\n\t// Check that there are not any @ symbols in the group name.\n\tif strings.Contains(request.Group, \"@\") {\n\t\tvalidationError.Add(atInGroupNameError)\n\t}\n\n\tunitNames := []string{}\n\tfor _, unit := range request.Units {\n\t\tunitNames = append(unitNames, unit.Name)\n\t}\n\n\t// Check that we're not mixing units with @ and units without @.\n\tif !StringsHaveOrNot(unitNames, \"@.\") {\n\t\tvalidationError.Add(mixedSliceInstanceError)\n\t}\n\n\t// Check that all unit names are prefixed by the group name.\n\tif !StringsHasPrefix(unitNames, request.Group) {\n\t\tvalidationError.Add(badUnitPrefixError)\n\t}\n\n\t// Check that @ only occurrences at most once per unit name.\n\tif StringsCountMoreThan(unitNames, \"@\", 1) {\n\t\tvalidationError.Add(multipleAtInUnitNameError)\n\t}\n\n\t// Check that all unit names are unique.\n\tif !StringsUnique(unitNames) {\n\t\tvalidationError.Add(unitsSameNameError)\n\t}\n\n\tif len(validationError.CausingErrors) != 0 {\n\t\treturn false, validationError\n\t}\n\treturn true, nil\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", 
"func (a *authenticator) isV1(r *http.Request) bool {\n\treturn r.Form.Get(v1Arg) != \"\"\n}", "func (options *GetWorkspaceReadmeOptions) SetFormatted(formatted string) *GetWorkspaceReadmeOptions {\n\toptions.Formatted = core.StringPtr(formatted)\n\treturn options\n}", "func (m *MsgCreateRequest) ValidateBasic() error {\n\tif m.From == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"from cannot be empty\")\n\t}\n\tif _, err := hubtypes.ProvAddressFromBech32(m.From); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\tif m.Duration < 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"duration cannot be negative\")\n\t}\n\tif m.Duration == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"duration cannot be zero\")\n\t}\n\tif m.Gigabytes < 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"gigabytes cannot be negative\")\n\t}\n\tif m.Gigabytes == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"gigabytes cannot be zero\")\n\t}\n\tif m.Prices == nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"prices cannot be nil\")\n\t}\n\tif m.Prices.Len() == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"prices cannot be empty\")\n\t}\n\tif m.Prices.IsAnyNil() {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"prices cannot contain nil\")\n\t}\n\tif !m.Prices.IsValid() {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"prices must be valid\")\n\t}\n\n\treturn nil\n}", "func (m *MsgStartRequest) ValidateBasic() error {\n\tif m.From == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"from cannot be empty\")\n\t}\n\tif _, err := sdk.AccAddressFromBech32(m.From); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\tif m.ID == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"id cannot be zero\")\n\t}\n\tif m.Address == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"address cannot be empty\")\n\t}\n\tif _, err := hubtypes.NodeAddressFromBech32(m.Address); err != nil 
{\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\n\treturn nil\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access 
type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1alpha1.VolumeAttachmentSpec, s conversion.Scope) error {\n\treturn autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in, out, s)\n}", "func IsFilterRequest(url string) bool {\n\t//read all the pattern and check if match\n\tif apiConfigParsedData.RequestFilterEnabled {\n\t\tisFiltered, _ := regexp.MatchString(\".*\\\\.(\"+apiConfig.Data.RequestFilterType+\")\", url)\n\t\treturn isFiltered\n\t}\n\treturn false\n}", "func IsRequestValid(strings ...string) (result bool) {\n\tresult = true\n\tfor _, s := range strings {\n\t\tif s == \"\" {\n\t\t\tresult = false\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func Convert_v1alpha3_SourceSpec_To_v1alpha1_SourceSpec(in *v1alpha3.SourceSpec, out *SourceSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha3_SourceSpec_To_v1alpha1_SourceSpec(in, out, s)\n}", "func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif 
diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func (client *CapacitiesClient) checkNameAvailabilityCreateRequest(ctx context.Context, location string, capacityParameters CheckCapacityNameAvailabilityParameters, options *CapacitiesClientCheckNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.PowerBIDedicated/locations/{location}/checkNameAvailability\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-01-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, capacityParameters)\n}", "func (r *RequestAPI) UpdateRequestV1(ctx context.Context, req *desc.UpdateRequestV1Request) (*desc.UpdateRequestV1Response, error) {\n\tlog.Printf(\"Got update request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"UpdateRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.UpdateEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := r.repo.Update(\n\t\tctx, models.NewRequest(req.RequestId, req.UserId, req.Type, req.Text),\n\t)\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, \"request does not exist\")\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tStr(\"endpoint\", \"UpdateRequestV1\").\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to update request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.UpdateEvent, err))\n\tr.metrics.IncUpdate(1, \"UpdateRequestV1\")\n\treturn &desc.UpdateRequestV1Response{}, nil\n}", "func isRequestSigned(isPresign bool, query url.Values, header http.Header) bool {\n\tif query.Get(v4Internal.AmzSignatureKey) != \"\" {\n\t\treturn true\n\t}\n\n\tif header.Get(\"Authorization\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Convert_internal_StartServiceRequest_To_v1alpha1_StartServiceRequest(in *internal.StartServiceRequest, out *v1alpha1.StartServiceRequest) error {\n\treturn autoConvert_internal_StartServiceRequest_To_v1alpha1_StartServiceRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn 
autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func CfnVolume_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnVolume\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (m *FreeIpaUpscaleV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateEnvironmentCrn(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTargetAvailabilityType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *E2SmRcPreIndicationHeaderFormat1) Validate() error {\n\treturn m.validate(false)\n}", "func Convert_internal_GetBIOSSerialNumberRequest_To_v1alpha1_GetBIOSSerialNumberRequest(in *internal.GetBIOSSerialNumberRequest, out *v1alpha1.GetBIOSSerialNumberRequest) error {\n\treturn 
autoConvert_internal_GetBIOSSerialNumberRequest_To_v1alpha1_GetBIOSSerialNumberRequest(in, out)\n}", "func Convert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in *FakeRequest, out *v1alpha2.FakeRequest, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in, out, s)\n}", "func Convert_v2alpha1_IsSymlinkRequest_To_impl_IsSymlinkRequest(in *v2alpha1.IsSymlinkRequest, out *impl.IsSymlinkRequest) error {\n\treturn autoConvert_v2alpha1_IsSymlinkRequest_To_impl_IsSymlinkRequest(in, out)\n}", "func validateVanillaControllerExpandVolumeRequest(ctx context.Context, req *csi.ControllerExpandVolumeRequest) error {\n\treturn common.ValidateControllerExpandVolumeRequest(ctx, req)\n}", "func ParseFormatted(input string) (time.Time, string, error) {\n\t// \"Mon, 02 Jan 2006 15:04:05 MST\"\n\tif t, err := time.Parse(time.RFC1123, input); err == nil {\n\t\treturn t, time.RFC1123, nil\n\t}\n\n\t// \"Mon, 02 Jan 2006 15:04:05 -0700\"\n\tif t, err := time.Parse(time.RFC1123Z, input); err == nil {\n\t\treturn t, time.RFC1123Z, nil\n\t}\n\n\t// \"2006-01-02T15:04:05Z07:00\"\n\tif t, err := time.Parse(time.RFC3339, input); err == nil {\n\t\treturn t, time.RFC3339, nil\n\t}\n\n\t// \"2006-01-02T15:04:05.999999999Z07:00\"\n\tif t, err := time.Parse(time.RFC3339Nano, input); err == nil {\n\t\treturn t, time.RFC3339Nano, nil\n\t}\n\n\t// \"02 Jan 06 15:04 MST\"\n\tif t, err := time.Parse(time.RFC822, input); err == nil {\n\t\treturn t, time.RFC822, nil\n\t}\n\n\t// \"02 Jan 06 15:04 -0700\"\n\tif t, err := time.Parse(time.RFC822Z, input); err == nil {\n\t\treturn t, time.RFC822Z, nil\n\t}\n\n\t// \"Monday, 02-Jan-06 15:04:05 MST\"\n\tif t, err := time.Parse(time.RFC850, input); err == nil {\n\t\treturn t, time.RFC850, nil\n\t}\n\n\t// \"Mon Jan _2 15:04:05 2006\"\n\tif t, err := time.Parse(time.ANSIC, input); err == nil {\n\t\treturn t, time.ANSIC, nil\n\t}\n\n\t// \"Mon Jan _2 15:04:05 MST 2006\"\n\tif t, err := time.Parse(time.UnixDate, input); 
err == nil {\n\t\treturn t, time.UnixDate, nil\n\t}\n\n\t// \"Mon Jan 02 15:04:05 -0700 2006\"\n\tif t, err := time.Parse(time.RubyDate, input); err == nil {\n\t\treturn t, time.RubyDate, nil\n\t}\n\n\t// \"3:04PM\"\n\tif t, err := time.Parse(time.Kitchen, input); err == nil {\n\t\treturn t, time.Kitchen, nil\n\t}\n\n\t// \"Jan _2 15:04:05\"\n\tif t, err := time.Parse(time.Stamp, input); err == nil {\n\t\treturn t, time.Stamp, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000\"\n\tif t, err := time.Parse(time.StampMilli, input); err == nil {\n\t\treturn t, time.StampMilli, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000000\"\n\tif t, err := time.Parse(time.StampMicro, input); err == nil {\n\t\treturn t, time.StampMicro, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000000000\"\n\tif t, err := time.Parse(time.StampNano, input); err == nil {\n\t\treturn t, time.StampNano, nil\n\t}\n\n\t// \"Mon, 02 Jan 2006 15:04:05 GMT\"\n\tif t, err := time.Parse(FormatHTTP, input); err == nil {\n\t\treturn t, FormatHTTP, nil\n\t}\n\n\tif t, err := time.Parse(FormatGo, strings.Split(input, \" m=\")[0]); err == nil {\n\t\treturn t, FormatGo, nil\n\t}\n\n\t// \"2019-01-25 21:51:38\"\n\tif t, err := time.Parse(FormatSimple, input); err == nil {\n\t\treturn t, FormatSimple, nil\n\t}\n\n\treturn time.Time{}, \"\", ErrParseFormatted\n}", "func NewVolumeModifyIterAsyncRequest() *VolumeModifyIterAsyncRequest {\n\treturn &VolumeModifyIterAsyncRequest{}\n}", "func IsGS1CompPartEncodable(s string) bool {\n\tfor i := range s {\n\t\t// null may only be followed by null\n\t\tif s[i] == nullASCII {\n\t\t\tfor i++; i < len(s); i++ {\n\t\t\t\tif s[i] != nullASCII {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t} else if !(s[i] <= 127 && gs1AICPCharSet[s[i]&0x7F] == 1) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c CheckNameAvailabilityRequest) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"name\", c.Name)\n\tpopulate(objectMap, \"type\", c.Type)\n\treturn 
json.Marshal(objectMap)\n}", "func Convert_application_ApplicationSpec_To_v1alpha1_ApplicationSpec(in *application.ApplicationSpec, out *ApplicationSpec, s conversion.Scope) error {\n\treturn autoConvert_application_ApplicationSpec_To_v1alpha1_ApplicationSpec(in, out, s)\n}", "func CreateModifyHostAvailabilityRequest() (request *ModifyHostAvailabilityRequest) {\n\trequest = &ModifyHostAvailabilityRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"ModifyHostAvailability\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (m *RetrieveCatalogObjectRequest) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func Convert_v1alpha2_ScriptSourceSpec_To_v1alpha1_ScriptSourceSpec(in *v1alpha2.ScriptSourceSpec, out *ScriptSourceSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha2_ScriptSourceSpec_To_v1alpha1_ScriptSourceSpec(in, out, s)\n}", "func (p Provider) IsRequestValid(ratesRequest model.RatesRequest) (bool, error) {\n\treturn p.BaseProvider.IsRequestValid(p, ratesRequest)\n}", "func (c *Client) FormatSubscribeRequest() interface{} {\n\treturn &SubscribeRequest{\n\t\tEvent: \"subscribe\",\n\t\tPair: c.Pairs,\n\t\tSubscription: struct {\n\t\t\tName string `json:\"name\"`\n\t\t}{Name: \"spread\"},\n\t}\n}", "func (m *StackV2Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCluster(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCustomDomain(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateFailurePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateGatewayPort(formats); err != nil {\n\t\tres 
= append(res, err)\n\t}\n\n\tif err := m.validateGeneral(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateImageSettings(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInstanceGroups(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetwork(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePlacement(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStackAuthentication(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTags(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *MsgSubscribeRequest) ValidateBasic() error {\n\tif m.From == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"from cannot be empty\")\n\t}\n\tif _, err := sdk.AccAddressFromBech32(m.From); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\tif m.ID == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"id cannot be zero\")\n\t}\n\tif m.Denom == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"denom cannot be empty\")\n\t}\n\tif err := sdk.ValidateDenom(m.Denom); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\n\treturn nil\n}", "func TPUPartitionedInputV2IsPacked(value bool) TPUPartitionedInputV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"is_packed\"] = value\n\t}\n}", "func (m *MsgUpdateStatusRequest) ValidateBasic() error {\n\tif m.From == \"\" {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"from cannot be empty\")\n\t}\n\tif _, err := hubtypes.ProvAddressFromBech32(m.From); err != nil {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, err.Error())\n\t}\n\tif m.ID == 0 {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"id cannot be zero\")\n\t}\n\tif !m.Status.IsOneOf(hubtypes.StatusActive, 
hubtypes.StatusInactive) {\n\t\treturn sdkerrors.Wrap(ErrorInvalidMessage, \"status must be one of [active, inactive]\")\n\t}\n\n\treturn nil\n}", "func NewRequestFormatter(opts ...RequestFormatterOpt) *RequestFormatter {\n\ta := new(RequestFormatter)\n\tfor _, opt := range opts {\n\t\topt.apply(a)\n\t}\n\treturn a\n}", "func Convert_v1alpha1_SourceSpec_To_v1alpha3_SourceSpec(in *SourceSpec, out *v1alpha3.SourceSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_SourceSpec_To_v1alpha3_SourceSpec(in, out, s)\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (d DiskScheduleAvailabilityRequest) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"country\", d.Country)\n\tpopulate(objectMap, \"expectedDataSizeInTeraBytes\", d.ExpectedDataSizeInTeraBytes)\n\tobjectMap[\"skuName\"] = SKUNameDataBoxDisk\n\tpopulate(objectMap, \"storageLocation\", d.StorageLocation)\n\treturn json.Marshal(objectMap)\n}", "func ValidateVolumeSpec(volspec *api.VolumeSpecUpdate) error {\n\t// case of checking possible halevel flag combination\n\tif volspec.GetHaLevel() > 0 {\n\t\tif volspec.GetSize() > 0 || volspec.GetShared() || volspec.GetSticky() {\n\t\t\t// Please have unique msgs for each case so it's easy for use to identity the\n\t\t\t// flags mismatch combination.\n\t\t\treturn fmt.Errorf(\"Invalid halevel flag combination. 
Size, Shared or Sticky flag not supported \" +\n\t\t\t\t\"with halevel flag\")\n\t\t}\n\t}\n\treturn nil\n}", "func (uri *Pkcs11URI) Format() (string, error) {\n\tif err := uri.Validate(); err != nil {\n\t\treturn \"\", err\n\t}\n\tresult := \"pkcs11:\" + formatAttributes(uri.pathAttributes, true)\n\tif len(uri.queryAttributes) > 0 {\n\t\tresult += \"?\" + formatAttributes(uri.queryAttributes, false)\n\t}\n\treturn result, nil\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v1alpha3_FileSpec_To_v1alpha1_FileSpec(in *v1alpha3.FileSpec, out *FileSpec, s conversion.Scope) error {\n\treturn autoConvert_v1alpha3_FileSpec_To_v1alpha1_FileSpec(in, out, s)\n}", "func NewBasicKeyRequest() *BasicKeyRequest {\n\tbkr := csr.NewBasicKeyRequest()\n\treturn &BasicKeyRequest{Algo: bkr.A, Size: bkr.S}\n}", "func (m *FulfillmentRequest) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateEndDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateItemID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateQuantity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSource(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStartDate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func IsInternalRequest(ctx context.Context) bool {\n\tif v := ctx.Value(internalRequestKey); v != nil {\n\t\treturn v.(bool)\n\t}\n\n\treturn false\n}", "func (v *Plugin_Handshake_Args) IsSetRequest() bool {\n\treturn v != nil && v.Request != nil\n}" ]
[ "0.74300635", "0.7394265", "0.7235801", "0.6471025", "0.6251731", "0.5836225", "0.57377946", "0.5735765", "0.56770515", "0.5361098", "0.5340112", "0.5232699", "0.5160232", "0.5074785", "0.49031568", "0.48863614", "0.4812914", "0.4799436", "0.4770457", "0.45899117", "0.45259595", "0.44887218", "0.44510353", "0.44415215", "0.43857783", "0.43847194", "0.42687064", "0.4268135", "0.42505836", "0.4206262", "0.41946706", "0.417837", "0.414432", "0.4141881", "0.41354385", "0.4134258", "0.4087843", "0.40838113", "0.39941376", "0.39718822", "0.39710262", "0.3931681", "0.39124608", "0.3906718", "0.388767", "0.38786474", "0.38716057", "0.38711947", "0.38694456", "0.38671678", "0.38528976", "0.38434693", "0.38419518", "0.38046753", "0.37926742", "0.378766", "0.37817252", "0.37742668", "0.37617984", "0.3759132", "0.3755709", "0.37556544", "0.3750756", "0.37429196", "0.37383276", "0.37380764", "0.37380764", "0.373205", "0.37166575", "0.3711762", "0.37083402", "0.36992964", "0.36981922", "0.3668507", "0.36667", "0.36649516", "0.36636916", "0.36513996", "0.36374697", "0.36353657", "0.36291316", "0.36242616", "0.3623464", "0.36194405", "0.36095065", "0.36061057", "0.3593782", "0.35925615", "0.3590258", "0.35858503", "0.35815087", "0.35763738", "0.35742643", "0.35716513", "0.35666728", "0.3564825", "0.35635707", "0.35621512", "0.35553682", "0.35499522" ]
0.86798626
0
Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse is an autogenerated conversion function.
func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error { return autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out 
*internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func 
Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (vr *VersionResponse) IsOk() bool {\n\treturn len(vr.version) > 0\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_IsSymlinkResponse_To_impl_IsSymlinkResponse(in *v2alpha1.IsSymlinkResponse, out *impl.IsSymlinkResponse) error {\n\treturn autoConvert_v2alpha1_IsSymlinkResponse_To_impl_IsSymlinkResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetFormattedNameOk() (*string, bool) {\n\tif o 
== nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FormattedName, true\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func (r Response) AsBasicIntangible() (BasicIntangible, bool) {\n\treturn nil, false\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetOK) IsSuccess() bool {\n\treturn true\n}", "func ResponseFormat(h http.Header) Format {\n\tct := h.Get(hdrContentType)\n\n\tmediatype, params, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn FmtUnknown\n\t}\n\n\tconst textType = \"text/plain\"\n\n\tswitch mediatype {\n\tcase ProtoType:\n\t\tif p, ok := params[\"proto\"]; ok && p != ProtoProtocol {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\tif e, ok := params[\"encoding\"]; ok && e != \"delimited\" {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtProtoDelim\n\n\tcase textType:\n\t\tif v, ok := params[\"version\"]; ok && v != TextVersion {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtText\n\t}\n\n\treturn FmtUnknown\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (rb ResponseBase) AsBasicIntangible() (BasicIntangible, bool) {\n\treturn nil, false\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, 
error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetUnauthorized) IsSuccess() bool {\n\treturn false\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 
{\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func (m Message) IsResponse() bool {\n\treturn m.Y == \"r\"\n}", "func (r *BaseStandard) IsOk() bool {\n\tif r.AuditInfo.StatusCode < http.StatusOK || r.AuditInfo.StatusCode >= http.StatusMultipleChoices {\n\t\treturn false\n\t}\n\n\tif !r.HasItems() {\n\t\treturn false\n\t}\n\n\tif len(r.AuditInfo.Errors.Items) > 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (er *ExitResponse) IsOk() bool {\n\treturn er.Ok\n}", "func (s *OsdCsiServer) ValidateVolumeCapabilities(\n\tctx context.Context,\n\treq *csi.ValidateVolumeCapabilitiesRequest,\n) (*csi.ValidateVolumeCapabilitiesResponse, error) {\n\n\tcapabilities := req.GetVolumeCapabilities()\n\tif capabilities == nil || len(capabilities) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_capabilities must be specified\")\n\t}\n\tid := req.GetVolumeId()\n\tif len(id) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_id must be specified\")\n\t}\n\n\t// Log request\n\tclogger.WithContext(ctx).Infof(\"csi.ValidateVolumeCapabilities of id %s \"+\n\t\t\"capabilities %#v \", id, capabilities)\n\n\t// Get grpc connection\n\tconn, err := s.getConn()\n\tif err != nil {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.Unavailable,\n\t\t\t\"Unable to connect to 
SDK server: %v\", err)\n\t}\n\n\t// Get secret if any was passed\n\tctx = s.setupContext(ctx, req.GetSecrets())\n\tctx, cancel := grpcutil.WithDefaultTimeout(ctx)\n\tdefer cancel()\n\n\t// Check ID is valid with the specified volume capabilities\n\tvolumes := api.NewOpenStorageVolumeClient(conn)\n\tresp, err := volumes.Inspect(ctx, &api.SdkVolumeInspectRequest{\n\t\tVolumeId: id,\n\t})\n\tif err != nil {\n\t\treturn nil, status.Error(codes.NotFound, \"ID not found\")\n\t}\n\tv := resp.GetVolume()\n\tif v.Id != id {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver volume id [%s] does not equal requested id of: %s\",\n\t\t\tv.Id,\n\t\t\tid)\n\t\tclogger.WithContext(ctx).Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\t// Setup uninitialized response object\n\tresult := &csi.ValidateVolumeCapabilitiesResponse{\n\t\tConfirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{\n\t\t\tVolumeContext: req.GetVolumeContext(),\n\t\t\tVolumeCapabilities: req.GetVolumeCapabilities(),\n\t\t\tParameters: req.GetParameters(),\n\t\t},\n\t}\n\n\t// Check capability\n\tfor _, capability := range capabilities {\n\t\t// Currently the CSI spec defines all storage as \"file systems.\"\n\t\t// So we do not need to check this with the volume. 
All we will check\n\t\t// here is the validity of the capability access type.\n\t\tif capability.GetMount() == nil && capability.GetBlock() == nil {\n\t\t\treturn nil, status.Error(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"Cannot have both mount and block be undefined\")\n\t\t}\n\n\t\t// Check access mode is setup correctly\n\t\tmode := capability.GetAccessMode()\n\t\tswitch {\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER:\n\t\t\tif v.Spec.Sharedv4 || v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:\n\t\t\tif v.Spec.Sharedv4 || v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY:\n\t\t\tif !v.Spec.Sharedv4 && !v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER ||\n\t\t\tmode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER:\n\t\t\tif !v.Spec.Sharedv4 && !v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = 
volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, status.Errorf(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"AccessMode %s is not allowed\",\n\t\t\t\tmode.Mode.String())\n\t\t}\n\n\t\tif result.Confirmed == nil {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// If we passed all the checks, then it is valid.\n\t// result.Message needs to be empty on return\n\treturn result, nil\n}", "func (i Intangible) AsBasicResponse() (BasicResponse, bool) {\n\treturn &i, true\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func responseFormat(acceptHeader string) (Format, *protocolError) {\n\tif acceptHeader == \"\" {\n\t\treturn FormatBinary, nil\n\t}\n\n\tparsed, err := parseAccept(acceptHeader)\n\tif err != nil {\n\t\treturn FormatBinary, errorf(http.StatusBadRequest, \"Accept header: %s\", err)\n\t}\n\tformats := make(acceptFormatSlice, 0, len(parsed))\n\tfor _, at := range parsed {\n\t\tf, err := FormatFromMediaType(at.MediaType, at.MediaTypeParams)\n\t\tif err != nil {\n\t\t\t// Ignore invalid format. 
Check further.\n\t\t\tcontinue\n\t\t}\n\t\tformats = append(formats, acceptFormat{f, at.QualityFactor})\n\t}\n\tif len(formats) == 0 {\n\t\treturn FormatBinary, errorf(\n\t\t\thttp.StatusNotAcceptable,\n\t\t\t\"Accept header: specified media types are not not supported. Supported types: %q, %q, %q, %q.\",\n\t\t\tFormatBinary.MediaType(),\n\t\t\tFormatJSONPB.MediaType(),\n\t\t\tFormatText.MediaType(),\n\t\t\tContentTypeJSON,\n\t\t)\n\t}\n\tsort.Sort(formats) // order by quality factor and format preference.\n\treturn formats[0].Format, nil\n}", "func (o *PcloudPvminstancesVolumesGetBadRequest) IsSuccess() bool {\n\treturn false\n}", "func ServeFormatted(w http.ResponseWriter, r *http.Request, v interface{}) {\n\taccept := r.Header.Get(\"Accept\")\n\tswitch accept {\n\tcase applicationJson:\n\t\tServeJson(w, v)\n\tcase applicationXml, textXml:\n\t\tServeXml(w, v)\n\tdefault:\n\t\tServeJson(w, v)\n\t}\n\n\treturn\n}", "func GetVolumeStatus(hostName, volumeName string) (map[string]string, error) {\n\tformatStr1 := \" --format '{{index .Status.access}} {{index .Status \\\"attach-as\\\"}} {{index .Status.capacity.allocated}} {{index .Status.capacity.size}} {{index .Status \\\"clone-from\\\"}}\"\n\tformatStr2 := \" {{index .Status \\\"created by VM\\\"}} {{index .Status.datastore}} {{index .Status.diskformat}} {{index .Status.fstype}} {{index .Status.status}} {{index .Status \\\"attached to VM\\\"}}'\"\n\n\tcmd := dockercli.InspectVolume + volumeName + formatStr1 + formatStr2\n\tout, err := ssh.InvokeCommand(hostName, cmd)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatus := make(map[string]string)\n\tval := strings.Fields(out)\n\n\tfor i := 0; i < len(dockercli.VolumeStatusFields); i++ {\n\t\tstatus[dockercli.VolumeStatusFields[i]] = val[i]\n\t}\n\treturn status, nil\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn 
autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func (r Response) AsIntangible() (*Intangible, bool) {\n\treturn nil, false\n}", "func ConvertInternalMuteInfo(muteInfo *models.VodMuteInfoResponse) *rpc.VodMuteInfoResponse {\n\taudibleMagicResponses := make([]*rpc.AudibleMagicResponse, len(muteInfo.AudibleMagicResponses))\n\tfor i, audibleMagicResponse := range muteInfo.AudibleMagicResponses {\n\t\taudibleMagicResponses[i] = &rpc.AudibleMagicResponse{\n\t\t\tAudibleMagicResponseId: utils.Int64ToStr(audibleMagicResponse.AudibleMagicResponseID),\n\t\t\tVodId: utils.Int64ToStr(audibleMagicResponse.VodID),\n\t\t\tTitle: audibleMagicResponse.Title,\n\t\t\tPerformer: audibleMagicResponse.Performer,\n\t\t\tGenre: audibleMagicResponse.Genre,\n\t\t\tArtist: audibleMagicResponse.Artist,\n\t\t\tAlbumTitle: audibleMagicResponse.AlbumTitle,\n\t\t\tSong: audibleMagicResponse.Song,\n\t\t\tIsrc: audibleMagicResponse.Isrc,\n\t\t\tIsMatch: audibleMagicResponse.IsMatch,\n\t\t\tMatchOffsetSeconds: utils.ProtobufInt64Value(audibleMagicResponse.MatchOffsetSeconds),\n\t\t\tMatchDurationSeconds: utils.ProtobufInt64Value(audibleMagicResponse.MatchDurationSeconds),\n\t\t\tScanOffsetSeconds: utils.ProtobufInt64Value(audibleMagicResponse.ScanOffsetSeconds),\n\t\t\tScanDurationSeconds: utils.ProtobufInt64Value(audibleMagicResponse.ScanDurationSeconds),\n\t\t\tMuteOffsetSeconds: utils.ProtobufInt64Value(audibleMagicResponse.MuteOffsetSeconds),\n\t\t\tMuteDurationSeconds: utils.ProtobufInt64Value(audibleMagicResponse.MuteDurationSeconds),\n\t\t\tAudibleMagicItemId: audibleMagicResponse.AudibleMagicItemID,\n\t\t\tCreatedAt: utils.ProtobufTimeAsTimestamp(&audibleMagicResponse.CreatedAt),\n\t\t\tUnmutedAt: utils.ProtobufTimeAsTimestamp(audibleMagicResponse.UnmutedAt),\n\t\t}\n\t}\n\n\tmutedSegments := make([]*rpc.VodMutedSegment, len(muteInfo.MutedSegments))\n\tfor i, mutedSegment := range muteInfo.MutedSegments {\n\t\tmutedSegments[i] = 
&rpc.VodMutedSegment{\n\t\t\tOffset: mutedSegment.Offset,\n\t\t\tDuration: mutedSegment.Duration,\n\t\t}\n\t}\n\n\tvar vodAppeal *rpc.VodAppeal\n\tif muteInfo.VodAppeal != nil {\n\t\tvodAppeal = &rpc.VodAppeal{\n\t\t\tVodAppealId: utils.Int64ToStr(muteInfo.VodAppeal.VodAppealID),\n\t\t\tResolvedAt: utils.ProtobufTimeAsTimestamp(muteInfo.VodAppeal.ResolvedAt),\n\t\t\tCreatedAt: utils.ProtobufTimeAsTimestamp(&muteInfo.VodAppeal.CreatedAt),\n\t\t\tUpdatedAt: utils.ProtobufTimeAsTimestamp(&muteInfo.VodAppeal.UpdatedAt),\n\t\t\tVodId: utils.Int64ToStr(muteInfo.VodAppeal.VodID),\n\t\t\tPriority: muteInfo.VodAppeal.Priority,\n\t\t\tFullName: muteInfo.VodAppeal.FullName,\n\t\t\tStreetAddress1: muteInfo.VodAppeal.StreetAddress1,\n\t\t\tStreetAddress2: muteInfo.VodAppeal.StreetAddress2,\n\t\t\tCity: muteInfo.VodAppeal.City,\n\t\t\tState: muteInfo.VodAppeal.State,\n\t\t\tZipcode: muteInfo.VodAppeal.Zipcode,\n\t\t\tCountry: muteInfo.VodAppeal.Country,\n\t\t}\n\t}\n\n\ttrackAppeals := make([]*rpc.TrackAppeal, len(muteInfo.TrackAppeals))\n\tfor i, trackAppeal := range muteInfo.TrackAppeals {\n\t\ttrackAppeals[i] = &rpc.TrackAppeal{\n\t\t\tTrackAppealId: utils.Int64ToStr(trackAppeal.TrackAppealID),\n\t\t\tAudibleMagicResponseId: utils.Int64ToStr(trackAppeal.AudibleMagicResponseID),\n\t\t\tVodAppealId: utils.Int64ToStr(trackAppeal.VodAppealID),\n\t\t\tReason: trackAppeal.Reason,\n\t\t\tCreatedAt: utils.ProtobufTimeAsTimestamp(&trackAppeal.CreatedAt),\n\t\t\tUpdatedAt: utils.ProtobufTimeAsTimestamp(&trackAppeal.UpdatedAt),\n\t\t\tResolvedAt: utils.ProtobufTimeAsTimestamp(trackAppeal.ResolvedAt),\n\t\t}\n\t}\n\n\treturn &rpc.VodMuteInfoResponse{\n\t\tId: utils.Int64ToStr(muteInfo.ID),\n\t\tAudibleMagicResponses: audibleMagicResponses,\n\t\tMutedSegments: mutedSegments,\n\t\tVodAppeal: vodAppeal,\n\t\tTrackAppeals: trackAppeals,\n\t\tIsMuted: utils.ProtobufBoolValue(muteInfo.IsMuted),\n\t\tCanCreateAppeal: utils.ProtobufBoolValue(muteInfo.CanCreateAppeal),\n\t\tHasPendingAppeal: 
utils.ProtobufBoolValue(muteInfo.HasPendingAppeal),\n\t}\n}", "func (imr *InvokeMethodResponse) IsHTTPResponse() bool {\n\tif imr.r == nil {\n\t\treturn false\n\t}\n\t// gRPC status code <= 15 - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md\n\t// HTTP status code >= 100 - https://tools.ietf.org/html/rfc2616#section-10\n\treturn imr.r.Status.Code >= 100\n}", "func (c *Controller) ServeFormatted(encoding ...bool) error {\n\thasIndent := BConfig.RunMode != PROD\n\thasEncoding := len(encoding) > 0 && encoding[0]\n\treturn c.Ctx.Output.ServeFormatted(c.Data, hasIndent, hasEncoding)\n}", "func isVolumeSupported(volume *v1.PersistentVolumeClaim) bool {\n\tfor k, v := range volume.Annotations {\n\t\tif (k == \"volume.beta.kubernetes.io/storage-provisioner\" && v == \"kubernetes.io/aws-ebs\") ||\n\t\t\t(k == \"pv.kubernetes.io/provisioned-by\" && strings.Contains(v, \"ebs.csi.aws.com\")) ||\n\t\t\t(k == \"volume.beta.kubernetes.io/storage-provisioner\" && strings.Contains(v, \"ebs.csi.aws.com\")) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (v *versionType) IsConvertible() bool {\n\treturn hasMethod(v.pkg, v.typ, \"ConvertFrom\") && hasMethod(v.pkg, v.typ, \"ConvertTo\")\n}", "func (o *InlineResponse20049Post) GetDisplayBodyOk() (*string, bool) {\n\tif o == nil || o.DisplayBody == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DisplayBody, true\n}", "func (o *StatusDescriptorDTO) HasFormatter() bool {\n\tif o != nil && o.Formatter != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (rb ResponseBase) AsIntangible() (*Intangible, bool) {\n\treturn nil, false\n}", "func Convert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in *v2alpha1.PathExistsResponse, out *impl.PathExistsResponse) error {\n\treturn autoConvert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in, out)\n}", "func (i Identifiable) AsBasicResponse() (BasicResponse, bool) {\n\treturn nil, false\n}", "func (o *V1VirusDatasetRequest) HasFormat() bool {\n\tif o != 
nil && o.Format != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s StatusCode) IsPrivateSpec() bool {\n\treturn s.In(StatusRangePrivate)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (rb ResponseBase) AsBasicCivicStructure() (BasicCivicStructure, bool) {\n\treturn nil, false\n}", "func (o *Volume) HasVolumeId() bool {\n\tif o != nil && o.VolumeId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Volume) HasVolumeId() bool {\n\tif o != nil && o.VolumeId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (r Response) AsBasicCivicStructure() (BasicCivicStructure, bool) {\n\treturn nil, false\n}", "func (m *RegionResp) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (r *Response) Supported() bool {\n\treturn !strings.Contains(r.String(), NotSupported)\n}", "func (s *OsdCsiServer) ValidateVolumeCapabilities(\n\tctx context.Context,\n\treq *csi.ValidateVolumeCapabilitiesRequest,\n) (*csi.ValidateVolumeCapabilitiesResponse, error) {\n\n\tcapabilities := req.GetVolumeCapabilities()\n\tif capabilities == nil || len(capabilities) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_capabilities must be specified\")\n\t}\n\tid := 
req.GetVolumeId()\n\tif len(id) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_id must be specified\")\n\t}\n\tattributes := req.GetVolumeAttributes()\n\n\t// Log request\n\tlogrus.Debugf(\"ValidateVolumeCapabilities of id %s \"+\n\t\t\"capabilities %#v \"+\n\t\t\"attributes %#v \",\n\t\tid,\n\t\tcapabilities,\n\t\tattributes)\n\n\t// Check ID is valid with the specified volume capabilities\n\tvolumes, err := s.driver.Inspect([]string{id})\n\tif err != nil || len(volumes) == 0 {\n\t\treturn nil, status.Error(codes.NotFound, \"ID not found\")\n\t}\n\tif len(volumes) != 1 {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver returned an unexpected number of volumes when one was expected: %d\",\n\t\t\tlen(volumes))\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tv := volumes[0]\n\tif v.Id != id {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver volume id [%s] does not equal requested id of: %s\",\n\t\t\tv.Id,\n\t\t\tid)\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\n\t// Setup uninitialized response object\n\tresult := &csi.ValidateVolumeCapabilitiesResponse{\n\t\tSupported: true,\n\t}\n\n\t// Check capability\n\tfor _, capability := range capabilities {\n\t\t// Currently the CSI spec defines all storage as \"file systems.\"\n\t\t// So we do not need to check this with the volume. 
All we will check\n\t\t// here is the validity of the capability access type.\n\t\tif capability.GetMount() == nil && capability.GetBlock() == nil {\n\t\t\treturn nil, status.Error(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"Cannot have both mount and block be undefined\")\n\t\t}\n\n\t\t// Check access mode is setup correctly\n\t\tmode := capability.GetAccessMode()\n\t\tswitch {\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER:\n\t\t\tif v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:\n\t\t\tif v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY:\n\t\t\tif !v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER ||\n\t\t\tmode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER:\n\t\t\tif !v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tdefault:\n\t\t\treturn 
nil, status.Errorf(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"AccessMode %s is not allowed\",\n\t\t\t\tmode.Mode.String())\n\t\t}\n\n\t\tif !result.Supported {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// If we passed all the checks, then it is valid\n\tresult.Message = \"Volume is supported\"\n\treturn result, nil\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, 
err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (dr *DeleteResponse) IsOk() bool {\n\treturn dr.ok\n}", "func OkFormated(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": false, \"msg\": \"`+msg+`\"}`, 200, service, \"application/json\")\n\treturn nil\n}", "func (r Virtual_Disk_Image) GetIsEncrypted() (resp bool, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Disk_Image\", \"getIsEncrypted\", nil, &r.Options, &resp)\n\treturn\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func (o *V1VolumeClaim) GetSpecOk() (*V1VolumeClaimSpec, bool) {\n\tif o == nil || o.Spec == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Spec, true\n}", "func (cs CivicStructure) AsBasicResponse() (BasicResponse, bool) {\n\treturn &cs, true\n}", "func (sr SearchResponse) AsBasicIntangible() (BasicIntangible, bool) {\n\treturn nil, false\n}", "func (rb ResponseBase) AsBasicResponse() (BasicResponse, bool) {\n\treturn nil, false\n}", "func (v *VolumeService) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {\n\t// verify a volume was provided\n\tif len(volumeID) == 0 {\n\t\treturn types.Volume{}, nil, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume 
is notfound\n\tif strings.Contains(volumeID, \"notfound\") {\n\t\treturn types.Volume{}, nil,\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// check if the volume is not-found\n\tif strings.Contains(volumeID, \"not-found\") {\n\t\treturn types.Volume{}, nil,\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: \"local\",\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: volumeID,\n\t\tScope: \"local\",\n\t}\n\n\t// marshal response into raw bytes\n\tb, err := json.Marshal(response)\n\tif err != nil {\n\t\treturn types.Volume{}, nil, err\n\t}\n\n\treturn response, b, nil\n}", "func IsVolumeNameValid(name string) (bool, error) {\n\treturn true, nil\n}", "func (v *VolumeService) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(volumeID) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound\n\tif strings.Contains(volumeID, \"notfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// check if the volume is not-found\n\tif strings.Contains(volumeID, \"not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", volumeID))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: 
time.Now().String(),\n\t\tDriver: \"local\",\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: volumeID,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func (o *V1VirusDatasetRequest) GetFormatOk() (*V1TableFormat, bool) {\n\tif o == nil || o.Format == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Format, true\n}", "func (o *GetClientConfig200ResponseDenylist) GetVisibleOk() (*bool, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Visible, true\n}", "func FormatStandardResponse(success bool, errorCode, errorSubcode, message string, w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tresponse := StandardResponse{Success: success, ErrorCode: errorCode, ErrorSubcode: errorSubcode, ErrorMessage: message}\n\n\tif !response.Success {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\t// Encode the response as JSON\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\tlog.Printf(\"Error forming the boolean response (%v)\\n. 
%v\", response, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (d *portworx) IsPureVolume(volume *torpedovolume.Volume) (bool, error) {\n\tvar proxySpec *api.ProxySpec\n\tvar err error\n\tif proxySpec, err = d.getProxySpecForAVolume(volume); err != nil {\n\t\treturn false, err\n\t}\n\n\tif proxySpec == nil {\n\t\treturn false, nil\n\t}\n\n\tif proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_BLOCK || proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_FILE {\n\t\tlog.Debugf(\"Volume [%s] is Pure volume\", volume.ID)\n\t\treturn true, nil\n\t}\n\n\tlog.Debugf(\"Volume [%s] is not Pure Block volume\", volume.ID)\n\treturn false, nil\n}", "func (sr *SearchResponse) IsOk() bool {\n\t// Empty responses (meaning no matches) are not errors...\n\treturn len(sr.Documents) > 0\n}", "func (l License) AsBasicResponse() (BasicResponse, bool) {\n\treturn &l, true\n}", "func CreateListManagedPrivateSpacesResponse() (response *ListManagedPrivateSpacesResponse) {\n\tresponse = &ListManagedPrivateSpacesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func (d *portworx) IsPureFileVolume(volume *torpedovolume.Volume) (bool, error) {\n\tvar proxySpec *api.ProxySpec\n\tvar err error\n\tif proxySpec, err = d.getProxySpecForAVolume(volume); err != nil {\n\t\treturn false, err\n\t}\n\tif proxySpec == nil {\n\t\treturn false, nil\n\t}\n\n\tif proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_FILE {\n\t\tlog.Debugf(\"Volume [%s] is Pure File volume\", 
volume.ID)\n\t\treturn true, nil\n\t}\n\n\tlog.Debugf(\"Volume [%s] is not Pure File volume\", volume.ID)\n\treturn false, nil\n}", "func (o *HyperflexVmSnapshotInfoAllOf) GetDisplayStatusOk() (*string, bool) {\n\tif o == nil || o.DisplayStatus == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DisplayStatus, true\n}", "func (r *Response) IsOk() bool {\n\treturn r.Code == ok\n}", "func (o *WhatsAppNameWhatsAppApiContent) SetFormattedName(v string) {\n\to.FormattedName = v\n}", "func (o FieldResponseOutput) Packed() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v FieldResponse) bool { return v.Packed }).(pulumi.BoolOutput)\n}", "func (o *InlineResponse200115) GetPrivateOk() (*string, bool) {\n\tif o == nil || o.Private == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Private, true\n}", "func (cw CreativeWork) AsBasicResponse() (BasicResponse, bool) {\n\treturn &cw, true\n}", "func (o InstanceGroupManagerStatusResponseOutput) Stateful() InstanceGroupManagerStatusStatefulResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerStatusResponse) InstanceGroupManagerStatusStatefulResponse {\n\t\treturn v.Stateful\n\t}).(InstanceGroupManagerStatusStatefulResponseOutput)\n}" ]
[ "0.7618364", "0.70595145", "0.67880017", "0.64405686", "0.61822075", "0.5839305", "0.5336016", "0.52365017", "0.514176", "0.48982102", "0.47906035", "0.47612655", "0.47533578", "0.46719325", "0.4605236", "0.458764", "0.45759124", "0.45696095", "0.45471212", "0.44820127", "0.44492096", "0.4428358", "0.42211118", "0.42137387", "0.41938484", "0.4163624", "0.41158453", "0.40888798", "0.40622637", "0.4052731", "0.40516153", "0.40261018", "0.4023988", "0.40014458", "0.39780426", "0.39681146", "0.3965348", "0.39556625", "0.39353138", "0.39285642", "0.3926884", "0.3923947", "0.39204666", "0.39135492", "0.39066046", "0.38928196", "0.38815325", "0.38748083", "0.38568056", "0.3854895", "0.38517457", "0.3839034", "0.38269627", "0.38241738", "0.3821606", "0.38134593", "0.38043818", "0.38036376", "0.38031766", "0.37983608", "0.37981457", "0.37956125", "0.3789314", "0.37818438", "0.37813434", "0.37813434", "0.37781334", "0.37660822", "0.37643072", "0.3759734", "0.3759007", "0.3757676", "0.37550798", "0.3750415", "0.3749156", "0.37463433", "0.3745505", "0.37409523", "0.37323192", "0.3732238", "0.3728916", "0.37266752", "0.37233612", "0.37228474", "0.37170208", "0.37160847", "0.37160122", "0.37081784", "0.3706976", "0.36965612", "0.36925414", "0.36853734", "0.36796844", "0.3677407", "0.36735705", "0.36706957", "0.36702895", "0.3665993", "0.36600852", "0.36552325" ]
0.8633932
0
Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse is an autogenerated conversion function.
func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error { return autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error 
{\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func 
Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in *impl.IsSymlinkResponse, out *v2alpha1.IsSymlinkResponse) error {\n\treturn autoConvert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err error) {\n\terr = o.object.CallWithContext(ctx, 
InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (r Response) AsBasicIntangible() (BasicIntangible, bool) {\n\treturn nil, false\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (vr *VersionResponse) IsOk() bool {\n\treturn len(vr.version) > 0\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (o *WhatsAppNameWhatsAppApiContent) GetFormattedNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.FormattedName, true\n}", "func ServeFormatted(w http.ResponseWriter, r *http.Request, v interface{}) {\n\taccept := r.Header.Get(\"Accept\")\n\tswitch accept {\n\tcase applicationJson:\n\t\tServeJson(w, v)\n\tcase applicationXml, textXml:\n\t\tServeXml(w, 
v)\n\tdefault:\n\t\tServeJson(w, v)\n\t}\n\n\treturn\n}", "func ResponseFormat(h http.Header) Format {\n\tct := h.Get(hdrContentType)\n\n\tmediatype, params, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn FmtUnknown\n\t}\n\n\tconst textType = \"text/plain\"\n\n\tswitch mediatype {\n\tcase ProtoType:\n\t\tif p, ok := params[\"proto\"]; ok && p != ProtoProtocol {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\tif e, ok := params[\"encoding\"]; ok && e != \"delimited\" {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtProtoDelim\n\n\tcase textType:\n\t\tif v, ok := params[\"version\"]; ok && v != TextVersion {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtText\n\t}\n\n\treturn FmtUnknown\n}", "func (c *Controller) ServeFormatted(encoding ...bool) error {\n\thasIndent := BConfig.RunMode != PROD\n\thasEncoding := len(encoding) > 0 && encoding[0]\n\treturn c.Ctx.Output.ServeFormatted(c.Data, hasIndent, hasEncoding)\n}", "func Convert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in *impl.PathExistsResponse, out *v2alpha1.PathExistsResponse) error {\n\treturn autoConvert_impl_PathExistsResponse_To_v2alpha1_PathExistsResponse(in, out)\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (rb ResponseBase) AsBasicIntangible() (BasicIntangible, bool) {\n\treturn nil, false\n}", "func responseFormat(acceptHeader string) (Format, *protocolError) {\n\tif acceptHeader == \"\" {\n\t\treturn FormatBinary, nil\n\t}\n\n\tparsed, err := parseAccept(acceptHeader)\n\tif err != nil {\n\t\treturn FormatBinary, errorf(http.StatusBadRequest, \"Accept header: %s\", err)\n\t}\n\tformats := make(acceptFormatSlice, 0, len(parsed))\n\tfor _, at := range parsed {\n\t\tf, err := FormatFromMediaType(at.MediaType, at.MediaTypeParams)\n\t\tif err != nil {\n\t\t\t// Ignore invalid format. 
Check further.\n\t\t\tcontinue\n\t\t}\n\t\tformats = append(formats, acceptFormat{f, at.QualityFactor})\n\t}\n\tif len(formats) == 0 {\n\t\treturn FormatBinary, errorf(\n\t\t\thttp.StatusNotAcceptable,\n\t\t\t\"Accept header: specified media types are not not supported. Supported types: %q, %q, %q, %q.\",\n\t\t\tFormatBinary.MediaType(),\n\t\t\tFormatJSONPB.MediaType(),\n\t\t\tFormatText.MediaType(),\n\t\t\tContentTypeJSON,\n\t\t)\n\t}\n\tsort.Sort(formats) // order by quality factor and format preference.\n\treturn formats[0].Format, nil\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (r Response) AsIntangible() (*Intangible, bool) {\n\treturn nil, false\n}", "func (o *PcloudPvminstancesVolumesGetBadRequest) IsSuccess() bool {\n\treturn false\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (i Intangible) AsBasicResponse() (BasicResponse, bool) {\n\treturn &i, true\n}", "func (o *PcloudPvminstancesVolumesGetOK) IsSuccess() bool {\n\treturn true\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func (sr SearchResponse) AsBasicIntangible() (BasicIntangible, bool) 
{\n\treturn nil, false\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func (s *OsdCsiServer) ValidateVolumeCapabilities(\n\tctx context.Context,\n\treq *csi.ValidateVolumeCapabilitiesRequest,\n) (*csi.ValidateVolumeCapabilitiesResponse, error) {\n\n\tcapabilities := req.GetVolumeCapabilities()\n\tif capabilities == nil || len(capabilities) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_capabilities must be specified\")\n\t}\n\tid := req.GetVolumeId()\n\tif len(id) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_id must be specified\")\n\t}\n\n\t// Log request\n\tclogger.WithContext(ctx).Infof(\"csi.ValidateVolumeCapabilities of id %s \"+\n\t\t\"capabilities %#v \", id, capabilities)\n\n\t// Get grpc connection\n\tconn, err := s.getConn()\n\tif err != nil {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.Unavailable,\n\t\t\t\"Unable to connect to SDK server: %v\", err)\n\t}\n\n\t// Get secret if any was passed\n\tctx = s.setupContext(ctx, req.GetSecrets())\n\tctx, cancel := grpcutil.WithDefaultTimeout(ctx)\n\tdefer cancel()\n\n\t// Check ID is valid with the specified volume capabilities\n\tvolumes := api.NewOpenStorageVolumeClient(conn)\n\tresp, err := volumes.Inspect(ctx, &api.SdkVolumeInspectRequest{\n\t\tVolumeId: id,\n\t})\n\tif err != nil {\n\t\treturn nil, status.Error(codes.NotFound, \"ID not found\")\n\t}\n\tv := resp.GetVolume()\n\tif v.Id != id {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver volume id [%s] does not equal requested id of: %s\",\n\t\t\tv.Id,\n\t\t\tid)\n\t\tclogger.WithContext(ctx).Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\t// Setup uninitialized response object\n\tresult := &csi.ValidateVolumeCapabilitiesResponse{\n\t\tConfirmed: 
&csi.ValidateVolumeCapabilitiesResponse_Confirmed{\n\t\t\tVolumeContext: req.GetVolumeContext(),\n\t\t\tVolumeCapabilities: req.GetVolumeCapabilities(),\n\t\t\tParameters: req.GetParameters(),\n\t\t},\n\t}\n\n\t// Check capability\n\tfor _, capability := range capabilities {\n\t\t// Currently the CSI spec defines all storage as \"file systems.\"\n\t\t// So we do not need to check this with the volume. All we will check\n\t\t// here is the validity of the capability access type.\n\t\tif capability.GetMount() == nil && capability.GetBlock() == nil {\n\t\t\treturn nil, status.Error(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"Cannot have both mount and block be undefined\")\n\t\t}\n\n\t\t// Check access mode is setup correctly\n\t\tmode := capability.GetAccessMode()\n\t\tswitch {\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER:\n\t\t\tif v.Spec.Sharedv4 || v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:\n\t\t\tif v.Spec.Sharedv4 || v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY:\n\t\t\tif !v.Spec.Sharedv4 && !v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == 
csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER ||\n\t\t\tmode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER:\n\t\t\tif !v.Spec.Sharedv4 && !v.Spec.Shared {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Confirmed = nil\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, status.Errorf(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"AccessMode %s is not allowed\",\n\t\t\t\tmode.Mode.String())\n\t\t}\n\n\t\tif result.Confirmed == nil {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// If we passed all the checks, then it is valid.\n\t// result.Message needs to be empty on return\n\treturn result, nil\n}", "func (m *CatalogInfoResponse) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateErrors(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLimits(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStandardUnitDescriptionGroup(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (o *PcloudPvminstancesVolumesGetUnauthorized) IsSuccess() bool {\n\treturn false\n}", "func (o InstanceGroupManagerStatusResponseOutput) Stateful() InstanceGroupManagerStatusStatefulResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerStatusResponse) InstanceGroupManagerStatusStatefulResponse {\n\t\treturn 
v.Stateful\n\t}).(InstanceGroupManagerStatusStatefulResponseOutput)\n}", "func (o FieldResponseOutput) Packed() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v FieldResponse) bool { return v.Packed }).(pulumi.BoolOutput)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 {\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (o 
*WhatsAppNameWhatsAppApiContent) SetFormattedName(v string) {\n\to.FormattedName = v\n}", "func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface {\n\treturn c.resourceV1alpha2\n}", "func (rb ResponseBase) AsIntangible() (*Intangible, bool) {\n\treturn nil, false\n}", "func (s *OsdCsiServer) ValidateVolumeCapabilities(\n\tctx context.Context,\n\treq *csi.ValidateVolumeCapabilitiesRequest,\n) (*csi.ValidateVolumeCapabilitiesResponse, error) {\n\n\tcapabilities := req.GetVolumeCapabilities()\n\tif capabilities == nil || len(capabilities) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_capabilities must be specified\")\n\t}\n\tid := req.GetVolumeId()\n\tif len(id) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"volume_id must be specified\")\n\t}\n\tattributes := req.GetVolumeAttributes()\n\n\t// Log request\n\tlogrus.Debugf(\"ValidateVolumeCapabilities of id %s \"+\n\t\t\"capabilities %#v \"+\n\t\t\"attributes %#v \",\n\t\tid,\n\t\tcapabilities,\n\t\tattributes)\n\n\t// Check ID is valid with the specified volume capabilities\n\tvolumes, err := s.driver.Inspect([]string{id})\n\tif err != nil || len(volumes) == 0 {\n\t\treturn nil, status.Error(codes.NotFound, \"ID not found\")\n\t}\n\tif len(volumes) != 1 {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver returned an unexpected number of volumes when one was expected: %d\",\n\t\t\tlen(volumes))\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tv := volumes[0]\n\tif v.Id != id {\n\t\terrs := fmt.Sprintf(\n\t\t\t\"Driver volume id [%s] does not equal requested id of: %s\",\n\t\t\tv.Id,\n\t\t\tid)\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\n\t// Setup uninitialized response object\n\tresult := &csi.ValidateVolumeCapabilitiesResponse{\n\t\tSupported: true,\n\t}\n\n\t// Check capability\n\tfor _, capability := range capabilities {\n\t\t// Currently the CSI spec defines all storage as \"file 
systems.\"\n\t\t// So we do not need to check this with the volume. All we will check\n\t\t// here is the validity of the capability access type.\n\t\tif capability.GetMount() == nil && capability.GetBlock() == nil {\n\t\t\treturn nil, status.Error(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"Cannot have both mount and block be undefined\")\n\t\t}\n\n\t\t// Check access mode is setup correctly\n\t\tmode := capability.GetAccessMode()\n\t\tswitch {\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER:\n\t\t\tif v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:\n\t\t\tif v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY:\n\t\t\tif !v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase mode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER ||\n\t\t\tmode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER:\n\t\t\tif !v.Spec.Shared {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = volumeCapabilityMessageNotMultinodeVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif v.Readonly {\n\t\t\t\tresult.Supported = false\n\t\t\t\tresult.Message = 
volumeCapabilityMessageReadOnlyVolume\n\t\t\t\tbreak\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, status.Errorf(\n\t\t\t\tcodes.InvalidArgument,\n\t\t\t\t\"AccessMode %s is not allowed\",\n\t\t\t\tmode.Mode.String())\n\t\t}\n\n\t\tif !result.Supported {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// If we passed all the checks, then it is valid\n\tresult.Message = \"Volume is supported\"\n\treturn result, nil\n}", "func OkFormated(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": false, \"msg\": \"`+msg+`\"}`, 200, service, \"application/json\")\n\treturn nil\n}", "func (l License) AsBasicResponse() (BasicResponse, bool) {\n\treturn &l, true\n}", "func (o *V1VirusDatasetRequest) GetFormatOk() (*V1TableFormat, bool) {\n\tif o == nil || o.Format == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Format, true\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (m *SearchAvailabilityResponse) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAvailabilities(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateErrors(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in *internal.StopServiceResponse, out *v1alpha1.StopServiceResponse) error {\n\treturn autoConvert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in, out)\n}", "func GetVolumeStatus(hostName, volumeName string) (map[string]string, error) {\n\tformatStr1 := \" --format '{{index .Status.access}} {{index .Status \\\"attach-as\\\"}} {{index .Status.capacity.allocated}} {{index .Status.capacity.size}} {{index .Status \\\"clone-from\\\"}}\"\n\tformatStr2 := \" {{index .Status \\\"created by VM\\\"}} {{index .Status.datastore}} {{index .Status.diskformat}} {{index .Status.fstype}} {{index .Status.status}} {{index .Status \\\"attached to VM\\\"}}'\"\n\n\tcmd := dockercli.InspectVolume + volumeName + formatStr1 + formatStr2\n\tout, err := ssh.InvokeCommand(hostName, cmd)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatus := make(map[string]string)\n\tval := strings.Fields(out)\n\n\tfor i := 0; i < len(dockercli.VolumeStatusFields); i++ {\n\t\tstatus[dockercli.VolumeStatusFields[i]] = val[i]\n\t}\n\treturn status, nil\n}", "func (c *Client) IsV1API() bool {\n\treturn c.isV1\n}", "func (dd *AccountDoc) IsValidFormat() bool {\n\tif dd.Created == 0 || dd.GetType() != int(AccountDIDType) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (fe FoodEstablishment) AsBasicResponse() (BasicResponse, bool) {\n\treturn &fe, true\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func CreateListManagedPrivateSpacesResponse() (response *ListManagedPrivateSpacesResponse) {\n\tresponse = &ListManagedPrivateSpacesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (bbcblr BlockBlobsCommitBlockListResponse) 
IsServerEncrypted() string {\n\treturn bbcblr.rawResponse.Header.Get(\"x-ms-request-server-encrypted\")\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (m *MinGasPriceResponse) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func FormatResponse(o interface{}) string {\n\tout, err := json.MarshalIndent(o, \"\", \"\\t\")\n\tMust(err, `Command failed because an error occurred while prettifying output: %s`, err)\n\treturn string(out)\n}", "func (options *GetWorkspaceReadmeOptions) SetFormatted(formatted string) *GetWorkspaceReadmeOptions {\n\toptions.Formatted = core.StringPtr(formatted)\n\treturn options\n}", "func IsVolumeNameValid(name string) (bool, error) {\n\treturn true, nil\n}", "func (o *StatusDescriptorDTO) GetFormatterOk() (*string, bool) {\n\tif o == nil || o.Formatter == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Formatter, true\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out 
*v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (i Identifiable) AsBasicResponse() (BasicResponse, bool) {\n\treturn nil, false\n}", "func Overlay2Supported(kernelVersion string) bool {\n\tif !overlayFSSupported() {\n\t\treturn false\n\t}\n\n\tdaemonV, err := kernel.ParseRelease(kernelVersion)\n\tif err != nil {\n\t\treturn false\n\t}\n\trequiredV := kernel.VersionInfo{Kernel: 4}\n\treturn kernel.CompareKernelVersion(*daemonV, requiredV) > -1\n\n}", "func (abcr AppendBlobsCreateResponse) IsServerEncrypted() string {\n\treturn abcr.rawResponse.Header.Get(\"x-ms-request-server-encrypted\")\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := 
gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (m *CatalogItemResourceUpfrontPriceResponse) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func (m *ExtractionResponseItems0) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in 
*impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (m *DataResponseV1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateArchive(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCurrent(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m Message) IsResponse() bool {\n\treturn m.Y == \"r\"\n}", "func (o *PcloudVolumegroupsGetBadRequest) IsSuccess() bool {\n\treturn false\n}", "func (er *ExitResponse) IsOk() bool {\n\treturn er.Ok\n}", "func (bbsbr BlockBlobsStageBlockResponse) IsServerEncrypted() string {\n\treturn bbsbr.rawResponse.Header.Get(\"x-ms-request-server-encrypted\")\n}", "func (r *BaseStandard) IsOk() bool {\n\tif r.AuditInfo.StatusCode < http.StatusOK || r.AuditInfo.StatusCode >= http.StatusMultipleChoices {\n\t\treturn false\n\t}\n\n\tif !r.HasItems() {\n\t\treturn false\n\t}\n\n\tif len(r.AuditInfo.Errors.Items) > 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func StatusReadable(rm RM) (_ bool) {\n\t_, resp, err := rm.Get(restStatusReadable)\n\treturn err == nil && resp.StatusCode == http.StatusOK\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func Convert_internal_StartServiceResponse_To_v1alpha1_StartServiceResponse(in *internal.StartServiceResponse, out *v1alpha1.StartServiceResponse) error {\n\treturn autoConvert_internal_StartServiceResponse_To_v1alpha1_StartServiceResponse(in, out)\n}" ]
[ "0.73026353", "0.71197075", "0.706614", "0.63889986", "0.57709247", "0.5684597", "0.5541731", "0.55237895", "0.55222034", "0.5446338", "0.5368189", "0.52565306", "0.5210927", "0.50859964", "0.50825715", "0.48439112", "0.48228666", "0.47261494", "0.46686915", "0.45941815", "0.45797136", "0.45740268", "0.45442808", "0.45007196", "0.43876663", "0.438676", "0.43738192", "0.43606585", "0.42983213", "0.42486086", "0.4234489", "0.4214442", "0.4150731", "0.41411722", "0.4137137", "0.41332456", "0.41285434", "0.40962324", "0.40935305", "0.4079373", "0.4067323", "0.40599927", "0.40490267", "0.4026607", "0.40203056", "0.40021962", "0.3990854", "0.39760092", "0.39716497", "0.39529848", "0.39518544", "0.3949885", "0.39425412", "0.39230436", "0.39226407", "0.39197862", "0.39149508", "0.39114836", "0.3895127", "0.38947845", "0.38748237", "0.38741767", "0.3867062", "0.38564032", "0.38494572", "0.3844212", "0.38398844", "0.38272524", "0.38073674", "0.3792736", "0.37895358", "0.37878388", "0.37871182", "0.37787732", "0.37766045", "0.377595", "0.3773737", "0.3767807", "0.3759059", "0.37562248", "0.3751583", "0.37441966", "0.37424606", "0.37341157", "0.37340784", "0.37324184", "0.37289318", "0.37243572", "0.3718323", "0.37036425", "0.37010673", "0.3697425", "0.36944285", "0.36943942", "0.3685812", "0.3683123", "0.3681674", "0.36801445", "0.36799723", "0.36742336" ]
0.86001444
0
Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest is an autogenerated conversion function.
func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error { return autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func 
Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (a *Client) ListVolumes(params *ListVolumesParams) (*ListVolumesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListVolumesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListVolumes\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/volumes\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListVolumesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListVolumesOK), nil\n\n}", "func (c *restClient) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\tit := &VolumeIterator{}\n\treq = proto.Clone(req).(*netapppb.ListVolumesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*netapppb.Volume, string, error) {\n\t\tresp := &netapppb.ListVolumesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", 
req.GetFilter()))\n\t\t}\n\t\tif req.GetOrderBy() != \"\" {\n\t\t\tparams.Add(\"orderBy\", fmt.Sprintf(\"%v\", req.GetOrderBy()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetVolumes(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (s *OsdCsiServer) 
ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. 
We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func (c *Client) BuildStorageVolumesListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: StorageVolumesListSpinRegistryPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"spin-registry\", \"storage_volumes_list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (driver *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\tklog.V(4).Infof(\"ListVolumes: called with args %#v\", req)\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func ListVolumes(\n\tctx context.Context,\n\tc csi.ControllerClient,\n\tversion *csi.Version,\n\tmaxEntries uint32,\n\tstartingToken string,\n\tcallOpts ...grpc.CallOption) (\n\tvolumes []*csi.VolumeInfo, nextToken string, err error) {\n\n\treq := &csi.ListVolumesRequest{\n\t\tMaxEntries: maxEntries,\n\t\tStartingToken: startingToken,\n\t\tVersion: version,\n\t}\n\n\tres, err := c.ListVolumes(ctx, req, callOpts...)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tresult := res.GetResult()\n\tnextToken = result.NextToken\n\tentries := result.Entries\n\n\t// check to see if there are zero entries\n\tif 
len(result.Entries) == 0 {\n\t\treturn nil, nextToken, nil\n\t}\n\n\tvolumes = make([]*csi.VolumeInfo, len(entries))\n\n\tfor x, e := range entries {\n\t\tif volumes[x] = e.GetVolumeInfo(); volumes[x] == nil {\n\t\t\treturn nil, \"\", ErrNilVolumeInfo\n\t\t}\n\t}\n\n\treturn volumes, nextToken, nil\n}", "func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (s *VolumeListener) List(inctx context.Context, in *protocol.VolumeListRequest) (_ *protocol.VolumeListResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot list volume\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tjob, err := PrepareJob(inctx, in.GetTenantId(), \"/volumes/list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\thandler := VolumeHandler(job)\n\tvolumes, xerr := handler.List(in.GetAll())\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\t// Map resources.Volume to protocol.Volume\n\tvar pbvolumes []*protocol.VolumeInspectResponse\n\tfor _, v := range volumes {\n\t\tpbVolume, xerr := v.ToProtocol(ctx)\n\t\tif xerr != nil {\n\t\t\treturn nil, xerr\n\t\t}\n\n\t\tpbvolumes = append(pbvolumes, pbVolume)\n\t}\n\trv := &protocol.VolumeListResponse{Volumes: pbvolumes}\n\treturn rv, nil\n}", "func (client VolumesClient) List(ctx context.Context, location string, storageSubSystem string, storagePool string, filter string) (result VolumeListPage, err error) {\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, location, storageSubSystem, storagePool, filter)\n\tif err != nil {\n\t\terr = 
autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.vl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.vl, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (a *Client) PostContainersVolumesList(params *PostContainersVolumesListParams, authInfo runtime.ClientAuthInfoWriter) (*PostContainersVolumesListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostContainersVolumesListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostContainersVolumesList\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/containers/volumes/list\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostContainersVolumesListReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostContainersVolumesListOK), nil\n\n}", "func (cs *controller) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func NewListDisksOK() *ListDisksOK {\n\treturn &ListDisksOK{}\n}", "func CreateListDisks00Request() (request *ListDisks00Request) {\n\trequest = &ListDisks00Request{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EcsDemo\", \"2019-06-20\", \"ListDisks00\", \"\", \"\")\n\treturn\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx 
context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func List(d Driver) (*volume.ListResponse, error) {\n\tlog.Debugf(\"Entering List\")\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tvar vols []*volume.Volume\n\tfor name, v := range d.GetVolumes() {\n\t\tlog.Debugf(\"Volume found: %s\", v)\n\t\tm, err := getMount(d, v.GetMount())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvols = append(vols, &volume.Volume{Name: name, Status: v.GetStatus(), Mountpoint: m.GetPath()})\n\t}\n\treturn &volume.ListResponse{Volumes: vols}, nil\n}", "func EncodeStorageVolumesDeleteRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*spinregistry.StorageVolumesDeletePayload)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_volumes_delete\", \"*spinregistry.StorageVolumesDeletePayload\", v)\n\t\t}\n\t\tbody := NewStorageVolumesDeleteRequestBody(p)\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_volumes_delete\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (v *VolumeService) VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) {\n\treturn volume.VolumeListOKBody{}, nil\n}", "func (d *MinioDriver) List(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tvar vols []*volume.Volume\n\tfor name, v := range d.volumes {\n\t\tvols = append(vols,\n\t\t\t&volume.Volume{\n\t\t\t\tName: name,\n\t\t\t\tMountpoint: v.mountpoint,\n\t\t\t})\n\t}\n\treturn volumeResp(\"\", \"\", vols, capability, \"\")\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn 
autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (r ListVolumesRequest) Send(ctx context.Context) (*ListVolumesResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ListVolumesResponse{\n\t\tListVolumesOutput: r.Request.Data.(*ListVolumesOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (srv *VolumeService) List() ([]api.Volume, error) {\n\treturn srv.provider.ListVolumes()\n}", "func (s stack) ListVolumes(ctx context.Context) ([]*abstract.Volume, fail.Error) {\n\tif valid.IsNil(s) {\n\t\treturn nil, 
fail.InvalidInstanceError()\n\t}\n\n\treturn nil, fail.NotImplementedError(\"implement me\")\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func (so ServiceOffering) ListRequest() (ListCommand, error) {\n\t// Restricted cannot be applied here because it really has three states\n\treq := &ListServiceOfferings{\n\t\tID: so.ID,\n\t\tName: so.Name,\n\t\tSystemVMType: so.SystemVMType,\n\t}\n\n\tif so.IsSystem {\n\t\treq.IsSystem = &so.IsSystem\n\t}\n\n\treturn req, nil\n}", "func NewCmdDiskList() *cobra.Command {\n\treq := base.BizClient.NewDescribeUDiskRequest()\n\ttypeMap := map[string]string{\n\t\t\"DataDisk\": \"Oridinary-Data-Disk\",\n\t\t\"SystemDisk\": \"Oridinary-System-Disk\",\n\t\t\"SSDDataDisk\": \"SSD-Data-Disk\",\n\t}\n\tarkModeMap := map[string]string{\n\t\t\"Yes\": \"true\",\n\t\t\"No\": \"false\",\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"List udisk instance\",\n\t\tLong: \"List udisk instance\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfor key, val := range typeMap {\n\t\t\t\tif *req.DiskType == val {\n\t\t\t\t\t*req.DiskType = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp, err := base.BizClient.DescribeUDisk(req)\n\t\t\tif err != nil {\n\t\t\t\tbase.HandleError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlist := []DiskRow{}\n\t\t\tfor _, disk := range resp.DataSet {\n\t\t\t\trow := DiskRow{\n\t\t\t\t\tResourceID: disk.UDiskId,\n\t\t\t\t\tName: disk.Name,\n\t\t\t\t\tGroup: disk.Tag,\n\t\t\t\t\tSize: fmt.Sprintf(\"%dGB\", disk.Size),\n\t\t\t\t\tType: 
typeMap[disk.DiskType],\n\t\t\t\t\tEnableDataArk: arkModeMap[disk.UDataArkMode],\n\t\t\t\t\tMountUHost: fmt.Sprintf(\"%s/%s\", disk.UHostName, disk.UHostIP),\n\t\t\t\t\tMountPoint: disk.DeviceName,\n\t\t\t\t\tState: disk.Status,\n\t\t\t\t\tCreationTime: base.FormatDate(disk.CreateTime),\n\t\t\t\t\tExpirationTime: base.FormatDate(disk.ExpiredTime),\n\t\t\t\t}\n\t\t\t\tif disk.UHostIP == \"\" {\n\t\t\t\t\trow.MountUHost = \"\"\n\t\t\t\t}\n\t\t\t\tlist = append(list, row)\n\t\t\t}\n\t\t\tif global.json {\n\t\t\t\tbase.PrintJSON(list)\n\t\t\t} else {\n\t\t\t\tbase.PrintTableS(list)\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\treq.UDiskId = flags.String(\"resource-id\", \"\", \"Optional. Resource ID of the udisk to search\")\n\treq.DiskType = flags.String(\"udisk-type\", \"\", \"Optional. Optional. Type of the udisk to search. 'Oridinary-Data-Disk','Oridinary-System-Disk' or 'SSD-Data-Disk'\")\n\treq.Offset = cmd.Flags().Int(\"offset\", 0, \"Optional. Offset\")\n\treq.Limit = cmd.Flags().Int(\"limit\", 50, \"Optional. 
Limit\")\n\tflags.SetFlagValues(\"udisk-type\", \"Oridinary-Data-Disk\", \"Oridinary-System-Disk\", \"SSD-Data-Disk\")\n\treturn cmd\n}", "func (d *VolumeDriver) List(r volume.Request) volume.Response {\n\tlog.Errorf(\"VolumeDriver List to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (d *defaultDataVolumeManager) ListDataVolumes(ctx context.Context, kubeconfig []byte, listOpts ...client.ListOption) (*cdicorev1alpha1.DataVolumeList, error) {\n\tc, namespace, err := d.client.GetClient(kubeconfig)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create kubevirt client\")\n\t}\n\n\tdvList := cdicorev1alpha1.DataVolumeList{}\n\tif err := c.List(ctx, &dvList, listOpts...); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not list DataVolumes in namespace %s\", namespace)\n\t}\n\n\treturn &dvList, nil\n}", "func (d *DirDriver) List() (*volume.ListResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit List() endpoint\")\n\n\tvols := new(volume.ListResponse)\n\tvols.Volumes = []*volume.Volume{}\n\n\tfor _, vol := range d.volumes {\n\t\tnewVol := new(volume.Volume)\n\t\tnewVol.Name = vol.name\n\t\tnewVol.Mountpoint = vol.path\n\t\tnewVol.CreatedAt = vol.createTime.String()\n\t\tvols.Volumes = append(vols.Volumes, newVol)\n\t\tlogrus.Debugf(\"Adding volume %s to list response\", newVol.Name)\n\t}\n\n\treturn vols, nil\n}", "func (s *Module) DiskList() ([]pkg.VDisk, error) {\n\tpools, err := s.diskPools()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar disks []pkg.VDisk\n\tfor _, pool := range pools {\n\n\t\titems, err := os.ReadDir(pool)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list virtual disks\")\n\t\t}\n\n\t\tfor _, item := range items 
{\n\t\t\tif item.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo, err := item.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get file info for '%s'\", item.Name())\n\t\t\t}\n\n\t\t\tdisks = append(disks, pkg.VDisk{\n\t\t\t\tPath: filepath.Join(pool, item.Name()),\n\t\t\t\tSize: info.Size(),\n\t\t\t})\n\t\t}\n\n\t\treturn disks, nil\n\t}\n\n\treturn disks, nil\n}", "func (s *DataStore) ListVolumes() (map[string]*longhorn.Volume, error) {\n\titemMap := make(map[string]*longhorn.Volume)\n\n\tlist, err := s.ListVolumesRO()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, itemRO := range list {\n\t\t// Cannot use cached object from lister\n\t\titemMap[itemRO.Name] = itemRO.DeepCopy()\n\t}\n\treturn itemMap, nil\n}", "func (digitalocean DigitalOcean) ListVolumes() ([]godo.Volume, error) {\n\tclient, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolumes, _, err := client.client.Storage.ListVolumes(client.context, &godo.ListVolumeParams{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn volumes, err\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (c *restClient) EncryptVolumes(ctx context.Context, req *netapppb.EncryptVolumesRequest, opts ...gax.CallOption) (*EncryptVolumesOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tjsonReq, err := m.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v:encrypt\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := 
[]string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &EncryptVolumesOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (d *lvm) ListVolumes() ([]Volume, error) {\n\tvols := make(map[string]Volume)\n\n\tcmd := exec.Command(\"lvs\", \"--noheadings\", \"-o\", \"lv_name\", d.config[\"lvm.vg_name\"])\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\tfor scanner.Scan() {\n\t\trawName := strings.TrimSpace(scanner.Text())\n\t\tvar volType VolumeType\n\t\tvar volName string\n\n\t\tfor _, volumeType := 
range d.Info().VolumeTypes {\n\t\t\tprefix := fmt.Sprintf(\"%s_\", volumeType)\n\t\t\tif strings.HasPrefix(rawName, prefix) {\n\t\t\t\tvolType = volumeType\n\t\t\t\tvolName = strings.TrimPrefix(rawName, prefix)\n\t\t\t}\n\t\t}\n\n\t\tif volType == \"\" {\n\t\t\td.logger.Debug(\"Ignoring unrecognised volume type\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore unrecognised volume.\n\t\t}\n\n\t\tlvSnapSepCount := strings.Count(volName, lvmSnapshotSeparator)\n\t\tif lvSnapSepCount%2 != 0 {\n\t\t\t// If snapshot separator count is odd, then this means we have a lone lvmSnapshotSeparator\n\t\t\t// that is not part of the lvmEscapedHyphen pair, which means this volume is a snapshot.\n\t\t\td.logger.Debug(\"Ignoring snapshot volume\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore snapshot volumes.\n\t\t}\n\n\t\tisBlock := strings.HasSuffix(volName, lvmBlockVolSuffix)\n\n\t\tif volType == VolumeTypeVM && !isBlock {\n\t\t\tcontinue // Ignore VM filesystem volumes as we will just return the VM's block volume.\n\t\t}\n\n\t\t// Unescape raw LVM name to storage volume name. Safe to do now we know we are not dealing\n\t\t// with snapshot volumes.\n\t\tvolName = strings.Replace(volName, lvmEscapedHyphen, \"-\", -1)\n\n\t\tcontentType := ContentTypeFS\n\t\tif volType == VolumeTypeCustom && strings.HasSuffix(volName, lvmISOVolSuffix) {\n\t\t\tcontentType = ContentTypeISO\n\t\t\tvolName = strings.TrimSuffix(volName, lvmISOVolSuffix)\n\t\t} else if volType == VolumeTypeVM || isBlock {\n\t\t\tcontentType = ContentTypeBlock\n\t\t\tvolName = strings.TrimSuffix(volName, lvmBlockVolSuffix)\n\t\t}\n\n\t\t// If a new volume has been found, or the volume will replace an existing image filesystem volume\n\t\t// then proceed to add the volume to the map. 
We allow image volumes to overwrite existing\n\t\t// filesystem volumes of the same name so that for VM images we only return the block content type\n\t\t// volume (so that only the single \"logical\" volume is returned).\n\t\texistingVol, foundExisting := vols[volName]\n\t\tif !foundExisting || (existingVol.Type() == VolumeTypeImage && existingVol.ContentType() == ContentTypeFS) {\n\t\t\tv := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config)\n\n\t\t\tif contentType == ContentTypeFS {\n\t\t\t\tv.SetMountFilesystemProbe(true)\n\t\t\t}\n\n\t\t\tvols[volName] = v\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Unexpected duplicate volume %q found\", volName)\n\t}\n\n\terrMsg, err := io.ReadAll(stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed getting volume list: %v: %w\", strings.TrimSpace(string(errMsg)), err)\n\t}\n\n\tvolList := make([]Volume, len(vols))\n\tfor _, v := range vols {\n\t\tvolList = append(volList, v)\n\t}\n\n\treturn volList, nil\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (h *ApiHandler) handleListVolumes(c echo.Context) error {\n\tbuilder := h.Builder(c)\n\n\tvar kalmPVCList v1.PersistentVolumeClaimList\n\tif err := builder.List(&kalmPVCList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar kalmPVList v1.PersistentVolumeList\n\tif err := builder.List(&kalmPVList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkalmPVMap := make(map[string]v1.PersistentVolume)\n\tfor _, kalmPV := range kalmPVList.Items {\n\t\tkalmPVMap[kalmPV.Name] = 
kalmPV\n\t}\n\n\trespVolumes := []resources.Volume{}\n\tfor _, kalmPVC := range kalmPVCList.Items {\n\t\trespVolume, err := builder.BuildVolumeResponse(kalmPVC, kalmPVMap[kalmPVC.Spec.VolumeName])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespVolumes = append(respVolumes, *respVolume)\n\t}\n\n\treturn c.JSON(200, respVolumes)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (cl *Client) gceVolumeList(ctx context.Context, vla *csp.VolumeListArgs, volumeType string) ([]*csp.Volume, error) {\n\tcomputeService, err := cl.getComputeService(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilter := \"\"\n\tif volumeType != \"\" {\n\t\tfilter = fmt.Sprintf(`type=\"%s\"`, volumeType)\n\t}\n\tfor _, tag := range vla.Tags {\n\t\tif filter != \"\" {\n\t\t\tfilter += \" AND \"\n\t\t}\n\t\tkv := strings.SplitN(tag, \":\", 2)\n\t\tif len(kv) == 1 { // if just \"key\" is specified then the existence of a label with that key will be matched\n\t\t\tfilter += fmt.Sprintf(\"labels.%s:*\", kv[0])\n\t\t} else { // if specified here as \"key:value\" then both the key and value will be matched\n\t\t\tfilter += fmt.Sprintf(`labels.%s=\"%s\"`, kv[0], kv[1])\n\t\t}\n\t}\n\treq := computeService.Disks().List(cl.projectID, cl.attrs[AttrZone].Value).Filter(filter)\n\tresult := []*csp.Volume{}\n\tif err = req.Pages(ctx, func(page *compute.DiskList) error {\n\t\tfor _, disk := range page.Items {\n\t\t\tvol := gceDiskToVolume(disk)\n\t\t\tresult = append(result, vol)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list GC disks: %w\", err)\n\t}\n\treturn result, nil\n}", "func (c *Client) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts 
...gax.CallOption) *VolumeIterator {\n\treturn c.internalClient.ListVolumes(ctx, req, opts...)\n}", "func (d *driverInfo) List() ([]*Volume, error) {\n\tvar volumes []*Volume\n\n\tfor _, vol := range d.volumes {\n\t\tvolumes = append(volumes, vol)\n\t}\n\n\treturn volumes, nil\n}", "func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, options *DiskEncryptionSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) {\n\tlistopt := metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.CoreV1().PersistentVolumes().List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}", "func (c *Core) ListVolumes(labels map[string]string) ([]*types.Volume, error) {\n\tvar retVolumes = make([]*types.Volume, 0)\n\n\t// list local meta store.\n\tmetaList, err := c.store.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// scan all drivers.\n\tlogrus.Debugf(\"probing all drivers for listing volume\")\n\tdrivers, err := driver.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx := 
driver.Contexts()\n\n\tvar realVolumes = map[string]*types.Volume{}\n\tvar volumeDrivers = map[string]driver.Driver{}\n\n\tfor _, dv := range drivers {\n\t\tvolumeDrivers[dv.Name(ctx)] = dv\n\n\t\td, ok := dv.(driver.Lister)\n\t\tif !ok {\n\t\t\t// not Lister, ignore it.\n\t\t\tcontinue\n\t\t}\n\t\tvList, err := d.List(ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"volume driver %s list error: %v\", dv.Name(ctx), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, v := range vList {\n\t\t\trealVolumes[v.Name] = v\n\t\t}\n\t}\n\n\tfor name, obj := range metaList {\n\t\tv, ok := obj.(*types.Volume)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\td, ok := volumeDrivers[v.Spec.Backend]\n\t\tif !ok {\n\t\t\t// driver not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\n\t\t// the local driver and tmpfs driver\n\t\tif d.StoreMode(ctx).IsLocal() {\n\t\t\tretVolumes = append(retVolumes, v)\n\t\t\tcontinue\n\t\t}\n\n\t\trv, ok := realVolumes[name]\n\t\tif !ok {\n\t\t\t// real volume not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\t\tv.Status.MountPoint = rv.Status.MountPoint\n\n\t\tdelete(realVolumes, name)\n\n\t\tretVolumes = append(retVolumes, v)\n\t}\n\n\tfor _, v := range realVolumes {\n\t\t// found new volumes, store the meta\n\t\tlogrus.Warningf(\"found new volume %s\", v.Name)\n\t\tc.store.Put(v)\n\n\t\tretVolumes = append(retVolumes, v)\n\n\t}\n\n\treturn retVolumes, nil\n}", "func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tserverCh, err := cluster.ServerIterator(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype serverResult struct {\n\t\tids []string\n\t\terr error\n\t}\n\tresultCh := make(chan serverResult)\n\n\tvar action listVdisksAction\n\tif pred == nil {\n\t\taction.filter = filterListedVdiskID\n\t} else {\n\t\taction.filter = func(str string) (string, bool) {\n\t\t\tstr, ok := filterListedVdiskID(str)\n\t\t\tif !ok {\n\t\t\t\treturn \"\", 
false\n\t\t\t}\n\t\t\treturn str, pred(str)\n\t\t}\n\t}\n\n\tvar serverCount int\n\tvar reply interface{}\n\tfor server := range serverCh {\n\t\tserver := server\n\t\tgo func() {\n\t\t\tvar result serverResult\n\t\t\tlog.Infof(\"listing all vdisks stored on %v\", server.Config())\n\t\t\treply, result.err = server.Do(action)\n\t\t\tif result.err == nil && reply != nil {\n\t\t\t\t// [NOTE] this line of code relies on the fact that our\n\t\t\t\t// custom `listVdisksAction` type returns a `[]string` value as a reply,\n\t\t\t\t// as soon as that logic changes, this line will start causing trouble.\n\t\t\t\tresult.ids = reply.([]string)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase resultCh <- result:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t}()\n\t\tserverCount++\n\t}\n\n\t// collect the ids from all servers within the given cluster\n\tvar ids []string\n\tvar result serverResult\n\tfor i := 0; i < serverCount; i++ {\n\t\tresult = <-resultCh\n\t\tif result.err != nil {\n\t\t\t// return early, an error has occured!\n\t\t\treturn nil, result.err\n\t\t}\n\t\tids = append(ids, result.ids...)\n\t}\n\n\tif len(ids) <= 1 {\n\t\treturn ids, nil // nothing to do\n\t}\n\n\t// sort and dedupe\n\tsort.Strings(ids)\n\tids = dedupStrings(ids)\n\n\treturn ids, nil\n}", "func NewListDisksBadRequest() *ListDisksBadRequest {\n\treturn &ListDisksBadRequest{}\n}", "func (r *ProjectsLocationsVolumesService) List(parent string) *ProjectsLocationsVolumesListCall {\n\tc := &ProjectsLocationsVolumesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func (c *volumeCommand) listVolumes(ctx context.Context, ns id.Namespace, vols []string) ([]*model.Volume, error) {\n\tuseIDs, err := c.config.UseIDs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !useIDs {\n\t\treturn c.client.GetNamespaceVolumesByName(ctx, ns, vols...)\n\t}\n\n\tvolIDs := []id.Volume{}\n\tfor _, uid := range vols {\n\t\tvolIDs = append(volIDs, id.Volume(uid))\n\t}\n\n\treturn 
c.client.GetNamespaceVolumesByUID(ctx, ns, volIDs...)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func ListVmWithEphemeralDisk(localPath string) ([]*v1.VirtualMachineInstance, error) {\n\tvar keys []*v1.VirtualMachineInstance\n\n\texists, err := FileExists(localPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif exists == false {\n\t\treturn nil, nil\n\t}\n\n\terr = filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() == false {\n\t\t\treturn nil\n\t\t}\n\n\t\trelativePath := strings.TrimPrefix(path, localPath+\"/\")\n\t\tif relativePath == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tdirs := strings.Split(relativePath, \"/\")\n\t\tif len(dirs) != 2 {\n\t\t\treturn nil\n\t\t}\n\n\t\tnamespace := dirs[0]\n\t\tdomain := dirs[1]\n\t\tif namespace == \"\" || domain == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tkeys = append(keys, v1.NewVMIReferenceFromNameWithNS(dirs[0], dirs[1]))\n\t\treturn nil\n\t})\n\n\treturn keys, err\n}", "func EncodeStorageVolumesCreateRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*spinregistry.StorageVolumesCreatePayload)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_volumes_create\", \"*spinregistry.StorageVolumesCreatePayload\", v)\n\t\t}\n\t\tbody := NewStorageVolumesCreateRequestBody(p)\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_volumes_create\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (in *DiskReqs) DeepCopy() *DiskReqs {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DiskReqs)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func 
Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func VolumesToKvmDiskArgs(volumes []types.Volume) []string {\n\targs := []string{}\n\n\tfor _, vol := range volumes {\n\t\tmountTag := vol.Name.String() // tag/channel name for virtio\n\t\tif vol.Kind == \"host\" {\n\t\t\t// eg. --9p=/home/jon/srcdir,tag\n\t\t\targ := \"--9p=\" + vol.Source + \",\" + mountTag\n\t\t\tlog.Printf(\"stage1: --disk argument: %#v\\n\", arg)\n\t\t\targs = append(args, arg)\n\t\t}\n\t}\n\n\treturn args\n}", "func (cl *Client) VolumeList(ctx context.Context, vla *csp.VolumeListArgs) ([]*csp.Volume, error) {\n\tvar svc, volumeType string\n\tif vla.StorageTypeName != \"\" {\n\t\tvar obj *models.CSPStorageType\n\t\tif svc, volumeType, obj = StorageTypeToServiceVolumeType(vla.StorageTypeName); obj == nil || svc != ServiceGCE {\n\t\t\treturn nil, fmt.Errorf(\"invalid storage type\")\n\t\t}\n\t\tvolumeType = fmt.Sprintf(volTypeURL, cl.projectID, cl.attrs[AttrZone].Value, volumeType)\n\t}\n\treturn cl.gceVolumeList(ctx, vla, volumeType)\n}", "func DiskPartitions(disk string) []*Partition {\n\tmsg := `\nThe DiskPartitions() function has been DEPRECATED and will be removed in the\n1.0 release of ghw. 
Please use the Disk.Partitions attribute.\n`\n\twarn(msg)\n\tctx := contextFromEnv()\n\treturn ctx.diskPartitions(disk)\n}", "func (c *Client) EncryptVolumes(ctx context.Context, req *netapppb.EncryptVolumesRequest, opts ...gax.CallOption) (*EncryptVolumesOperation, error) {\n\treturn c.internalClient.EncryptVolumes(ctx, req, opts...)\n}", "func GetServerVolumesListMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\tassert.Nil(err, \"Error getting server volume list\")\n\tassert.Equal(volumesIn, vOut, \"GetServerVolumesListMocked returned different server volumes\")\n\n\treturn vOut\n}", "func CreateListFileSystemsRequest() (request *ListFileSystemsRequest) {\n\trequest = &ListFileSystemsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"ListFileSystems\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func ListVolumes(ip string) (string, error) {\n\tlog.Printf(\"Listing volumes.\")\n\treturn ssh.InvokeCommand(ip, dockercli.ListVolumes)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func (m *MountNewCreateDisksParamsVMVolume) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = 
append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s ListVolumesInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (v *VolumesServiceMock) List(podUID string) (list *api.VolumeList, err error) {\n\targs := v.Called(podUID)\n\tx := args.Get(0)\n\tif x != nil {\n\t\tlist = x.(*api.VolumeList)\n\t}\n\terr = args.Error(1)\n\treturn\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func (b *Poloniex) GetVolumes() (vc VolumeCollection, err error) {\n\tr, err := b.client.do(\"GET\", \"public?command=return24hVolume\", nil, false)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(r, &vc); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func GetVolList(volumeID string) (*apis.ZFSVolumeList, error) {\n\tlistOptions := v1.ListOptions{\n\t\tLabelSelector: ZFSNodeKey + \"=\" + NodeID,\n\t}\n\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).List(listOptions)\n\n}", "func (c *Client) ListCDSVolume(queryArgs *ListCDSVolumeArgs) (*ListCDSVolumeResult, error) {\n\treturn ListCDSVolume(c, queryArgs)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func (m *VMAddDiskParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWhere(formats); err != nil {\n\t\tres = 
append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(ctx, opt, &listOpt)\n}", "func GetVolumes(dir string, query map[string]string) ([]lepton.NanosVolume, error) {\n\tvar vols []lepton.NanosVolume\n\n\tfiles, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn vols, err\n\t}\n\n\tfor _, info := range files {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilename := strings.TrimSuffix(info.Name(), \".raw\")\n\t\tnameParts := strings.Split(filename, lepton.VolumeDelimiter)\n\t\tif len(nameParts) < 2 { // invalid file name\n\t\t\tcontinue\n\t\t}\n\n\t\tfi, err := info.Info()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvols = append(vols, lepton.NanosVolume{\n\t\t\tID: nameParts[1],\n\t\t\tName: nameParts[0],\n\t\t\tLabel: nameParts[0],\n\t\t\tSize: lepton.Bytes2Human(fi.Size()),\n\t\t\tPath: path.Join(dir, info.Name()),\n\t\t\tCreatedAt: fi.ModTime().String(),\n\t\t})\n\t}\n\n\treturn filterVolume(vols, query)\n}", "func (s *StackEbrc) ListVolumes() ([]abstract.Volume, fail.Error) {\n\tlogrus.Debug(\"ebrc.Client.ListVolumes() called\")\n\tdefer logrus.Debug(\"ebrc.Client.ListVolumes() done\")\n\n\tvar volumes []abstract.Volume\n\n\torg, vdc, err := s.getOrgVdc()\n\tif err != nil {\n\t\treturn volumes, fail.Wrap(err, fmt.Sprintf(\"Error listing volumes\"))\n\t}\n\n\t// Check if network is already there\n\trefs, err := getLinks(org, \"vnd.vmware.vcloud.disk+xml\")\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error recovering network information\"))\n\t}\n\tfor _, ref := range refs {\n\t\t// FIXME: Add data\n\t\tdr, err := vdc.QueryDisk(ref.Name)\n\t\tif err == nil {\n\t\t\tthed, err := vdc.FindDiskByHREF(dr.Disk.HREF)\n\t\t\tif err == nil {\n\t\t\t\tvolumes = append(volumes, 
abstract.Volume{Name: ref.Name, ID: ref.ID, Size: thed.Disk.Size})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn volumes, nil\n}", "func (c *BlockVolumeClient) List(params *BlockVolumeParams) (*BlockVolumeList, error) {\n\tlist := &BlockVolumeList{}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/list\", params, list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}", "func (r ApiGetHyperflexVolumeListRequest) Tags(tags string) ApiGetHyperflexVolumeListRequest {\n\tr.tags = &tags\n\treturn r\n}", "func RunListDisk() {\n\n\t// dir, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\t// if err != nil {\n\t// \tlog.Fatal(err)\n\t// \treturn\n\t// }\n\n\t// lsscsipath := path.Join(dir, \"lsscsi\")\n\t// if _, err := os.Stat(lsscsipath); os.IsNotExist(err) {\n\t// \tlsscsipath = \"lsscsi\"\n\t// }\n\tlsscsipath := \"lsscsi\"\n\tcmd := exec.Command(lsscsipath, \"-s\", \"-g\")\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttimer := time.AfterFunc(10*time.Second, func() {\n\t\tcmd.Process.Kill()\n\t})\n\n\tscanner := bufio.NewScanner(stdout)\n\tvar hddinfo []string\n\tvar hddchanged bool\n\tvar wg sync.WaitGroup\n\tfor scanner.Scan() {\n\t\tss := scanner.Text()\n\t\tfmt.Println(ss)\n\t\thddinfo = append(hddinfo, ss)\n\t\tif !DetectData.MatchKey(ss) {\n\t\t\thddchanged = true\n\t\t}\n\t\tif !DetectData.ContainsKey(ss) {\n\t\t\t//\\s Matches any white-space character.\n\t\t\tr := regexp.MustCompile(`^([\\s\\S]{13})(disk[\\s\\S]{4})([\\s\\S]{9})([\\s\\S]{17})([\\s\\S]{6})([\\s\\S]{11})([\\s\\S]{11})([\\s\\S]+)$`)\n\t\t\tdiskinfos := r.FindStringSubmatch(ss)\n\t\t\tif len(diskinfos) == 9 {\n\t\t\t\tvar dddect = NewSyncDataDetect()\n\t\t\t\tdddect.detectHDD.Locpath = strings.Trim(diskinfos[1], \" \")\n\t\t\t\tdddect.detectHDD.Type = strings.Trim(diskinfos[2], \" \")\n\t\t\t\tdddect.detectHDD.Manufacture = strings.Trim(diskinfos[3], \" 
\")\n\t\t\t\tdddect.detectHDD.Model = strings.Trim(diskinfos[4], \" \")\n\t\t\t\tdddect.detectHDD.Version = strings.Trim(diskinfos[5], \" \")\n\t\t\t\tdddect.detectHDD.LinuxName = strings.Trim(diskinfos[6], \" \")\n\t\t\t\tdddect.detectHDD.SGLibName = strings.Trim(diskinfos[7], \" \")\n\t\t\t\tdddect.detectHDD.Size = strings.Trim(diskinfos[8], \" \")\n\n\t\t\t\tif strings.Index(dddect.detectHDD.LinuxName, `/dev/`) == -1 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t//hddchanged = true\n\t\t\t\tDetectData.AddValue(ss, dddect)\n\t\t\t\twg.Add(1)\n\t\t\t\tgo dddect.ReadDataFromSmartCtl(&wg)\n\t\t\t}\n\t\t} else {\n\t\t\tif vv, ok := DetectData.Get(ss); ok {\n\t\t\t\tif len(vv.detectHDD.UILabel) == 0 && len(vv.detectHDD.Otherinfo) == 0 {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo vv.ReadDataFromSmartCtl(&wg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttimer.Stop()\n\tDetectData.RemoveOld(hddinfo)\n\n\ttime.Sleep(4 * time.Second)\n\n\tif hddchanged {\n\t\tfmt.Print(\"changed!\")\n\t\tcclist, err := configxmldata.Conf.GetCardListIndex()\n\t\tif err == nil {\n\t\t\tfor _, i := range cclist {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo SASHDDinfo.RunCardInfo(i, &wg)\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < 30; i++ {\n\t\t\tif waitTimeout(&wg, 10*time.Second) {\n\t\t\t\tfmt.Println(\"Timed out waiting for wait group\")\n\t\t\t\tMergeCalibration()\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Wait group finished\")\n\t\t\t\tMergeCalibration()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\twaitTimeout(&wg, 300*time.Second)\n\t}\n\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else 
{\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (s *ContainerDefinition) SetVolumesFrom(v []*VolumeFrom) *ContainerDefinition {\n\ts.VolumesFrom = v\n\treturn s\n}", "func (client *Client) ListVolumes(all bool) ([]api.Volume, error) {\n\tif all {\n\t\treturn client.listAllVolumes()\n\t}\n\treturn client.listMonitoredVolumes()\n\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func (c *Client) EncryptVolumesOperation(name string) *EncryptVolumesOperation {\n\treturn c.internalClient.EncryptVolumesOperation(name)\n}", "func (s *SnapshotsServiceOp) ListVolume(opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(opt, &listOpt)\n}", "func (d ImagefsDriver) List() (*volume.ListResponse, error) {\n\tcontainers, err := d.cli.ContainerList(context.Background(), types.ContainerListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(filters.Arg(\"label\", \"com.docker.imagefs.version\")),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tresponse := 
&volume.ListResponse{}\n\tfor i := range containers {\n\t\tresponse.Volumes = append(response.Volumes, &volume.Volume{\n\t\t\t// TODO(rabrams) fall back to id if no names\n\t\t\tName: containers[i].Names[0],\n\t\t})\n\t}\n\treturn response, nil\n}", "func (c *Client) BuildListDerivativeMarketsRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListDerivativeMarketsRestAPIPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"RestAPI\", \"listDerivativeMarkets\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func NewListRequest() *todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}" ]
[ "0.80168897", "0.7660719", "0.76466656", "0.70520216", "0.65116286", "0.6492416", "0.63914317", "0.60159266", "0.54290915", "0.5409692", "0.54077804", "0.5343861", "0.53437334", "0.53437334", "0.5277095", "0.5174668", "0.51579815", "0.5143447", "0.5138751", "0.5115184", "0.50981057", "0.50653726", "0.50001943", "0.49786648", "0.49753082", "0.49153262", "0.48564062", "0.48441774", "0.48247057", "0.4794454", "0.4792675", "0.47848618", "0.47832298", "0.47546563", "0.4727512", "0.47219518", "0.47202027", "0.46991214", "0.4684258", "0.46823883", "0.46530527", "0.46456957", "0.46251872", "0.4619421", "0.46056667", "0.45929223", "0.45901254", "0.45803759", "0.4570262", "0.45676464", "0.45599183", "0.455881", "0.45439348", "0.45412782", "0.45381114", "0.45343804", "0.4531084", "0.45258623", "0.45231962", "0.45045453", "0.44855765", "0.44742623", "0.44569284", "0.44564378", "0.4431203", "0.44005126", "0.43761835", "0.43358904", "0.4334283", "0.4331448", "0.4327268", "0.43178383", "0.43168968", "0.4311609", "0.43031347", "0.42970785", "0.42870164", "0.4284801", "0.42728075", "0.42492947", "0.42375243", "0.42353222", "0.42268923", "0.42266443", "0.42213425", "0.42211014", "0.42200163", "0.421963", "0.4209596", "0.4203428", "0.42034274", "0.41989097", "0.41986328", "0.41882992", "0.4185246", "0.4177736", "0.41633543", "0.41590753", "0.4154003", "0.41513798" ]
0.88825774
0
Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest is an autogenerated conversion function.
func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error { return autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, 
out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (a *Client) ListVolumes(params *ListVolumesParams) (*ListVolumesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListVolumesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListVolumes\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/volumes\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListVolumesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListVolumesOK), nil\n\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. 
Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func NewListDisksBadRequest() *ListDisksBadRequest {\n\treturn &ListDisksBadRequest{}\n}", "func (c *Client) BuildStorageVolumesListRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: StorageVolumesListSpinRegistryPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"spin-registry\", \"storage_volumes_list\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func CreateListDisks00Request() (request *ListDisks00Request) {\n\trequest = &ListDisks00Request{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EcsDemo\", \"2019-06-20\", \"ListDisks00\", \"\", \"\")\n\treturn\n}", "func (c *restClient) ListVolumes(ctx context.Context, req 
*netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\tit := &VolumeIterator{}\n\treq = proto.Clone(req).(*netapppb.ListVolumesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*netapppb.Volume, string, error) {\n\t\tresp := &netapppb.ListVolumesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetOrderBy() != \"\" {\n\t\t\tparams.Add(\"orderBy\", fmt.Sprintf(\"%v\", req.GetOrderBy()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = 
googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetVolumes(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (s *VolumeListener) List(inctx context.Context, in *protocol.VolumeListRequest) (_ *protocol.VolumeListResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot list 
volume\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tjob, err := PrepareJob(inctx, in.GetTenantId(), \"/volumes/list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\thandler := VolumeHandler(job)\n\tvolumes, xerr := handler.List(in.GetAll())\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\t// Map resources.Volume to protocol.Volume\n\tvar pbvolumes []*protocol.VolumeInspectResponse\n\tfor _, v := range volumes {\n\t\tpbVolume, xerr := v.ToProtocol(ctx)\n\t\tif xerr != nil {\n\t\t\treturn nil, xerr\n\t\t}\n\n\t\tpbvolumes = append(pbvolumes, pbVolume)\n\t}\n\trv := &protocol.VolumeListResponse{Volumes: pbvolumes}\n\treturn rv, nil\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func ListVolumes(\n\tctx context.Context,\n\tc csi.ControllerClient,\n\tversion *csi.Version,\n\tmaxEntries uint32,\n\tstartingToken string,\n\tcallOpts ...grpc.CallOption) (\n\tvolumes []*csi.VolumeInfo, nextToken string, err error) {\n\n\treq := &csi.ListVolumesRequest{\n\t\tMaxEntries: maxEntries,\n\t\tStartingToken: startingToken,\n\t\tVersion: version,\n\t}\n\n\tres, err := c.ListVolumes(ctx, req, callOpts...)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tresult := res.GetResult()\n\tnextToken = result.NextToken\n\tentries := result.Entries\n\n\t// check to see if there are zero entries\n\tif len(result.Entries) == 0 {\n\t\treturn nil, nextToken, nil\n\t}\n\n\tvolumes = make([]*csi.VolumeInfo, len(entries))\n\n\tfor x, e := range entries {\n\t\tif volumes[x] = e.GetVolumeInfo(); volumes[x] == nil 
{\n\t\t\treturn nil, \"\", ErrNilVolumeInfo\n\t\t}\n\t}\n\n\treturn volumes, nextToken, nil\n}", "func NewListDisksOK() *ListDisksOK {\n\treturn &ListDisksOK{}\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (a *Client) PostContainersVolumesList(params *PostContainersVolumesListParams, authInfo runtime.ClientAuthInfoWriter) (*PostContainersVolumesListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostContainersVolumesListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostContainersVolumesList\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/containers/volumes/list\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostContainersVolumesListReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostContainersVolumesListOK), nil\n\n}", "func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, options *DiskEncryptionSetsListOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (r *RequestAPI) ListRequestV1(ctx context.Context, req *desc.ListRequestsV1Request) (*desc.ListRequestsV1Response, error) {\n\tlog.Printf(\"Got list request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"ListRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\trequests []models.Request\n\t\terr error\n\t)\n\n\tif req.SearchQuery != \"\" { // ideally would move search to a separate endpoint, so it's easier to extend\n\t\trequests, err = r.searcher.Search(ctx, req.SearchQuery, req.Limit, req.Offset)\n\t} else {\n\t\trequests, err = r.repo.List(ctx, req.Limit, req.Offset)\n\t}\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tStr(\"endpoint\", \"ListRequestV1\").\n\t\t\tUint64(\"limit\", req.Limit).\n\t\t\tUint64(\"offset\", req.Offset).\n\t\t\tMsgf(\"Failed to list requests\")\n\t\tr.producer.Send(producer.NewEvent(ctx, 0, producer.ReadEvent, err))\n\t\treturn nil, err\n\t}\n\n\tret := make([]*desc.Request, 0, len(requests))\n\teventMsgs := make([]producer.EventMsg, 0, len(requests))\n\n\tfor _, req := range requests {\n\t\tret = append(ret, &desc.Request{\n\t\t\tId: req.Id,\n\t\t\tUserId: req.UserId,\n\t\t\tType: req.Type,\n\t\t\tText: req.Text,\n\t\t})\n\t\teventMsgs = append(eventMsgs, producer.NewEvent(ctx, req.Id, producer.ReadEvent, nil))\n\t\tr.producer.Send(eventMsgs...)\n\n\t}\n\tr.metrics.IncList(1, \"ListRequestV1\")\n\treturn &desc.ListRequestsV1Response{\n\t\tRequests: ret,\n\t}, nil\n}", "func (so ServiceOffering) ListRequest() (ListCommand, error) {\n\t// Restricted cannot be applied here because it really has three 
states\n\treq := &ListServiceOfferings{\n\t\tID: so.ID,\n\t\tName: so.Name,\n\t\tSystemVMType: so.SystemVMType,\n\t}\n\n\tif so.IsSystem {\n\t\treq.IsSystem = &so.IsSystem\n\t}\n\n\treturn req, nil\n}", "func (driver *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\tklog.V(4).Infof(\"ListVolumes: called with args %#v\", req)\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func VolumesToKvmDiskArgs(volumes []types.Volume) []string {\n\targs := []string{}\n\n\tfor _, vol := range volumes {\n\t\tmountTag := vol.Name.String() // tag/channel name for virtio\n\t\tif vol.Kind == \"host\" {\n\t\t\t// eg. --9p=/home/jon/srcdir,tag\n\t\t\targ := \"--9p=\" + vol.Source + \",\" + mountTag\n\t\t\tlog.Printf(\"stage1: --disk argument: %#v\\n\", arg)\n\t\t\targs = append(args, arg)\n\t\t}\n\t}\n\n\treturn args\n}", "func (v *VolumeService) VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) {\n\treturn volume.VolumeListOKBody{}, nil\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (client VolumesClient) List(ctx context.Context, location string, storageSubSystem string, storagePool string, filter string) (result VolumeListPage, err error) {\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, location, storageSubSystem, storagePool, filter)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.vl.Response = autorest.Response{Response: resp}\n\t\terr = 
autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.vl, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func NewListStorageV1alpha1VolumeAttachmentOK() *ListStorageV1alpha1VolumeAttachmentOK {\n\n\treturn &ListStorageV1alpha1VolumeAttachmentOK{}\n}", "func (cl *Client) gceVolumeList(ctx context.Context, vla *csp.VolumeListArgs, volumeType string) ([]*csp.Volume, error) {\n\tcomputeService, err := cl.getComputeService(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfilter := \"\"\n\tif volumeType != \"\" {\n\t\tfilter = fmt.Sprintf(`type=\"%s\"`, volumeType)\n\t}\n\tfor _, tag := range vla.Tags {\n\t\tif filter != \"\" {\n\t\t\tfilter += \" AND \"\n\t\t}\n\t\tkv := strings.SplitN(tag, \":\", 2)\n\t\tif len(kv) == 1 { // if just \"key\" is specified then the existence of a label with that key will be matched\n\t\t\tfilter += fmt.Sprintf(\"labels.%s:*\", kv[0])\n\t\t} else { // if specified here as \"key:value\" then both the key and value will be matched\n\t\t\tfilter += fmt.Sprintf(`labels.%s=\"%s\"`, kv[0], kv[1])\n\t\t}\n\t}\n\treq := computeService.Disks().List(cl.projectID, cl.attrs[AttrZone].Value).Filter(filter)\n\tresult := []*csp.Volume{}\n\tif err = req.Pages(ctx, func(page *compute.DiskList) error {\n\t\tfor _, disk := range page.Items {\n\t\t\tvol := gceDiskToVolume(disk)\n\t\t\tresult = append(result, vol)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list GC disks: %w\", err)\n\t}\n\treturn result, nil\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (cs *controller) 
ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (c *restClient) EncryptVolumes(ctx context.Context, req *netapppb.EncryptVolumesRequest, opts ...gax.CallOption) (*EncryptVolumesOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tjsonReq, err := m.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v:encrypt\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil 
{\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &EncryptVolumesOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func ListVmWithEphemeralDisk(localPath string) ([]*v1.VirtualMachineInstance, error) {\n\tvar keys []*v1.VirtualMachineInstance\n\n\texists, err := FileExists(localPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif exists == false {\n\t\treturn nil, nil\n\t}\n\n\terr = filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() == false {\n\t\t\treturn nil\n\t\t}\n\n\t\trelativePath := strings.TrimPrefix(path, localPath+\"/\")\n\t\tif relativePath == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tdirs := strings.Split(relativePath, \"/\")\n\t\tif len(dirs) != 2 {\n\t\t\treturn nil\n\t\t}\n\n\t\tnamespace := dirs[0]\n\t\tdomain := dirs[1]\n\t\tif namespace == \"\" || domain == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tkeys = append(keys, v1.NewVMIReferenceFromNameWithNS(dirs[0], dirs[1]))\n\t\treturn nil\n\t})\n\n\treturn keys, err\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func (d *lvm) ListVolumes() ([]Volume, error) {\n\tvols := make(map[string]Volume)\n\n\tcmd := exec.Command(\"lvs\", \"--noheadings\", \"-o\", \"lv_name\", d.config[\"lvm.vg_name\"])\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\tfor scanner.Scan() {\n\t\trawName := strings.TrimSpace(scanner.Text())\n\t\tvar volType VolumeType\n\t\tvar volName string\n\n\t\tfor _, volumeType := range 
d.Info().VolumeTypes {\n\t\t\tprefix := fmt.Sprintf(\"%s_\", volumeType)\n\t\t\tif strings.HasPrefix(rawName, prefix) {\n\t\t\t\tvolType = volumeType\n\t\t\t\tvolName = strings.TrimPrefix(rawName, prefix)\n\t\t\t}\n\t\t}\n\n\t\tif volType == \"\" {\n\t\t\td.logger.Debug(\"Ignoring unrecognised volume type\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore unrecognised volume.\n\t\t}\n\n\t\tlvSnapSepCount := strings.Count(volName, lvmSnapshotSeparator)\n\t\tif lvSnapSepCount%2 != 0 {\n\t\t\t// If snapshot separator count is odd, then this means we have a lone lvmSnapshotSeparator\n\t\t\t// that is not part of the lvmEscapedHyphen pair, which means this volume is a snapshot.\n\t\t\td.logger.Debug(\"Ignoring snapshot volume\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore snapshot volumes.\n\t\t}\n\n\t\tisBlock := strings.HasSuffix(volName, lvmBlockVolSuffix)\n\n\t\tif volType == VolumeTypeVM && !isBlock {\n\t\t\tcontinue // Ignore VM filesystem volumes as we will just return the VM's block volume.\n\t\t}\n\n\t\t// Unescape raw LVM name to storage volume name. Safe to do now we know we are not dealing\n\t\t// with snapshot volumes.\n\t\tvolName = strings.Replace(volName, lvmEscapedHyphen, \"-\", -1)\n\n\t\tcontentType := ContentTypeFS\n\t\tif volType == VolumeTypeCustom && strings.HasSuffix(volName, lvmISOVolSuffix) {\n\t\t\tcontentType = ContentTypeISO\n\t\t\tvolName = strings.TrimSuffix(volName, lvmISOVolSuffix)\n\t\t} else if volType == VolumeTypeVM || isBlock {\n\t\t\tcontentType = ContentTypeBlock\n\t\t\tvolName = strings.TrimSuffix(volName, lvmBlockVolSuffix)\n\t\t}\n\n\t\t// If a new volume has been found, or the volume will replace an existing image filesystem volume\n\t\t// then proceed to add the volume to the map. 
We allow image volumes to overwrite existing\n\t\t// filesystem volumes of the same name so that for VM images we only return the block content type\n\t\t// volume (so that only the single \"logical\" volume is returned).\n\t\texistingVol, foundExisting := vols[volName]\n\t\tif !foundExisting || (existingVol.Type() == VolumeTypeImage && existingVol.ContentType() == ContentTypeFS) {\n\t\t\tv := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config)\n\n\t\t\tif contentType == ContentTypeFS {\n\t\t\t\tv.SetMountFilesystemProbe(true)\n\t\t\t}\n\n\t\t\tvols[volName] = v\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Unexpected duplicate volume %q found\", volName)\n\t}\n\n\terrMsg, err := io.ReadAll(stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed getting volume list: %v: %w\", strings.TrimSpace(string(errMsg)), err)\n\t}\n\n\tvolList := make([]Volume, len(vols))\n\tfor _, v := range vols {\n\t\tvolList = append(volList, v)\n\t}\n\n\treturn volList, nil\n}", "func (m *VMAddDiskParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWhere(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cl *Client) VolumeList(ctx context.Context, vla *csp.VolumeListArgs) ([]*csp.Volume, error) {\n\tvar svc, volumeType string\n\tif vla.StorageTypeName != \"\" {\n\t\tvar obj *models.CSPStorageType\n\t\tif svc, volumeType, obj = StorageTypeToServiceVolumeType(vla.StorageTypeName); obj == nil || svc != ServiceGCE {\n\t\t\treturn nil, fmt.Errorf(\"invalid storage type\")\n\t\t}\n\t\tvolumeType = fmt.Sprintf(volTypeURL, cl.projectID, cl.attrs[AttrZone].Value, volumeType)\n\t}\n\treturn cl.gceVolumeList(ctx, vla, volumeType)\n}", "func 
Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func CreateListFileSystemsRequest() (request *ListFileSystemsRequest) {\n\trequest = &ListFileSystemsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"ListFileSystems\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func List(d Driver) (*volume.ListResponse, error) {\n\tlog.Debugf(\"Entering List\")\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tvar vols []*volume.Volume\n\tfor name, v := range d.GetVolumes() {\n\t\tlog.Debugf(\"Volume found: %s\", v)\n\t\tm, err := getMount(d, v.GetMount())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvols = append(vols, &volume.Volume{Name: name, Status: v.GetStatus(), Mountpoint: m.GetPath()})\n\t}\n\treturn &volume.ListResponse{Volumes: vols}, nil\n}", "func (m *MountNewCreateDisksParamsVMVolume) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {\n\tctx, cancel 
:= context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tserverCh, err := cluster.ServerIterator(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype serverResult struct {\n\t\tids []string\n\t\terr error\n\t}\n\tresultCh := make(chan serverResult)\n\n\tvar action listVdisksAction\n\tif pred == nil {\n\t\taction.filter = filterListedVdiskID\n\t} else {\n\t\taction.filter = func(str string) (string, bool) {\n\t\t\tstr, ok := filterListedVdiskID(str)\n\t\t\tif !ok {\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t\treturn str, pred(str)\n\t\t}\n\t}\n\n\tvar serverCount int\n\tvar reply interface{}\n\tfor server := range serverCh {\n\t\tserver := server\n\t\tgo func() {\n\t\t\tvar result serverResult\n\t\t\tlog.Infof(\"listing all vdisks stored on %v\", server.Config())\n\t\t\treply, result.err = server.Do(action)\n\t\t\tif result.err == nil && reply != nil {\n\t\t\t\t// [NOTE] this line of code relies on the fact that our\n\t\t\t\t// custom `listVdisksAction` type returns a `[]string` value as a reply,\n\t\t\t\t// as soon as that logic changes, this line will start causing trouble.\n\t\t\t\tresult.ids = reply.([]string)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase resultCh <- result:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t}()\n\t\tserverCount++\n\t}\n\n\t// collect the ids from all servers within the given cluster\n\tvar ids []string\n\tvar result serverResult\n\tfor i := 0; i < serverCount; i++ {\n\t\tresult = <-resultCh\n\t\tif result.err != nil {\n\t\t\t// return early, an error has occured!\n\t\t\treturn nil, result.err\n\t\t}\n\t\tids = append(ids, result.ids...)\n\t}\n\n\tif len(ids) <= 1 {\n\t\treturn ids, nil // nothing to do\n\t}\n\n\t// sort and dedupe\n\tsort.Strings(ids)\n\tids = dedupStrings(ids)\n\n\treturn ids, nil\n}", "func (s *Module) DiskList() ([]pkg.VDisk, error) {\n\tpools, err := s.diskPools()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar disks []pkg.VDisk\n\tfor _, pool := range pools {\n\n\t\titems, err := os.ReadDir(pool)\n\t\tif 
err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list virtual disks\")\n\t\t}\n\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo, err := item.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get file info for '%s'\", item.Name())\n\t\t\t}\n\n\t\t\tdisks = append(disks, pkg.VDisk{\n\t\t\t\tPath: filepath.Join(pool, item.Name()),\n\t\t\t\tSize: info.Size(),\n\t\t\t})\n\t\t}\n\n\t\treturn disks, nil\n\t}\n\n\treturn disks, nil\n}", "func (r ApiGetHyperflexVolumeListRequest) Tags(tags string) ApiGetHyperflexVolumeListRequest {\n\tr.tags = &tags\n\treturn r\n}", "func (c *Client) EncryptVolumes(ctx context.Context, req *netapppb.EncryptVolumesRequest, opts ...gax.CallOption) (*EncryptVolumesOperation, error) {\n\treturn c.internalClient.EncryptVolumes(ctx, req, opts...)\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func (d *MinioDriver) List(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tvar vols []*volume.Volume\n\tfor name, v := range d.volumes {\n\t\tvols = append(vols,\n\t\t\t&volume.Volume{\n\t\t\t\tName: name,\n\t\t\t\tMountpoint: v.mountpoint,\n\t\t\t})\n\t}\n\treturn volumeResp(\"\", \"\", vols, capability, 
\"\")\n}", "func (m *ListDocsV1Request) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Limit\n\n\t// no validation rules for Offset\n\n\treturn nil\n}", "func CreateListAvailableFileSystemTypesRequest() (request *ListAvailableFileSystemTypesRequest) {\n\trequest = &ListAvailableFileSystemTypesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EHPC\", \"2018-04-12\", \"ListAvailableFileSystemTypes\", \"ehs\", \"openAPI\")\n\treturn\n}", "func EncodeStorageVolumesDeleteRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := v.(*spinregistry.StorageVolumesDeletePayload)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_volumes_delete\", \"*spinregistry.StorageVolumesDeletePayload\", v)\n\t\t}\n\t\tbody := NewStorageVolumesDeleteRequestBody(p)\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_volumes_delete\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func NewCmdDiskList() *cobra.Command {\n\treq := base.BizClient.NewDescribeUDiskRequest()\n\ttypeMap := map[string]string{\n\t\t\"DataDisk\": \"Oridinary-Data-Disk\",\n\t\t\"SystemDisk\": \"Oridinary-System-Disk\",\n\t\t\"SSDDataDisk\": \"SSD-Data-Disk\",\n\t}\n\tarkModeMap := map[string]string{\n\t\t\"Yes\": \"true\",\n\t\t\"No\": \"false\",\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"List udisk instance\",\n\t\tLong: \"List udisk instance\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfor key, val := range typeMap {\n\t\t\t\tif *req.DiskType == val {\n\t\t\t\t\t*req.DiskType = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp, 
err := base.BizClient.DescribeUDisk(req)\n\t\t\tif err != nil {\n\t\t\t\tbase.HandleError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlist := []DiskRow{}\n\t\t\tfor _, disk := range resp.DataSet {\n\t\t\t\trow := DiskRow{\n\t\t\t\t\tResourceID: disk.UDiskId,\n\t\t\t\t\tName: disk.Name,\n\t\t\t\t\tGroup: disk.Tag,\n\t\t\t\t\tSize: fmt.Sprintf(\"%dGB\", disk.Size),\n\t\t\t\t\tType: typeMap[disk.DiskType],\n\t\t\t\t\tEnableDataArk: arkModeMap[disk.UDataArkMode],\n\t\t\t\t\tMountUHost: fmt.Sprintf(\"%s/%s\", disk.UHostName, disk.UHostIP),\n\t\t\t\t\tMountPoint: disk.DeviceName,\n\t\t\t\t\tState: disk.Status,\n\t\t\t\t\tCreationTime: base.FormatDate(disk.CreateTime),\n\t\t\t\t\tExpirationTime: base.FormatDate(disk.ExpiredTime),\n\t\t\t\t}\n\t\t\t\tif disk.UHostIP == \"\" {\n\t\t\t\t\trow.MountUHost = \"\"\n\t\t\t\t}\n\t\t\t\tlist = append(list, row)\n\t\t\t}\n\t\t\tif global.json {\n\t\t\t\tbase.PrintJSON(list)\n\t\t\t} else {\n\t\t\t\tbase.PrintTableS(list)\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\treq.UDiskId = flags.String(\"resource-id\", \"\", \"Optional. Resource ID of the udisk to search\")\n\treq.DiskType = flags.String(\"udisk-type\", \"\", \"Optional. Optional. Type of the udisk to search. 'Oridinary-Data-Disk','Oridinary-System-Disk' or 'SSD-Data-Disk'\")\n\treq.Offset = cmd.Flags().Int(\"offset\", 0, \"Optional. Offset\")\n\treq.Limit = cmd.Flags().Int(\"limit\", 50, \"Optional. 
Limit\")\n\tflags.SetFlagValues(\"udisk-type\", \"Oridinary-Data-Disk\", \"Oridinary-System-Disk\", \"SSD-Data-Disk\")\n\treturn cmd\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (a *HyperflexApiService) GetHyperflexDriveList(ctx context.Context) ApiGetHyperflexDriveListRequest {\n\treturn ApiGetHyperflexDriveListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (digitalocean DigitalOcean) ListVolumes() ([]godo.Volume, error) {\n\tclient, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolumes, _, err := client.client.Storage.ListVolumes(client.context, &godo.ListVolumeParams{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn volumes, err\n}", "func (s stack) ListVolumes(ctx context.Context) ([]*abstract.Volume, fail.Error) {\n\tif valid.IsNil(s) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\n\treturn nil, fail.NotImplementedError(\"implement me\")\n}", "func (d *defaultDataVolumeManager) ListDataVolumes(ctx context.Context, kubeconfig []byte, listOpts ...client.ListOption) (*cdicorev1alpha1.DataVolumeList, error) {\n\tc, namespace, err := d.client.GetClient(kubeconfig)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create kubevirt client\")\n\t}\n\n\tdvList := cdicorev1alpha1.DataVolumeList{}\n\tif err := c.List(ctx, &dvList, listOpts...); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not list DataVolumes in namespace %s\", namespace)\n\t}\n\n\treturn &dvList, nil\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func (s *ContainerDefinition) SetVolumesFrom(v []*VolumeFrom) *ContainerDefinition {\n\ts.VolumesFrom = v\n\treturn s\n}", "func NewListRequest() 
*todopb.ListRequest {\n\tmessage := &todopb.ListRequest{}\n\treturn message\n}", "func (r ListVolumesRequest) Send(ctx context.Context) (*ListVolumesResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ListVolumesResponse{\n\t\tListVolumesOutput: r.Request.Data.(*ListVolumesOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsListByResourceGroupOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (srv *VolumeService) List() ([]api.Volume, error) {\n\treturn srv.provider.ListVolumes()\n}", "func GetVolList(volumeID string) (*apis.ZFSVolumeList, error) {\n\tlistOptions := v1.ListOptions{\n\t\tLabelSelector: ZFSNodeKey + \"=\" + NodeID,\n\t}\n\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).List(listOptions)\n\n}", "func NewListZonesRequest(server string) (*http.Request, error) {\n\tvar 
err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/zone\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func GetVolumesFromClusterNameV2(cluster string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?cluster.name=\" + cluster\n\treturn getVolumesV2(query)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func CreateListDAGVersionsRequest() (request *ListDAGVersionsRequest) {\n\trequest = &ListDAGVersionsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dms-enterprise\", \"2018-11-01\", \"ListDAGVersions\", \"dms-enterprise\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (in *DiskReqs) DeepCopy() *DiskReqs {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DiskReqs)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func EncodeStorageVolumesCreateRequest(encoder func(*http.Request) goahttp.Encoder) func(*http.Request, interface{}) error {\n\treturn func(req *http.Request, v interface{}) error {\n\t\tp, ok := 
v.(*spinregistry.StorageVolumesCreatePayload)\n\t\tif !ok {\n\t\t\treturn goahttp.ErrInvalidType(\"spin-registry\", \"storage_volumes_create\", \"*spinregistry.StorageVolumesCreatePayload\", v)\n\t\t}\n\t\tbody := NewStorageVolumesCreateRequestBody(p)\n\t\tif err := encoder(req).Encode(&body); err != nil {\n\t\t\treturn goahttp.ErrEncodingError(\"spin-registry\", \"storage_volumes_create\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (s *DataStore) ListVolumes() (map[string]*longhorn.Volume, error) {\n\titemMap := make(map[string]*longhorn.Volume)\n\n\tlist, err := s.ListVolumesRO()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, itemRO := range list {\n\t\t// Cannot use cached object from lister\n\t\titemMap[itemRO.Name] = itemRO.DeepCopy()\n\t}\n\treturn itemMap, nil\n}", "func (h *ApiHandler) handleListVolumes(c echo.Context) error {\n\tbuilder := h.Builder(c)\n\n\tvar kalmPVCList v1.PersistentVolumeClaimList\n\tif err := builder.List(&kalmPVCList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar kalmPVList v1.PersistentVolumeList\n\tif err := builder.List(&kalmPVList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkalmPVMap := make(map[string]v1.PersistentVolume)\n\tfor _, kalmPV := range kalmPVList.Items {\n\t\tkalmPVMap[kalmPV.Name] = kalmPV\n\t}\n\n\trespVolumes := []resources.Volume{}\n\tfor _, kalmPVC := range kalmPVCList.Items {\n\t\trespVolume, err := builder.BuildVolumeResponse(kalmPVC, kalmPVMap[kalmPVC.Spec.VolumeName])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespVolumes = append(respVolumes, *respVolume)\n\t}\n\n\treturn c.JSON(200, respVolumes)\n}", "func (a *Client) WatchNetworkingV1beta1NamespacedIngressList(params *WatchNetworkingV1beta1NamespacedIngressListParams) (*WatchNetworkingV1beta1NamespacedIngressListOK, error) {\n\t// TODO: Validate the 
params before sending\n\tif params == nil {\n\t\tparams = NewWatchNetworkingV1beta1NamespacedIngressListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"watchNetworkingV1beta1NamespacedIngressList\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses\",\n\t\tProducesMediaTypes: []string{\"application/json\", \"application/json;stream=watch\", \"application/vnd.kubernetes.protobuf\", \"application/vnd.kubernetes.protobuf;stream=watch\", \"application/yaml\"},\n\t\tConsumesMediaTypes: []string{\"*/*\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &WatchNetworkingV1beta1NamespacedIngressListReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*WatchNetworkingV1beta1NamespacedIngressListOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for watchNetworkingV1beta1NamespacedIngressList: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) {\n\tlistopt := metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.CoreV1().PersistentVolumes().List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}", "func (c *Core) ListVolumeName(labels map[string]string) ([]string, error) {\n\tvar names []string\n\n\tvolumes, err := c.ListVolumes(labels)\n\tif err != nil {\n\t\treturn names, err\n\t}\n\n\tfor _, v := range volumes {\n\t\tnames = append(names, v.Name)\n\t}\n\n\treturn names, nil\n}", "func (c *Client) BuildListDerivativeMarketsRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: ListDerivativeMarketsRestAPIPath()}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"RestAPI\", \"listDerivativeMarkets\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}", "func DiskPartitions(disk string) []*Partition {\n\tmsg := `\nThe DiskPartitions() function has been DEPRECATED and will be removed in the\n1.0 release of ghw. 
Please use the Disk.Partitions attribute.\n`\n\twarn(msg)\n\tctx := contextFromEnv()\n\treturn ctx.diskPartitions(disk)\n}", "func GetVolumesFromStyleV2(style string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?state=\" + style\n\treturn getVolumesV2(query)\n}", "func (d *DirDriver) List() (*volume.ListResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit List() endpoint\")\n\n\tvols := new(volume.ListResponse)\n\tvols.Volumes = []*volume.Volume{}\n\n\tfor _, vol := range d.volumes {\n\t\tnewVol := new(volume.Volume)\n\t\tnewVol.Name = vol.name\n\t\tnewVol.Mountpoint = vol.path\n\t\tnewVol.CreatedAt = vol.createTime.String()\n\t\tvols.Volumes = append(vols.Volumes, newVol)\n\t\tlogrus.Debugf(\"Adding volume %s to list response\", newVol.Name)\n\t}\n\n\treturn vols, nil\n}", "func GetServerVolumesListMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\tassert.Nil(err, \"Error getting server volume list\")\n\tassert.Equal(volumesIn, vOut, \"GetServerVolumesListMocked returned different server volumes\")\n\n\treturn vOut\n}", "func Convert_v1alpha4_DiskEncryptionSetParameters_To_v1beta1_DiskEncryptionSetParameters(in *DiskEncryptionSetParameters, out *v1beta1.DiskEncryptionSetParameters, s conversion.Scope) error {\n\treturn autoConvert_v1alpha4_DiskEncryptionSetParameters_To_v1beta1_DiskEncryptionSetParameters(in, out, s)\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes 
VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func (r ApiGetHyperflexDriveListRequest) Tags(tags string) ApiGetHyperflexDriveListRequest {\n\tr.tags = &tags\n\treturn r\n}" ]
[ "0.7903982", "0.7738413", "0.7081426", "0.7036308", "0.6367281", "0.62083286", "0.6083195", "0.6071158", "0.6018601", "0.59098595", "0.5379584", "0.52960646", "0.5229448", "0.51603013", "0.515112", "0.513708", "0.5069635", "0.5062731", "0.49963173", "0.4978222", "0.49534005", "0.49398738", "0.49353597", "0.48958898", "0.4861518", "0.48598367", "0.48480976", "0.4847926", "0.48062196", "0.47767574", "0.47586393", "0.4743821", "0.47414216", "0.47300583", "0.47154188", "0.47038445", "0.4700447", "0.46965677", "0.46593937", "0.46203333", "0.46203333", "0.4608313", "0.46080676", "0.45950145", "0.458876", "0.45535764", "0.4497449", "0.4488984", "0.4473814", "0.44511312", "0.44280964", "0.4413547", "0.4401047", "0.43921462", "0.4391725", "0.43494958", "0.43471938", "0.4339285", "0.43346116", "0.4334283", "0.4332457", "0.43322244", "0.43043026", "0.4282199", "0.42774642", "0.4275363", "0.4273241", "0.42636085", "0.4259599", "0.4245813", "0.42330974", "0.42325082", "0.42296925", "0.4228383", "0.42194608", "0.42156637", "0.421347", "0.42053518", "0.42019007", "0.41952714", "0.41878587", "0.41859075", "0.4182872", "0.41756928", "0.41508317", "0.41354987", "0.41211057", "0.411983", "0.411925", "0.4116668", "0.41144288", "0.41123265", "0.4104105", "0.40982234", "0.4096866", "0.40897593", "0.40838885", "0.4080841", "0.4079378", "0.40588787" ]
0.8952377
0
Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse is an autogenerated conversion function.
func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error { return autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. 
Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (c *restClient) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\tit := &VolumeIterator{}\n\treq = proto.Clone(req).(*netapppb.ListVolumesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken 
string) ([]*netapppb.Volume, string, error) {\n\t\tresp := &netapppb.ListVolumesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetOrderBy() != \"\" {\n\t\t\tparams.Add(\"orderBy\", fmt.Sprintf(\"%v\", req.GetOrderBy()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn 
nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetVolumes(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, 
status.Error(codes.Unimplemented, \"\")\n}", "func List(d Driver) (*volume.ListResponse, error) {\n\tlog.Debugf(\"Entering List\")\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tvar vols []*volume.Volume\n\tfor name, v := range d.GetVolumes() {\n\t\tlog.Debugf(\"Volume found: %s\", v)\n\t\tm, err := getMount(d, v.GetMount())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvols = append(vols, &volume.Volume{Name: name, Status: v.GetStatus(), Mountpoint: m.GetPath()})\n\t}\n\treturn &volume.ListResponse{Volumes: vols}, nil\n}", "func (a *Client) ListVolumes(params *ListVolumesParams) (*ListVolumesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListVolumesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListVolumes\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/volumes\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListVolumesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListVolumesOK), nil\n\n}", "func (d *DirDriver) List() (*volume.ListResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit List() endpoint\")\n\n\tvols := new(volume.ListResponse)\n\tvols.Volumes = []*volume.Volume{}\n\n\tfor _, vol := range d.volumes {\n\t\tnewVol := new(volume.Volume)\n\t\tnewVol.Name = vol.name\n\t\tnewVol.Mountpoint = vol.path\n\t\tnewVol.CreatedAt = vol.createTime.String()\n\t\tvols.Volumes = append(vols.Volumes, newVol)\n\t\tlogrus.Debugf(\"Adding volume %s to list response\", newVol.Name)\n\t}\n\n\treturn vols, nil\n}", "func CreateListDisks00Response() (response *ListDisks00Response) {\n\tresponse = &ListDisks00Response{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func 
Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func ListVolumes(\n\tctx context.Context,\n\tc csi.ControllerClient,\n\tversion *csi.Version,\n\tmaxEntries uint32,\n\tstartingToken string,\n\tcallOpts ...grpc.CallOption) (\n\tvolumes []*csi.VolumeInfo, nextToken string, err error) {\n\n\treq := &csi.ListVolumesRequest{\n\t\tMaxEntries: maxEntries,\n\t\tStartingToken: startingToken,\n\t\tVersion: version,\n\t}\n\n\tres, err := c.ListVolumes(ctx, req, callOpts...)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tresult := res.GetResult()\n\tnextToken = result.NextToken\n\tentries := result.Entries\n\n\t// check to see if there are zero entries\n\tif len(result.Entries) == 0 {\n\t\treturn nil, nextToken, nil\n\t}\n\n\tvolumes = make([]*csi.VolumeInfo, len(entries))\n\n\tfor x, e := range entries {\n\t\tif volumes[x] = e.GetVolumeInfo(); volumes[x] == nil {\n\t\t\treturn nil, \"\", ErrNilVolumeInfo\n\t\t}\n\t}\n\n\treturn volumes, nextToken, nil\n}", "func (page DiskListPageClient) Response() azcompute.DiskList {\n\tl := azcompute.DiskList{}\n\terr := DeepCopy(&l, page.dlp.Response())\n\tif err != nil {\n\t\tpage.err = fmt.Errorf(\"fail to get disk list result, %s\", err) //nolint:staticcheck\n\t}\n\treturn l\n}", "func (cs *controller) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (driver *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) 
{\n\tklog.V(4).Infof(\"ListVolumes: called with args %#v\", req)\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (h *ApiHandler) handleListVolumes(c echo.Context) error {\n\tbuilder := h.Builder(c)\n\n\tvar kalmPVCList v1.PersistentVolumeClaimList\n\tif err := builder.List(&kalmPVCList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar kalmPVList v1.PersistentVolumeList\n\tif err := builder.List(&kalmPVList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkalmPVMap := make(map[string]v1.PersistentVolume)\n\tfor _, kalmPV := range kalmPVList.Items {\n\t\tkalmPVMap[kalmPV.Name] = kalmPV\n\t}\n\n\trespVolumes := []resources.Volume{}\n\tfor _, kalmPVC := range kalmPVCList.Items {\n\t\trespVolume, err := builder.BuildVolumeResponse(kalmPVC, kalmPVMap[kalmPVC.Spec.VolumeName])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespVolumes = append(respVolumes, *respVolume)\n\t}\n\n\treturn c.JSON(200, respVolumes)\n}", "func (digitalocean DigitalOcean) ListVolumes() ([]godo.Volume, error) {\n\tclient, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolumes, _, err := client.client.Storage.ListVolumes(client.context, &godo.ListVolumeParams{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn volumes, err\n}", "func (client VolumesClient) List(ctx context.Context, location string, storageSubSystem string, storagePool string, filter string) (result VolumeListPage, err error) {\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, location, storageSubSystem, storagePool, filter)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.vl.Response = 
autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.vl, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (srv *VolumeService) List() ([]api.Volume, error) {\n\treturn srv.provider.ListVolumes()\n}", "func (d *driverInfo) List() ([]*Volume, error) {\n\tvar volumes []*Volume\n\n\tfor _, vol := range d.volumes {\n\t\tvolumes = append(volumes, vol)\n\t}\n\n\treturn volumes, nil\n}", "func GetServerVolumesListMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\tassert.Nil(err, \"Error getting server volume list\")\n\tassert.Equal(volumesIn, vOut, \"GetServerVolumesListMocked returned different server volumes\")\n\n\treturn vOut\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info 
failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func (d *MinioDriver) List(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tvar vols []*volume.Volume\n\tfor name, v := range d.volumes {\n\t\tvols = append(vols,\n\t\t\t&volume.Volume{\n\t\t\t\tName: name,\n\t\t\t\tMountpoint: v.mountpoint,\n\t\t\t})\n\t}\n\treturn volumeResp(\"\", \"\", vols, capability, \"\")\n}", "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func NewFileDirsListResponse(items []*models.FileDirs) []render.Renderer {\n\tlist := []render.Renderer{}\n\tfor i := range items {\n\t\tlist = append(list, NewFileDirsResponse(items[i]))\n\t}\n\n\treturn list\n}", "func (s *VolumeListener) List(inctx context.Context, in *protocol.VolumeListRequest) (_ *protocol.VolumeListResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot list volume\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tjob, err := PrepareJob(inctx, in.GetTenantId(), \"/volumes/list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\thandler := VolumeHandler(job)\n\tvolumes, xerr := handler.List(in.GetAll())\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\t// Map resources.Volume to protocol.Volume\n\tvar pbvolumes []*protocol.VolumeInspectResponse\n\tfor _, v := range volumes {\n\t\tpbVolume, xerr := v.ToProtocol(ctx)\n\t\tif xerr != nil {\n\t\t\treturn nil, 
xerr\n\t\t}\n\n\t\tpbvolumes = append(pbvolumes, pbVolume)\n\t}\n\trv := &protocol.VolumeListResponse{Volumes: pbvolumes}\n\treturn rv, nil\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func (client VolumesClient) ListResponder(resp *http.Response) (result VolumeList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func (d *VolumeDriver) List(r volume.Request) volume.Response {\n\tlog.Errorf(\"VolumeDriver List to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (s *DataStore) ListVolumes() (map[string]*longhorn.Volume, error) {\n\titemMap := make(map[string]*longhorn.Volume)\n\n\tlist, err := s.ListVolumesRO()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, itemRO := range list {\n\t\t// Cannot use cached object from lister\n\t\titemMap[itemRO.Name] = itemRO.DeepCopy()\n\t}\n\treturn itemMap, nil\n}", "func (p *VolumesClientListPager) PageResponse() VolumesClientListResponse {\n\treturn p.current\n}", "func (b *Poloniex) GetVolumes() (vc VolumeCollection, err error) {\n\tr, err := b.client.do(\"GET\", \"public?command=return24hVolume\", nil, false)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(r, &vc); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (c *Core) ListVolumes(labels map[string]string) ([]*types.Volume, error) {\n\tvar retVolumes = make([]*types.Volume, 0)\n\n\t// list local meta store.\n\tmetaList, err := c.store.List()\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\t// scan all drivers.\n\tlogrus.Debugf(\"probing all drivers for listing volume\")\n\tdrivers, err := driver.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx := driver.Contexts()\n\n\tvar realVolumes = map[string]*types.Volume{}\n\tvar volumeDrivers = map[string]driver.Driver{}\n\n\tfor _, dv := range drivers {\n\t\tvolumeDrivers[dv.Name(ctx)] = dv\n\n\t\td, ok := dv.(driver.Lister)\n\t\tif !ok {\n\t\t\t// not Lister, ignore it.\n\t\t\tcontinue\n\t\t}\n\t\tvList, err := d.List(ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"volume driver %s list error: %v\", dv.Name(ctx), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, v := range vList {\n\t\t\trealVolumes[v.Name] = v\n\t\t}\n\t}\n\n\tfor name, obj := range metaList {\n\t\tv, ok := obj.(*types.Volume)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\td, ok := volumeDrivers[v.Spec.Backend]\n\t\tif !ok {\n\t\t\t// driver not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\n\t\t// the local driver and tmpfs driver\n\t\tif d.StoreMode(ctx).IsLocal() {\n\t\t\tretVolumes = append(retVolumes, v)\n\t\t\tcontinue\n\t\t}\n\n\t\trv, ok := realVolumes[name]\n\t\tif !ok {\n\t\t\t// real volume not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\t\tv.Status.MountPoint = rv.Status.MountPoint\n\n\t\tdelete(realVolumes, name)\n\n\t\tretVolumes = append(retVolumes, v)\n\t}\n\n\tfor _, v := range realVolumes {\n\t\t// found new volumes, store the meta\n\t\tlogrus.Warningf(\"found new volume %s\", v.Name)\n\t\tc.store.Put(v)\n\n\t\tretVolumes = append(retVolumes, v)\n\n\t}\n\n\treturn retVolumes, nil\n}", "func (s stack) ListVolumes(ctx context.Context) ([]*abstract.Volume, fail.Error) {\n\tif valid.IsNil(s) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\n\treturn nil, fail.NotImplementedError(\"implement me\")\n}", "func (a *Client) PostContainersVolumesList(params *PostContainersVolumesListParams, authInfo runtime.ClientAuthInfoWriter) (*PostContainersVolumesListOK, error) {\n\t// TODO: Validate the params before 
sending\n\tif params == nil {\n\t\tparams = NewPostContainersVolumesListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostContainersVolumesList\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/containers/volumes/list\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostContainersVolumesListReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PostContainersVolumesListOK), nil\n\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (proxy *remoteDriverProxy) List() ([]*remoteVolumeDesc, error) {\n\tvar req remoteVolumeListReq\n\tvar resp remoteVolumeListResp\n\n\tif err := proxy.client.CallService(remoteVolumeListService, &req, &resp, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn nil, errors.New(resp.Err)\n\t}\n\n\treturn resp.Volumes, nil\n}", "func (d *lvm) ListVolumes() ([]Volume, error) {\n\tvols := make(map[string]Volume)\n\n\tcmd := exec.Command(\"lvs\", \"--noheadings\", \"-o\", \"lv_name\", d.config[\"lvm.vg_name\"])\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\tfor scanner.Scan() {\n\t\trawName := 
strings.TrimSpace(scanner.Text())\n\t\tvar volType VolumeType\n\t\tvar volName string\n\n\t\tfor _, volumeType := range d.Info().VolumeTypes {\n\t\t\tprefix := fmt.Sprintf(\"%s_\", volumeType)\n\t\t\tif strings.HasPrefix(rawName, prefix) {\n\t\t\t\tvolType = volumeType\n\t\t\t\tvolName = strings.TrimPrefix(rawName, prefix)\n\t\t\t}\n\t\t}\n\n\t\tif volType == \"\" {\n\t\t\td.logger.Debug(\"Ignoring unrecognised volume type\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore unrecognised volume.\n\t\t}\n\n\t\tlvSnapSepCount := strings.Count(volName, lvmSnapshotSeparator)\n\t\tif lvSnapSepCount%2 != 0 {\n\t\t\t// If snapshot separator count is odd, then this means we have a lone lvmSnapshotSeparator\n\t\t\t// that is not part of the lvmEscapedHyphen pair, which means this volume is a snapshot.\n\t\t\td.logger.Debug(\"Ignoring snapshot volume\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore snapshot volumes.\n\t\t}\n\n\t\tisBlock := strings.HasSuffix(volName, lvmBlockVolSuffix)\n\n\t\tif volType == VolumeTypeVM && !isBlock {\n\t\t\tcontinue // Ignore VM filesystem volumes as we will just return the VM's block volume.\n\t\t}\n\n\t\t// Unescape raw LVM name to storage volume name. Safe to do now we know we are not dealing\n\t\t// with snapshot volumes.\n\t\tvolName = strings.Replace(volName, lvmEscapedHyphen, \"-\", -1)\n\n\t\tcontentType := ContentTypeFS\n\t\tif volType == VolumeTypeCustom && strings.HasSuffix(volName, lvmISOVolSuffix) {\n\t\t\tcontentType = ContentTypeISO\n\t\t\tvolName = strings.TrimSuffix(volName, lvmISOVolSuffix)\n\t\t} else if volType == VolumeTypeVM || isBlock {\n\t\t\tcontentType = ContentTypeBlock\n\t\t\tvolName = strings.TrimSuffix(volName, lvmBlockVolSuffix)\n\t\t}\n\n\t\t// If a new volume has been found, or the volume will replace an existing image filesystem volume\n\t\t// then proceed to add the volume to the map. 
We allow image volumes to overwrite existing\n\t\t// filesystem volumes of the same name so that for VM images we only return the block content type\n\t\t// volume (so that only the single \"logical\" volume is returned).\n\t\texistingVol, foundExisting := vols[volName]\n\t\tif !foundExisting || (existingVol.Type() == VolumeTypeImage && existingVol.ContentType() == ContentTypeFS) {\n\t\t\tv := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config)\n\n\t\t\tif contentType == ContentTypeFS {\n\t\t\t\tv.SetMountFilesystemProbe(true)\n\t\t\t}\n\n\t\t\tvols[volName] = v\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Unexpected duplicate volume %q found\", volName)\n\t}\n\n\terrMsg, err := io.ReadAll(stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed getting volume list: %v: %w\", strings.TrimSpace(string(errMsg)), err)\n\t}\n\n\tvolList := make([]Volume, len(vols))\n\tfor _, v := range vols {\n\t\tvolList = append(volList, v)\n\t}\n\n\treturn volList, nil\n}", "func (c *Client) ListCDSVolume(queryArgs *ListCDSVolumeArgs) (*ListCDSVolumeResult, error) {\n\treturn ListCDSVolume(c, queryArgs)\n}", "func (v *VolumeService) VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) {\n\treturn volume.VolumeListOKBody{}, nil\n}", "func (s *StackEbrc) ListVolumes() ([]abstract.Volume, fail.Error) {\n\tlogrus.Debug(\"ebrc.Client.ListVolumes() called\")\n\tdefer logrus.Debug(\"ebrc.Client.ListVolumes() done\")\n\n\tvar volumes []abstract.Volume\n\n\torg, vdc, err := s.getOrgVdc()\n\tif err != nil {\n\t\treturn volumes, fail.Wrap(err, fmt.Sprintf(\"Error listing volumes\"))\n\t}\n\n\t// Check if network is already there\n\trefs, err := getLinks(org, \"vnd.vmware.vcloud.disk+xml\")\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error recovering network information\"))\n\t}\n\tfor _, ref := range refs {\n\t\t// FIXME: 
Add data\n\t\tdr, err := vdc.QueryDisk(ref.Name)\n\t\tif err == nil {\n\t\t\tthed, err := vdc.FindDiskByHREF(dr.Disk.HREF)\n\t\t\tif err == nil {\n\t\t\t\tvolumes = append(volumes, abstract.Volume{Name: ref.Name, ID: ref.ID, Size: thed.Disk.Size})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn volumes, nil\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (v *VolumesServiceMock) List(podUID string) (list *api.VolumeList, err error) {\n\targs := v.Called(podUID)\n\tx := args.Get(0)\n\tif x != nil {\n\t\tlist = x.(*api.VolumeList)\n\t}\n\terr = args.Error(1)\n\treturn\n}", "func buildVolumes(response interface{}) []*model.Volume {\r\n\tvalues := reflect.ValueOf(response)\r\n\tresults := make([]*model.Volume, values.Len())\r\n\r\n\tfor i := 0; i < values.Len(); i++ {\r\n\t\tvalue := &model.Volume{}\r\n\t\tjsonutil.Decode(values.Index(i).Interface(), value)\r\n\t\tresults[i] = value\r\n\t}\r\n\r\n\treturn results\r\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: 
vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func NewCmdDiskList() *cobra.Command {\n\treq := base.BizClient.NewDescribeUDiskRequest()\n\ttypeMap := map[string]string{\n\t\t\"DataDisk\": \"Oridinary-Data-Disk\",\n\t\t\"SystemDisk\": \"Oridinary-System-Disk\",\n\t\t\"SSDDataDisk\": \"SSD-Data-Disk\",\n\t}\n\tarkModeMap := map[string]string{\n\t\t\"Yes\": \"true\",\n\t\t\"No\": \"false\",\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"List udisk instance\",\n\t\tLong: \"List udisk instance\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfor key, val := range typeMap {\n\t\t\t\tif *req.DiskType == val {\n\t\t\t\t\t*req.DiskType = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp, err := base.BizClient.DescribeUDisk(req)\n\t\t\tif err != nil {\n\t\t\t\tbase.HandleError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlist := []DiskRow{}\n\t\t\tfor _, disk := range resp.DataSet {\n\t\t\t\trow := DiskRow{\n\t\t\t\t\tResourceID: disk.UDiskId,\n\t\t\t\t\tName: disk.Name,\n\t\t\t\t\tGroup: disk.Tag,\n\t\t\t\t\tSize: fmt.Sprintf(\"%dGB\", disk.Size),\n\t\t\t\t\tType: typeMap[disk.DiskType],\n\t\t\t\t\tEnableDataArk: arkModeMap[disk.UDataArkMode],\n\t\t\t\t\tMountUHost: fmt.Sprintf(\"%s/%s\", disk.UHostName, disk.UHostIP),\n\t\t\t\t\tMountPoint: disk.DeviceName,\n\t\t\t\t\tState: disk.Status,\n\t\t\t\t\tCreationTime: base.FormatDate(disk.CreateTime),\n\t\t\t\t\tExpirationTime: base.FormatDate(disk.ExpiredTime),\n\t\t\t\t}\n\t\t\t\tif disk.UHostIP == \"\" {\n\t\t\t\t\trow.MountUHost = \"\"\n\t\t\t\t}\n\t\t\t\tlist = append(list, row)\n\t\t\t}\n\t\t\tif global.json {\n\t\t\t\tbase.PrintJSON(list)\n\t\t\t} else 
{\n\t\t\t\tbase.PrintTableS(list)\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\treq.UDiskId = flags.String(\"resource-id\", \"\", \"Optional. Resource ID of the udisk to search\")\n\treq.DiskType = flags.String(\"udisk-type\", \"\", \"Optional. Optional. Type of the udisk to search. 'Oridinary-Data-Disk','Oridinary-System-Disk' or 'SSD-Data-Disk'\")\n\treq.Offset = cmd.Flags().Int(\"offset\", 0, \"Optional. Offset\")\n\treq.Limit = cmd.Flags().Int(\"limit\", 50, \"Optional. Limit\")\n\tflags.SetFlagValues(\"udisk-type\", \"Oridinary-Data-Disk\", \"Oridinary-System-Disk\", \"SSD-Data-Disk\")\n\treturn cmd\n}", "func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(ctx, opt, &listOpt)\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (d 
ImagefsDriver) List() (*volume.ListResponse, error) {\n\tcontainers, err := d.cli.ContainerList(context.Background(), types.ContainerListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(filters.Arg(\"label\", \"com.docker.imagefs.version\")),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tresponse := &volume.ListResponse{}\n\tfor i := range containers {\n\t\tresponse.Volumes = append(response.Volumes, &volume.Volume{\n\t\t\t// TODO(rabrams) fall back to id if no names\n\t\t\tName: containers[i].Names[0],\n\t\t})\n\t}\n\treturn response, nil\n}", "func (d *defaultDataVolumeManager) ListDataVolumes(ctx context.Context, kubeconfig []byte, listOpts ...client.ListOption) (*cdicorev1alpha1.DataVolumeList, error) {\n\tc, namespace, err := d.client.GetClient(kubeconfig)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create kubevirt client\")\n\t}\n\n\tdvList := cdicorev1alpha1.DataVolumeList{}\n\tif err := c.List(ctx, &dvList, listOpts...); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not list DataVolumes in namespace %s\", namespace)\n\t}\n\n\treturn &dvList, nil\n}", "func (s *SnapshotsServiceOp) ListVolume(opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(opt, &listOpt)\n}", "func (c *Client) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\treturn c.internalClient.ListVolumes(ctx, req, opts...)\n}", "func NewListDisksOK() *ListDisksOK {\n\treturn &ListDisksOK{}\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = 
ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (s *Module) DiskList() ([]pkg.VDisk, error) {\n\tpools, err := s.diskPools()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar disks []pkg.VDisk\n\tfor _, pool := range pools {\n\n\t\titems, err := os.ReadDir(pool)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list virtual disks\")\n\t\t}\n\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo, err := item.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get file info for '%s'\", item.Name())\n\t\t\t}\n\n\t\t\tdisks = append(disks, pkg.VDisk{\n\t\t\t\tPath: filepath.Join(pool, item.Name()),\n\t\t\t\tSize: info.Size(),\n\t\t\t})\n\t\t}\n\n\t\treturn disks, nil\n\t}\n\n\treturn disks, nil\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func GetServerVolumesListFailStatusMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", 
serverID)).Return(vIn, 499, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\n\tassert.NotNil(err, \"We are expecting an status code error\")\n\tassert.Nil(vOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"499\", \"Error should contain http code 499\")\n\n\treturn vOut\n}", "func (c *volumeCommand) listVolumes(ctx context.Context, ns id.Namespace, vols []string) ([]*model.Volume, error) {\n\tuseIDs, err := c.config.UseIDs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !useIDs {\n\t\treturn c.client.GetNamespaceVolumesByName(ctx, ns, vols...)\n\t}\n\n\tvolIDs := []id.Volume{}\n\tfor _, uid := range vols {\n\t\tvolIDs = append(volIDs, id.Volume(uid))\n\t}\n\n\treturn c.client.GetNamespaceVolumesByUID(ctx, ns, volIDs...)\n}", "func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogrus.Infof(\"Listing volumes using plugin %s\", p.Name)\n\n\tresp, err := p.sendRequest(nil, listPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, listPath, \"\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvolumeRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tvolumeResp := new(volume.ListResponse)\n\tif err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling volume plugin %s list response: %w\", p.Name, err)\n\t}\n\n\treturn volumeResp.Volumes, nil\n}", "func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tserverCh, err := cluster.ServerIterator(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype serverResult struct {\n\t\tids []string\n\t\terr error\n\t}\n\tresultCh := make(chan serverResult)\n\n\tvar 
action listVdisksAction\n\tif pred == nil {\n\t\taction.filter = filterListedVdiskID\n\t} else {\n\t\taction.filter = func(str string) (string, bool) {\n\t\t\tstr, ok := filterListedVdiskID(str)\n\t\t\tif !ok {\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t\treturn str, pred(str)\n\t\t}\n\t}\n\n\tvar serverCount int\n\tvar reply interface{}\n\tfor server := range serverCh {\n\t\tserver := server\n\t\tgo func() {\n\t\t\tvar result serverResult\n\t\t\tlog.Infof(\"listing all vdisks stored on %v\", server.Config())\n\t\t\treply, result.err = server.Do(action)\n\t\t\tif result.err == nil && reply != nil {\n\t\t\t\t// [NOTE] this line of code relies on the fact that our\n\t\t\t\t// custom `listVdisksAction` type returns a `[]string` value as a reply,\n\t\t\t\t// as soon as that logic changes, this line will start causing trouble.\n\t\t\t\tresult.ids = reply.([]string)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase resultCh <- result:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t}()\n\t\tserverCount++\n\t}\n\n\t// collect the ids from all servers within the given cluster\n\tvar ids []string\n\tvar result serverResult\n\tfor i := 0; i < serverCount; i++ {\n\t\tresult = <-resultCh\n\t\tif result.err != nil {\n\t\t\t// return early, an error has occured!\n\t\t\treturn nil, result.err\n\t\t}\n\t\tids = append(ids, result.ids...)\n\t}\n\n\tif len(ids) <= 1 {\n\t\treturn ids, nil // nothing to do\n\t}\n\n\t// sort and dedupe\n\tsort.Strings(ids)\n\tids = dedupStrings(ids)\n\n\treturn ids, nil\n}", "func (client *Client) ShowVolumes(volumes ...string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/show/volumes/\\\"%s\\\"\", strings.Join(volumes, \",\"))\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) 
{\n\tlistopt := metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.CoreV1().PersistentVolumes().List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}", "func GetServerVolumesListFailJSONMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// wrong json\n\tvIn := []byte{10, 20, 30}\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\n\tassert.NotNil(err, \"We are expecting a marshalling error\")\n\tassert.Nil(vOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"invalid character\", \"Error message should include the string 'invalid character'\")\n\n\treturn vOut\n}", "func (s *Stack) ListVolumes() ([]resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\n\tdefer concurrency.NewTracer(nil, \"\", true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvar vs []resources.Volume\n\terr := volumesv2.List(s.VolumeClient, volumesv2.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {\n\t\tlist, err := volumesv2.ExtractVolumes(page)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error listing volumes: volume extraction: %+v\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, vol := range list {\n\t\t\tav := resources.Volume{\n\t\t\t\tID: vol.ID,\n\t\t\t\tName: vol.Name,\n\t\t\t\tSize: vol.Size,\n\t\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\t\tState: toVolumeState(vol.Status),\n\t\t\t}\n\t\t\tvs = append(vs, av)\n\t\t}\n\t\treturn 
true, nil\n\t})\n\tif err != nil || len(vs) == 0 {\n\t\tif err != nil {\n\t\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error listing volume types: %s\", ProviderErrorToString(err)))\n\t\t}\n\t\tlog.Warnf(\"Complete volume list empty\")\n\t}\n\treturn vs, nil\n}", "func GetVolumesFromUUIDV2(uuid string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?uuid=\" + uuid\n\treturn getVolumesV2(query)\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, 
response.Code())\n\t}\n}", "func ListVolumes(ip string) (string, error) {\n\tlog.Printf(\"Listing volumes.\")\n\treturn ssh.InvokeCommand(ip, dockercli.ListVolumes)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (client *Client) ListVolumes(all bool) ([]api.Volume, error) {\n\tif all {\n\t\treturn client.listAllVolumes()\n\t}\n\treturn client.listMonitoredVolumes()\n\n}", "func (c *BlockVolumeClient) List(params *BlockVolumeParams) (*BlockVolumeList, error) {\n\tlist := &BlockVolumeList{}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/list\", params, list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (r Virtual_Storage_Repository) GetDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Storage_Repository\", \"getDiskImages\", nil, &r.Options, &resp)\n\treturn\n}", "func GetVolumesFromStyleV2(style string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?state=\" + style\n\treturn getVolumesV2(query)\n}", "func DecodeListDevicesResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) 
{\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody ListDevicesResponseBody\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"chargewatch\", \"listDevices\", err)\n\t\t\t}\n\t\t\terr = ValidateListDevicesResponseBody(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrValidationError(\"chargewatch\", \"listDevices\", err)\n\t\t\t}\n\t\t\tres := NewListDevicesResultOK(&body)\n\t\t\treturn res, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"chargewatch\", \"listDevices\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (r ListVolumesRequest) Send(ctx context.Context) (*ListVolumesResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &ListVolumesResponse{\n\t\tListVolumesOutput: r.Request.Data.(*ListVolumesOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (a *HetznerVolumes) FindVolumes() ([]*volumes.Volume, error) {\n\tklog.V(2).Infof(\"Finding attachable etcd volumes\")\n\n\tif a.server.Datacenter == nil || a.server.Datacenter.Location == nil {\n\t\treturn nil, fmt.Errorf(\"failed to find server location for the running server\")\n\t}\n\tserverLocation := a.server.Datacenter.Location.Name\n\n\tmatchingVolumes, err := getMatchingVolumes(a.hcloudClient, a.matchNameTags)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get matching volumes: %w\", err)\n\t}\n\n\tvar localEtcdVolumes []*volumes.Volume\n\tfor _, volume := 
range matchingVolumes {\n\t\t// Only volumes from the same location can be mounted\n\t\tif volume.Location == nil {\n\t\t\tklog.Warningf(\"failed to find volume %s(%d) location\", volume.Name, volume.ID)\n\t\t\tcontinue\n\t\t}\n\t\tvolumeLocation := volume.Location.Name\n\t\tif volumeLocation != serverLocation {\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(2).Infof(\"Found attachable volume %s(%d) with status %q\", volume.Name, volume.ID, volume.Status)\n\n\t\tvolumeID := strconv.Itoa(volume.ID)\n\t\tlocalEtcdVolume := &volumes.Volume{\n\t\t\tProviderID: volumeID,\n\t\t\tInfo: volumes.VolumeInfo{\n\t\t\t\tDescription: a.clusterName + \"-\" + volumeID,\n\t\t\t},\n\t\t\tMountName: \"hcloud-\" + volumeID,\n\t\t\tEtcdName: \"vol-\" + volumeID,\n\t\t}\n\n\t\tif volume.Server != nil {\n\t\t\tlocalEtcdVolume.AttachedTo = strconv.Itoa(volume.Server.ID)\n\t\t\tif volume.Server.ID == a.server.ID {\n\t\t\t\tlocalEtcdVolume.LocalDevice = fmt.Sprintf(\"%s%d\", localDevicePrefix, volume.ID)\n\t\t\t}\n\t\t}\n\n\t\tlocalEtcdVolumes = append(localEtcdVolumes, localEtcdVolume)\n\t}\n\n\treturn localEtcdVolumes, nil\n}", "func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) (DiskEncryptionSetsListResponse, error) {\n\tresult := DiskEncryptionSetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil {\n\t\treturn DiskEncryptionSetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func GetVolumesFromClusterNameV2(cluster string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?cluster.name=\" + cluster\n\treturn getVolumesV2(query)\n}", "func GetVolumesFromStateV2(state string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?state=\" + state\n\treturn getVolumesV2(query)\n}", "func GetVolList(volumeID string) (*apis.ZFSVolumeList, error) {\n\tlistOptions := v1.ListOptions{\n\t\tLabelSelector: ZFSNodeKey + \"=\" + 
NodeID,\n\t}\n\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).List(listOptions)\n\n}", "func GetVolumes(dir string, query map[string]string) ([]lepton.NanosVolume, error) {\n\tvar vols []lepton.NanosVolume\n\n\tfiles, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn vols, err\n\t}\n\n\tfor _, info := range files {\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilename := strings.TrimSuffix(info.Name(), \".raw\")\n\t\tnameParts := strings.Split(filename, lepton.VolumeDelimiter)\n\t\tif len(nameParts) < 2 { // invalid file name\n\t\t\tcontinue\n\t\t}\n\n\t\tfi, err := info.Info()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvols = append(vols, lepton.NanosVolume{\n\t\t\tID: nameParts[1],\n\t\t\tName: nameParts[0],\n\t\t\tLabel: nameParts[0],\n\t\t\tSize: lepton.Bytes2Human(fi.Size()),\n\t\t\tPath: path.Join(dir, info.Name()),\n\t\t\tCreatedAt: fi.ModTime().String(),\n\t\t})\n\t}\n\n\treturn filterVolume(vols, query)\n}", "func GetVolumes(mountHost bool, mountSecret bool, instanceName string) []corev1.Volume {\n\tvar hostPathDirectoryTypeForPtr = corev1.HostPathDirectory\n\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"osd-monitored-logs-local\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: \"osd-monitored-logs-local\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"osd-monitored-logs-metadata\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: \"osd-monitored-logs-metadata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"splunk-state\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tHostPath: &corev1.HostPathVolumeSource{\n\t\t\t\t\tPath: \"/var/lib/misc\",\n\t\t\t\t\tType: 
&hostPathDirectoryTypeForPtr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif mountHost {\n\t\tvolumes = append(volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: \"host\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tHostPath: &corev1.HostPathVolumeSource{\n\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\tType: &hostPathDirectoryTypeForPtr,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t} else {\n\t\t// if we aren't mounting the host dir, we're the hf\n\t\tvar hfName = instanceName + \"-hfconfig\"\n\t\tvolumes = append(volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: hfName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\tName: hfName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t}\n\n\tif mountSecret {\n\t\tvolumes = append(volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: config.SplunkAuthSecretName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: config.SplunkAuthSecretName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t} else {\n\t\t// if we aren't mounting the secret, we're fwding to the splunk hf\n\t\tvar internalName = instanceName + \"-internalsplunk\"\n\t\tvolumes = append(volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: internalName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\tName: internalName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t}\n\n\treturn volumes\n}", "func CreateListManagedPrivateSpacesResponse() (response *ListManagedPrivateSpacesResponse) {\n\tresponse = &ListManagedPrivateSpacesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (s *VolumeStore) List() ([]volume.Volume, []string, error) {\n\tvols, warnings, err := s.list()\n\tif err != nil {\n\t\treturn nil, nil, &OpErr{Err: err, Op: 
\"list\"}\n\t}\n\tvar out []volume.Volume\n\n\tfor _, v := range vols {\n\t\tname := normaliseVolumeName(v.Name())\n\n\t\ts.locks.Lock(name)\n\t\tstoredV, exists := s.getNamed(name)\n\t\t// Note: it's not safe to populate the cache here because the volume may have been\n\t\t// deleted before we acquire a lock on its name\n\t\tif exists && storedV.DriverName() != v.DriverName() {\n\t\t\tlogrus.Warnf(\"Volume name %s already exists for driver %s, not including volume returned by %s\", v.Name(), storedV.DriverName(), v.DriverName())\n\t\t\ts.locks.Unlock(v.Name())\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, v)\n\t\ts.locks.Unlock(v.Name())\n\t}\n\treturn out, warnings, nil\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (s *LifecyclerRPCServer) Volumes(opts HostOpts, resp *[]string) (err error) {\n\t*resp, err = s.Plugin.Volumes(opts.Version, opts.FlagValues)\n\treturn err\n}" ]
[ "0.7961379", "0.7648047", "0.75920993", "0.74197817", "0.64900607", "0.62819856", "0.62386733", "0.59977704", "0.5971432", "0.5950459", "0.5950459", "0.5867729", "0.5857057", "0.5770099", "0.57509506", "0.57335275", "0.57225084", "0.5716024", "0.5688472", "0.5680943", "0.5663614", "0.5591184", "0.5484759", "0.547163", "0.546521", "0.53565544", "0.5355972", "0.5349414", "0.5337238", "0.53314644", "0.53127736", "0.53086275", "0.5275838", "0.526834", "0.5262672", "0.52545464", "0.5246079", "0.5219903", "0.52131766", "0.5172954", "0.51703167", "0.51529455", "0.5148864", "0.5143431", "0.51191264", "0.50991523", "0.50888157", "0.50714535", "0.5070584", "0.5024373", "0.5019777", "0.5010139", "0.49950594", "0.49922484", "0.49844435", "0.49842894", "0.49669605", "0.49635258", "0.49622405", "0.4958449", "0.49523517", "0.49506563", "0.49489492", "0.49383765", "0.49382305", "0.49379614", "0.4919981", "0.49055657", "0.4877933", "0.48730338", "0.48621842", "0.48452774", "0.48386437", "0.48154318", "0.48063385", "0.47920078", "0.47603044", "0.47572964", "0.47494826", "0.4748334", "0.47369996", "0.4732831", "0.46911192", "0.4678083", "0.46398532", "0.46314114", "0.46143812", "0.46123385", "0.46009487", "0.46006453", "0.45961666", "0.45920485", "0.45744032", "0.45712167", "0.45558006", "0.45359424", "0.453498", "0.45340148", "0.4526778", "0.45251465" ]
0.8754785
0
Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse is an autogenerated conversion function.
func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error { return autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) 
error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. 
We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func (a *Client) ListVolumes(params *ListVolumesParams) (*ListVolumesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListVolumesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListVolumes\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/volumes\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListVolumesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListVolumesOK), nil\n\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func (c *restClient) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\tit := &VolumeIterator{}\n\treq = proto.Clone(req).(*netapppb.ListVolumesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*netapppb.Volume, string, error) {\n\t\tresp := 
&netapppb.ListVolumesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetOrderBy() != \"\" {\n\t\t\tparams.Add(\"orderBy\", fmt.Sprintf(\"%v\", req.GetOrderBy()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", 
e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetVolumes(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func CreateListDisks00Response() (response *ListDisks00Response) {\n\tresponse = &ListDisks00Response{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = 
decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (page DiskListPageClient) Response() azcompute.DiskList {\n\tl := azcompute.DiskList{}\n\terr := DeepCopy(&l, page.dlp.Response())\n\tif err != nil {\n\t\tpage.err = fmt.Errorf(\"fail to get disk list result, %s\", err) //nolint:staticcheck\n\t}\n\treturn l\n}", "func List(d Driver) (*volume.ListResponse, error) {\n\tlog.Debugf(\"Entering List\")\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tvar vols []*volume.Volume\n\tfor name, v := range d.GetVolumes() {\n\t\tlog.Debugf(\"Volume found: %s\", v)\n\t\tm, err := getMount(d, v.GetMount())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvols = append(vols, &volume.Volume{Name: name, Status: v.GetStatus(), Mountpoint: m.GetPath()})\n\t}\n\treturn &volume.ListResponse{Volumes: vols}, nil\n}", "func ListVolumes(\n\tctx context.Context,\n\tc csi.ControllerClient,\n\tversion *csi.Version,\n\tmaxEntries uint32,\n\tstartingToken string,\n\tcallOpts ...grpc.CallOption) (\n\tvolumes []*csi.VolumeInfo, nextToken string, err error) {\n\n\treq := &csi.ListVolumesRequest{\n\t\tMaxEntries: maxEntries,\n\t\tStartingToken: startingToken,\n\t\tVersion: version,\n\t}\n\n\tres, err := c.ListVolumes(ctx, req, callOpts...)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tresult := res.GetResult()\n\tnextToken = result.NextToken\n\tentries := result.Entries\n\n\t// check to see if there are zero entries\n\tif len(result.Entries) == 0 {\n\t\treturn nil, nextToken, nil\n\t}\n\n\tvolumes = make([]*csi.VolumeInfo, len(entries))\n\n\tfor x, e := range entries {\n\t\tif volumes[x] = e.GetVolumeInfo(); volumes[x] == nil {\n\t\t\treturn nil, \"\", 
ErrNilVolumeInfo\n\t\t}\n\t}\n\n\treturn volumes, nextToken, nil\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func (d *DirDriver) List() (*volume.ListResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit List() endpoint\")\n\n\tvols := new(volume.ListResponse)\n\tvols.Volumes = []*volume.Volume{}\n\n\tfor _, vol := range d.volumes {\n\t\tnewVol := new(volume.Volume)\n\t\tnewVol.Name = vol.name\n\t\tnewVol.Mountpoint = vol.path\n\t\tnewVol.CreatedAt = vol.createTime.String()\n\t\tvols.Volumes = append(vols.Volumes, newVol)\n\t\tlogrus.Debugf(\"Adding volume %s to list response\", newVol.Name)\n\t}\n\n\treturn vols, nil\n}", "func (cs *DefaultControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (digitalocean DigitalOcean) ListVolumes() ([]godo.Volume, error) {\n\tclient, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolumes, _, err := client.client.Storage.ListVolumes(client.context, &godo.ListVolumeParams{})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn volumes, err\n}", "func GetVolumesV2() (VolumeV2, error) {\n\tvar 
volumes VolumeV2\n\tquery := \"/api/datacenter/storage/volume\"\n\tbodyText, err := getResponseBody(query)\n\tif err != nil {\n\t\treturn VolumeV2{}, err\n\t}\n\terr = json.Unmarshal(bodyText, &volumes)\n\tif err != nil {\n\t\tlog.Printf(\"verita-core: Error: %v\", err)\n\t\treturn volumes, err\n\t}\n\treturn volumes, nil\n}", "func (cs *controller) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\treturn nil, status.Error(codes.Unimplemented, \"\")\n}", "func (h *ApiHandler) handleListVolumes(c echo.Context) error {\n\tbuilder := h.Builder(c)\n\n\tvar kalmPVCList v1.PersistentVolumeClaimList\n\tif err := builder.List(&kalmPVCList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar kalmPVList v1.PersistentVolumeList\n\tif err := builder.List(&kalmPVList, client.MatchingLabels{\"kalm-managed\": \"true\"}); err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkalmPVMap := make(map[string]v1.PersistentVolume)\n\tfor _, kalmPV := range kalmPVList.Items {\n\t\tkalmPVMap[kalmPV.Name] = kalmPV\n\t}\n\n\trespVolumes := []resources.Volume{}\n\tfor _, kalmPVC := range kalmPVCList.Items {\n\t\trespVolume, err := builder.BuildVolumeResponse(kalmPVC, kalmPVMap[kalmPVC.Spec.VolumeName])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trespVolumes = append(respVolumes, *respVolume)\n\t}\n\n\treturn c.JSON(200, respVolumes)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func (driver *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {\n\tklog.V(4).Infof(\"ListVolumes: called with args %#v\", req)\n\treturn nil, 
status.Error(codes.Unimplemented, \"\")\n}", "func (s *VolumeListener) List(inctx context.Context, in *protocol.VolumeListRequest) (_ *protocol.VolumeListResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot list volume\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tjob, err := PrepareJob(inctx, in.GetTenantId(), \"/volumes/list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\thandler := VolumeHandler(job)\n\tvolumes, xerr := handler.List(in.GetAll())\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\t// Map resources.Volume to protocol.Volume\n\tvar pbvolumes []*protocol.VolumeInspectResponse\n\tfor _, v := range volumes {\n\t\tpbVolume, xerr := v.ToProtocol(ctx)\n\t\tif xerr != nil {\n\t\t\treturn nil, xerr\n\t\t}\n\n\t\tpbvolumes = append(pbvolumes, pbVolume)\n\t}\n\trv := &protocol.VolumeListResponse{Volumes: pbvolumes}\n\treturn rv, nil\n}", "func GetServerVolumesListMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\tassert.Nil(err, \"Error getting server volume list\")\n\tassert.Equal(volumesIn, vOut, \"GetServerVolumesListMocked returned different server volumes\")\n\n\treturn vOut\n}", "func 
CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client VolumesClient) List(ctx context.Context, location string, storageSubSystem string, storagePool string, filter string) (result VolumeListPage, err error) {\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, location, storageSubSystem, storagePool, filter)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.vl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.vl, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"List\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (client *Client) ShowVolumes(volumes ...string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/show/volumes/\\\"%s\\\"\", strings.Join(volumes, \",\"))\n}", "func (client VolumesClient) ListResponder(resp *http.Response) (result VolumeList, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (d *lvm) ListVolumes() ([]Volume, error) {\n\tvols := make(map[string]Volume)\n\n\tcmd := 
exec.Command(\"lvs\", \"--noheadings\", \"-o\", \"lv_name\", d.config[\"lvm.vg_name\"])\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscanner := bufio.NewScanner(stdout)\n\tfor scanner.Scan() {\n\t\trawName := strings.TrimSpace(scanner.Text())\n\t\tvar volType VolumeType\n\t\tvar volName string\n\n\t\tfor _, volumeType := range d.Info().VolumeTypes {\n\t\t\tprefix := fmt.Sprintf(\"%s_\", volumeType)\n\t\t\tif strings.HasPrefix(rawName, prefix) {\n\t\t\t\tvolType = volumeType\n\t\t\t\tvolName = strings.TrimPrefix(rawName, prefix)\n\t\t\t}\n\t\t}\n\n\t\tif volType == \"\" {\n\t\t\td.logger.Debug(\"Ignoring unrecognised volume type\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore unrecognised volume.\n\t\t}\n\n\t\tlvSnapSepCount := strings.Count(volName, lvmSnapshotSeparator)\n\t\tif lvSnapSepCount%2 != 0 {\n\t\t\t// If snapshot separator count is odd, then this means we have a lone lvmSnapshotSeparator\n\t\t\t// that is not part of the lvmEscapedHyphen pair, which means this volume is a snapshot.\n\t\t\td.logger.Debug(\"Ignoring snapshot volume\", logger.Ctx{\"name\": rawName})\n\t\t\tcontinue // Ignore snapshot volumes.\n\t\t}\n\n\t\tisBlock := strings.HasSuffix(volName, lvmBlockVolSuffix)\n\n\t\tif volType == VolumeTypeVM && !isBlock {\n\t\t\tcontinue // Ignore VM filesystem volumes as we will just return the VM's block volume.\n\t\t}\n\n\t\t// Unescape raw LVM name to storage volume name. 
Safe to do now we know we are not dealing\n\t\t// with snapshot volumes.\n\t\tvolName = strings.Replace(volName, lvmEscapedHyphen, \"-\", -1)\n\n\t\tcontentType := ContentTypeFS\n\t\tif volType == VolumeTypeCustom && strings.HasSuffix(volName, lvmISOVolSuffix) {\n\t\t\tcontentType = ContentTypeISO\n\t\t\tvolName = strings.TrimSuffix(volName, lvmISOVolSuffix)\n\t\t} else if volType == VolumeTypeVM || isBlock {\n\t\t\tcontentType = ContentTypeBlock\n\t\t\tvolName = strings.TrimSuffix(volName, lvmBlockVolSuffix)\n\t\t}\n\n\t\t// If a new volume has been found, or the volume will replace an existing image filesystem volume\n\t\t// then proceed to add the volume to the map. We allow image volumes to overwrite existing\n\t\t// filesystem volumes of the same name so that for VM images we only return the block content type\n\t\t// volume (so that only the single \"logical\" volume is returned).\n\t\texistingVol, foundExisting := vols[volName]\n\t\tif !foundExisting || (existingVol.Type() == VolumeTypeImage && existingVol.ContentType() == ContentTypeFS) {\n\t\t\tv := NewVolume(d, d.name, volType, contentType, volName, make(map[string]string), d.config)\n\n\t\t\tif contentType == ContentTypeFS {\n\t\t\t\tv.SetMountFilesystemProbe(true)\n\t\t\t}\n\n\t\t\tvols[volName] = v\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Unexpected duplicate volume %q found\", volName)\n\t}\n\n\terrMsg, err := io.ReadAll(stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed getting volume list: %v: %w\", strings.TrimSpace(string(errMsg)), err)\n\t}\n\n\tvolList := make([]Volume, len(vols))\n\tfor _, v := range vols {\n\t\tvolList = append(volList, v)\n\t}\n\n\treturn volList, nil\n}", "func (a *HyperflexApiService) GetHyperflexVolumeList(ctx context.Context) ApiGetHyperflexVolumeListRequest {\n\treturn ApiGetHyperflexVolumeListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (srv *VolumeService) 
List() ([]api.Volume, error) {\n\treturn srv.provider.ListVolumes()\n}", "func (c *Client) ListCDSVolume(queryArgs *ListCDSVolumeArgs) (*ListCDSVolumeResult, error) {\n\treturn ListCDSVolume(c, queryArgs)\n}", "func (v *VolumeService) VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) {\n\treturn volume.VolumeListOKBody{}, nil\n}", "func (p *VolumesClientListPager) PageResponse() VolumesClientListResponse {\n\treturn p.current\n}", "func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto.DiskID) (rets []*VunitInfoSimple, err error) {\n\tc.rwLock.RLock()\n\tdefer c.rwLock.RUnlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\tinfos, err := c.client.ListVolumeUnit(ctx, &cmapi.ListVolumeUnitArgs{DiskID: diskID})\n\tif err != nil {\n\t\tspan.Errorf(\"list disk volume units failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, diskID)\n\tif err != nil {\n\t\tspan.Errorf(\"get disk info failed: disk_id[%d], err[%+v]\", diskID, err)\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range infos {\n\t\tele := VunitInfoSimple{}\n\t\tele.set(info, diskInfo.Host)\n\t\trets = append(rets, &ele)\n\t}\n\treturn rets, nil\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func GetServerVolumesListFailStatusMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server 
volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 499, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\n\tassert.NotNil(err, \"We are expecting an status code error\")\n\tassert.Nil(vOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"499\", \"Error should contain http code 499\")\n\n\treturn vOut\n}", "func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(ctx, opt, &listOpt)\n}", "func (d *MinioDriver) List(r volume.Request) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tvar vols []*volume.Volume\n\tfor name, v := range d.volumes {\n\t\tvols = append(vols,\n\t\t\t&volume.Volume{\n\t\t\t\tName: name,\n\t\t\t\tMountpoint: v.mountpoint,\n\t\t\t})\n\t}\n\treturn volumeResp(\"\", \"\", vols, capability, \"\")\n}", "func NewFileDirsListResponse(items []*models.FileDirs) []render.Renderer {\n\tlist := []render.Renderer{}\n\tfor i := range items {\n\t\tlist = append(list, NewFileDirsResponse(items[i]))\n\t}\n\n\treturn list\n}", "func (a *Client) PostContainersVolumesList(params *PostContainersVolumesListParams, authInfo runtime.ClientAuthInfoWriter) (*PostContainersVolumesListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPostContainersVolumesListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"PostContainersVolumesList\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/containers/volumes/list\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &PostContainersVolumesListReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
result.(*PostContainersVolumesListOK), nil\n\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func NewListStorageV1alpha1VolumeAttachmentOK() *ListStorageV1alpha1VolumeAttachmentOK {\n\n\treturn &ListStorageV1alpha1VolumeAttachmentOK{}\n}", "func GetVolumesFromClusterNameV2(cluster string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?cluster.name=\" + cluster\n\treturn getVolumesV2(query)\n}", "func (*ResponseGetVolumes) Descriptor() ([]byte, []int) {\n\treturn file_pkg_metadata_metadata_proto_rawDescGZIP(), []int{1}\n}", "func (s *DataStore) ListVolumes() (map[string]*longhorn.Volume, error) {\n\titemMap := make(map[string]*longhorn.Volume)\n\n\tlist, err := s.ListVolumesRO()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, itemRO := range list {\n\t\t// Cannot use cached object from lister\n\t\titemMap[itemRO.Name] = itemRO.DeepCopy()\n\t}\n\treturn itemMap, nil\n}", "func (s *SnapshotsServiceOp) ListVolume(opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(opt, &listOpt)\n}", "func (b *Poloniex) GetVolumes() (vc VolumeCollection, err error) {\n\tr, err := b.client.do(\"GET\", \"public?command=return24hVolume\", nil, false)\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = json.Unmarshal(r, &vc); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func GetServerVolumesListFailJSONMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// wrong json\n\tvIn := []byte{10, 20, 30}\n\n\t// call 
service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, nil)\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\n\tassert.NotNil(err, \"We are expecting a marshalling error\")\n\tassert.Nil(vOut, \"Expecting nil output\")\n\tassert.Contains(err.Error(), \"invalid character\", \"Error message should include the string 'invalid character'\")\n\n\treturn vOut\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func NewListDisksOK() *ListDisksOK {\n\treturn &ListDisksOK{}\n}", "func (s *StackEbrc) ListVolumes() ([]abstract.Volume, fail.Error) {\n\tlogrus.Debug(\"ebrc.Client.ListVolumes() called\")\n\tdefer logrus.Debug(\"ebrc.Client.ListVolumes() done\")\n\n\tvar volumes []abstract.Volume\n\n\torg, vdc, err := s.getOrgVdc()\n\tif err != nil {\n\t\treturn volumes, fail.Wrap(err, fmt.Sprintf(\"Error listing volumes\"))\n\t}\n\n\t// Check if network is already there\n\trefs, err := getLinks(org, \"vnd.vmware.vcloud.disk+xml\")\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error recovering network information\"))\n\t}\n\tfor _, ref := range refs {\n\t\t// FIXME: Add data\n\t\tdr, err := vdc.QueryDisk(ref.Name)\n\t\tif err == nil {\n\t\t\tthed, err := vdc.FindDiskByHREF(dr.Disk.HREF)\n\t\t\tif err == nil {\n\t\t\t\tvolumes = append(volumes, abstract.Volume{Name: ref.Name, ID: ref.ID, Size: thed.Disk.Size})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn volumes, nil\n}", "func (d *driverInfo) List() ([]*Volume, error) {\n\tvar volumes []*Volume\n\n\tfor _, vol := range d.volumes {\n\t\tvolumes = append(volumes, vol)\n\t}\n\n\treturn volumes, nil\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn 
autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (s stack) ListVolumes(ctx context.Context) ([]*abstract.Volume, fail.Error) {\n\tif valid.IsNil(s) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\n\treturn nil, fail.NotImplementedError(\"implement me\")\n}", "func GetVolumesFromStyleV2(style string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?state=\" + style\n\treturn getVolumesV2(query)\n}", "func (s *Module) DiskList() ([]pkg.VDisk, error) {\n\tpools, err := s.diskPools()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar disks []pkg.VDisk\n\tfor _, pool := range pools {\n\n\t\titems, err := os.ReadDir(pool)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to list virtual disks\")\n\t\t}\n\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo, err := item.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get file info for '%s'\", item.Name())\n\t\t\t}\n\n\t\t\tdisks = append(disks, pkg.VDisk{\n\t\t\t\tPath: filepath.Join(pool, item.Name()),\n\t\t\t\tSize: info.Size(),\n\t\t\t})\n\t\t}\n\n\t\treturn disks, nil\n\t}\n\n\treturn disks, nil\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogrus.Infof(\"Listing volumes using plugin %s\", p.Name)\n\n\tresp, err := p.sendRequest(nil, listPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, listPath, \"\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvolumeRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tvolumeResp := new(volume.ListResponse)\n\tif err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling volume plugin %s list response: %w\", p.Name, err)\n\t}\n\n\treturn volumeResp.Volumes, nil\n}", "func GetVolumesFromStateV2(state string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?state=\" + state\n\treturn getVolumesV2(query)\n}", "func CreateListAvailableFileSystemTypesResponse() (response *ListAvailableFileSystemTypesResponse) {\n\tresponse = &ListAvailableFileSystemTypesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func ListVdisks(cluster ardb.StorageCluster, pred func(vdiskID string) bool) ([]string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tserverCh, err := cluster.ServerIterator(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype serverResult struct {\n\t\tids []string\n\t\terr error\n\t}\n\tresultCh := make(chan serverResult)\n\n\tvar action listVdisksAction\n\tif pred == nil {\n\t\taction.filter = filterListedVdiskID\n\t} else {\n\t\taction.filter = func(str string) (string, bool) {\n\t\t\tstr, ok := filterListedVdiskID(str)\n\t\t\tif !ok {\n\t\t\t\treturn \"\", false\n\t\t\t}\n\t\t\treturn str, pred(str)\n\t\t}\n\t}\n\n\tvar serverCount int\n\tvar reply interface{}\n\tfor server := range serverCh {\n\t\tserver := server\n\t\tgo func() {\n\t\t\tvar result serverResult\n\t\t\tlog.Infof(\"listing all vdisks stored on %v\", server.Config())\n\t\t\treply, result.err = server.Do(action)\n\t\t\tif result.err == nil && reply != nil {\n\t\t\t\t// [NOTE] this line of code 
relies on the fact that our\n\t\t\t\t// custom `listVdisksAction` type returns a `[]string` value as a reply,\n\t\t\t\t// as soon as that logic changes, this line will start causing trouble.\n\t\t\t\tresult.ids = reply.([]string)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase resultCh <- result:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t}()\n\t\tserverCount++\n\t}\n\n\t// collect the ids from all servers within the given cluster\n\tvar ids []string\n\tvar result serverResult\n\tfor i := 0; i < serverCount; i++ {\n\t\tresult = <-resultCh\n\t\tif result.err != nil {\n\t\t\t// return early, an error has occured!\n\t\t\treturn nil, result.err\n\t\t}\n\t\tids = append(ids, result.ids...)\n\t}\n\n\tif len(ids) <= 1 {\n\t\treturn ids, nil // nothing to do\n\t}\n\n\t// sort and dedupe\n\tsort.Strings(ids)\n\tids = dedupStrings(ids)\n\n\treturn ids, nil\n}", "func (s *Stack) ListVolumes() ([]resources.Volume, error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\n\tdefer concurrency.NewTracer(nil, \"\", true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvar vs []resources.Volume\n\terr := volumesv2.List(s.VolumeClient, volumesv2.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {\n\t\tlist, err := volumesv2.ExtractVolumes(page)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error listing volumes: volume extraction: %+v\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, vol := range list {\n\t\t\tav := resources.Volume{\n\t\t\t\tID: vol.ID,\n\t\t\t\tName: vol.Name,\n\t\t\t\tSize: vol.Size,\n\t\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\t\tState: toVolumeState(vol.Status),\n\t\t\t}\n\t\t\tvs = append(vs, av)\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil || len(vs) == 0 {\n\t\tif err != nil {\n\t\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error listing volume types: %s\", ProviderErrorToString(err)))\n\t\t}\n\t\tlog.Warnf(\"Complete volume list empty\")\n\t}\n\treturn vs, nil\n}", "func (vol *Volume) ListRequest() (ListCommand, error) 
{\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (d *VolumeDriver) List(r volume.Request) volume.Response {\n\tlog.Errorf(\"VolumeDriver List to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func GetVolumesFromTypeV2(t string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?type=\" + t\n\treturn getVolumesV2(query)\n}", "func GetVolumesFromUUIDV2(uuid string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?uuid=\" + uuid\n\treturn getVolumesV2(query)\n}", "func (c *Core) ListVolumes(labels map[string]string) ([]*types.Volume, error) {\n\tvar retVolumes = make([]*types.Volume, 0)\n\n\t// list local meta store.\n\tmetaList, err := c.store.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 
scan all drivers.\n\tlogrus.Debugf(\"probing all drivers for listing volume\")\n\tdrivers, err := driver.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx := driver.Contexts()\n\n\tvar realVolumes = map[string]*types.Volume{}\n\tvar volumeDrivers = map[string]driver.Driver{}\n\n\tfor _, dv := range drivers {\n\t\tvolumeDrivers[dv.Name(ctx)] = dv\n\n\t\td, ok := dv.(driver.Lister)\n\t\tif !ok {\n\t\t\t// not Lister, ignore it.\n\t\t\tcontinue\n\t\t}\n\t\tvList, err := d.List(ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"volume driver %s list error: %v\", dv.Name(ctx), err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, v := range vList {\n\t\t\trealVolumes[v.Name] = v\n\t\t}\n\t}\n\n\tfor name, obj := range metaList {\n\t\tv, ok := obj.(*types.Volume)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\td, ok := volumeDrivers[v.Spec.Backend]\n\t\tif !ok {\n\t\t\t// driver not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\n\t\t// the local driver and tmpfs driver\n\t\tif d.StoreMode(ctx).IsLocal() {\n\t\t\tretVolumes = append(retVolumes, v)\n\t\t\tcontinue\n\t\t}\n\n\t\trv, ok := realVolumes[name]\n\t\tif !ok {\n\t\t\t// real volume not exist, ignore it\n\t\t\tcontinue\n\t\t}\n\t\tv.Status.MountPoint = rv.Status.MountPoint\n\n\t\tdelete(realVolumes, name)\n\n\t\tretVolumes = append(retVolumes, v)\n\t}\n\n\tfor _, v := range realVolumes {\n\t\t// found new volumes, store the meta\n\t\tlogrus.Warningf(\"found new volume %s\", v.Name)\n\t\tc.store.Put(v)\n\n\t\tretVolumes = append(retVolumes, v)\n\n\t}\n\n\treturn retVolumes, nil\n}", "func (c *BlockVolumeClient) List(params *BlockVolumeParams) (*BlockVolumeList, error) {\n\tlist := &BlockVolumeList{}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/list\", params, list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}", "func (proxy *remoteDriverProxy) List() ([]*remoteVolumeDesc, error) {\n\tvar req remoteVolumeListReq\n\tvar resp remoteVolumeListResp\n\n\tif err := 
proxy.client.CallService(remoteVolumeListService, &req, &resp, true); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn nil, errors.New(resp.Err)\n\t}\n\n\treturn resp.Volumes, nil\n}", "func (d ImagefsDriver) List() (*volume.ListResponse, error) {\n\tcontainers, err := d.cli.ContainerList(context.Background(), types.ContainerListOptions{\n\t\tAll: true,\n\t\tFilters: filters.NewArgs(filters.Arg(\"label\", \"com.docker.imagefs.version\")),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tresponse := &volume.ListResponse{}\n\tfor i := range containers {\n\t\tresponse.Volumes = append(response.Volumes, &volume.Volume{\n\t\t\t// TODO(rabrams) fall back to id if no names\n\t\t\tName: containers[i].Names[0],\n\t\t})\n\t}\n\treturn response, nil\n}", "func (d *defaultDataVolumeManager) ListDataVolumes(ctx context.Context, kubeconfig []byte, listOpts ...client.ListOption) (*cdicorev1alpha1.DataVolumeList, error) {\n\tc, namespace, err := d.client.GetClient(kubeconfig)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not create kubevirt client\")\n\t}\n\n\tdvList := cdicorev1alpha1.DataVolumeList{}\n\tif err := c.List(ctx, &dvList, listOpts...); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not list DataVolumes in namespace %s\", namespace)\n\t}\n\n\treturn &dvList, nil\n}", "func NewCmdDiskList() *cobra.Command {\n\treq := base.BizClient.NewDescribeUDiskRequest()\n\ttypeMap := map[string]string{\n\t\t\"DataDisk\": \"Oridinary-Data-Disk\",\n\t\t\"SystemDisk\": \"Oridinary-System-Disk\",\n\t\t\"SSDDataDisk\": \"SSD-Data-Disk\",\n\t}\n\tarkModeMap := map[string]string{\n\t\t\"Yes\": \"true\",\n\t\t\"No\": \"false\",\n\t}\n\tcmd := &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"List udisk instance\",\n\t\tLong: \"List udisk instance\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tfor key, val := range typeMap {\n\t\t\t\tif *req.DiskType == val 
{\n\t\t\t\t\t*req.DiskType = key\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp, err := base.BizClient.DescribeUDisk(req)\n\t\t\tif err != nil {\n\t\t\t\tbase.HandleError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlist := []DiskRow{}\n\t\t\tfor _, disk := range resp.DataSet {\n\t\t\t\trow := DiskRow{\n\t\t\t\t\tResourceID: disk.UDiskId,\n\t\t\t\t\tName: disk.Name,\n\t\t\t\t\tGroup: disk.Tag,\n\t\t\t\t\tSize: fmt.Sprintf(\"%dGB\", disk.Size),\n\t\t\t\t\tType: typeMap[disk.DiskType],\n\t\t\t\t\tEnableDataArk: arkModeMap[disk.UDataArkMode],\n\t\t\t\t\tMountUHost: fmt.Sprintf(\"%s/%s\", disk.UHostName, disk.UHostIP),\n\t\t\t\t\tMountPoint: disk.DeviceName,\n\t\t\t\t\tState: disk.Status,\n\t\t\t\t\tCreationTime: base.FormatDate(disk.CreateTime),\n\t\t\t\t\tExpirationTime: base.FormatDate(disk.ExpiredTime),\n\t\t\t\t}\n\t\t\t\tif disk.UHostIP == \"\" {\n\t\t\t\t\trow.MountUHost = \"\"\n\t\t\t\t}\n\t\t\t\tlist = append(list, row)\n\t\t\t}\n\t\t\tif global.json {\n\t\t\t\tbase.PrintJSON(list)\n\t\t\t} else {\n\t\t\t\tbase.PrintTableS(list)\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\treq.UDiskId = flags.String(\"resource-id\", \"\", \"Optional. Resource ID of the udisk to search\")\n\treq.DiskType = flags.String(\"udisk-type\", \"\", \"Optional. Optional. Type of the udisk to search. 'Oridinary-Data-Disk','Oridinary-System-Disk' or 'SSD-Data-Disk'\")\n\treq.Offset = cmd.Flags().Int(\"offset\", 0, \"Optional. Offset\")\n\treq.Limit = cmd.Flags().Int(\"limit\", 50, \"Optional. 
Limit\")\n\tflags.SetFlagValues(\"udisk-type\", \"Oridinary-Data-Disk\", \"Oridinary-System-Disk\", \"SSD-Data-Disk\")\n\treturn cmd\n}", "func (o *PcloudPvminstancesVolumesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPcloudPvminstancesVolumesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPcloudPvminstancesVolumesGetBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewPcloudPvminstancesVolumesGetUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPcloudPvminstancesVolumesGetForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewPcloudPvminstancesVolumesGetNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewPcloudPvminstancesVolumesGetInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) (DiskEncryptionSetsListResponse, error) {\n\tresult := DiskEncryptionSetsListResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); 
err != nil {\n\t\treturn DiskEncryptionSetsListResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func ListVolumes(ip string) (string, error) {\n\tlog.Printf(\"Listing volumes.\")\n\treturn ssh.InvokeCommand(ip, dockercli.ListVolumes)\n}", "func CreateListManagedPrivateSpacesResponse() (response *ListManagedPrivateSpacesResponse) {\n\tresponse = &ListManagedPrivateSpacesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (cl *Client) VolumeList(ctx context.Context, vla *csp.VolumeListArgs) ([]*csp.Volume, error) {\n\tvar svc, volumeType string\n\tif vla.StorageTypeName != \"\" {\n\t\tvar obj *models.CSPStorageType\n\t\tif svc, volumeType, obj = StorageTypeToServiceVolumeType(vla.StorageTypeName); obj == nil || svc != ServiceGCE {\n\t\t\treturn nil, fmt.Errorf(\"invalid storage type\")\n\t\t}\n\t\tvolumeType = fmt.Sprintf(volTypeURL, cl.projectID, cl.attrs[AttrZone].Value, volumeType)\n\t}\n\treturn cl.gceVolumeList(ctx, vla, volumeType)\n}", "func (c *Client) ListVolumes(ctx context.Context, req *netapppb.ListVolumesRequest, opts ...gax.CallOption) *VolumeIterator {\n\treturn c.internalClient.ListVolumes(ctx, req, opts...)\n}", "func GetVolList(volumeID string) (*apis.ZFSVolumeList, error) {\n\tlistOptions := v1.ListOptions{\n\t\tLabelSelector: ZFSNodeKey + \"=\" + NodeID,\n\t}\n\n\treturn volbuilder.NewKubeclient().\n\t\tWithNamespace(OpenEBSNamespace).List(listOptions)\n\n}", "func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) {\n\tlistopt := metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.CoreV1().PersistentVolumes().List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}", "func (c *volumeCommand) listVolumes(ctx context.Context, ns 
id.Namespace, vols []string) ([]*model.Volume, error) {\n\tuseIDs, err := c.config.UseIDs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !useIDs {\n\t\treturn c.client.GetNamespaceVolumesByName(ctx, ns, vols...)\n\t}\n\n\tvolIDs := []id.Volume{}\n\tfor _, uid := range vols {\n\t\tvolIDs = append(volIDs, id.Volume(uid))\n\t}\n\n\treturn c.client.GetNamespaceVolumesByUID(ctx, ns, volIDs...)\n}", "func GetVolumesFromSvmNameV2(svm string) (VolumeV2, error) {\n\tquery := \"/api/datacenter/storage/volumes?svm.name=\" + svm\n\treturn getVolumesV2(query)\n}", "func GetServerVolumesListFailErrMocked(t *testing.T, volumesIn []*types.Volume, serverID string) []*types.Volume {\n\n\tassert := assert.New(t)\n\n\t// wire up\n\tcs := &utils.MockConcertoService{}\n\tds, err := NewServerService(cs)\n\tassert.Nil(err, \"Couldn't load server service\")\n\tassert.NotNil(ds, \"Server service not instanced\")\n\n\t// to json\n\tvIn, err := json.Marshal(volumesIn)\n\tassert.Nil(err, \"Server volume test data corrupted\")\n\n\t// call service\n\tcs.On(\"Get\", fmt.Sprintf(\"/cloud/servers/%s/volumes\", serverID)).Return(vIn, 200, fmt.Errorf(\"mocked error\"))\n\tvOut, err := ds.GetServerVolumesList(serverID)\n\n\tassert.NotNil(err, \"We are expecting an error\")\n\tassert.Nil(vOut, \"Expecting nil output\")\n\tassert.Equal(err.Error(), \"mocked error\", \"Error should be 'mocked error'\")\n\n\treturn vOut\n}", "func (a *HetznerVolumes) FindVolumes() ([]*volumes.Volume, error) {\n\tklog.V(2).Infof(\"Finding attachable etcd volumes\")\n\n\tif a.server.Datacenter == nil || a.server.Datacenter.Location == nil {\n\t\treturn nil, fmt.Errorf(\"failed to find server location for the running server\")\n\t}\n\tserverLocation := a.server.Datacenter.Location.Name\n\n\tmatchingVolumes, err := getMatchingVolumes(a.hcloudClient, a.matchNameTags)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get matching volumes: %w\", err)\n\t}\n\n\tvar localEtcdVolumes []*volumes.Volume\n\tfor _, 
volume := range matchingVolumes {\n\t\t// Only volumes from the same location can be mounted\n\t\tif volume.Location == nil {\n\t\t\tklog.Warningf(\"failed to find volume %s(%d) location\", volume.Name, volume.ID)\n\t\t\tcontinue\n\t\t}\n\t\tvolumeLocation := volume.Location.Name\n\t\tif volumeLocation != serverLocation {\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.V(2).Infof(\"Found attachable volume %s(%d) with status %q\", volume.Name, volume.ID, volume.Status)\n\n\t\tvolumeID := strconv.Itoa(volume.ID)\n\t\tlocalEtcdVolume := &volumes.Volume{\n\t\t\tProviderID: volumeID,\n\t\t\tInfo: volumes.VolumeInfo{\n\t\t\t\tDescription: a.clusterName + \"-\" + volumeID,\n\t\t\t},\n\t\t\tMountName: \"hcloud-\" + volumeID,\n\t\t\tEtcdName: \"vol-\" + volumeID,\n\t\t}\n\n\t\tif volume.Server != nil {\n\t\t\tlocalEtcdVolume.AttachedTo = strconv.Itoa(volume.Server.ID)\n\t\t\tif volume.Server.ID == a.server.ID {\n\t\t\t\tlocalEtcdVolume.LocalDevice = fmt.Sprintf(\"%s%d\", localDevicePrefix, volume.ID)\n\t\t\t}\n\t\t}\n\n\t\tlocalEtcdVolumes = append(localEtcdVolumes, localEtcdVolume)\n\t}\n\n\treturn localEtcdVolumes, nil\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}" ]
[ "0.7956895", "0.7773968", "0.71468264", "0.69334245", "0.6462499", "0.6385222", "0.63655627", "0.62483734", "0.6136864", "0.61138284", "0.5691875", "0.56875324", "0.55796784", "0.55417126", "0.5532194", "0.5531017", "0.55172753", "0.54519886", "0.5445132", "0.54347545", "0.54273874", "0.53985685", "0.5370857", "0.5362697", "0.5353439", "0.5353439", "0.5318605", "0.5273127", "0.52553964", "0.5211304", "0.5193914", "0.51924664", "0.5159051", "0.51395845", "0.5132518", "0.51154065", "0.5110307", "0.5101073", "0.50912416", "0.5083003", "0.5060407", "0.50456417", "0.50429016", "0.50337684", "0.502373", "0.5021204", "0.5005469", "0.4949835", "0.4942905", "0.49415657", "0.49178493", "0.4916887", "0.49113667", "0.49060333", "0.4902721", "0.48960295", "0.4891971", "0.4882327", "0.48779106", "0.48732373", "0.48698783", "0.48654738", "0.48590475", "0.48359704", "0.4804167", "0.48040614", "0.47905153", "0.47884682", "0.47697067", "0.47614142", "0.47531447", "0.47466597", "0.47451848", "0.4737528", "0.4737415", "0.47221786", "0.47144216", "0.470786", "0.47055054", "0.46953133", "0.46913525", "0.46900728", "0.46883193", "0.46862972", "0.4666318", "0.460308", "0.45968476", "0.45925492", "0.4591239", "0.45793727", "0.45522657", "0.45439705", "0.45380867", "0.4535334", "0.45250982", "0.45234972", "0.4518479", "0.450541", "0.45001745", "0.44983882" ]
0.8875099
0
Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest is an autogenerated conversion function.
func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error { return autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func (d *VolumeDriver) Mount(r volume.MountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Mounting volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tlog.Errorf(\"VolumeDriver Mount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn 
out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func (d *MinioDriver) Mount(r volume.MountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Mount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections > 0 {\n\t\tv.connections++\n\t\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n\t}\n\n\tif err := d.mountVolume(v); err != nil {\n\t\tglog.Warningf(\"mounting %#v volume failed: %s\", v, err.Error())\n\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t}\n\n\t// if the mount was successful, then increment the number of connections we\n\t// have to the mount.\n\tv.connections++\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Mount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tvol.mounts[req.ID] = true\n\n\treturn &volume.MountResponse{\n\t\tMountpoint: vol.path,\n\t}, nil\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (c *Controller) 
Mount(mountRequest k8sresources.FlexVolumeMountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-mount-start\")\n\tdefer c.logger.Println(\"controller-mount-end\")\n\tc.logger.Println(fmt.Sprintf(\"mountRequest [%#v]\", mountRequest))\n\tvar lnPath string\n\tattachRequest := resources.AttachRequest{Name: mountRequest.MountDevice, Host: getHost()}\n\tmountedPath, err := c.Client.Attach(attachRequest)\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to mount volume [%s], Error: %#v\", mountRequest.MountDevice, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\tif mountRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\t//For k8s 1.5, by the time we do the attach/mount, the mountDir (MountPath) is not created trying to do mount and ln will fail because the dir is not found, so we need to create the directory before continuing\n\t\tdir := filepath.Dir(mountRequest.MountPath)\n\t\tc.logger.Printf(\"mountrequest.MountPath %s\", mountRequest.MountPath)\n\t\tlnPath = mountRequest.MountPath\n\t\tk8sRequiredMountPoint := path.Join(mountRequest.MountPath, mountRequest.MountDevice)\n\t\tif _, err = os.Stat(k8sRequiredMountPoint); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\n\t\t\t\tc.logger.Printf(\"creating volume directory %s\", dir)\n\t\t\t\terr = os.MkdirAll(dir, 0777)\n\t\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\t\tmsg := fmt.Sprintf(\"Failed creating volume directory %#v\", err)\n\t\t\t\t\tc.logger.Println(msg)\n\n\t\t\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\t\t\tStatus: \"Failure\",\n\t\t\t\t\t\tMessage: msg,\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// For k8s 1.6 and later kubelet creates a folder as the MountPath, including the volume name, whenwe try to create the symlink this will fail because the same name exists. 
This is why we need to remove it before continuing.\n\t} else {\n\t\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\t\tif strings.HasPrefix(mountedPath, ubiquityMountPrefix) {\n\t\t\tlnPath = mountRequest.MountPath\n\t\t} else {\n\t\t\tlnPath, _ = path.Split(mountRequest.MountPath)\n\t\t}\n\t\tc.logger.Printf(\"removing folder %s\", mountRequest.MountPath)\n\n\t\terr = os.Remove(mountRequest.MountPath)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\tmsg := fmt.Sprintf(\"Failed removing existing volume directory %#v\", err)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\n\t\t}\n\n\t}\n\tsymLinkCommand := \"/bin/ln\"\n\targs := []string{\"-s\", mountedPath, lnPath}\n\tc.logger.Printf(fmt.Sprintf(\"creating slink from %s -> %s\", mountedPath, lnPath))\n\n\tvar stderr bytes.Buffer\n\tcmd := exec.Command(symLinkCommand, args...)\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Controller: mount failed to symlink %#v\", stderr.String())\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\n\t}\n\tmsg := fmt.Sprintf(\"Volume mounted successfully to %s\", mountedPath)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: msg,\n\t}\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) 
{\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (d *VolumeDriver) MountVolume(name string, fstype string, id string, isReadOnly bool, skipAttach bool) (string, error) {\n\tlog.Errorf(\"VolumeDriver MountVolume to be implemented\")\n\tmountpoint := getMountPoint(name)\n\treturn mountpoint, nil\n}", "func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil 
request to MountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Mounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, mountPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, mountPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmountRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tmountResp := new(volume.MountResponse)\n\tif err := json.Unmarshal(mountRespBytes, mountResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn mountResp.Mountpoint, nil\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func MountVolume(vol *apis.LVMVolume, mount *MountInfo, podLVInfo *PodLVInfo) error {\n\tvolume := vol.Spec.VolGroup + \"/\" + vol.Name\n\tmounted, err := verifyMountRequest(vol, mount.MountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mounted {\n\t\tklog.Infof(\"lvm : already mounted %s => %s\", volume, mount.MountPath)\n\t\treturn nil\n\t}\n\n\tdevicePath := DevPath + volume\n\n\terr = FormatAndMountVol(devicePath, mount)\n\tif err != nil {\n\t\treturn status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\t\"failed to format and mount the volume error: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t}\n\n\tklog.Infof(\"lvm: volume %v mounted %v fs %v\", volume, mount.MountPath, mount.FSType)\n\n\tif ioLimitsEnabled && podLVInfo != nil {\n\t\tif err := setIOLimits(vol, podLVInfo, devicePath); err != nil {\n\t\t\tklog.Warningf(\"lvm: error setting io limits: podUid %s, device 
%s, err=%v\", podLVInfo.UID, devicePath, err)\n\t\t} else {\n\t\t\tklog.Infof(\"lvm: io limits set for podUid %v, device %s\", podLVInfo.UID, devicePath)\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (o *QtreeCreateRequest) SetVolume(newValue string) *QtreeCreateRequest {\n\to.VolumePtr = &newValue\n\treturn o\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func (in *Mount) DeepCopy() *Mount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Mount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewAddUFSVolumeMountPointRequest() *AddUFSVolumeMountPointRequest {\n\treq := &AddUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn 
autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {\n\tdriver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)\n\tif err != nil || driver == nil {\n\t\tglog.Errorf(\"Failed to get portworx driver. Err: %v\", err)\n\t\treturn err\n\t}\n\n\terr = driver.Mount(m.volName, mountPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Error mounting Portworx Volume (%v) on Path (%v): %v\", m.volName, mountPath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func ValidateVolumeMount(volumeMount string) (string, error) {\n\tsrc := \"\"\n\tdest := \"\"\n\n\t// validate 'SRC[:DEST]' substring\n\tsplit := strings.Split(volumeMount, \":\")\n\tif len(split) < 1 || len(split) > 2 {\n\t\treturn \"\", fmt.Errorf(\"Invalid volume mount '%s': only one ':' allowed\", volumeMount)\n\t}\n\n\t// we only have SRC specified -> DEST = SRC\n\tif len(split) == 1 {\n\t\tsrc = split[0]\n\t\tdest = src\n\t} else {\n\t\tsrc = 
split[0]\n\t\tdest = split[1]\n\t}\n\n\t// verify that the source exists\n\tif src != \"\" {\n\t\tif _, err := os.Stat(src); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Failed to stat file/dir that you're trying to mount: '%s' in '%s'\", src, volumeMount)\n\t\t}\n\t}\n\n\t// verify that the destination is an absolute path\n\tif !strings.HasPrefix(dest, \"/\") {\n\t\treturn \"\", fmt.Errorf(\"Volume mount destination doesn't appear to be an absolute path: '%s' in '%s'\", dest, volumeMount)\n\t}\n\n\treturn fmt.Sprintf(\"%s:%s\", src, dest), nil\n}", "func (c *UFSClient) NewRemoveUFSVolumeMountPointRequest() *RemoveUFSVolumeMountPointRequest {\n\treq := &RemoveUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (m *DefaultMounter) Mount(\n\tsource string,\n\ttarget string,\n\tfstype string,\n\tflags uintptr,\n\tdata string,\n\ttimeout int,\n) error {\n\treturn syscall.Mount(source, target, fstype, flags, data)\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func GetVolumeMountFromCustomConfigSpec(cfcm *apicommonv1.CustomConfig, volumeName, volumePath, defaultSubPath string) corev1.VolumeMount {\n\tsubPath := defaultSubPath\n\tif cfcm.ConfigMap != nil && len(cfcm.ConfigMap.Items) > 0 {\n\t\tsubPath = cfcm.ConfigMap.Items[0].Path\n\t}\n\n\treturn corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tMountPath: volumePath,\n\t\tSubPath: subPath,\n\t\tReadOnly: true,\n\t}\n}", "func mountVolume(ctx context.Context, src, dest, vID string, size int64, readOnly bool) error {\n\tglog.V(5).Infof(\"[mountVolume] source: %v destination: %v\", src, dest)\n\tif err := 
SafeMount(src, dest, string(FSTypeXFS),\n\t\tfunc() []MountOption {\n\t\t\tmOpts := []MountOption{\n\t\t\t\tMountOptionMSBind,\n\t\t\t}\n\t\t\tif readOnly {\n\t\t\t\tmOpts = append(mOpts, MountOptionMSReadOnly)\n\t\t\t}\n\t\t\treturn mOpts\n\t\t}(), []string{quotaOption}); err != nil {\n\t\treturn err\n\t}\n\n\tif size > 0 {\n\t\txfsQuota := &xfs.XFSQuota{\n\t\t\tPath: dest,\n\t\t\tProjectID: vID,\n\t\t}\n\t\tif err := xfsQuota.SetQuota(ctx, size); err != nil {\n\t\t\treturn status.Errorf(codes.Internal, \"Error while setting xfs limits: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (proxy *remoteDriverProxy) Mount(name, id string) (string, error) {\n\tvar req = remoteVolumeMountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeMountResp\n\n\tif err := proxy.client.CallService(remoteVolumeMountService, &req, &resp, true); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn \"\", errors.New(resp.Err)\n\t}\n\n\treturn resp.Mountpoint, nil\n}", "func (d ImagefsDriver) Mount(r *volume.MountRequest) (*volume.MountResponse, error) {\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\terr = d.cli.ContainerStart(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\ttypes.ContainerStartOptions{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tvar _ret *volume.MountResponse\n\tret, err := 
d.Path(&volume.PathRequest{Name: r.Name})\n\tif ret != nil {\n\t\t_ret = &volume.MountResponse{\n\t\t\tMountpoint: ret.Mountpoint,\n\t\t}\n\t}\n\treturn _ret, err\n}", "func (s *VolumeListener) Attach(inctx context.Context, in *protocol.VolumeAttachmentRequest) (_ *googleprotobuf.Empty, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot attach volume\")\n\n\tempty := &googleprotobuf.Empty{}\n\tif s == nil {\n\t\treturn empty, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tvolumeRef, _ := srvutils.GetReference(in.GetVolume())\n\tif volumeRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for volume\")\n\t}\n\thostRef, _ := srvutils.GetReference(in.GetHost())\n\tif hostRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for host\")\n\t}\n\tmountPath := in.GetMountPath()\n\n\tfilesystem := in.GetFormat()\n\tdoNotFormat := in.DoNotFormat\n\tdoNotMount := in.DoNotMount\n\n\tjob, xerr := PrepareJob(inctx, in.GetVolume().GetTenantId(), fmt.Sprintf(\"/volume/%s/host/%s/attach\", volumeRef, hostRef))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\thandler := VolumeHandler(job)\n\tif xerr = handler.Attach(volumeRef, hostRef, mountPath, filesystem, doNotFormat, doNotMount); xerr != nil {\n\t\treturn empty, xerr\n\t}\n\n\treturn empty, nil\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (in *FileMount) DeepCopy() *FileMount {\n\tif in == nil 
{\n\t\treturn nil\n\t}\n\tout := new(FileMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func (m *Mounter) Mount(\n\tminor int,\n\tdevPath, path, fs string,\n\tflags uintptr,\n\tdata string,\n\ttimeout int,\n\topts map[string]string,\n) error {\n\t// device gets overwritten if opts specifies fuse mount with\n\t// options.OptionsDeviceFuseMount.\n\tdevice := devPath\n\tif value, ok := opts[options.OptionsDeviceFuseMount]; ok {\n\t\t// fuse mounts show-up with this key as device.\n\t\tdevice = value\n\t}\n\n\tpath = normalizeMountPath(path)\n\tif len(m.allowedDirs) > 0 {\n\t\tfoundPrefix := false\n\t\tfor _, allowedDir := range m.allowedDirs {\n\t\t\tif strings.Contains(path, allowedDir) {\n\t\t\t\tfoundPrefix = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundPrefix {\n\t\t\treturn ErrMountpathNotAllowed\n\t\t}\n\t}\n\tdev, ok := m.HasTarget(path)\n\tif ok && dev != device {\n\t\tlogrus.Warnf(\"cannot mount %q, device %q is mounted at %q\", device, dev, path)\n\t\treturn ErrExist\n\t}\n\tm.Lock()\n\tinfo, ok := m.mounts[device]\n\tif !ok {\n\t\tinfo = &Info{\n\t\t\tDevice: device,\n\t\t\tMountpoint: make([]*PathInfo, 0),\n\t\t\tMinor: minor,\n\t\t\tFs: fs,\n\t\t}\n\t}\n\tm.mounts[device] = info\n\tm.Unlock()\n\tinfo.Lock()\n\tdefer info.Unlock()\n\n\t// Validate input params\n\t// FS check is not needed if it is a bind mount\n\tif !strings.HasPrefix(info.Fs, fs) && (flags&syscall.MS_BIND) != syscall.MS_BIND {\n\t\tlogrus.Warnf(\"%s Existing mountpoint has fs %q cannot change to %q\",\n\t\t\tdevice, info.Fs, fs)\n\t\treturn ErrEinval\n\t}\n\n\t// Try to find the mountpoint. 
If it already exists, do nothing\n\tfor _, p := range info.Mountpoint {\n\t\tif p.Path == path {\n\t\t\tlogrus.Infof(\"%q mountpoint for device %q already exists\",\n\t\t\t\tdevice, path)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\th := m.kl.Acquire(path)\n\tdefer m.kl.Release(&h)\n\n\t// Record previous state of the path\n\tpathWasReadOnly := m.isPathSetImmutable(path)\n\tvar (\n\t\tisBindMounted bool = false\n\t\tbindMountPath string\n\t)\n\n\tif err := m.makeMountpathReadOnly(path); err != nil {\n\t\tif strings.Contains(err.Error(), \"Inappropriate ioctl for device\") {\n\t\t\tlogrus.Warnf(\"failed to make %s readonly. Err: %v\", path, err)\n\t\t\t// If we cannot chattr the original mount path, we bind mount it to\n\t\t\t// a path in osd mount path and then chattr it\n\t\t\tif bindMountPath, err = m.bindMountOriginalPath(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tisBindMounted = true\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to make %s readonly. Err: %v\", path, err)\n\t\t}\n\t}\n\n\t// The device is not mounted at path, mount it and add to its mountpoints.\n\tif err := m.mountImpl.Mount(devPath, path, fs, flags, data, timeout); err != nil {\n\t\t// Rollback only if was writeable\n\t\tif !pathWasReadOnly {\n\t\t\tif e := m.makeMountpathWriteable(path); e != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to make %v writeable during rollback. 
Err: %v Mount err: %v\",\n\t\t\t\t\tpath, e, err)\n\t\t\t}\n\t\t\tif isBindMounted {\n\t\t\t\tif cleanupErr := m.cleanupBindMount(path, bindMountPath, err); cleanupErr != nil {\n\t\t\t\t\treturn cleanupErr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t}\n\n\tinfo.Mountpoint = append(info.Mountpoint, &PathInfo{Path: path})\n\n\treturn nil\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (d *MinioDriver) mountVolume(volume *minioVolume) error {\n\n\tminioPath := fmt.Sprintf(\"%s/%s\", d.server, volume.bucketName)\n\n\t//NOTE: make this adjustable in the future for https if secure is passed.\n\tcmd := fmt.Sprintf(\"mount -t minfs http://%s %s\", minioPath, volume.mountpoint)\n\tif err := provisionConfig(d); err != nil {\n\t\treturn err\n\t}\n\n\tout, err := exec.Command(\"sh\", \"-c\", cmd).Output()\n\tif err != nil {\n\t\tglog.Warningf(\"Error while executing mount command (%s): %s\", cmd, err)\n\t\tglog.V(1).Infof(\"Dump output of command: %#v\", out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func NewMountSpec(mountType string, path string) (*MountSpec, error) {\n\tmt := MountSpecType(mountType)\n\tswitch mt {\n\tcase MountSpecDev, MountSpecLib, MountSpecSym, MountSpecDir:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected mount type: %v\", mt)\n\t}\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid path: %v\", path)\n\t}\n\n\tmount := MountSpec{\n\t\tType: mt,\n\t\tPath: path,\n\t}\n\n\treturn &mount, nil\n}", "func (m *Mount) Mount(target string) error {\n\treturn 
ErrNotImplementOnUnix\n}", "func parseMountInfoLine(line string) *Mount {\n\tfields := strings.Split(line, \" \")\n\tif len(fields) < 10 {\n\t\treturn nil\n\t}\n\n\t// Count the optional fields. In case new fields are appended later,\n\t// don't simply assume that n == len(fields) - 4.\n\tn := 6\n\tfor fields[n] != \"-\" {\n\t\tn++\n\t\tif n >= len(fields) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif n+3 >= len(fields) {\n\t\treturn nil\n\t}\n\n\tvar mnt *Mount = &Mount{}\n\tvar err error\n\tmnt.DeviceNumber, err = newDeviceNumberFromString(fields[2])\n\tif err != nil {\n\t\treturn nil\n\t}\n\tmnt.BindMnt = unescapeString(fields[3]) != \"/\"\n\tmnt.Path = unescapeString(fields[4])\n\tfor _, opt := range strings.Split(fields[5], \",\") {\n\t\tif opt == \"ro\" {\n\t\t\tmnt.ReadOnly = true\n\t\t}\n\t}\n\tmnt.FilesystemType = unescapeString(fields[n+1])\n\tmnt.Device = getDeviceName(mnt.DeviceNumber)\n\treturn mnt\n}", "func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, cgroupsInt interface{}) (*fs.Inode, error) {\n\tpanic(\"unimplemented\")\n}", "func mount(device, target, mType, options string) error {\n\tflag, _ := parseOptions(options)\n\tif flag&REMOUNT != REMOUNT {\n\t\tif mounted, err := mounted(target); err != nil || mounted {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn forceMount(device, target, mType, options)\n}", "func (in *VpVolumeAndMount) DeepCopy() *VpVolumeAndMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VpVolumeAndMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Mount(mountpoint string, fs string, device string, isReadOnly bool) error {\n\tlog.WithFields(log.Fields{\n\t\t\"device\": device,\n\t\t\"mountpoint\": mountpoint,\n\t}).Debug(\"Calling syscall.Mount() \")\n\n\tflags := 0\n\tif isReadOnly {\n\t\tflags = syscall.MS_RDONLY\n\t}\n\terr := syscall.Mount(device, mountpoint, fs, uintptr(flags), \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to mount device %s at %s: %s\", device, mountpoint, err)\n\t}\n\treturn nil\n}", "func (d *fsStorage) Mount(volume *Volume) error {\n\treturn nil\n}", "func (d *driverInfo) Mount(volume *Volume) error {\n\t// don't mount twice\n\tif err := volume.CheckUnmounted(); err != nil {\n\t\treturn err\n\t}\n\n\tvolume.MountPath = d.getMountPath(volume.Name)\n\texists, err := fs.DirExists(volume.MountPath)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing mount path '%s': %v\", volume.MountPath, err)\n\t}\n\n\tif !exists {\n\t\tif err := fs.CreateDir(volume.MountPath, true, 0700); err != nil {\n\t\t\treturn fmt.Errorf(\"error creating mount path '%s': %v\", volume.MountPath, err)\n\t\t}\n\t}\n\n\tif err := d.storage.Mount(volume); err != nil {\n\t\tvolume.MountPath = \"\"\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func (o *Filesystem) Mount(ctx context.Context, options map[string]dbus.Variant) (mountPath string, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Mount\", 0, options).Store(&mountPath)\n\treturn\n}", "func (vm *ContainerVM) overlayMount() error {\n\tvm.effectivePath = filepath.Join(vm.instancePath, \"fs\")\n\tworkPath := filepath.Join(vm.instancePath, \"fs_work\")\n\n\terr := os.MkdirAll(vm.effectivePath, 0755)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(workPath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the overlay mountpoint\n\targs := []string{\n\t\t\"mount\",\n\t\t\"-t\",\n\t\t\"overlay\",\n\t\tfmt.Sprintf(\"megamount_%v\", vm.ID),\n\t\t\"-o\",\n\t\tfmt.Sprintf(\"lowerdir=%v,upperdir=%v,workdir=%v\", vm.FSPath, vm.effectivePath, workPath),\n\t\tvm.effectivePath,\n\t}\n\tlog.Debug(\"mounting overlay: %v\", args)\n\tout, err := processWrapper(args...)\n\tif err != nil {\n\t\tlog.Error(\"overlay mount: %v %v\", err, out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: 
s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func Mount(d Ploop, p *MountParam) (string, error) {\n\tvar a C.struct_ploop_mount_param\n\tvar device string\n\n\tif p.uuid != \"\" {\n\t\ta.guid = C.CString(p.uuid)\n\t\tdefer cfree(a.guid)\n\t}\n\tif p.target != \"\" {\n\t\ta.target = C.CString(p.target)\n\t\tdefer cfree(a.target)\n\t}\n\n\t// mount_data should not be NULL\n\ta.mount_data = C.CString(p.data)\n\tdefer cfree(a.mount_data)\n\n\ta.flags = C.int(p.flags)\n\ta.ro = bool2cint(p.readonly)\n\ta.fsck = bool2cint(p.fsck)\n\ta.quota = bool2cint(p.quota)\n\n\tret := C.ploop_mount_image(d.d, &a)\n\tif ret == 0 {\n\t\tdevice = 
C.GoString(&a.device[0])\n\t\t// TODO? fsck_code = C.GoString(a.fsck_rc)\n\t}\n\treturn device, mkerr(ret)\n}", "func (v *Volume) mount() error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\t// If the count is non-zero, the volume is already mounted.\n\t// Nothing to do.\n\tif v.state.MountCount > 0 {\n\t\tv.state.MountCount++\n\t\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\t\treturn v.save()\n\t}\n\n\t// Volume plugins implement their own mount counter, based on the ID of\n\t// the mounting container. But we already have one, and honestly I trust\n\t// ours more. So hardcode container ID to something reasonable, and use\n\t// the same one for everything.\n\tif v.UsesVolumeDriver() {\n\t\tif v.plugin == nil {\n\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t}\n\n\t\treq := new(pluginapi.MountRequest)\n\t\treq.Name = v.Name()\n\t\treq.ID = pseudoCtrID\n\t\tmountPoint, err := v.plugin.MountVolume(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tv.state.MountCount++\n\t\tv.state.MountPoint = mountPoint\n\t\treturn v.save()\n\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\tmountPoint, err := v.runtime.storageService.MountContainerImage(v.config.StorageID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mounting volume %s image failed: %w\", v.Name(), err)\n\t\t}\n\n\t\tv.state.MountCount++\n\t\tv.state.MountPoint = mountPoint\n\t\treturn v.save()\n\t}\n\n\tvolDevice := v.config.Options[\"device\"]\n\tvolType := v.config.Options[\"type\"]\n\tvolOptions := v.config.Options[\"o\"]\n\n\t// Some filesystems (tmpfs) don't have a device, but we still need to\n\t// give the kernel something.\n\tif volDevice == \"\" && volType != \"\" {\n\t\tvolDevice = volType\n\t}\n\n\t// We need to use the actual 
mount command.\n\t// Convincing unix.Mount to use the same semantics as the mount command\n\t// itself seems prohibitively difficult.\n\t// TODO: might want to cache this path in the runtime?\n\tmountPath, err := exec.LookPath(\"mount\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"locating 'mount' binary: %w\", err)\n\t}\n\tmountArgs := []string{}\n\tif volOptions != \"\" {\n\t\tmountArgs = append(mountArgs, \"-o\", volOptions)\n\t}\n\tswitch volType {\n\tcase \"\":\n\tcase define.TypeBind:\n\t\tmountArgs = append(mountArgs, \"-o\", volType)\n\tdefault:\n\t\tmountArgs = append(mountArgs, \"-t\", volType)\n\t}\n\n\tmountArgs = append(mountArgs, volDevice, v.config.MountPoint)\n\tmountCmd := exec.Command(mountPath, mountArgs...)\n\n\tlogrus.Debugf(\"Running mount command: %s %s\", mountPath, strings.Join(mountArgs, \" \"))\n\tif output, err := mountCmd.CombinedOutput(); err != nil {\n\t\tlogrus.Debugf(\"Mount %v failed with %v\", mountCmd, err)\n\t\treturn errors.New(string(output))\n\t}\n\n\tlogrus.Debugf(\"Mounted volume %s\", v.Name())\n\n\t// Increment the mount counter\n\tv.state.MountCount++\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\treturn v.save()\n}", "func (*MountRequest) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{2}\n}", "func (osh *SystemHandler) Mount(source string, target string, fsType string, flags uintptr, data string) error {\n\treturn syscall.Mount(source, target, fsType, flags, data)\n}", "func (o *QtreeCreateRequest) Volume() string {\n\tvar r string\n\tif o.VolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.VolumePtr\n\treturn r\n}", "func NewMountSpecFromLine(line string) (*MountSpec, error) {\n\tparts := strings.SplitN(strings.TrimSpace(line), \",\", 2)\n\tif len(parts) < 2 {\n\t\treturn nil, fmt.Errorf(\"failed to parse line: %v\", line)\n\t}\n\tmountType := strings.TrimSpace(parts[0])\n\tpath := strings.TrimSpace(parts[1])\n\n\treturn 
NewMountSpec(mountType, path)\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func Mount(dev, path, fsType, data string, flags uintptr) error {\n\tif err := unix.Mount(dev, path, fsType, flags, data); err != nil {\n\t\treturn fmt.Errorf(\"Mount %q on %q type %q flags %x: %v\",\n\t\t\tdev, path, fsType, flags, err)\n\t}\n\treturn nil\n}", "func (d *Driver) internalMount(ctx context.Context, vol *smbVolume, volCap *csi.VolumeCapability, secrets map[string]string) error {\n\tstagingPath := 
getInternalMountPath(d.workingMountDir, vol)\n\n\tif volCap == nil {\n\t\tvolCap = &csi.VolumeCapability{\n\t\t\tAccessType: &csi.VolumeCapability_Mount{\n\t\t\t\tMount: &csi.VolumeCapability_MountVolume{},\n\t\t\t},\n\t\t}\n\t}\n\n\tklog.V(4).Infof(\"internally mounting %v at %v\", vol.source, stagingPath)\n\t_, err := d.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{\n\t\tStagingTargetPath: stagingPath,\n\t\tVolumeContext: map[string]string{\n\t\t\tsourceField: vol.source,\n\t\t},\n\t\tVolumeCapability: volCap,\n\t\tVolumeId: vol.id,\n\t\tSecrets: secrets,\n\t})\n\treturn err\n}", "func (in *DataMount) DeepCopy() *DataMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DataMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func From(vol *apis.ZFSVolume) *ZFSVolume {\n\treturn &ZFSVolume{\n\t\tObject: vol,\n\t}\n}", "func (c *Controller) MountDevice(mountDeviceRequest k8sresources.FlexVolumeMountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-MountDevice-start\")\n\tdefer c.logger.Println(\"controller-MountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func NewMountCapability(\n\tmode csi.VolumeCapability_AccessMode_Mode,\n\tfsType string,\n\tmountFlags []string) *csi.VolumeCapability {\n\n\treturn &csi.VolumeCapability{\n\t\tAccessMode: &csi.VolumeCapability_AccessMode{\n\t\t\tMode: mode,\n\t\t},\n\t\tAccessType: &csi.VolumeCapability_Mount{\n\t\t\tMount: &csi.VolumeCapability_MountVolume{\n\t\t\t\tFsType: fsType,\n\t\t\t\tMountFlags: mountFlags,\n\t\t\t},\n\t\t},\n\t}\n}", "func DiskMount(srcPath string, dstPath string, readonly bool, recursive bool, propagation string, mountOptions []string, fsName string) error {\n\tvar err error\n\n\t// Prepare the mount flags\n\tflags := 0\n\tif readonly {\n\t\tflags |= unix.MS_RDONLY\n\t}\n\n\t// Detect the filesystem\n\tif fsName == \"none\" {\n\t\tflags |= unix.MS_BIND\n\t}\n\n\tif propagation != \"\" {\n\t\tswitch 
propagation {\n\t\tcase \"private\":\n\t\t\tflags |= unix.MS_PRIVATE\n\t\tcase \"shared\":\n\t\t\tflags |= unix.MS_SHARED\n\t\tcase \"slave\":\n\t\t\tflags |= unix.MS_SLAVE\n\t\tcase \"unbindable\":\n\t\t\tflags |= unix.MS_UNBINDABLE\n\t\tcase \"rprivate\":\n\t\t\tflags |= unix.MS_PRIVATE | unix.MS_REC\n\t\tcase \"rshared\":\n\t\t\tflags |= unix.MS_SHARED | unix.MS_REC\n\t\tcase \"rslave\":\n\t\t\tflags |= unix.MS_SLAVE | unix.MS_REC\n\t\tcase \"runbindable\":\n\t\t\tflags |= unix.MS_UNBINDABLE | unix.MS_REC\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Invalid propagation mode %q\", propagation)\n\t\t}\n\t}\n\n\tif recursive {\n\t\tflags |= unix.MS_REC\n\t}\n\n\t// Mount the filesystem\n\terr = unix.Mount(srcPath, dstPath, fsName, uintptr(flags), strings.Join(mountOptions, \",\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to mount %q at %q with filesystem %q: %w\", srcPath, dstPath, fsName, err)\n\t}\n\n\t// Remount bind mounts in readonly mode if requested\n\tif readonly == true && flags&unix.MS_BIND == unix.MS_BIND {\n\t\tflags = unix.MS_RDONLY | unix.MS_BIND | unix.MS_REMOUNT\n\t\terr = unix.Mount(\"\", dstPath, fsName, uintptr(flags), \"\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to mount %q in readonly mode: %w\", dstPath, err)\n\t\t}\n\t}\n\n\tflags = unix.MS_REC | unix.MS_SLAVE\n\terr = unix.Mount(\"\", dstPath, \"\", uintptr(flags), \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to make mount %q private: %w\", dstPath, err)\n\t}\n\n\treturn nil\n}", "func FormatAndMountVol(devicePath string, mountInfo *MountInfo) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\terr := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to mount volume %s [%s] to %s, error %v\",\n\t\t\tdevicePath, mountInfo.FSType, mountInfo.MountPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\treturn nil\n}", 
"func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (img *Image) Mount(mountPoint, fs string, flags uintptr, data string) error {\n\treturn devMount(img, mountPoint, fs, flags, data)\n}", "func parseMountEntry(entry string) (*mount, error) {\n\tif strings.HasPrefix(entry, \"#\") { // The entry is a comment.\n\t\treturn nil, nil\n\t}\n\tparts := strings.Split(entry, \" \")\n\tif len(parts) < 4 {\n\t\treturn nil, fmt.Errorf(\"invalid format: expected at least 4 space-separated columns\")\n\t}\n\t// There may be 4 escaped characters (space (\\040), tab (\\011), newline (\\012)\n\t// and backslash (\\134)), so we unescape them if necessary.\n\tunescape := strings.NewReplacer(\n\t\t`\\040`, \"\\040\",\n\t\t`\\011`, \"\\011\",\n\t\t`\\012`, \"\\012\",\n\t\t`\\134`, \"\\134\",\n\t)\n\treturn &mount{\n\t\tdevice: unescape.Replace(parts[0]),\n\t\tmountPoint: unescape.Replace(parts[1]),\n\t\tfsType: unescape.Replace(parts[2]),\n\t\toptions: unescape.Replace(parts[3]),\n\t}, nil\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server 
and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. 
Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func NewVolume(opts *xmsv3.PostVolumesReq) (resp *xmsv3.PostVolumesResp, err error) {\n\treturn xms.PostVolumes(opts)\n}", "func (m *ServiceBindingVolumeMount) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateContainerDir(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDeviceType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDriver(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func newMountEventFromMountInfo(mnt *mountinfo.Info) (*model.MountEvent, error) {\n\tgroupID, err := parseGroupID(mnt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create a MountEvent out of the parsed MountInfo\n\treturn &model.MountEvent{\n\t\tParentMountID: uint32(mnt.Parent),\n\t\tMountPointStr: mnt.Mountpoint,\n\t\tRootStr: mnt.Root,\n\t\tMountID: uint32(mnt.ID),\n\t\tGroupID: groupID,\n\t\tDevice: uint32(unix.Mkdev(uint32(mnt.Major), uint32(mnt.Minor))),\n\t\tFSType: mnt.FSType,\n\t}, nil\n}", "func GetVolumeMountWithSubPath(volumeName, volumePath, subPath string) corev1.VolumeMount {\n\treturn corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tMountPath: volumePath,\n\t\tSubPath: subPath,\n\t\tReadOnly: true,\n\t}\n}", "func (z *zfsctl) Mount(ctx context.Context) *execute {\n\targs := []string{\"mount\"}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func sysMount(device, target, 
mType string, flag uintptr, data string) error {\n\tif err := syscall.Mount(device, target, mType, flag, data); err != nil {\n\t\treturn err\n\t}\n\n\t// If we have a bind mount or remount, remount...\n\tif flag&syscall.MS_BIND == syscall.MS_BIND &&\n\t\tflag&syscall.MS_RDONLY == syscall.MS_RDONLY {\n\t\treturn syscall.Mount(\n\t\t\tdevice, target, mType, flag|syscall.MS_REMOUNT, data)\n\t}\n\treturn nil\n}", "func (in *Volume) DeepCopy() *Volume {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Volume)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Volume) DeepCopy() *Volume {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Volume)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Volume) DeepCopy() *Volume {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Volume)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Volume) DeepCopy() *Volume {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Volume)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Volume) DeepCopy() *Volume {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Volume)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (fs *FS) BindMount(\n\tctx context.Context,\n\tsource, target string,\n\toptions ...string) error {\n\n\tif options == nil {\n\t\toptions = []string{\"bind\"}\n\t} else {\n\t\toptions = append(options, \"bind\")\n\t}\n\treturn fs.mount(ctx, source, target, \"\", options...)\n}", "func (in *DroidVirtVolumeSpec) DeepCopy() *DroidVirtVolumeSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DroidVirtVolumeSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretEngineMountSpec) DeepCopy() *SecretEngineMountSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretEngineMountSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *HelmRequestSpec) DeepCopy() *HelmRequestSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(HelmRequestSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *UFSClient) 
NewCreateUFSVolumeRequest() *CreateUFSVolumeRequest {\n\treq := &CreateUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}" ]
[ "0.70171463", "0.70161045", "0.6775354", "0.6216111", "0.61736137", "0.6101995", "0.5975546", "0.5975546", "0.5975546", "0.5975546", "0.58994436", "0.5708777", "0.566498", "0.5648641", "0.5639586", "0.53766435", "0.53567606", "0.53019917", "0.52746314", "0.525937", "0.52591425", "0.51558894", "0.5149465", "0.51069486", "0.5085002", "0.5072101", "0.5061847", "0.5036119", "0.502737", "0.5026241", "0.50224173", "0.494725", "0.494414", "0.49236506", "0.48954508", "0.48768172", "0.48372734", "0.48371443", "0.4825665", "0.47786227", "0.47717598", "0.4763601", "0.4731736", "0.47301048", "0.47123125", "0.47115257", "0.47103554", "0.47087938", "0.46904686", "0.46875277", "0.46632442", "0.46624896", "0.46457386", "0.4624751", "0.46187523", "0.46143508", "0.46128365", "0.46124032", "0.460296", "0.4600415", "0.4554391", "0.45412517", "0.45278013", "0.45218378", "0.45141917", "0.45094743", "0.4508572", "0.45060843", "0.45042554", "0.44874236", "0.44870383", "0.4476752", "0.44618747", "0.4441789", "0.44381768", "0.4437602", "0.44305497", "0.44213697", "0.44004107", "0.43918693", "0.43854153", "0.43794236", "0.4373643", "0.43734983", "0.43711007", "0.43615577", "0.43550172", "0.43542847", "0.43542063", "0.43478718", "0.4347557", "0.4347557", "0.4347557", "0.4347557", "0.4347557", "0.43367505", "0.43221587", "0.43098402", "0.42994663", "0.42989007" ]
0.87418956
0
Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest is an autogenerated conversion function.
func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error { return autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func 
Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := 
new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *Controller) Mount(mountRequest k8sresources.FlexVolumeMountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-mount-start\")\n\tdefer c.logger.Println(\"controller-mount-end\")\n\tc.logger.Println(fmt.Sprintf(\"mountRequest [%#v]\", mountRequest))\n\tvar lnPath string\n\tattachRequest := resources.AttachRequest{Name: mountRequest.MountDevice, Host: getHost()}\n\tmountedPath, err := c.Client.Attach(attachRequest)\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to mount volume [%s], Error: %#v\", mountRequest.MountDevice, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\tif mountRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\t//For k8s 1.5, by the time we do the attach/mount, the mountDir (MountPath) is not created trying to do mount and ln will fail because the dir is not found, so we need to create the directory before continuing\n\t\tdir := filepath.Dir(mountRequest.MountPath)\n\t\tc.logger.Printf(\"mountrequest.MountPath %s\", mountRequest.MountPath)\n\t\tlnPath = mountRequest.MountPath\n\t\tk8sRequiredMountPoint := path.Join(mountRequest.MountPath, mountRequest.MountDevice)\n\t\tif _, err = os.Stat(k8sRequiredMountPoint); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\n\t\t\t\tc.logger.Printf(\"creating volume directory %s\", dir)\n\t\t\t\terr = os.MkdirAll(dir, 0777)\n\t\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\t\tmsg := fmt.Sprintf(\"Failed creating volume directory %#v\", err)\n\t\t\t\t\tc.logger.Println(msg)\n\n\t\t\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\t\t\tStatus: \"Failure\",\n\t\t\t\t\t\tMessage: msg,\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// For k8s 1.6 and later kubelet creates a folder as the MountPath, including the volume name, whenwe try to create the symlink this will fail because the same name exists. 
This is why we need to remove it before continuing.\n\t} else {\n\t\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\t\tif strings.HasPrefix(mountedPath, ubiquityMountPrefix) {\n\t\t\tlnPath = mountRequest.MountPath\n\t\t} else {\n\t\t\tlnPath, _ = path.Split(mountRequest.MountPath)\n\t\t}\n\t\tc.logger.Printf(\"removing folder %s\", mountRequest.MountPath)\n\n\t\terr = os.Remove(mountRequest.MountPath)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\tmsg := fmt.Sprintf(\"Failed removing existing volume directory %#v\", err)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\n\t\t}\n\n\t}\n\tsymLinkCommand := \"/bin/ln\"\n\targs := []string{\"-s\", mountedPath, lnPath}\n\tc.logger.Printf(fmt.Sprintf(\"creating slink from %s -> %s\", mountedPath, lnPath))\n\n\tvar stderr bytes.Buffer\n\tcmd := exec.Command(symLinkCommand, args...)\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Controller: mount failed to symlink %#v\", stderr.String())\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\n\t}\n\tmsg := fmt.Sprintf(\"Volume mounted successfully to %s\", mountedPath)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: msg,\n\t}\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func 
Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (d *VolumeDriver) Mount(r volume.MountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Mounting volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tlog.Errorf(\"VolumeDriver Mount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn 
autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func ValidateVolumeMount(volumeMount string) (string, error) {\n\tsrc := \"\"\n\tdest := \"\"\n\n\t// validate 'SRC[:DEST]' substring\n\tsplit := strings.Split(volumeMount, \":\")\n\tif len(split) < 1 || len(split) > 2 {\n\t\treturn \"\", fmt.Errorf(\"Invalid volume mount '%s': only one ':' allowed\", volumeMount)\n\t}\n\n\t// we only have SRC specified -> DEST = SRC\n\tif len(split) == 1 {\n\t\tsrc = split[0]\n\t\tdest = src\n\t} else {\n\t\tsrc = split[0]\n\t\tdest = split[1]\n\t}\n\n\t// verify that the source exists\n\tif src != \"\" {\n\t\tif _, err := os.Stat(src); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"Failed to stat file/dir that you're trying to mount: '%s' in '%s'\", src, volumeMount)\n\t\t}\n\t}\n\n\t// verify that the destination is an absolute path\n\tif !strings.HasPrefix(dest, \"/\") {\n\t\treturn \"\", fmt.Errorf(\"Volume mount destination doesn't appear to be an absolute path: '%s' in '%s'\", dest, volumeMount)\n\t}\n\n\treturn fmt.Sprintf(\"%s:%s\", src, dest), nil\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func MountVolume(vol *apis.LVMVolume, mount *MountInfo, podLVInfo *PodLVInfo) error {\n\tvolume := vol.Spec.VolGroup + \"/\" + vol.Name\n\tmounted, err := verifyMountRequest(vol, mount.MountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mounted {\n\t\tklog.Infof(\"lvm : already mounted %s => %s\", volume, mount.MountPath)\n\t\treturn nil\n\t}\n\n\tdevicePath := DevPath + volume\n\n\terr = 
FormatAndMountVol(devicePath, mount)\n\tif err != nil {\n\t\treturn status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\t\"failed to format and mount the volume error: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t}\n\n\tklog.Infof(\"lvm: volume %v mounted %v fs %v\", volume, mount.MountPath, mount.FSType)\n\n\tif ioLimitsEnabled && podLVInfo != nil {\n\t\tif err := setIOLimits(vol, podLVInfo, devicePath); err != nil {\n\t\t\tklog.Warningf(\"lvm: error setting io limits: podUid %s, device %s, err=%v\", podLVInfo.UID, devicePath, err)\n\t\t} else {\n\t\t\tklog.Infof(\"lvm: io limits set for podUid %v, device %s\", podLVInfo.UID, devicePath)\n\t\t}\n\t}\n\n\treturn nil\n}", "func parseMountInfoLine(line string) *Mount {\n\tfields := strings.Split(line, \" \")\n\tif len(fields) < 10 {\n\t\treturn nil\n\t}\n\n\t// Count the optional fields. In case new fields are appended later,\n\t// don't simply assume that n == len(fields) - 4.\n\tn := 6\n\tfor fields[n] != \"-\" {\n\t\tn++\n\t\tif n >= len(fields) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif n+3 >= len(fields) {\n\t\treturn nil\n\t}\n\n\tvar mnt *Mount = &Mount{}\n\tvar err error\n\tmnt.DeviceNumber, err = newDeviceNumberFromString(fields[2])\n\tif err != nil {\n\t\treturn nil\n\t}\n\tmnt.BindMnt = unescapeString(fields[3]) != \"/\"\n\tmnt.Path = unescapeString(fields[4])\n\tfor _, opt := range strings.Split(fields[5], \",\") {\n\t\tif opt == \"ro\" {\n\t\t\tmnt.ReadOnly = true\n\t\t}\n\t}\n\tmnt.FilesystemType = unescapeString(fields[n+1])\n\tmnt.Device = getDeviceName(mnt.DeviceNumber)\n\treturn mnt\n}", "func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Mount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tvol.mounts[req.ID] = true\n\n\treturn 
&volume.MountResponse{\n\t\tMountpoint: vol.path,\n\t}, nil\n}", "func Convert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in *impl.CreateSymlinkRequest, out *v2alpha1.CreateSymlinkRequest) error {\n\treturn autoConvert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in, out)\n}", "func (s *VolumeListener) Attach(inctx context.Context, in *protocol.VolumeAttachmentRequest) (_ *googleprotobuf.Empty, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot attach volume\")\n\n\tempty := &googleprotobuf.Empty{}\n\tif s == nil {\n\t\treturn empty, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tvolumeRef, _ := srvutils.GetReference(in.GetVolume())\n\tif volumeRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for volume\")\n\t}\n\thostRef, _ := srvutils.GetReference(in.GetHost())\n\tif hostRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for host\")\n\t}\n\tmountPath := in.GetMountPath()\n\n\tfilesystem := in.GetFormat()\n\tdoNotFormat := in.DoNotFormat\n\tdoNotMount := in.DoNotMount\n\n\tjob, xerr := PrepareJob(inctx, in.GetVolume().GetTenantId(), fmt.Sprintf(\"/volume/%s/host/%s/attach\", volumeRef, hostRef))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\thandler := VolumeHandler(job)\n\tif xerr = handler.Attach(volumeRef, hostRef, mountPath, filesystem, doNotFormat, doNotMount); xerr != nil {\n\t\treturn empty, xerr\n\t}\n\n\treturn empty, nil\n}", "func (d *MinioDriver) Mount(r volume.MountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Mount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, 
capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections > 0 {\n\t\tv.connections++\n\t\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n\t}\n\n\tif err := d.mountVolume(v); err != nil {\n\t\tglog.Warningf(\"mounting %#v volume failed: %s\", v, err.Error())\n\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t}\n\n\t// if the mount was successful, then increment the number of connections we\n\t// have to the mount.\n\tv.connections++\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func (d *VolumeDriver) MountVolume(name string, fstype string, id string, isReadOnly bool, skipAttach bool) (string, error) {\n\tlog.Errorf(\"VolumeDriver MountVolume to be implemented\")\n\tmountpoint := getMountPoint(name)\n\treturn mountpoint, nil\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (in *Mount) DeepCopy() *Mount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Mount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out 
*v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1alpha1.VolumeAttachmentSpec, s conversion.Scope) error {\n\treturn autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in, out, s)\n}", "func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to MountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Mounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, mountPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, mountPath, req.Name); err != nil 
{\n\t\treturn \"\", err\n\t}\n\n\tmountRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tmountResp := new(volume.MountResponse)\n\tif err := json.Unmarshal(mountRespBytes, mountResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn mountResp.Mountpoint, nil\n}", "func (c *UFSClient) NewAddUFSVolumeMountPointRequest() *AddUFSVolumeMountPointRequest {\n\treq := &AddUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := 
cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), 
nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: 
string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func (in *VpVolumeAndMount) DeepCopy() *VpVolumeAndMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VpVolumeAndMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (*MountRequest) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{2}\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out 
*v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func NewAdmissionRequest(reviewRequestBytes []byte) (*admissionv1.AdmissionRequest, error) {\n\tvar ar admissionv1.AdmissionReview\n\t_, _, err := deserializer.Decode(reviewRequestBytes, nil, &ar)\n\n\tlog.Printf(\"Received AdmissionReview, APIVersion: %s, Kind: %s\\n\", ar.APIVersion, ar.Kind)\n\treturn ar.Request, err\n}", "func NewPatchStorageV1alpha1VolumeAttachment(ctx *middleware.Context, handler PatchStorageV1alpha1VolumeAttachmentHandler) *PatchStorageV1alpha1VolumeAttachment {\n\treturn &PatchStorageV1alpha1VolumeAttachment{Context: ctx, Handler: handler}\n}", "func Convert_ecskube_ECSMountPoint_To_v1alpha1_ECSMountPoint(in *ecskube.ECSMountPoint, out *ECSMountPoint, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSMountPoint_To_v1alpha1_ECSMountPoint(in, out, s)\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *ServiceBindingVolumeMount) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateContainerDir(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDeviceType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDriver(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func mountVolume(ctx context.Context, src, dest, vID string, size int64, readOnly bool) error {\n\tglog.V(5).Infof(\"[mountVolume] source: %v destination: %v\", src, dest)\n\tif err := SafeMount(src, dest, string(FSTypeXFS),\n\t\tfunc() []MountOption {\n\t\t\tmOpts := []MountOption{\n\t\t\t\tMountOptionMSBind,\n\t\t\t}\n\t\t\tif readOnly {\n\t\t\t\tmOpts = append(mOpts, MountOptionMSReadOnly)\n\t\t\t}\n\t\t\treturn mOpts\n\t\t}(), []string{quotaOption}); err != nil {\n\t\treturn err\n\t}\n\n\tif size > 0 {\n\t\txfsQuota := &xfs.XFSQuota{\n\t\t\tPath: dest,\n\t\t\tProjectID: vID,\n\t\t}\n\t\tif err := xfsQuota.SetQuota(ctx, size); err != nil {\n\t\t\treturn status.Errorf(codes.Internal, \"Error while setting xfs limits: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *QtreeCreateRequest) SetVolume(newValue string) *QtreeCreateRequest {\n\to.VolumePtr = &newValue\n\treturn o\n}", "func FormatAndMountVol(devicePath string, mountInfo *MountInfo) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\terr := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to mount volume %s [%s] to %s, error %v\",\n\t\t\tdevicePath, mountInfo.FSType, mountInfo.MountPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func (vm *ContainerVM) overlayMount() error {\n\tvm.effectivePath = filepath.Join(vm.instancePath, \"fs\")\n\tworkPath := filepath.Join(vm.instancePath, \"fs_work\")\n\n\terr := os.MkdirAll(vm.effectivePath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(workPath, 0755)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\t// create the overlay mountpoint\n\targs := []string{\n\t\t\"mount\",\n\t\t\"-t\",\n\t\t\"overlay\",\n\t\tfmt.Sprintf(\"megamount_%v\", vm.ID),\n\t\t\"-o\",\n\t\tfmt.Sprintf(\"lowerdir=%v,upperdir=%v,workdir=%v\", vm.FSPath, vm.effectivePath, workPath),\n\t\tvm.effectivePath,\n\t}\n\tlog.Debug(\"mounting overlay: %v\", args)\n\tout, err := processWrapper(args...)\n\tif err != nil {\n\t\tlog.Error(\"overlay mount: %v %v\", err, out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func ParseMountTable(filter FilterFunc) ([]*MountInfo, error) {\n\tvar rawEntries *C.struct_statfs\n\n\tcount := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))\n\tif count == 0 {\n\t\treturn nil, fmt.Errorf(\"Failed to call getmntinfo\")\n\t}\n\n\tvar entries []C.struct_statfs\n\theader := (*reflect.SliceHeader)(unsafe.Pointer(&entries))\n\theader.Cap = count\n\theader.Len = count\n\theader.Data = uintptr(unsafe.Pointer(rawEntries))\n\n\tvar out []*MountInfo\n\tfor _, entry := range entries {\n\t\tvar mountinfo MountInfo\n\t\tvar skip, stop bool\n\t\tmountinfo.MountPoint = C.GoString(&entry.f_mntonname[0])\n\n\t\tif filter != nil {\n\t\t\t// filter out entries we're not interested in\n\t\t\tskip, stop = filter(p)\n\n\t\t\tif skip {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tmountinfo.MountSource = C.GoString(&entry.f_mntfromname[0])\n\t\tmountinfo.FilesystemType = C.GoString(&entry.f_fstypename[0])\n\n\t\tout = append(out, &mountinfo)\n\t\tif stop {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn out, nil\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || 
len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func NewMountCapability(\n\tmode csi.VolumeCapability_AccessMode_Mode,\n\tfsType string,\n\tmountFlags []string) *csi.VolumeCapability {\n\n\treturn &csi.VolumeCapability{\n\t\tAccessMode: &csi.VolumeCapability_AccessMode{\n\t\t\tMode: mode,\n\t\t},\n\t\tAccessType: &csi.VolumeCapability_Mount{\n\t\t\tMount: &csi.VolumeCapability_MountVolume{\n\t\t\t\tFsType: fsType,\n\t\t\t\tMountFlags: mountFlags,\n\t\t\t},\n\t\t},\n\t}\n}", "func (cl *Client) VolumeAttach(ctx context.Context, vaa *csp.VolumeAttachArgs) (*csp.Volume, error) {\n\tsvc, vid, _ := VolumeIdentifierParse(vaa.VolumeIdentifier)\n\tswitch svc {\n\tcase ServiceGCE:\n\t\treturn cl.gceVolumeAttach(ctx, vaa, vid)\n\t}\n\treturn nil, fmt.Errorf(\"storage type currently unsupported\")\n}", "func (client *Client) CreateVolume(request api.VolumeRequest) (*api.Volume, error) {\n\t// Check if a volume already exist with the same name\n\tvolume, err := metadata.LoadVolume(providers.FromClient(client), request.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif volume != nil {\n\t\treturn nil, fmt.Errorf(\"Volume '%s' already exists\", request.Name)\n\t}\n\n\tvol, err := volumes.Create(client.Volume, volumes.CreateOpts{\n\t\tName: request.Name,\n\t\tSize: request.Size,\n\t\tVolumeType: client.getVolumeType(request.Speed),\n\t}).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating volume : %s\", ProviderErrorToString(err))\n\t}\n\tv := api.Volume{\n\t\tID: vol.ID,\n\t\tName: vol.Name,\n\t\tSize: vol.Size,\n\t\tSpeed: client.getVolumeSpeed(vol.VolumeType),\n\t\tState: toVolumeState(vol.Status),\n\t}\n\terr = metadata.SaveVolume(providers.FromClient(client), &v)\n\tif err != nil {\n\t\tclient.DeleteVolume(v.ID)\n\t\treturn nil, fmt.Errorf(\"Error creating volume : %s\", ProviderErrorToString(err))\n\t}\n\n\treturn &v, nil\n}", "func NewMountSpec(mountType string, path string) 
(*MountSpec, error) {\n\tmt := MountSpecType(mountType)\n\tswitch mt {\n\tcase MountSpecDev, MountSpecLib, MountSpecSym, MountSpecDir:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected mount type: %v\", mt)\n\t}\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid path: %v\", path)\n\t}\n\n\tmount := MountSpec{\n\t\tType: mt,\n\t\tPath: path,\n\t}\n\n\treturn &mount, nil\n}", "func (c *Catalog) DefaultVolumeMount(name string) corev1.VolumeMount {\n\treturn corev1.VolumeMount{\n\t\tName: name,\n\t\tMountPath: \"/etc/random\",\n\t}\n}", "func GetVolumeMountFromCustomConfigSpec(cfcm *apicommonv1.CustomConfig, volumeName, volumePath, defaultSubPath string) corev1.VolumeMount {\n\tsubPath := defaultSubPath\n\tif cfcm.ConfigMap != nil && len(cfcm.ConfigMap.Items) > 0 {\n\t\tsubPath = cfcm.ConfigMap.Items[0].Path\n\t}\n\n\treturn corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tMountPath: volumePath,\n\t\tSubPath: subPath,\n\t\tReadOnly: true,\n\t}\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func NewMountSpecFromLine(line string) (*MountSpec, error) {\n\tparts := strings.SplitN(strings.TrimSpace(line), \",\", 2)\n\tif len(parts) < 2 {\n\t\treturn nil, fmt.Errorf(\"failed to parse line: %v\", line)\n\t}\n\tmountType := strings.TrimSpace(parts[0])\n\tpath := strings.TrimSpace(parts[1])\n\n\treturn NewMountSpec(mountType, path)\n}", "func parseMountEntry(entry string) (*mount, error) {\n\tif strings.HasPrefix(entry, \"#\") { // The entry is a comment.\n\t\treturn nil, nil\n\t}\n\tparts := strings.Split(entry, \" \")\n\tif len(parts) < 4 {\n\t\treturn nil, fmt.Errorf(\"invalid format: expected at least 4 space-separated columns\")\n\t}\n\t// There may be 4 escaped characters (space (\\040), tab (\\011), newline (\\012)\n\t// and backslash (\\134)), so 
we unescape them if necessary.\n\tunescape := strings.NewReplacer(\n\t\t`\\040`, \"\\040\",\n\t\t`\\011`, \"\\011\",\n\t\t`\\012`, \"\\012\",\n\t\t`\\134`, \"\\134\",\n\t)\n\treturn &mount{\n\t\tdevice: unescape.Replace(parts[0]),\n\t\tmountPoint: unescape.Replace(parts[1]),\n\t\tfsType: unescape.Replace(parts[2]),\n\t\toptions: unescape.Replace(parts[3]),\n\t}, nil\n}", "func parseMountFlag(m string) (specs.Mount, error) {\n\tmount := specs.Mount{}\n\tr := csv.NewReader(strings.NewReader(m))\n\n\tfields, err := r.Read()\n\tif err != nil {\n\t\treturn mount, err\n\t}\n\n\tfor _, field := range fields {\n\t\tkey, val, ok := strings.Cut(field, \"=\")\n\t\tif !ok {\n\t\t\treturn mount, fmt.Errorf(\"invalid mount specification: expected key=val\")\n\t\t}\n\n\t\tswitch key {\n\t\tcase \"type\":\n\t\t\tmount.Type = val\n\t\tcase \"source\", \"src\":\n\t\t\tmount.Source = val\n\t\tcase \"destination\", \"dst\":\n\t\t\tmount.Destination = val\n\t\tcase \"options\":\n\t\t\tmount.Options = strings.Split(val, \":\")\n\t\tdefault:\n\t\t\treturn mount, fmt.Errorf(\"mount option %q not supported\", key)\n\t\t}\n\t}\n\n\treturn mount, nil\n}", "func startGlusterVolume(name string) {\n\tclient.StartVolume(name)\n}", "func (r ApiGetHyperflexVolumeListRequest) Select_(select_ string) ApiGetHyperflexVolumeListRequest {\n\tr.select_ = &select_\n\treturn r\n}", "func (s *StackEbrc) CreateVolumeAttachment(request abstract.VolumeAttachmentRequest) (string, fail.Error) {\n\tlogrus.Debugf(\">>> stacks.ebrc::CreateVolumeAttachment(%s)\", request.Name)\n\tdefer logrus.Debugf(\"<<< stacks.ebrc::CreateVolumeAttachment(%s)\", request.Name)\n\n\tvm, err := s.findVMByID(request.HostID)\n\tif err != nil || utils.IsEmpty(vm) {\n\t\treturn \"\", fail.Wrap(err, fmt.Sprintf(\"Error creating attachment, vm empty\"))\n\t}\n\n\tdisk, err := s.findDiskByID(request.VolumeID)\n\tif err != nil || utils.IsEmpty(disk) {\n\t\treturn \"\", fail.Wrap(err, fmt.Sprintf(\"Error creating attachment, disk 
empty\"))\n\t}\n\n\tattask, err := vm.AttachDisk(&types.DiskAttachOrDetachParams{Disk: &types.Reference{HREF: disk.Disk.HREF}})\n\tif err != nil {\n\t\treturn \"\", fail.Wrap(err, fmt.Sprintf(\"Error creating attachment\"))\n\t}\n\n\terr = attask.WaitTaskCompletion()\n\tif err != nil {\n\t\treturn \"\", fail.Wrap(err, fmt.Sprintf(\"Error creating attachment\"))\n\t}\n\n\treturn getAttachmentID(request.HostID, request.VolumeID), nil\n}", "func (m *MountNewCreateDisksParamsVMVolume) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param 
pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (in *FileMount) DeepCopy() *FileMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(FileMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *UFSClient) NewRemoveUFSVolumeMountPointRequest() *RemoveUFSVolumeMountPointRequest {\n\treq := &RemoveUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (r ApiGetHyperflexVolumeListRequest) Expand(expand string) ApiGetHyperflexVolumeListRequest {\n\tr.expand = &expand\n\treturn r\n}", "func ParseVolume(volume string) (name, host, container, mode string, err error) {\n\tseparator := \":\"\n\n\t// Parse based on \":\"\n\tvolumeStrings := strings.Split(volume, separator)\n\tif len(volumeStrings) == 0 {\n\t\treturn\n\t}\n\n\t// Set name if existed\n\tif !isPath(volumeStrings[0]) {\n\t\tname = volumeStrings[0]\n\t\tvolumeStrings = volumeStrings[1:]\n\t}\n\n\t// Check if *anything* has been passed\n\tif len(volumeStrings) == 0 {\n\t\terr = fmt.Errorf(\"invalid volume format: %s\", volume)\n\t\treturn\n\t}\n\n\t// Get the last \":\" passed which is presumingly the \"access mode\"\n\tpossibleAccessMode := volumeStrings[len(volumeStrings)-1]\n\n\t// Check to see if :Z or :z exists. 
We do not support SELinux relabeling at the moment.\n\t// See https://github.com/kubernetes/kompose/issues/176\n\t// Otherwise, check to see if \"rw\" or \"ro\" has been passed\n\tif possibleAccessMode == \"z\" || possibleAccessMode == \"Z\" {\n\t\tlogrus.Warnf(\"Volume mount \\\"%s\\\" will be mounted without labeling support. :z or :Z not supported\", volume)\n\t\tmode = \"\"\n\t\tvolumeStrings = volumeStrings[:len(volumeStrings)-1]\n\t} else if possibleAccessMode == \"rw\" || possibleAccessMode == \"ro\" {\n\t\tmode = possibleAccessMode\n\t\tvolumeStrings = volumeStrings[:len(volumeStrings)-1]\n\t}\n\n\t// Check the volume format as well as host\n\tcontainer = volumeStrings[len(volumeStrings)-1]\n\tvolumeStrings = volumeStrings[:len(volumeStrings)-1]\n\tif len(volumeStrings) == 1 {\n\t\thost = volumeStrings[0]\n\t}\n\tif !isPath(container) || (len(host) > 0 && !isPath(host)) || len(volumeStrings) > 1 {\n\t\terr = fmt.Errorf(\"invalid volume format: %s\", volume)\n\t\treturn\n\t}\n\treturn\n}", "func (m *ServiceBindingVolumeMount) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateDevice(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (srv *VolumeService) Attach(volumename string, vmname string, path string, format string) error {\n\t// Get volume ID\n\tvolume, err := srv.Get(volumename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"No volume found with name or id '%s'\", volumename)\n\t}\n\n\t// Get VM ID\n\tvmService := NewVMService(srv.provider)\n\tvm, err := vmService.Get(vmname)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"No VM found with name or id '%s'\", vmname)\n\t}\n\n\tvolatt, err := srv.provider.CreateVolumeAttachment(api.VolumeAttachmentRequest{\n\t\tName: fmt.Sprintf(\"%s-%s\", volume.Name, vm.Name),\n\t\tServerID: vm.ID,\n\t\tVolumeID: volume.ID,\n\t})\n\tif err != 
nil {\n\t\t// TODO Use more explicit error\n\t\treturn err\n\t}\n\n\t// Create mount point\n\tmountPoint := path\n\tif path == api.DefaultVolumeMountPoint {\n\t\tmountPoint = api.DefaultVolumeMountPoint + volume.Name\n\t}\n\n\tsshConfig, err := srv.provider.GetSSHConfig(vm.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserver, err := nfs.NewServer(sshConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = server.MountBlockDevice(volatt.Device, mountPoint)\n\n\tif err != nil {\n\t\tsrv.Detach(volumename, vmname)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Client) AttachVolume(ctx context.Context, params *AttachVolumeInput, optFns ...func(*Options)) (*AttachVolumeOutput, error) {\n\tif params == nil {\n\t\tparams = &AttachVolumeInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"AttachVolume\", params, optFns, c.addOperationAttachVolumeMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*AttachVolumeOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (stack *OpenstackVolumes) AttachVolume(volume *volumes.Volume) error {\n\topts := volumeattach.CreateOpts{\n\t\tVolumeID: volume.ProviderID,\n\t}\n\tmc := NewMetricContext(\"volume\", \"attach\")\n\tvolumeAttachment, err := volumeattach.Create(stack.computeClient, stack.meta.ServerID, opts).Extract()\n\tif mc.ObserveRequest(err) != nil {\n\t\treturn fmt.Errorf(\"error attaching volume %s to server %s: %v\", opts.VolumeID, stack.meta.ServerID, err)\n\t}\n\tvolume.LocalDevice = volumeAttachment.Device\n\treturn nil\n}", "func (client *Client) CreateVolumeAttachment(request api.VolumeAttachmentRequest) (*api.VolumeAttachment, error) {\n\t// Create the attachment\n\tva, err := volumeattach.Create(client.Compute, request.ServerID, 
volumeattach.CreateOpts{\n\t\tVolumeID: request.VolumeID,\n\t}).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating volume attachment between server %s and volume %s: %s\", request.ServerID, request.VolumeID, ProviderErrorToString(err))\n\t}\n\n\tvaapi := &api.VolumeAttachment{\n\t\tID: va.ID,\n\t\tServerID: va.ServerID,\n\t\tVolumeID: va.VolumeID,\n\t\tDevice: va.Device,\n\t}\n\n\t// Update the metadata\n\n\tmtdVol, err := metadata.LoadVolume(providers.FromClient(client), request.VolumeID)\n\tif err != nil {\n\n\t\t// Detach volume\n\t\tdetach_err := volumeattach.Delete(client.Compute, va.ServerID, va.ID).ExtractErr()\n\t\tif detach_err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error deleting volume attachment %s: %s\", va.ID, ProviderErrorToString(err))\n\t\t}\n\n\t\treturn nil, err\n\t}\n\terr = mtdVol.Attach(vaapi)\n\tif err != nil {\n\t\t// Detach volume\n\t\tdetach_err := volumeattach.Delete(client.Compute, va.ServerID, va.ID).ExtractErr()\n\t\tif detach_err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error deleting volume attachment %s: %s\", va.ID, ProviderErrorToString(err))\n\t\t}\n\n\t\treturn vaapi, err\n\t}\n\n\treturn vaapi, nil\n}", "func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AdmissionRequest)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (request UpdateARoomRequest) Validate() error {\n\tif request.Name == \"\" || request.ID == nil {\n\t\treturn ErrInvalidRequest\n\t}\n\treturn nil\n}", "func VolumeEncryption(value bool) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.EncryptVols = value\n\t\treturn nil\n\t}\n}", "func (c *Controller) Attach(attachRequest k8sresources.FlexVolumeAttachRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-attach-start\")\n\tdefer c.logger.Println(\"controller-attach-end\")\n\n\tif attachRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\tc.logger.Printf(\"k8s 1.5 attach just 
returning Success\")\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Success\",\n\t\t}\n\t}\n\tc.logger.Printf(\"For k8s version 1.6 and higher, Ubiquity just returns NOT supported for Attach API. This might change in the future\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}" ]
[ "0.771414", "0.7240175", "0.716826", "0.7108231", "0.65441865", "0.61721224", "0.6155006", "0.5976585", "0.58205014", "0.5781181", "0.5761903", "0.5609689", "0.5573325", "0.5536772", "0.5304802", "0.5304802", "0.5304802", "0.5304802", "0.53047204", "0.5283534", "0.5283479", "0.51544476", "0.5081763", "0.49671912", "0.49447614", "0.4930883", "0.4912122", "0.48544607", "0.4756501", "0.47563294", "0.47494957", "0.47304124", "0.4704724", "0.4694233", "0.4691687", "0.4679019", "0.46717876", "0.4665018", "0.46577817", "0.4652239", "0.46382198", "0.46365383", "0.4634657", "0.46198806", "0.45806086", "0.45672795", "0.45531625", "0.45397496", "0.4515718", "0.45140707", "0.44986236", "0.44974905", "0.44686615", "0.44660357", "0.44529542", "0.44496304", "0.44473675", "0.44345367", "0.44166335", "0.4415091", "0.43994096", "0.4390243", "0.43800655", "0.43734118", "0.43311036", "0.43255433", "0.4324746", "0.4324503", "0.42991653", "0.4297645", "0.42965105", "0.42835623", "0.42815316", "0.4268541", "0.42677525", "0.42660895", "0.42548984", "0.42510962", "0.42499846", "0.4205336", "0.41992903", "0.41928765", "0.41927785", "0.41891393", "0.41854087", "0.41611543", "0.4158083", "0.41275755", "0.41225937", "0.41190842", "0.4112438", "0.41088322", "0.4106142", "0.4100591", "0.40989858", "0.40980917", "0.4097088", "0.4092859", "0.40926698", "0.4068235" ]
0.8811924
0
Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse is an autogenerated conversion function.
func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error { return autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (d *VolumeDriver) Mount(r volume.MountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Mounting volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tlog.Errorf(\"VolumeDriver Mount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn 
autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func (d *MinioDriver) Mount(r volume.MountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Mount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections > 0 {\n\t\tv.connections++\n\t\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n\t}\n\n\tif err := d.mountVolume(v); err != nil {\n\t\tglog.Warningf(\"mounting %#v volume failed: %s\", v, err.Error())\n\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t}\n\n\t// if the mount was successful, then increment the number of connections we\n\t// have to the mount.\n\tv.connections++\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func (proxy *remoteDriverProxy) Mount(name, id string) (string, error) {\n\tvar req = remoteVolumeMountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeMountResp\n\n\tif err := 
proxy.client.CallService(remoteVolumeMountService, &req, &resp, true); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn \"\", errors.New(resp.Err)\n\t}\n\n\treturn resp.Mountpoint, nil\n}", "func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Mount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tvol.mounts[req.ID] = true\n\n\treturn &volume.MountResponse{\n\t\tMountpoint: vol.path,\n\t}, nil\n}", "func (c *Controller) Mount(mountRequest k8sresources.FlexVolumeMountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-mount-start\")\n\tdefer c.logger.Println(\"controller-mount-end\")\n\tc.logger.Println(fmt.Sprintf(\"mountRequest [%#v]\", mountRequest))\n\tvar lnPath string\n\tattachRequest := resources.AttachRequest{Name: mountRequest.MountDevice, Host: getHost()}\n\tmountedPath, err := c.Client.Attach(attachRequest)\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to mount volume [%s], Error: %#v\", mountRequest.MountDevice, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\tif mountRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\t//For k8s 1.5, by the time we do the attach/mount, the mountDir (MountPath) is not created trying to do mount and ln will fail because the dir is not found, so we need to create the directory before continuing\n\t\tdir := filepath.Dir(mountRequest.MountPath)\n\t\tc.logger.Printf(\"mountrequest.MountPath %s\", mountRequest.MountPath)\n\t\tlnPath = mountRequest.MountPath\n\t\tk8sRequiredMountPoint := path.Join(mountRequest.MountPath, mountRequest.MountDevice)\n\t\tif _, err = os.Stat(k8sRequiredMountPoint); err != nil 
{\n\t\t\tif os.IsNotExist(err) {\n\n\t\t\t\tc.logger.Printf(\"creating volume directory %s\", dir)\n\t\t\t\terr = os.MkdirAll(dir, 0777)\n\t\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\t\tmsg := fmt.Sprintf(\"Failed creating volume directory %#v\", err)\n\t\t\t\t\tc.logger.Println(msg)\n\n\t\t\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\t\t\tStatus: \"Failure\",\n\t\t\t\t\t\tMessage: msg,\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// For k8s 1.6 and later kubelet creates a folder as the MountPath, including the volume name, whenwe try to create the symlink this will fail because the same name exists. This is why we need to remove it before continuing.\n\t} else {\n\t\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\t\tif strings.HasPrefix(mountedPath, ubiquityMountPrefix) {\n\t\t\tlnPath = mountRequest.MountPath\n\t\t} else {\n\t\t\tlnPath, _ = path.Split(mountRequest.MountPath)\n\t\t}\n\t\tc.logger.Printf(\"removing folder %s\", mountRequest.MountPath)\n\n\t\terr = os.Remove(mountRequest.MountPath)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\tmsg := fmt.Sprintf(\"Failed removing existing volume directory %#v\", err)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\n\t\t}\n\n\t}\n\tsymLinkCommand := \"/bin/ln\"\n\targs := []string{\"-s\", mountedPath, lnPath}\n\tc.logger.Printf(fmt.Sprintf(\"creating slink from %s -> %s\", mountedPath, lnPath))\n\n\tvar stderr bytes.Buffer\n\tcmd := exec.Command(symLinkCommand, args...)\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Controller: mount failed to symlink %#v\", stderr.String())\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\n\t}\n\tmsg := fmt.Sprintf(\"Volume mounted successfully to %s\", mountedPath)\n\tc.logger.Println(msg)\n\n\treturn 
k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: msg,\n\t}\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, 
mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func (d *VolumeDriver) MountVolume(name string, fstype string, id string, isReadOnly bool, skipAttach bool) (string, error) {\n\tlog.Errorf(\"VolumeDriver MountVolume to be implemented\")\n\tmountpoint := getMountPoint(name)\n\treturn mountpoint, nil\n}", "func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to MountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Mounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, mountPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, mountPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmountRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tmountResp := new(volume.MountResponse)\n\tif err := 
json.Unmarshal(mountRespBytes, mountResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn mountResp.Mountpoint, nil\n}", "func (d ImagefsDriver) Mount(r *volume.MountRequest) (*volume.MountResponse, error) {\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\terr = d.cli.ContainerStart(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\ttypes.ContainerStartOptions{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tvar _ret *volume.MountResponse\n\tret, err := d.Path(&volume.PathRequest{Name: r.Name})\n\tif ret != nil {\n\t\t_ret = &volume.MountResponse{\n\t\t\tMountpoint: ret.Mountpoint,\n\t\t}\n\t}\n\treturn _ret, err\n}", "func (m *SimpleCSIProviderServer) Mount(ctx context.Context, req *v1alpha1.MountRequest) (*v1alpha1.MountResponse, error) {\n\tvar attrib, secret map[string]string\n\tvar filePermission os.FileMode\n\tvar err error\n\n\tif err = json.Unmarshal([]byte(req.GetAttributes()), &attrib); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal attributes, error: %+v\", err)\n\t}\n\tif err = json.Unmarshal([]byte(req.GetSecrets()), &secret); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal secrets, error: %+v\", err)\n\t}\n\tif err = json.Unmarshal([]byte(req.GetPermission()), &filePermission); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal file permission, error: %+v\", err)\n\t}\n\tif len(req.GetTargetPath()) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing target path\")\n\t}\n\n\tresp := &v1alpha1.MountResponse{\n\t\tObjectVersion: []*v1alpha1.ObjectVersion{},\n\t}\n\n\tif rawTokenContent, ok := attrib[\"csi.storage.k8s.io/serviceAccount.tokens\"]; ok {\n\t\ttokens := map[string]KubernetesTokenContent{}\n\t\terr := json.Unmarshal([]byte(rawTokenContent), &tokens)\n\t\tif err != nil 
{\n\t\t\tklog.Errorf(\"Error unmarshaling tokens attribute: %v\", err)\n\t\t}\n\t\tfiles := []*v1alpha1.File{}\n\t\tfor sub, content := range tokens {\n\t\t\tu, _ := url.Parse(sub)\n\n\t\t\tpath := filepath.Join(u.Hostname(), u.EscapedPath())\n\t\t\tfiles = append(files, &v1alpha1.File{\n\t\t\t\tPath: path,\n\t\t\t\tContents: []byte(content.Token),\n\t\t\t})\n\t\t\tresp.ObjectVersion = append(resp.ObjectVersion, &v1alpha1.ObjectVersion{Id: fmt.Sprintf(\"secret/%s\", path), Version: \"v1\"})\n\t\t}\n\t\tresp.Files = append(resp.Files, files...)\n\n\t}\n\tif rawSecretContent, ok := attrib[\"secrets\"]; ok {\n\t\tsecretContents := []SimpleSecretKeyValue{}\n\t\terr := yaml.Unmarshal([]byte(rawSecretContent), &secretContents)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Error unmarshaling secret attribute: %v\", err)\n\t\t}\n\n\t\tfiles := []*v1alpha1.File{}\n\t\tfor _, kv := range secretContents {\n\t\t\tfiles = append(files, &v1alpha1.File{\n\t\t\t\tPath: kv.Key,\n\t\t\t\tContents: []byte(kv.Value),\n\t\t\t})\n\t\t\tresp.ObjectVersion = append(resp.ObjectVersion, &v1alpha1.ObjectVersion{Id: fmt.Sprintf(\"secret/%s\", kv.Key), Version: \"v1\"})\n\t\t}\n\t\tresp.Files = append(resp.Files, files...)\n\t}\n\treturn resp, nil\n}", "func MountVolume(vol *apis.LVMVolume, mount *MountInfo, podLVInfo *PodLVInfo) error {\n\tvolume := vol.Spec.VolGroup + \"/\" + vol.Name\n\tmounted, err := verifyMountRequest(vol, mount.MountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mounted {\n\t\tklog.Infof(\"lvm : already mounted %s => %s\", volume, mount.MountPath)\n\t\treturn nil\n\t}\n\n\tdevicePath := DevPath + volume\n\n\terr = FormatAndMountVol(devicePath, mount)\n\tif err != nil {\n\t\treturn status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\t\"failed to format and mount the volume error: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t}\n\n\tklog.Infof(\"lvm: volume %v mounted %v fs %v\", volume, mount.MountPath, mount.FSType)\n\n\tif ioLimitsEnabled && podLVInfo != nil {\n\t\tif err := 
setIOLimits(vol, podLVInfo, devicePath); err != nil {\n\t\t\tklog.Warningf(\"lvm: error setting io limits: podUid %s, device %s, err=%v\", podLVInfo.UID, devicePath, err)\n\t\t} else {\n\t\t\tklog.Infof(\"lvm: io limits set for podUid %v, device %s\", podLVInfo.UID, devicePath)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Controller) MountDevice(mountDeviceRequest k8sresources.FlexVolumeMountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-MountDevice-start\")\n\tdefer c.logger.Println(\"controller-MountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {\n\tdriver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)\n\tif err != nil || driver == nil {\n\t\tglog.Errorf(\"Failed to get portworx driver. Err: %v\", err)\n\t\treturn err\n\t}\n\n\terr = driver.Mount(m.volName, mountPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Error mounting Portworx Volume (%v) on Path (%v): %v\", m.volName, mountPath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func (vm *ContainerVM) overlayMount() error {\n\tvm.effectivePath = filepath.Join(vm.instancePath, \"fs\")\n\tworkPath := filepath.Join(vm.instancePath, \"fs_work\")\n\n\terr := os.MkdirAll(vm.effectivePath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(workPath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the overlay mountpoint\n\targs := []string{\n\t\t\"mount\",\n\t\t\"-t\",\n\t\t\"overlay\",\n\t\tfmt.Sprintf(\"megamount_%v\", vm.ID),\n\t\t\"-o\",\n\t\tfmt.Sprintf(\"lowerdir=%v,upperdir=%v,workdir=%v\", vm.FSPath, vm.effectivePath, workPath),\n\t\tvm.effectivePath,\n\t}\n\tlog.Debug(\"mounting overlay: 
%v\", args)\n\tout, err := processWrapper(args...)\n\tif err != nil {\n\t\tlog.Error(\"overlay mount: %v %v\", err, out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func FormatAndMountVol(devicePath string, mountInfo *MountInfo) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\terr := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to mount volume %s [%s] to %s, error %v\",\n\t\t\tdevicePath, mountInfo.FSType, mountInfo.MountPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (d *driverInfo) Mount(volume *Volume) error {\n\t// don't mount twice\n\tif err := volume.CheckUnmounted(); err != nil {\n\t\treturn err\n\t}\n\n\tvolume.MountPath = d.getMountPath(volume.Name)\n\texists, err := fs.DirExists(volume.MountPath)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing mount path '%s': %v\", volume.MountPath, err)\n\t}\n\n\tif !exists {\n\t\tif err := fs.CreateDir(volume.MountPath, true, 0700); err != nil {\n\t\t\treturn fmt.Errorf(\"error creating mount path '%s': %v\", volume.MountPath, err)\n\t\t}\n\t}\n\n\tif err := d.storage.Mount(volume); err != nil {\n\t\tvolume.MountPath = \"\"\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (*MountResponse) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{3}\n}", "func (driver *Driver) Mount(volumeName, volumeID string, overwriteFs bool, newFsType string) (string, error) {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err 
:= driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(\n\t\tvolumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\tvolumeAttachment, err = driver.sdm.AttachVolume(\n\t\t\tfalse, volumes[0].VolumeID, instances[0].InstanceID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", errors.New(\"Volume did not attach\")\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) > 0 {\n\t\treturn mounts[0].Mountpoint, nil\n\t}\n\n\tswitch {\n\tcase os.Getenv(\"REXRAY_DOCKER_VOLUMETYPE\") != \"\":\n\t\tnewFsType = os.Getenv(\"REXRAY_DOCKER_VOLUMETYPE\")\n\tcase newFsType == \"\":\n\t\tnewFsType = \"ext4\"\n\t}\n\n\tif err := driver.osdm.Format(volumeAttachment[0].DeviceName, newFsType, overwriteFs); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmountPath, err := getVolumeMountPath(volumes[0].Name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := os.MkdirAll(mountPath, 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := driver.osdm.Mount(volumeAttachment[0].DeviceName, mountPath, \"\", \"\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn mountPath, nil\n}", "func (o *Filesystem) Mount(ctx context.Context, options map[string]dbus.Variant) (mountPath string, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Mount\", 0, options).Store(&mountPath)\n\treturn\n}", "func (o FioSpecOutput) Volume() FioSpecVolumeOutput {\n\treturn o.ApplyT(func(v FioSpec) FioSpecVolume { return v.Volume }).(FioSpecVolumeOutput)\n}", 
"func (service *LoanSrvc) Mount(ctx context.Context, mux goahttp.Muxer) goahttp.Server {\n\tendpoints := loan.NewEndpoints(service)\n\tsrv := server.New(endpoints, mux, goahttp.RequestDecoder, goahttp.ResponseEncoder, api.ErrorHandler, nil)\n\tserver.Mount(mux, srv)\n\n\tfor _, m := range srv.Mounts {\n\t\tlog.WithContext(ctx).Infof(\"%q mounted on %s %s\", m.Method, m.Verb, m.Pattern)\n\t}\n\treturn srv\n}", "func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Unmounting Volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tif d.refCounts.IsInitialized() != true {\n\t\t// if refcounting hasn't been succesful,\n\t\t// no refcounting, no unmount. All unmounts are delayed\n\t\t// until we succesfully populate the refcount map\n\t\td.refCounts.MarkDirty()\n\t\treturn volume.Response{Err: \"\"}\n\t}\n\n\tlog.Errorf(\"VolumeDriver Unmount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (o DiskResponseOutput) MountPoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiskResponse) string { return v.MountPoint }).(pulumi.StringOutput)\n}", "func (z *ZfsH) Mount(d *Dataset, overlay bool, options []string) (*Dataset, error) {\n\tif d.Type == DatasetSnapshot {\n\t\treturn nil, errors.New(\"cannot mount snapshots\")\n\t}\n\targs := make([]string, 1, 5)\n\targs[0] = \"mount\"\n\tif overlay {\n\t\targs = append(args, \"-O\")\n\t}\n\tif options != nil {\n\t\targs = append(args, \"-o\")\n\t\targs = append(args, strings.Join(options, \",\"))\n\t}\n\targs = append(args, d.Name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(d.Name)\n}", "func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount 
end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. 
Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. 
Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func (in *Mount) DeepCopy() *Mount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Mount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (m *Mount) Mount(target string) error {\n\treturn ErrNotImplementOnUnix\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func RepoVolumeMount() corev1.VolumeMount {\n\treturn corev1.VolumeMount{Name: \"pgbackrest-repo\", MountPath: repoMountPath}\n}", "func (d *MinioDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Unmount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections <= 
1 {\n\t\tif err := d.unmountVolume(v); err != nil {\n\t\t\tglog.Warningf(\"Unmounting %s volume failed with: %s\", v, err)\n\t\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t\t}\n\t\tv.connections = 0\n\t\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n\t}\n\tv.connections--\n\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n}", "func (m *Manager) Mount() error {\n\tvar err error\n\tm.mountPoint, err = m.b.Mount(m.b.MountLabel)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m.mountPoint == \"\" {\n\t\treturn fmt.Errorf(\"container-id '%s' is not mounted\", m.b.ContainerID)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func (o SecretBackendV2Output) Mount() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecretBackendV2) pulumi.StringOutput { return v.Mount }).(pulumi.StringOutput)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (client *Client) MapVolume(name, host, access string, lun int) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/map/volume/access/%s/lun/%d/host/%s/\\\"%s\\\"\", access, lun, host, name)\n}", "func (k *Kubernetes) AddPodMountVolume(service *apistructs.Service, podSpec *corev1.PodSpec,\n\tsecretvolmounts []corev1.VolumeMount, secretvolumes []corev1.Volume) error 
{\n\n\tif len(podSpec.Volumes) == 0 {\n\t\tpodSpec.Volumes = make([]corev1.Volume, 0)\n\t}\n\n\t//Pay attention to the settings mentioned above, there is only one container in a pod\n\tif len(podSpec.Containers[0].VolumeMounts) == 0 {\n\t\tpodSpec.Containers[0].VolumeMounts = make([]corev1.VolumeMount, 0)\n\t}\n\n\t// get cluster info\n\tclusterInfo, err := k.ClusterInfo.Get()\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to get cluster info, clusterName: %s, (%v)\", k.clusterName, err)\n\t}\n\n\t// hostPath type\n\tfor i, bind := range service.Binds {\n\t\tif bind.HostPath == \"\" || bind.ContainerPath == \"\" {\n\t\t\treturn errors.New(\"bind HostPath or ContainerPath is empty\")\n\t\t}\n\t\t//Name formation '[a-z0-9]([-a-z0-9]*[a-z0-9])?'\n\t\tname := \"volume\" + \"-bind-\" + strconv.Itoa(i)\n\n\t\thostPath, err := ParseJobHostBindTemplate(bind.HostPath, clusterInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// The hostPath that does not start with an absolute path is used to apply for local disk resources in the old volume interface\n\t\tif !strings.HasPrefix(hostPath, \"/\") {\n\t\t\t//hostPath = strutil.Concat(\"/mnt/k8s/\", hostPath)\n\t\t\tpvcName := strings.Replace(hostPath, \"_\", \"-\", -1)\n\t\t\tsc := \"dice-local-volume\"\n\t\t\tif err := k.pvc.CreateIfNotExists(&corev1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\tNamespace: service.Namespace,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},\n\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\tcorev1.ResourceStorage: resource.MustParse(\"10Gi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &sc,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpodSpec.Volumes = 
append(podSpec.Volumes,\n\t\t\t\tcorev1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\t\tcorev1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: name,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tHostPath: &corev1.HostPathVolumeSource{\n\t\t\t\t\t\tPath: hostPath,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\n\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: name,\n\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t})\n\t}\n\n\t// pvc volume type\n\tif len(service.Volumes) > 0 {\n\t\tif err := k.setStatelessServiceVolumes(service, podSpec); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Configure the business container sidecar shared directory\n\tfor name, sidecar := range service.SideCars {\n\t\tfor _, dir := range sidecar.SharedDirs {\n\t\t\temptyDirVolumeName := strutil.Concat(name, shardDirSuffix)\n\n\t\t\tquantitySize := resource.MustParse(k8sapi.PodEmptyDirSizeLimit10Gi)\n\t\t\tif sidecar.Resources.EmptyDirCapacity > 0 {\n\t\t\t\tmaxEmptyDir := fmt.Sprintf(\"%dGi\", sidecar.Resources.EmptyDirCapacity)\n\t\t\t\tquantitySize = resource.MustParse(maxEmptyDir)\n\t\t\t}\n\n\t\t\tsrcMount := corev1.VolumeMount{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tMountPath: dir.Main,\n\t\t\t\tReadOnly: false, // rw\n\t\t\t}\n\t\t\t// Business master container\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, 
srcMount)\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes, corev1.Volume{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\tSizeLimit: &quantitySize,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\tif service.InitContainer != nil {\n\t\tfor name, initc := range service.InitContainer {\n\t\t\tfor i, dir := range initc.SharedDirs {\n\t\t\t\tname := fmt.Sprintf(\"%s-%d\", name, i)\n\t\t\t\tsrcMount := corev1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: dir.Main,\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t}\n\t\t\t\tquantitySize := resource.MustParse(k8sapi.PodEmptyDirSizeLimit10Gi)\n\t\t\t\tif initc.Resources.EmptyDirCapacity > 0 {\n\t\t\t\t\tmaxEmptyDir := fmt.Sprintf(\"%dGi\", initc.Resources.EmptyDirCapacity)\n\t\t\t\t\tquantitySize = resource.MustParse(maxEmptyDir)\n\t\t\t\t}\n\t\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\t\t\t\tpodSpec.Volumes = append(podSpec.Volumes, corev1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\t\tSizeLimit: &quantitySize,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tpodSpec.Volumes = append(podSpec.Volumes, secretvolumes...)\n\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, secretvolmounts...)\n\n\treturn nil\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (o FunctionServiceConfigSecretVolumeOutput) MountPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FunctionServiceConfigSecretVolume) string { return v.MountPath }).(pulumi.StringOutput)\n}", 
"func (client *Client) UnmapVolume(name, host string) (*Response, *ResponseStatus, error) {\n\tif host == \"\" {\n\t\treturn client.FormattedRequest(\"/unmap/volume/\\\"%s\\\"\", name)\n\t}\n\n\treturn client.FormattedRequest(\"/unmap/volume/host/\\\"%s\\\"/\\\"%s\\\"\", host, name)\n}", "func (osh *SystemHandler) Mount(source string, target string, fsType string, flags uintptr, data string) error {\n\treturn syscall.Mount(source, target, fsType, flags, data)\n}", "func (mounter *csiProxyMounterV1Beta) Mount(source string, target string, fstype string, options []string) error {\n\tklog.V(4).Infof(\"Mount: old name: %s. new name: %s\", source, target)\n\t// Mount is called after the format is done.\n\t// TODO: Confirm that fstype is empty.\n\tlinkRequest := &fs.LinkPathRequest{\n\t\tSourcePath: normalizeWindowsPath(source),\n\t\tTargetPath: normalizeWindowsPath(target),\n\t}\n\t_, err := mounter.FsClient.LinkPath(context.Background(), linkRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (v *Volume) mount() error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\t// If the count is non-zero, the volume is already mounted.\n\t// Nothing to do.\n\tif v.state.MountCount > 0 {\n\t\tv.state.MountCount++\n\t\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\t\treturn v.save()\n\t}\n\n\t// Volume plugins implement their own mount counter, based on the ID of\n\t// the mounting container. But we already have one, and honestly I trust\n\t// ours more. 
So hardcode container ID to something reasonable, and use\n\t// the same one for everything.\n\tif v.UsesVolumeDriver() {\n\t\tif v.plugin == nil {\n\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t}\n\n\t\treq := new(pluginapi.MountRequest)\n\t\treq.Name = v.Name()\n\t\treq.ID = pseudoCtrID\n\t\tmountPoint, err := v.plugin.MountVolume(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tv.state.MountCount++\n\t\tv.state.MountPoint = mountPoint\n\t\treturn v.save()\n\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\tmountPoint, err := v.runtime.storageService.MountContainerImage(v.config.StorageID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mounting volume %s image failed: %w\", v.Name(), err)\n\t\t}\n\n\t\tv.state.MountCount++\n\t\tv.state.MountPoint = mountPoint\n\t\treturn v.save()\n\t}\n\n\tvolDevice := v.config.Options[\"device\"]\n\tvolType := v.config.Options[\"type\"]\n\tvolOptions := v.config.Options[\"o\"]\n\n\t// Some filesystems (tmpfs) don't have a device, but we still need to\n\t// give the kernel something.\n\tif volDevice == \"\" && volType != \"\" {\n\t\tvolDevice = volType\n\t}\n\n\t// We need to use the actual mount command.\n\t// Convincing unix.Mount to use the same semantics as the mount command\n\t// itself seems prohibitively difficult.\n\t// TODO: might want to cache this path in the runtime?\n\tmountPath, err := exec.LookPath(\"mount\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"locating 'mount' binary: %w\", err)\n\t}\n\tmountArgs := []string{}\n\tif volOptions != \"\" {\n\t\tmountArgs = append(mountArgs, \"-o\", volOptions)\n\t}\n\tswitch volType {\n\tcase \"\":\n\tcase define.TypeBind:\n\t\tmountArgs = append(mountArgs, \"-o\", volType)\n\tdefault:\n\t\tmountArgs = append(mountArgs, \"-t\", volType)\n\t}\n\n\tmountArgs = append(mountArgs, volDevice, v.config.MountPoint)\n\tmountCmd := exec.Command(mountPath, 
mountArgs...)\n\n\tlogrus.Debugf(\"Running mount command: %s %s\", mountPath, strings.Join(mountArgs, \" \"))\n\tif output, err := mountCmd.CombinedOutput(); err != nil {\n\t\tlogrus.Debugf(\"Mount %v failed with %v\", mountCmd, err)\n\t\treturn errors.New(string(output))\n\t}\n\n\tlogrus.Debugf(\"Mounted volume %s\", v.Name())\n\n\t// Increment the mount counter\n\tv.state.MountCount++\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\treturn v.save()\n}", "func (in *FileMount) DeepCopy() *FileMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(FileMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (z *zfsctl) Mount(ctx context.Context) *execute {\n\targs := []string{\"mount\"}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. 
Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func (k *Kubernetes) AddPodMountVolume(service *apistructs.Service, podSpec *apiv1.PodSpec,\n\tsecretvolmounts []apiv1.VolumeMount, secretvolumes []apiv1.Volume) error {\n\n\tpodSpec.Volumes = make([]apiv1.Volume, 0)\n\n\t//Pay attention to the settings mentioned above, there is only one container in a pod\n\tpodSpec.Containers[0].VolumeMounts = make([]apiv1.VolumeMount, 0)\n\n\t// get cluster info\n\tclusterInfo, err := k.ClusterInfo.Get()\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to get cluster info, clusterName: %s, (%v)\", k.clusterName, err)\n\t}\n\n\t// hostPath type\n\tfor i, bind := range service.Binds {\n\t\tif bind.HostPath == \"\" || bind.ContainerPath == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t//Name formation '[a-z0-9]([-a-z0-9]*[a-z0-9])?'\n\t\tname := \"volume\" + \"-bind-\" + strconv.Itoa(i)\n\n\t\thostPath, err := ParseJobHostBindTemplate(bind.HostPath, 
clusterInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strutil.HasPrefixes(hostPath, \"/\") {\n\t\t\tpvcName := strings.Replace(hostPath, \"_\", \"-\", -1)\n\t\t\tsc := \"dice-local-volume\"\n\t\t\tif err := k.pvc.CreateIfNotExists(&apiv1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\tNamespace: service.Namespace,\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []apiv1.PersistentVolumeAccessMode{apiv1.ReadWriteOnce},\n\t\t\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\t\t\tRequests: apiv1.ResourceList{\n\t\t\t\t\t\t\tapiv1.ResourceStorage: resource.MustParse(\"10Gi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &sc,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\t\tapiv1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\t\tapiv1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\tapiv1.Volume{\n\t\t\t\tName: name,\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\tPath: hostPath,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\n\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\tapiv1.VolumeMount{\n\t\t\t\tName: name,\n\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t})\n\t}\n\n\t// Configure the business container sidecar shared directory\n\tfor name, sidecar 
:= range service.SideCars {\n\t\tfor _, dir := range sidecar.SharedDirs {\n\t\t\temptyDirVolumeName := strutil.Concat(name, shardDirSuffix)\n\n\t\t\tsrcMount := apiv1.VolumeMount{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tMountPath: dir.Main,\n\t\t\t\tReadOnly: false, // rw\n\t\t\t}\n\t\t\t// Business master container\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes, apiv1.Volume{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tEmptyDir: &apiv1.EmptyDirVolumeSource{},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\tif service.InitContainer != nil {\n\t\tfor name, initc := range service.InitContainer {\n\t\t\tfor i, dir := range initc.SharedDirs {\n\t\t\t\tname := fmt.Sprintf(\"%s-%d\", name, i)\n\t\t\t\tsrcMount := apiv1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: dir.Main,\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t}\n\t\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\t\t\t\tpodSpec.Volumes = append(podSpec.Volumes, apiv1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\tEmptyDir: &apiv1.EmptyDirVolumeSource{},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tpodSpec.Volumes = append(podSpec.Volumes, secretvolumes...)\n\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, secretvolmounts...)\n\n\treturn nil\n}", "func (v *VolumeService) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(options.Name) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound and\n\t// check if the notfound should be ignored\n\tif strings.Contains(options.Name, \"notfound\") &&\n\t\t!strings.Contains(options.Name, \"ignorenotfound\") {\n\t\treturn types.Volume{},\n\t\t\t// 
nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// check if the volume is not-found and\n\t// check if the not-found should be ignored\n\tif strings.Contains(options.Name, \"not-found\") &&\n\t\t!strings.Contains(options.Name, \"ignore-not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: options.Driver,\n\t\tLabels: options.Labels,\n\t\tMountpoint: fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: options.Name,\n\t\tOptions: options.DriverOpts,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func (d *MinioDriver) mountVolume(volume *minioVolume) error {\n\n\tminioPath := fmt.Sprintf(\"%s/%s\", d.server, volume.bucketName)\n\n\t//NOTE: make this adjustable in the future for https if secure is passed.\n\tcmd := fmt.Sprintf(\"mount -t minfs http://%s %s\", minioPath, volume.mountpoint)\n\tif err := provisionConfig(d); err != nil {\n\t\treturn err\n\t}\n\n\tout, err := exec.Command(\"sh\", \"-c\", cmd).Output()\n\tif err != nil {\n\t\tglog.Warningf(\"Error while executing mount command (%s): %s\", cmd, err)\n\t\tglog.V(1).Infof(\"Dump output of command: %#v\", out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (in *VpVolumeAndMount) DeepCopy() *VpVolumeAndMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VpVolumeAndMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func 
Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (m *DefaultMounter) Mount(\n\tsource string,\n\ttarget string,\n\tfstype string,\n\tflags uintptr,\n\tdata string,\n\ttimeout int,\n) error {\n\treturn syscall.Mount(source, target, fstype, flags, data)\n}", "func (self *AltaActor) mountVolume() error {\n\t// For each volume\n\tfor _, volume := range self.Model.Spec.Volumes {\n\t\tlog.Infof(\"Mounting volume: %+v\", volume)\n\n\t\t// Mount the volume. create it if it doesnt exist\n\t\terr := volumesCtrler.MountVolume(volume, self.Model.CurrNode)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error mounting volume. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// Trigger next event\n\tself.AltaEvent(\"pullImg\")\n\n\treturn nil\n}", "func mount(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tif L == nil {\n\t\tmtab.Lock()\n\t\tdefer mtab.Unlock()\n\n\t\td := apl.Dict{}\n\t\tfor _, t := range mtab.tab {\n\t\t\tname := apl.String(t.mpt)\n\t\t\td.K = append(d.K, name)\n\t\t\tif d.M == nil {\n\t\t\t\td.M = make(map[apl.Value]apl.Value)\n\t\t\t}\n\t\t\td.M[name] = apl.String(t.src.String())\n\t\t}\n\t\treturn &d, nil\n\t}\n\tvar mpt, src string\n\tif s, ok := L.(apl.String); ok == false {\n\t\treturn nil, fmt.Errorf(\"io mount: left argument must be a string %T\", L)\n\t} else {\n\t\tmpt = string(s)\n\t}\n\tif s, ok := R.(apl.String); ok == false {\n\t\treturn nil, fmt.Errorf(\"io mount: right argument must be a string %T\", R)\n\t} else {\n\t\tsrc = string(s)\n\t}\n\n\t// Test if the filesystem matches a registerd protocol.\n\tfor name, f := range protocols {\n\t\tpre := name + \"://\"\n\t\tif strings.HasPrefix(src, pre) {\n\t\t\tfsys, err := f.FileSystem(strings.TrimPrefix(src, pre))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\tif err := Mount(mpt, fsys); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn apl.EmptyArray{}, nil\n\t\t}\n\t}\n\n\t// Special case, \".\" remains always relative.\n\tif src == \".\" {\n\t\tif err := Mount(mpt, fs(\".\")); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn apl.EmptyArray{}, nil\n\t}\n\n\t// Mount a directory.\n\tfi, err := os.Stat(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fi.IsDir() == false {\n\t\treturn nil, fmt.Errorf(\"io mount: src is not a directory: %s\", src)\n\t}\n\tabs, err := filepath.Abs(src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := Mount(mpt, fs(abs)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn apl.EmptyArray{}, nil\n}", "func CephSecretVolumeMount() v1.VolumeMount {\n\treturn v1.VolumeMount{\n\t\tName: cephSecretVolumeName,\n\t\tMountPath: CephSecretMountPath,\n\t\tReadOnly: true,\n\t}\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func (v *VaultFS) Mount() error {\n\tvar err error\n\tv.conn, err = fuse.Mount(\n\t\tv.mountpoint,\n\t\tfuse.FSName(\"vault\"),\n\t\tfuse.VolumeName(\"vault\"),\n\t)\n\n\tlogrus.Debug(\"created conn\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Debug(\"starting to serve\")\n\treturn fs.Serve(v.conn, v)\n}", "func (a *HyperflexApiService) PatchHyperflexVolumeExecute(r ApiPatchHyperflexVolumeRequest) (*HyperflexVolume, *http.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = http.MethodPatch\n\t\tlocalVarPostBody interface{}\n\t\tformFiles []formFile\n\t\tlocalVarReturnValue *HyperflexVolume\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"HyperflexApiService.PatchHyperflexVolume\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, &GenericOpenAPIError{error: 
err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/api/v1/hyperflex/Volumes/{Moid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"Moid\"+\"}\", url.PathEscape(parameterToString(r.moid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\tif r.hyperflexVolume == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"hyperflexVolume is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\", \"application/json-patch+json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ifMatch != nil {\n\t\tlocalVarHeaderParams[\"If-Match\"] = parameterToString(*r.ifMatch, \"\")\n\t}\n\t// body params\n\tlocalVarPostBody = r.hyperflexVolume\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, formFiles)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tvar v Error\n\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\tif err != nil {\n\t\t\tnewErr.error = err.Error()\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t\t}\n\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := &GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func parseMountInfoLine(line string) *Mount {\n\tfields := strings.Split(line, \" \")\n\tif len(fields) < 10 {\n\t\treturn nil\n\t}\n\n\t// Count the optional fields. 
In case new fields are appended later,\n\t// don't simply assume that n == len(fields) - 4.\n\tn := 6\n\tfor fields[n] != \"-\" {\n\t\tn++\n\t\tif n >= len(fields) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif n+3 >= len(fields) {\n\t\treturn nil\n\t}\n\n\tvar mnt *Mount = &Mount{}\n\tvar err error\n\tmnt.DeviceNumber, err = newDeviceNumberFromString(fields[2])\n\tif err != nil {\n\t\treturn nil\n\t}\n\tmnt.BindMnt = unescapeString(fields[3]) != \"/\"\n\tmnt.Path = unescapeString(fields[4])\n\tfor _, opt := range strings.Split(fields[5], \",\") {\n\t\tif opt == \"ro\" {\n\t\t\tmnt.ReadOnly = true\n\t\t}\n\t}\n\tmnt.FilesystemType = unescapeString(fields[n+1])\n\tmnt.Device = getDeviceName(mnt.DeviceNumber)\n\treturn mnt\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", 
\"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o FioSpecPtrOutput) Volume() FioSpecVolumePtrOutput {\n\treturn o.ApplyT(func(v *FioSpec) *FioSpecVolume {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Volume\n\t}).(FioSpecVolumePtrOutput)\n}", "func (c *Client) Mount(ctx context.Context, svc iaas.Service, export string, mountPoint string, withCache bool) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t\t\"MountPoint\": mountPoint,\n\t\t\"cacheOption\": map[bool]string{true: \"ac\", false: \"noac\"}[withCache],\n\t}\n\tstdout, xerr := executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_mount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to mount remote NFS share\")\n\t}\n\treturn nil\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", 
\"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o GroupInitContainerVolumeOutput) MountPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolume) string { return v.MountPath }).(pulumi.StringOutput)\n}", "func GetVolumeMountFromCustomConfigSpec(cfcm *apicommonv1.CustomConfig, volumeName, volumePath, defaultSubPath string) corev1.VolumeMount {\n\tsubPath := defaultSubPath\n\tif cfcm.ConfigMap != nil && len(cfcm.ConfigMap.Items) > 0 {\n\t\tsubPath = cfcm.ConfigMap.Items[0].Path\n\t}\n\n\treturn corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tMountPath: volumePath,\n\t\tSubPath: subPath,\n\t\tReadOnly: true,\n\t}\n}", "func createVolumeMountForStorage(storageType v1alpha1.NuxeoStorage, volumeName string) corev1.VolumeMount {\n\tmountPath := getMountPathForStorageType(storageType)\n\tvolMnt := corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tReadOnly: false,\n\t\tMountPath: mountPath,\n\t}\n\treturn volMnt\n}", "func (d *VolumeDriver) UnmountVolume(name string) error {\n\tlog.Errorf(\"VolumeDriver UnmountVolume to be implemented\")\n\treturn nil\n}", "func sharedUnixSocketVolumeMount() apiv1.VolumeMount {\n\treturn apiv1.VolumeMount{\n\t\tName: sidecar.UnixDomainSocketVolume,\n\t\tMountPath: pluggableComponentSocketMountPath,\n\t}\n}", "func (d *fsStorage) Mount(volume *Volume) error {\n\treturn nil\n}", "func (c Cvmfs) Mount(repo string) (string, error) {\n\treturn c.MountTag(repo, \"trunk\", HASH)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (t *DescribeLogDirsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ThrottleTimeMs, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Results\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 
{\n\t\tt.Results = make([]DescribeLogDirsResult35, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeLogDirsResult35\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Results[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": 
refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}", "func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}" ]
[ "0.70370895", "0.7028021", "0.67126447", "0.64057815", "0.6355857", "0.62809324", "0.6111949", "0.60714614", "0.6016495", "0.6016495", "0.6016495", "0.6016495", "0.6009047", "0.59229225", "0.5860798", "0.5855875", "0.5839067", "0.5703288", "0.55267155", "0.5500901", "0.55005896", "0.5463593", "0.5440593", "0.5427788", "0.52870816", "0.52732325", "0.52055675", "0.51709527", "0.51523286", "0.51435775", "0.5121928", "0.5118403", "0.50587535", "0.50567836", "0.50511587", "0.5039291", "0.5006985", "0.49978027", "0.4959416", "0.49381799", "0.49250108", "0.4920152", "0.4893831", "0.48850867", "0.48474377", "0.48445433", "0.48057184", "0.4805339", "0.47873107", "0.47624326", "0.4756901", "0.4741147", "0.47375435", "0.47347957", "0.47263932", "0.4726126", "0.4725497", "0.4724422", "0.4722732", "0.46994218", "0.46979472", "0.46769398", "0.46747366", "0.4672347", "0.46556842", "0.46519676", "0.46484652", "0.46451834", "0.4631562", "0.46219084", "0.46181569", "0.4606443", "0.46058375", "0.45940527", "0.45896658", "0.4585819", "0.45783395", "0.45760632", "0.45743817", "0.45717868", "0.45592323", "0.45544958", "0.45544735", "0.45484078", "0.45417982", "0.45391792", "0.45382413", "0.45381457", "0.45330054", "0.45239654", "0.45237032", "0.45139137", "0.4513639", "0.45127487", "0.45124197", "0.45055857", "0.44962186", "0.44950303", "0.4488851", "0.44744414" ]
0.83011043
0
Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse is an autogenerated conversion function.
func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error { return autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in 
*impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func (c *Controller) Mount(mountRequest k8sresources.FlexVolumeMountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-mount-start\")\n\tdefer c.logger.Println(\"controller-mount-end\")\n\tc.logger.Println(fmt.Sprintf(\"mountRequest [%#v]\", mountRequest))\n\tvar lnPath string\n\tattachRequest := resources.AttachRequest{Name: mountRequest.MountDevice, Host: getHost()}\n\tmountedPath, err := c.Client.Attach(attachRequest)\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to mount volume [%s], Error: %#v\", mountRequest.MountDevice, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\t}\n\tif mountRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\t//For k8s 1.5, by the time we do the attach/mount, the mountDir (MountPath) is not created trying to do mount and ln will fail because the dir is not found, so we need to create the directory before continuing\n\t\tdir := filepath.Dir(mountRequest.MountPath)\n\t\tc.logger.Printf(\"mountrequest.MountPath %s\", mountRequest.MountPath)\n\t\tlnPath = mountRequest.MountPath\n\t\tk8sRequiredMountPoint := path.Join(mountRequest.MountPath, mountRequest.MountDevice)\n\t\tif _, err = os.Stat(k8sRequiredMountPoint); err != nil {\n\t\t\tif os.IsNotExist(err) 
{\n\n\t\t\t\tc.logger.Printf(\"creating volume directory %s\", dir)\n\t\t\t\terr = os.MkdirAll(dir, 0777)\n\t\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\t\tmsg := fmt.Sprintf(\"Failed creating volume directory %#v\", err)\n\t\t\t\t\tc.logger.Println(msg)\n\n\t\t\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\t\t\tStatus: \"Failure\",\n\t\t\t\t\t\tMessage: msg,\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// For k8s 1.6 and later kubelet creates a folder as the MountPath, including the volume name, whenwe try to create the symlink this will fail because the same name exists. This is why we need to remove it before continuing.\n\t} else {\n\t\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\t\tif strings.HasPrefix(mountedPath, ubiquityMountPrefix) {\n\t\t\tlnPath = mountRequest.MountPath\n\t\t} else {\n\t\t\tlnPath, _ = path.Split(mountRequest.MountPath)\n\t\t}\n\t\tc.logger.Printf(\"removing folder %s\", mountRequest.MountPath)\n\n\t\terr = os.Remove(mountRequest.MountPath)\n\t\tif err != nil && !os.IsExist(err) {\n\t\t\tmsg := fmt.Sprintf(\"Failed removing existing volume directory %#v\", err)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\n\t\t}\n\n\t}\n\tsymLinkCommand := \"/bin/ln\"\n\targs := []string{\"-s\", mountedPath, lnPath}\n\tc.logger.Printf(fmt.Sprintf(\"creating slink from %s -> %s\", mountedPath, lnPath))\n\n\tvar stderr bytes.Buffer\n\tcmd := exec.Command(symLinkCommand, args...)\n\tcmd.Stderr = &stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Controller: mount failed to symlink %#v\", stderr.String())\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Failure\",\n\t\t\tMessage: msg,\n\t\t}\n\n\t}\n\tmsg := fmt.Sprintf(\"Volume mounted successfully to %s\", mountedPath)\n\tc.logger.Println(msg)\n\n\treturn 
k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: msg,\n\t}\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (d *VolumeDriver) Mount(r volume.MountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Mounting volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tlog.Errorf(\"VolumeDriver Mount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VolumeMount) DeepCopy() *VolumeMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VolumeMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error 
{\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (proxy *remoteDriverProxy) Mount(name, id string) (string, error) {\n\tvar req = remoteVolumeMountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeMountResp\n\n\tif err := proxy.client.CallService(remoteVolumeMountService, &req, &resp, true); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn \"\", errors.New(resp.Err)\n\t}\n\n\treturn resp.Mountpoint, nil\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func (d *DirDriver) Mount(req *volume.MountRequest) (*volume.MountResponse, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Mount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn nil, fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tvol.mounts[req.ID] = true\n\n\treturn &volume.MountResponse{\n\t\tMountpoint: vol.path,\n\t}, nil\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = 
fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func (d *MinioDriver) Mount(r volume.MountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Mount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections > 0 {\n\t\tv.connections++\n\t\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n\t}\n\n\tif err := d.mountVolume(v); err != nil {\n\t\tglog.Warningf(\"mounting %#v volume failed: %s\", v, err.Error())\n\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t}\n\n\t// if the mount was successful, then increment the number of connections we\n\t// have to the mount.\n\tv.connections++\n\treturn volumeResp(v.mountpoint, r.Name, nil, capability, \"\")\n}", "func 
Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func (*MountResponse) Descriptor() ([]byte, []int) {\n\treturn file_provider_v1alpha1_service_proto_rawDescGZIP(), []int{3}\n}", "func FormatAndMountVol(devicePath string, mountInfo *MountInfo) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\terr := mounter.FormatAndMount(devicePath, mountInfo.MountPath, mountInfo.FSType, mountInfo.MountOptions)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to mount volume %s [%s] to %s, error %v\",\n\t\t\tdevicePath, mountInfo.FSType, mountInfo.MountPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Controller) MountDevice(mountDeviceRequest k8sresources.FlexVolumeMountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-MountDevice-start\")\n\tdefer c.logger.Println(\"controller-MountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {\n\tif req == nil {\n\t\treturn \"\", fmt.Errorf(\"must provide non-nil request to MountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := 
p.verifyReachable(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlogrus.Infof(\"Mounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, mountPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := p.handleErrorResponse(resp, mountPath, req.Name); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmountRespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"reading response body from volume plugin %s: %w\", p.Name, err)\n\t}\n\n\tmountResp := new(volume.MountResponse)\n\tif err := json.Unmarshal(mountRespBytes, mountResp); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshalling volume plugin %s path response: %w\", p.Name, err)\n\t}\n\n\treturn mountResp.Mountpoint, nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out *v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (m *SimpleCSIProviderServer) Mount(ctx context.Context, req *v1alpha1.MountRequest) 
(*v1alpha1.MountResponse, error) {\n\tvar attrib, secret map[string]string\n\tvar filePermission os.FileMode\n\tvar err error\n\n\tif err = json.Unmarshal([]byte(req.GetAttributes()), &attrib); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal attributes, error: %+v\", err)\n\t}\n\tif err = json.Unmarshal([]byte(req.GetSecrets()), &secret); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal secrets, error: %+v\", err)\n\t}\n\tif err = json.Unmarshal([]byte(req.GetPermission()), &filePermission); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal file permission, error: %+v\", err)\n\t}\n\tif len(req.GetTargetPath()) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing target path\")\n\t}\n\n\tresp := &v1alpha1.MountResponse{\n\t\tObjectVersion: []*v1alpha1.ObjectVersion{},\n\t}\n\n\tif rawTokenContent, ok := attrib[\"csi.storage.k8s.io/serviceAccount.tokens\"]; ok {\n\t\ttokens := map[string]KubernetesTokenContent{}\n\t\terr := json.Unmarshal([]byte(rawTokenContent), &tokens)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Error unmarshaling tokens attribute: %v\", err)\n\t\t}\n\t\tfiles := []*v1alpha1.File{}\n\t\tfor sub, content := range tokens {\n\t\t\tu, _ := url.Parse(sub)\n\n\t\t\tpath := filepath.Join(u.Hostname(), u.EscapedPath())\n\t\t\tfiles = append(files, &v1alpha1.File{\n\t\t\t\tPath: path,\n\t\t\t\tContents: []byte(content.Token),\n\t\t\t})\n\t\t\tresp.ObjectVersion = append(resp.ObjectVersion, &v1alpha1.ObjectVersion{Id: fmt.Sprintf(\"secret/%s\", path), Version: \"v1\"})\n\t\t}\n\t\tresp.Files = append(resp.Files, files...)\n\n\t}\n\tif rawSecretContent, ok := attrib[\"secrets\"]; ok {\n\t\tsecretContents := []SimpleSecretKeyValue{}\n\t\terr := yaml.Unmarshal([]byte(rawSecretContent), &secretContents)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Error unmarshaling secret attribute: %v\", err)\n\t\t}\n\n\t\tfiles := []*v1alpha1.File{}\n\t\tfor _, kv := range secretContents {\n\t\t\tfiles = append(files, 
&v1alpha1.File{\n\t\t\t\tPath: kv.Key,\n\t\t\t\tContents: []byte(kv.Value),\n\t\t\t})\n\t\t\tresp.ObjectVersion = append(resp.ObjectVersion, &v1alpha1.ObjectVersion{Id: fmt.Sprintf(\"secret/%s\", kv.Key), Version: \"v1\"})\n\t\t}\n\t\tresp.Files = append(resp.Files, files...)\n\t}\n\treturn resp, nil\n}", "func (d *VolumeDriver) MountVolume(name string, fstype string, id string, isReadOnly bool, skipAttach bool) (string, error) {\n\tlog.Errorf(\"VolumeDriver MountVolume to be implemented\")\n\tmountpoint := getMountPoint(name)\n\treturn mountpoint, nil\n}", "func Convert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in *impl.CreateSymlinkResponse, out *v2alpha1.CreateSymlinkResponse) error {\n\treturn autoConvert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (driver *Driver) Mount(volumeName, volumeID string, overwriteFs bool, newFsType string) (string, error) {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn \"\", errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn \"\", errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn \"\", errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tswitch 
{\n\tcase len(volumes) == 0:\n\t\treturn \"\", errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn \"\", errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(\n\t\tvolumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\tvolumeAttachment, err = driver.sdm.AttachVolume(\n\t\t\tfalse, volumes[0].VolumeID, instances[0].InstanceID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn \"\", errors.New(\"Volume did not attach\")\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(mounts) > 0 {\n\t\treturn mounts[0].Mountpoint, nil\n\t}\n\n\tswitch {\n\tcase os.Getenv(\"REXRAY_DOCKER_VOLUMETYPE\") != \"\":\n\t\tnewFsType = os.Getenv(\"REXRAY_DOCKER_VOLUMETYPE\")\n\tcase newFsType == \"\":\n\t\tnewFsType = \"ext4\"\n\t}\n\n\tif err := driver.osdm.Format(volumeAttachment[0].DeviceName, newFsType, overwriteFs); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmountPath, err := getVolumeMountPath(volumes[0].Name)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := os.MkdirAll(mountPath, 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := driver.osdm.Mount(volumeAttachment[0].DeviceName, mountPath, \"\", \"\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn mountPath, nil\n}", "func (vm *ContainerVM) overlayMount() error {\n\tvm.effectivePath = filepath.Join(vm.instancePath, \"fs\")\n\tworkPath := filepath.Join(vm.instancePath, \"fs_work\")\n\n\terr := os.MkdirAll(vm.effectivePath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(workPath, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the overlay mountpoint\n\targs := []string{\n\t\t\"mount\",\n\t\t\"-t\",\n\t\t\"overlay\",\n\t\tfmt.Sprintf(\"megamount_%v\", 
vm.ID),\n\t\t\"-o\",\n\t\tfmt.Sprintf(\"lowerdir=%v,upperdir=%v,workdir=%v\", vm.FSPath, vm.effectivePath, workPath),\n\t\tvm.effectivePath,\n\t}\n\tlog.Debug(\"mounting overlay: %v\", args)\n\tout, err := processWrapper(args...)\n\tif err != nil {\n\t\tlog.Error(\"overlay mount: %v %v\", err, out)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o DiskResponseOutput) MountPoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiskResponse) string { return v.MountPoint }).(pulumi.StringOutput)\n}", "func (o SecretBackendV2Output) Mount() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecretBackendV2) pulumi.StringOutput { return v.Mount }).(pulumi.StringOutput)\n}", "func (service *LoanSrvc) Mount(ctx context.Context, mux goahttp.Muxer) goahttp.Server {\n\tendpoints := loan.NewEndpoints(service)\n\tsrv := server.New(endpoints, mux, goahttp.RequestDecoder, goahttp.ResponseEncoder, api.ErrorHandler, nil)\n\tserver.Mount(mux, srv)\n\n\tfor _, m := range srv.Mounts {\n\t\tlog.WithContext(ctx).Infof(\"%q mounted on %s %s\", m.Method, m.Verb, m.Pattern)\n\t}\n\treturn srv\n}", "func MountVolume(vol *apis.LVMVolume, mount *MountInfo, podLVInfo *PodLVInfo) error {\n\tvolume := vol.Spec.VolGroup + \"/\" + vol.Name\n\tmounted, err := verifyMountRequest(vol, mount.MountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mounted {\n\t\tklog.Infof(\"lvm : already mounted %s => %s\", volume, mount.MountPath)\n\t\treturn nil\n\t}\n\n\tdevicePath := DevPath + volume\n\n\terr = FormatAndMountVol(devicePath, mount)\n\tif err != nil {\n\t\treturn status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\t\"failed to format and mount the volume error: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t}\n\n\tklog.Infof(\"lvm: volume %v mounted %v fs %v\", volume, mount.MountPath, mount.FSType)\n\n\tif ioLimitsEnabled && podLVInfo != nil {\n\t\tif err := setIOLimits(vol, podLVInfo, devicePath); err != nil {\n\t\t\tklog.Warningf(\"lvm: error setting io limits: podUid %s, device %s, err=%v\", podLVInfo.UID, 
devicePath, err)\n\t\t} else {\n\t\t\tklog.Infof(\"lvm: io limits set for podUid %v, device %s\", podLVInfo.UID, devicePath)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (mounter *csiProxyMounterV1Beta) Mount(source string, target string, fstype string, options []string) error {\n\tklog.V(4).Infof(\"Mount: old name: %s. new name: %s\", source, target)\n\t// Mount is called after the format is done.\n\t// TODO: Confirm that fstype is empty.\n\tlinkRequest := &fs.LinkPathRequest{\n\t\tSourcePath: normalizeWindowsPath(source),\n\t\tTargetPath: normalizeWindowsPath(target),\n\t}\n\t_, err := mounter.FsClient.LinkPath(context.Background(), linkRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (v *VolumeService) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) {\n\t// verify a volume was provided\n\tif len(options.Name) == 0 {\n\t\treturn types.Volume{}, errors.New(\"no volume provided\")\n\t}\n\n\t// check if the volume is notfound and\n\t// check if the notfound should be ignored\n\tif strings.Contains(options.Name, \"notfound\") &&\n\t\t!strings.Contains(options.Name, \"ignorenotfound\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// check if the volume is not-found and\n\t// check if the not-found should be ignored\n\tif strings.Contains(options.Name, \"not-found\") &&\n\t\t!strings.Contains(options.Name, \"ignore-not-found\") {\n\t\treturn types.Volume{},\n\t\t\t// nolint:golint,stylecheck // messsage is capitalized to match Docker messages\n\t\t\terrdefs.NotFound(fmt.Errorf(\"Error: No such volume: %s\", options.Name))\n\t}\n\n\t// create response object to return\n\tresponse := types.Volume{\n\t\tCreatedAt: time.Now().String(),\n\t\tDriver: options.Driver,\n\t\tLabels: options.Labels,\n\t\tMountpoint: 
fmt.Sprintf(\"/var/lib/docker/volumes/%s/_data\", stringid.GenerateRandomID()),\n\t\tName: options.Name,\n\t\tOptions: options.DriverOpts,\n\t\tScope: \"local\",\n\t}\n\n\treturn response, nil\n}", "func Convert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in *impl.IsSymlinkResponse, out *v2alpha1.IsSymlinkResponse) error {\n\treturn autoConvert_impl_IsSymlinkResponse_To_v2alpha1_IsSymlinkResponse(in, out)\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func (d ImagefsDriver) Mount(r *volume.MountRequest) (*volume.MountResponse, error) {\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\terr = d.cli.ContainerStart(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\ttypes.ContainerStartOptions{},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tvar _ret *volume.MountResponse\n\tret, err := d.Path(&volume.PathRequest{Name: r.Name})\n\tif ret != nil {\n\t\t_ret = &volume.MountResponse{\n\t\t\tMountpoint: ret.Mountpoint,\n\t\t}\n\t}\n\treturn _ret, err\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (in *Mount) DeepCopy() *Mount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout 
:= new(Mount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VpVolumeAndMount) DeepCopy() *VpVolumeAndMount {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VpVolumeAndMount)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func VolumeManifest(v models.V1VolumeResponse, name, namespace string) error {\n\tfilesystem := corev1.PersistentVolumeFilesystem\n\tpv := corev1.PersistentVolume{\n\t\tTypeMeta: v1.TypeMeta{Kind: \"PersistentVolume\", APIVersion: \"v1\"},\n\t\tObjectMeta: v1.ObjectMeta{Name: name, Namespace: namespace},\n\t\tSpec: corev1.PersistentVolumeSpec{\n\t\t\tAccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},\n\t\t\tVolumeMode: &filesystem,\n\t\t\t// FIXME add Capacity once figured out\n\t\t\tStorageClassName: *v.StorageClass,\n\t\t\tPersistentVolumeSource: corev1.PersistentVolumeSource{\n\t\t\t\tCSI: &corev1.CSIPersistentVolumeSource{\n\t\t\t\t\tDriver: \"csi.lightbitslabs.com\",\n\t\t\t\t\tFSType: \"ext4\",\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t\tVolumeHandle: *v.VolumeHandle,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjs, err := json.Marshal(pv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to marshal to yaml:%w\", err)\n\t}\n\ty, err := yaml.JSONToYAML(js)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to marshal to yaml:%w\", err)\n\t}\n\tif len(v.ConnectedHosts) > 0 {\n\t\tnodes := ConnectedHosts(&v)\n\t\tfmt.Printf(\"# be cautios! at the time being your volume:%s is still attached to worker node:%s, you can not mount it twice\\n\", *v.VolumeID, strings.Join(nodes, \",\"))\n\t}\n\n\tfmt.Printf(\"%s\\n\", string(y))\n\treturn nil\n}", "func (self *AltaActor) mountVolume() error {\n\t// For each volume\n\tfor _, volume := range self.Model.Spec.Volumes {\n\t\tlog.Infof(\"Mounting volume: %+v\", volume)\n\n\t\t// Mount the volume. create it if it doesnt exist\n\t\terr := volumesCtrler.MountVolume(volume, self.Model.CurrNode)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error mounting volume. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// Trigger next event\n\tself.AltaEvent(\"pullImg\")\n\n\treturn nil\n}", "func parseMountInfoLine(line string) *Mount {\n\tfields := strings.Split(line, \" \")\n\tif len(fields) < 10 {\n\t\treturn nil\n\t}\n\n\t// Count the optional fields. In case new fields are appended later,\n\t// don't simply assume that n == len(fields) - 4.\n\tn := 6\n\tfor fields[n] != \"-\" {\n\t\tn++\n\t\tif n >= len(fields) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif n+3 >= len(fields) {\n\t\treturn nil\n\t}\n\n\tvar mnt *Mount = &Mount{}\n\tvar err error\n\tmnt.DeviceNumber, err = newDeviceNumberFromString(fields[2])\n\tif err != nil {\n\t\treturn nil\n\t}\n\tmnt.BindMnt = unescapeString(fields[3]) != \"/\"\n\tmnt.Path = unescapeString(fields[4])\n\tfor _, opt := range strings.Split(fields[5], \",\") {\n\t\tif opt == \"ro\" {\n\t\t\tmnt.ReadOnly = true\n\t\t}\n\t}\n\tmnt.FilesystemType = unescapeString(fields[n+1])\n\tmnt.Device = getDeviceName(mnt.DeviceNumber)\n\treturn mnt\n}", "func (m *Manager) Mount() error {\n\tvar err error\n\tm.mountPoint, err = m.b.Mount(m.b.MountLabel)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m.mountPoint == \"\" {\n\t\treturn fmt.Errorf(\"container-id '%s' is not mounted\", m.b.ContainerID)\n\t}\n\treturn nil\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func ParseMountTable(filter FilterFunc) ([]*MountInfo, error) {\n\tvar rawEntries *C.struct_statfs\n\n\tcount := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))\n\tif count == 0 {\n\t\treturn nil, fmt.Errorf(\"Failed to call getmntinfo\")\n\t}\n\n\tvar entries []C.struct_statfs\n\theader := (*reflect.SliceHeader)(unsafe.Pointer(&entries))\n\theader.Cap = count\n\theader.Len = count\n\theader.Data = 
uintptr(unsafe.Pointer(rawEntries))\n\n\tvar out []*MountInfo\n\tfor _, entry := range entries {\n\t\tvar mountinfo MountInfo\n\t\tvar skip, stop bool\n\t\tmountinfo.MountPoint = C.GoString(&entry.f_mntonname[0])\n\n\t\tif filter != nil {\n\t\t\t// filter out entries we're not interested in\n\t\t\tskip, stop = filter(p)\n\n\t\t\tif skip {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tmountinfo.MountSource = C.GoString(&entry.f_mntfromname[0])\n\t\tmountinfo.FilesystemType = C.GoString(&entry.f_fstypename[0])\n\n\t\tout = append(out, &mountinfo)\n\t\tif stop {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn out, nil\n}", "func (o FunctionServiceConfigSecretVolumeOutput) MountPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FunctionServiceConfigSecretVolume) string { return v.MountPath }).(pulumi.StringOutput)\n}", "func (z *ZfsH) Mount(d *Dataset, overlay bool, options []string) (*Dataset, error) {\n\tif d.Type == DatasetSnapshot {\n\t\treturn nil, errors.New(\"cannot mount snapshots\")\n\t}\n\targs := make([]string, 1, 5)\n\targs[0] = \"mount\"\n\tif overlay {\n\t\targs = append(args, \"-O\")\n\t}\n\tif options != nil {\n\t\targs = append(args, \"-o\")\n\t\targs = append(args, strings.Join(options, \",\"))\n\t}\n\targs = append(args, d.Name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(d.Name)\n}", "func (s *OsdCsiServer) ListVolumes(\n\tctx context.Context,\n\treq *csi.ListVolumesRequest,\n) (*csi.ListVolumesResponse, error) {\n\n\tlogrus.Debugf(\"ListVolumes req[%#v]\", req)\n\n\t// Until the issue #138 on the CSI spec is resolved we will not support\n\t// tokenization\n\tif req.GetMaxEntries() != 0 {\n\t\treturn nil, status.Error(\n\t\t\tcodes.Unimplemented,\n\t\t\t\"Driver does not support tokenization. 
Please see \"+\n\t\t\t\t\"https://github.com/container-storage-interface/spec/issues/138\")\n\t}\n\n\tvolumes, err := s.driver.Enumerate(&api.VolumeLocator{}, nil)\n\tif err != nil {\n\t\terrs := fmt.Sprintf(\"Unable to get list of volumes: %s\", err.Error())\n\t\tlogrus.Errorln(errs)\n\t\treturn nil, status.Error(codes.Internal, errs)\n\t}\n\tentries := make([]*csi.ListVolumesResponse_Entry, len(volumes))\n\tfor i, v := range volumes {\n\t\t// Initialize entry\n\t\tentries[i] = &csi.ListVolumesResponse_Entry{\n\t\t\tVolume: &csi.Volume{},\n\t\t}\n\n\t\t// Required\n\t\tentries[i].Volume.Id = v.Id\n\n\t\t// This entry is optional in the API, but OSD has\n\t\t// the information available to provide it\n\t\tentries[i].Volume.CapacityBytes = int64(v.Spec.Size)\n\n\t\t// Attributes. We can add or remove as needed since they\n\t\t// are optional and opaque to the Container Orchestrator(CO)\n\t\t// but could be used for debugging using a csi complient client.\n\t\tentries[i].Volume.Attributes = osdVolumeAttributes(v)\n\t}\n\n\treturn &csi.ListVolumesResponse{\n\t\tEntries: entries,\n\t}, nil\n}", "func (client *Client) CreateVolumeAttachment(request api.VolumeAttachmentRequest) (*api.VolumeAttachment, error) {\n\t// Create the attachment\n\tva, err := volumeattach.Create(client.Compute, request.ServerID, volumeattach.CreateOpts{\n\t\tVolumeID: request.VolumeID,\n\t}).Extract()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating volume attachment between server %s and volume %s: %s\", request.ServerID, request.VolumeID, ProviderErrorToString(err))\n\t}\n\n\tvaapi := &api.VolumeAttachment{\n\t\tID: va.ID,\n\t\tServerID: va.ServerID,\n\t\tVolumeID: va.VolumeID,\n\t\tDevice: va.Device,\n\t}\n\n\t// Update the metadata\n\n\tmtdVol, err := metadata.LoadVolume(providers.FromClient(client), request.VolumeID)\n\tif err != nil {\n\n\t\t// Detach volume\n\t\tdetach_err := volumeattach.Delete(client.Compute, va.ServerID, va.ID).ExtractErr()\n\t\tif detach_err != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"Error deleting volume attachment %s: %s\", va.ID, ProviderErrorToString(err))\n\t\t}\n\n\t\treturn nil, err\n\t}\n\terr = mtdVol.Attach(vaapi)\n\tif err != nil {\n\t\t// Detach volume\n\t\tdetach_err := volumeattach.Delete(client.Compute, va.ServerID, va.ID).ExtractErr()\n\t\tif detach_err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error deleting volume attachment %s: %s\", va.ID, ProviderErrorToString(err))\n\t\t}\n\n\t\treturn vaapi, err\n\t}\n\n\treturn vaapi, nil\n}", "func (d *driverInfo) Mount(volume *Volume) error {\n\t// don't mount twice\n\tif err := volume.CheckUnmounted(); err != nil {\n\t\treturn err\n\t}\n\n\tvolume.MountPath = d.getMountPath(volume.Name)\n\texists, err := fs.DirExists(volume.MountPath)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing mount path '%s': %v\", volume.MountPath, err)\n\t}\n\n\tif !exists {\n\t\tif err := fs.CreateDir(volume.MountPath, true, 0700); err != nil {\n\t\t\treturn fmt.Errorf(\"error creating mount path '%s': %v\", volume.MountPath, err)\n\t\t}\n\t}\n\n\tif err := d.storage.Mount(volume); err != nil {\n\t\tvolume.MountPath = \"\"\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (osUtils *OsUtils) EnsureMountVol(ctx context.Context, volCap *csi.VolumeCapability) (string, []string, error) {\n\tlog := logger.GetLogger(ctx)\n\tmountVol := volCap.GetMount()\n\tif mountVol == nil {\n\t\treturn \"\", nil, logger.LogNewErrorCode(log, codes.InvalidArgument, \"access type missing\")\n\t}\n\tfs, err := osUtils.GetVolumeCapabilityFsType(ctx, volCap)\n\tif err != nil {\n\t\tlog.Errorf(\"GetVolumeCapabilityFsType failed with err: %v\", err)\n\t\treturn \"\", nil, err\n\t}\n\n\tmntFlags := mountVol.GetMountFlags()\n\t// By default, xfs does not allow mounting of two volumes with the same filesystem uuid.\n\t// Force ignore this uuid to be able to mount volume + its clone / restored snapshot on the same node.\n\tif fs == common.XFSType {\n\t\tmntFlags = append(mntFlags, 
\"nouuid\")\n\t}\n\n\treturn fs, mntFlags, nil\n}", "func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {\n\tdriver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)\n\tif err != nil || driver == nil {\n\t\tglog.Errorf(\"Failed to get portworx driver. Err: %v\", err)\n\t\treturn err\n\t}\n\n\terr = driver.Mount(m.volName, mountPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Error mounting Portworx Volume (%v) on Path (%v): %v\", m.volName, mountPath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (k *Kubernetes) AddPodMountVolume(service *apistructs.Service, podSpec *corev1.PodSpec,\n\tsecretvolmounts []corev1.VolumeMount, secretvolumes []corev1.Volume) error {\n\n\tif len(podSpec.Volumes) == 0 {\n\t\tpodSpec.Volumes = make([]corev1.Volume, 0)\n\t}\n\n\t//Pay attention to the settings mentioned above, there is only one container in a pod\n\tif len(podSpec.Containers[0].VolumeMounts) == 0 {\n\t\tpodSpec.Containers[0].VolumeMounts = make([]corev1.VolumeMount, 0)\n\t}\n\n\t// get cluster info\n\tclusterInfo, err := k.ClusterInfo.Get()\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to get cluster info, clusterName: %s, (%v)\", k.clusterName, err)\n\t}\n\n\t// hostPath type\n\tfor i, bind := range service.Binds {\n\t\tif bind.HostPath == \"\" || bind.ContainerPath == \"\" {\n\t\t\treturn errors.New(\"bind HostPath or ContainerPath is empty\")\n\t\t}\n\t\t//Name formation '[a-z0-9]([-a-z0-9]*[a-z0-9])?'\n\t\tname := \"volume\" + \"-bind-\" + strconv.Itoa(i)\n\n\t\thostPath, err := ParseJobHostBindTemplate(bind.HostPath, clusterInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// The hostPath that does not start with an absolute path is used to apply for local disk resources in the old volume interface\n\t\tif !strings.HasPrefix(hostPath, \"/\") {\n\t\t\t//hostPath = strutil.Concat(\"/mnt/k8s/\", hostPath)\n\t\t\tpvcName := strings.Replace(hostPath, \"_\", \"-\", -1)\n\t\t\tsc := 
\"dice-local-volume\"\n\t\t\tif err := k.pvc.CreateIfNotExists(&corev1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\tNamespace: service.Namespace,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},\n\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\tcorev1.ResourceStorage: resource.MustParse(\"10Gi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &sc,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\t\tcorev1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\t\tcorev1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\tcorev1.Volume{\n\t\t\t\tName: name,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tHostPath: &corev1.HostPathVolumeSource{\n\t\t\t\t\t\tPath: hostPath,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\n\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: name,\n\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t})\n\t}\n\n\t// pvc volume type\n\tif len(service.Volumes) > 0 {\n\t\tif err := k.setStatelessServiceVolumes(service, podSpec); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Configure the business container sidecar shared directory\n\tfor name, sidecar := 
range service.SideCars {\n\t\tfor _, dir := range sidecar.SharedDirs {\n\t\t\temptyDirVolumeName := strutil.Concat(name, shardDirSuffix)\n\n\t\t\tquantitySize := resource.MustParse(k8sapi.PodEmptyDirSizeLimit10Gi)\n\t\t\tif sidecar.Resources.EmptyDirCapacity > 0 {\n\t\t\t\tmaxEmptyDir := fmt.Sprintf(\"%dGi\", sidecar.Resources.EmptyDirCapacity)\n\t\t\t\tquantitySize = resource.MustParse(maxEmptyDir)\n\t\t\t}\n\n\t\t\tsrcMount := corev1.VolumeMount{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tMountPath: dir.Main,\n\t\t\t\tReadOnly: false, // rw\n\t\t\t}\n\t\t\t// Business master container\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes, corev1.Volume{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\tSizeLimit: &quantitySize,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\tif service.InitContainer != nil {\n\t\tfor name, initc := range service.InitContainer {\n\t\t\tfor i, dir := range initc.SharedDirs {\n\t\t\t\tname := fmt.Sprintf(\"%s-%d\", name, i)\n\t\t\t\tsrcMount := corev1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: dir.Main,\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t}\n\t\t\t\tquantitySize := resource.MustParse(k8sapi.PodEmptyDirSizeLimit10Gi)\n\t\t\t\tif initc.Resources.EmptyDirCapacity > 0 {\n\t\t\t\t\tmaxEmptyDir := fmt.Sprintf(\"%dGi\", initc.Resources.EmptyDirCapacity)\n\t\t\t\t\tquantitySize = resource.MustParse(maxEmptyDir)\n\t\t\t\t}\n\t\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\t\t\t\tpodSpec.Volumes = append(podSpec.Volumes, corev1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{\n\t\t\t\t\t\t\tSizeLimit: &quantitySize,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tpodSpec.Volumes = 
append(podSpec.Volumes, secretvolumes...)\n\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, secretvolmounts...)\n\n\treturn nil\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (o *Filesystem) Mount(ctx context.Context, options map[string]dbus.Variant) (mountPath string, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Mount\", 0, options).Store(&mountPath)\n\treturn\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateActionDiskRmaResponse() (response *ActionDiskRmaResponse) {\n\tresponse = &ActionDiskRmaResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o FioSpecOutput) Volume() FioSpecVolumeOutput {\n\treturn o.ApplyT(func(v FioSpec) FioSpecVolume { return v.Volume }).(FioSpecVolumeOutput)\n}", "func (l *LiveComponent) Mount(a *LiveTimeChannel) {\n\tl.Component.BeforeMount(l)\n\tl.IsMounted = true\n\tl.LifeTimeChannel = a\n\tl.Component.Mounted(l)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (client VolumesClient) ListResponder(resp *http.Response) (result VolumeList, err 
error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o GroupInitContainerVolumeOutput) MountPath() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolume) string { return v.MountPath }).(pulumi.StringOutput)\n}", "func (c *Catalog) DefaultVolumeMount(name string) corev1.VolumeMount {\n\treturn corev1.VolumeMount{\n\t\tName: name,\n\t\tMountPath: \"/etc/random\",\n\t}\n}", "func (s *DataStore) CreateVolume(v *longhorn.Volume) (*longhorn.Volume, error) {\n\tif err := FixupRecurringJob(v); err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := s.lhClient.LonghornV1beta2().Volumes(s.namespace).Create(context.TODO(), v, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif SkipListerCheck {\n\t\treturn ret, nil\n\t}\n\n\tobj, err := verifyCreation(ret.Name, \"volume\", func(name string) (runtime.Object, error) {\n\t\treturn s.GetVolumeRO(name)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret, ok := obj.(*longhorn.Volume)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"BUG: datastore: verifyCreation returned wrong type for volume\")\n\t}\n\n\treturn ret.DeepCopy(), nil\n}", "func createVolumeMountForStorage(storageType v1alpha1.NuxeoStorage, volumeName string) corev1.VolumeMount {\n\tmountPath := getMountPathForStorageType(storageType)\n\tvolMnt := corev1.VolumeMount{\n\t\tName: volumeName,\n\t\tReadOnly: false,\n\t\tMountPath: mountPath,\n\t}\n\treturn volMnt\n}", "func Convert_ecskube_ECSMountPoint_To_v1alpha1_ECSMountPoint(in *ecskube.ECSMountPoint, out *ECSMountPoint, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSMountPoint_To_v1alpha1_ECSMountPoint(in, out, s)\n}", "func (r *vdm) Mount(\n\tvolumeName, volumeID string,\n\toverwriteFs bool, newFsType string, preempt bool) (string, error) 
{\n\tfor _, d := range r.drivers {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"moduleName\": r.rexray.Context,\n\t\t\t\"driverName\": d.Name(),\n\t\t\t\"volumeName\": volumeName,\n\t\t\t\"volumeID\": volumeID,\n\t\t\t\"overwriteFs\": overwriteFs,\n\t\t\t\"newFsType\": newFsType,\n\t\t\t\"preempt\": preempt}).Info(\"vdm.Mount\")\n\n\t\tif !preempt {\n\t\t\tpreempt = r.preempt()\n\t\t}\n\n\t\tmp, err := d.Mount(volumeName, volumeID, overwriteFs, newFsType, preempt)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tr.countUse(volumeName)\n\n\t\treturn mp, nil\n\t}\n\treturn \"\", errors.ErrNoVolumesDetected\n}", "func (o FioSpecVolumeVolumeSourceOutput) FlexVolume() FioSpecVolumeVolumeSourceFlexVolumePtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceFlexVolume { return v.FlexVolume }).(FioSpecVolumeVolumeSourceFlexVolumePtrOutput)\n}", "func (h *RestHandler) Mount(root *echo.Group) {\n\tv1Root := root.Group(helper.V1)\n\n\tcustomer := v1Root.Group(\"/customer\")\n\tcustomer.GET(\"\", h.hello)\n}", "func CreateModifyDirectoryResponse() (response *ModifyDirectoryResponse) {\n\tresponse = &ModifyDirectoryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (k *Kubernetes) AddPodMountVolume(service *apistructs.Service, podSpec *apiv1.PodSpec,\n\tsecretvolmounts []apiv1.VolumeMount, secretvolumes []apiv1.Volume) error {\n\n\tpodSpec.Volumes = make([]apiv1.Volume, 0)\n\n\t//Pay attention to the settings mentioned above, there is only one container in a pod\n\tpodSpec.Containers[0].VolumeMounts = make([]apiv1.VolumeMount, 0)\n\n\t// get cluster info\n\tclusterInfo, err := k.ClusterInfo.Get()\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to get cluster info, clusterName: %s, (%v)\", k.clusterName, err)\n\t}\n\n\t// hostPath type\n\tfor i, bind := range service.Binds {\n\t\tif bind.HostPath == \"\" || bind.ContainerPath == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t//Name formation 
'[a-z0-9]([-a-z0-9]*[a-z0-9])?'\n\t\tname := \"volume\" + \"-bind-\" + strconv.Itoa(i)\n\n\t\thostPath, err := ParseJobHostBindTemplate(bind.HostPath, clusterInfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strutil.HasPrefixes(hostPath, \"/\") {\n\t\t\tpvcName := strings.Replace(hostPath, \"_\", \"-\", -1)\n\t\t\tsc := \"dice-local-volume\"\n\t\t\tif err := k.pvc.CreateIfNotExists(&apiv1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\tNamespace: service.Namespace,\n\t\t\t\t},\n\t\t\t\tSpec: apiv1.PersistentVolumeClaimSpec{\n\t\t\t\t\tAccessModes: []apiv1.PersistentVolumeAccessMode{apiv1.ReadWriteOnce},\n\t\t\t\t\tResources: apiv1.ResourceRequirements{\n\t\t\t\t\t\tRequests: apiv1.ResourceList{\n\t\t\t\t\t\t\tapiv1.ResourceStorage: resource.MustParse(\"10Gi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tStorageClassName: &sc,\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\t\tapiv1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: fmt.Sprintf(\"%s-%s\", service.Name, pvcName),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\t\tapiv1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: bind.ContainerPath,\n\t\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\tpodSpec.Volumes = append(podSpec.Volumes,\n\t\t\tapiv1.Volume{\n\t\t\t\tName: name,\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\tPath: hostPath,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\n\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts,\n\t\t\tapiv1.VolumeMount{\n\t\t\t\tName: name,\n\t\t\t\tMountPath: 
bind.ContainerPath,\n\t\t\t\tReadOnly: bind.ReadOnly,\n\t\t\t})\n\t}\n\n\t// Configure the business container sidecar shared directory\n\tfor name, sidecar := range service.SideCars {\n\t\tfor _, dir := range sidecar.SharedDirs {\n\t\t\temptyDirVolumeName := strutil.Concat(name, shardDirSuffix)\n\n\t\t\tsrcMount := apiv1.VolumeMount{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tMountPath: dir.Main,\n\t\t\t\tReadOnly: false, // rw\n\t\t\t}\n\t\t\t// Business master container\n\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\n\t\t\tpodSpec.Volumes = append(podSpec.Volumes, apiv1.Volume{\n\t\t\t\tName: emptyDirVolumeName,\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tEmptyDir: &apiv1.EmptyDirVolumeSource{},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\tif service.InitContainer != nil {\n\t\tfor name, initc := range service.InitContainer {\n\t\t\tfor i, dir := range initc.SharedDirs {\n\t\t\t\tname := fmt.Sprintf(\"%s-%d\", name, i)\n\t\t\t\tsrcMount := apiv1.VolumeMount{\n\t\t\t\t\tName: name,\n\t\t\t\t\tMountPath: dir.Main,\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t}\n\t\t\t\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, srcMount)\n\t\t\t\tpodSpec.Volumes = append(podSpec.Volumes, apiv1.Volume{\n\t\t\t\t\tName: name,\n\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\tEmptyDir: &apiv1.EmptyDirVolumeSource{},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tpodSpec.Volumes = append(podSpec.Volumes, secretvolumes...)\n\tpodSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, secretvolmounts...)\n\n\treturn nil\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *OsdCsiServer) CreateVolume(\n\tctx 
context.Context,\n\treq *csi.CreateVolumeRequest,\n) (*csi.CreateVolumeResponse, error) {\n\n\t// Log request\n\tlogrus.Debugf(\"CreateVolume req[%#v]\", *req)\n\n\tif len(req.GetName()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Name must be provided\")\n\t}\n\tif req.GetVolumeCapabilities() == nil || len(req.GetVolumeCapabilities()) == 0 {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume capabilities must be provided\")\n\t}\n\n\t// Get parameters\n\tspec, locator, source, err := s.specHandler.SpecFromOpts(req.GetParameters())\n\tif err != nil {\n\t\te := fmt.Sprintf(\"Unable to get parameters: %s\\n\", err.Error())\n\t\tlogrus.Errorln(e)\n\t\treturn nil, status.Error(codes.InvalidArgument, e)\n\t}\n\n\t// Get Size\n\tif req.GetCapacityRange() != nil && req.GetCapacityRange().GetRequiredBytes() != 0 {\n\t\tspec.Size = uint64(req.GetCapacityRange().GetRequiredBytes())\n\t} else {\n\t\tspec.Size = defaultCSIVolumeSize\n\t}\n\n\t// Create response\n\tvolume := &csi.Volume{}\n\tresp := &csi.CreateVolumeResponse{\n\t\tVolume: volume,\n\t}\n\n\t// Check if the volume has already been created or is in process of creation\n\tv, err := util.VolumeFromName(s.driver, req.GetName())\n\tif err == nil {\n\t\t// Check the requested arguments match that of the existing volume\n\t\tif spec.Size != v.GetSpec().GetSize() {\n\t\t\treturn nil, status.Errorf(\n\t\t\t\tcodes.AlreadyExists,\n\t\t\t\t\"Existing volume has a size of %v which differs from requested size of %v\",\n\t\t\t\tv.GetSpec().GetSize(),\n\t\t\t\tspec.Size)\n\t\t}\n\t\tif v.GetSpec().GetShared() != csiRequestsSharedVolume(req) {\n\t\t\treturn nil, status.Errorf(\n\t\t\t\tcodes.AlreadyExists,\n\t\t\t\t\"Existing volume has shared=%v while request is asking for shared=%v\",\n\t\t\t\tv.GetSpec().GetShared(),\n\t\t\t\tcsiRequestsSharedVolume(req))\n\t\t}\n\t\tif v.GetSource().GetParent() != source.GetParent() {\n\t\t\treturn nil, status.Error(codes.AlreadyExists, \"Existing volume has 
conflicting parent value\")\n\t\t}\n\n\t\t// Return information on existing volume\n\t\tosdToCsiVolumeInfo(volume, v)\n\t\treturn resp, nil\n\t}\n\n\t// Check if this is a cloning request to create a volume from a snapshot\n\tif req.GetVolumeContentSource().GetSnapshot() != nil {\n\t\tsource.Parent = req.GetVolumeContentSource().GetSnapshot().GetId()\n\t}\n\n\t// Check if the caller is asking to create a snapshot or for a new volume\n\tvar id string\n\tif source != nil && len(source.GetParent()) != 0 {\n\t\t// Get parent volume information\n\t\tparent, err := util.VolumeFromName(s.driver, source.Parent)\n\t\tif err != nil {\n\t\t\te := fmt.Sprintf(\"unable to get parent volume information: %s\\n\", err.Error())\n\t\t\tlogrus.Errorln(e)\n\t\t\treturn nil, status.Error(codes.InvalidArgument, e)\n\t\t}\n\n\t\t// Create a snapshot from the parent\n\t\tid, err = s.driver.Snapshot(parent.GetId(), false, &api.VolumeLocator{\n\t\t\tName: req.GetName(),\n\t\t},\n\t\t\tfalse)\n\t\tif err != nil {\n\t\t\te := fmt.Sprintf(\"unable to create snapshot: %s\\n\", err.Error())\n\t\t\tlogrus.Errorln(e)\n\t\t\treturn nil, status.Error(codes.Internal, e)\n\t\t}\n\t} else {\n\t\t// Get Capabilities and Size\n\t\tspec.Shared = csiRequestsSharedVolume(req)\n\n\t\t// Create the volume\n\t\tlocator.Name = req.GetName()\n\n\t\t// get enforced policy specs\n\t\t// 0.3 Does not support user context or auth\n\t\tspec, err = sdkVol.GetDefaultVolSpecs(context.Background(), spec, false /* not an update */)\n\t\tif err != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, err.Error())\n\t\t}\n\t\tid, err = s.driver.Create(context.TODO(), locator, source, spec)\n\t\tif err != nil {\n\t\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t\t}\n\t}\n\n\t// id must have been set\n\tv, err = util.VolumeFromName(s.driver, id)\n\tif err != nil {\n\t\te := fmt.Sprintf(\"Unable to find newly created volume: %s\", err.Error())\n\t\tlogrus.Errorln(e)\n\t\treturn nil, status.Error(codes.Internal, 
e)\n\t}\n\tosdToCsiVolumeInfo(volume, v)\n\treturn resp, nil\n}", "func (z *zfsctl) Mount(ctx context.Context) *execute {\n\targs := []string{\"mount\"}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: 
s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (c *restClient) CreateVolume(ctx context.Context, req *netapppb.CreateVolumeRequest, opts ...gax.CallOption) (*CreateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := 
m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/volumes\", req.GetParent())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tparams.Add(\"volumeId\", fmt.Sprintf(\"%v\", req.GetVolumeId()))\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"parent\", url.QueryEscape(req.GetParent()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &CreateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) 
(interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func ParseLockDomainTransferResponse(rsp *http.Response) (*LockDomainTransferResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &LockDomainTransferResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest ScalewayDomainV2alpha2Domain\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (s *SnapshotsServiceOp) ListVolume(opt *ListOptions) ([]Snapshot, *Response, error) {\n\tlistOpt := listSnapshotOptions{ResourceType: \"volume\"}\n\treturn s.list(opt, &listOpt)\n}", "func (o *PostAPI24VolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPostApi24VolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostApi24VolumesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}" ]
[ "0.75360465", "0.7032238", "0.6891064", "0.6662742", "0.6020406", "0.59880185", "0.5882485", "0.5840293", "0.57796705", "0.57698405", "0.5749577", "0.5717668", "0.5673246", "0.5665232", "0.55357146", "0.5533523", "0.542637", "0.542637", "0.542637", "0.542637", "0.531564", "0.5297787", "0.524953", "0.5172566", "0.5140691", "0.5128864", "0.5113449", "0.5109826", "0.50880295", "0.5069417", "0.5059172", "0.5038982", "0.50335026", "0.49996862", "0.49970496", "0.4984895", "0.49695453", "0.49533293", "0.49468502", "0.49377668", "0.48814303", "0.48742723", "0.48673326", "0.48617312", "0.4789242", "0.47855943", "0.478497", "0.47495443", "0.47357145", "0.47123674", "0.46918353", "0.46913183", "0.46427947", "0.4575457", "0.45680007", "0.45234358", "0.4522401", "0.45197365", "0.4508772", "0.45077488", "0.4482511", "0.44685835", "0.44519535", "0.4417804", "0.44113714", "0.44095686", "0.44064584", "0.44022778", "0.43989566", "0.4397773", "0.43817133", "0.4380345", "0.43767813", "0.43696108", "0.43678904", "0.4362418", "0.43559405", "0.43452072", "0.4344771", "0.43416417", "0.4340521", "0.4337704", "0.43364298", "0.43344668", "0.43337652", "0.43309787", "0.43293706", "0.43272057", "0.4321355", "0.4317111", "0.43169343", "0.4302677", "0.43022054", "0.43017742", "0.42895588", "0.42883724", "0.42821866", "0.42767945", "0.42745596", "0.4271891" ]
0.85738486
0
Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest is an autogenerated conversion function.
func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error { return autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func 
Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (c *UFSClient) 
NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func ResizeVolume(vol *apis.ZFSVolume, newSize int64) error {\n\n\tvol.Spec.Capacity = strconv.FormatInt(int64(newSize), 10)\n\n\t_, err := volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(vol)\n\treturn err\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD 
implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (r *csiResizer) Resize(pv *v1.PersistentVolume, requestSize resource.Quantity) (resource.Quantity, bool, error) {\n\toldSize := pv.Spec.Capacity[v1.ResourceStorage]\n\n\tvar volumeID string\n\tvar source *v1.CSIPersistentVolumeSource\n\tvar pvSpec v1.PersistentVolumeSpec\n\tvar migrated bool\n\tif pv.Spec.CSI != nil {\n\t\t// handle CSI volume\n\t\tsource = pv.Spec.CSI\n\t\tvolumeID = source.VolumeHandle\n\t\tpvSpec = pv.Spec\n\t} else {\n\t\ttranslator := csitrans.New()\n\t\tif translator.IsMigratedCSIDriverByName(r.name) {\n\t\t\t// handle migrated in-tree volume\n\t\t\tcsiPV, err := translator.TranslateInTreePVToCSI(pv)\n\t\t\tif err != nil {\n\t\t\t\treturn oldSize, false, fmt.Errorf(\"failed to translate persistent volume: %v\", err)\n\t\t\t}\n\t\t\tmigrated = true\n\t\t\tsource = csiPV.Spec.CSI\n\t\t\tpvSpec = csiPV.Spec\n\t\t\tvolumeID = source.VolumeHandle\n\t\t} else {\n\t\t\t// non-migrated in-tree volume\n\t\t\treturn oldSize, false, fmt.Errorf(\"volume %v is not migrated to CSI\", pv.Name)\n\t\t}\n\t}\n\n\tif len(volumeID) == 0 {\n\t\treturn oldSize, false, errors.New(\"empty volume handle\")\n\t}\n\n\tvar secrets map[string]string\n\tsecreRef := source.ControllerExpandSecretRef\n\tif secreRef != nil {\n\t\tvar err error\n\t\tsecrets, err = 
getCredentials(r.k8sClient, secreRef)\n\t\tif err != nil {\n\t\t\treturn oldSize, false, err\n\t\t}\n\t}\n\n\tcapability, err := r.getVolumeCapabilities(pvSpec)\n\tif err != nil {\n\t\treturn oldSize, false, fmt.Errorf(\"failed to get capabilities of volume %s with %v\", pv.Name, err)\n\t}\n\n\tctx, cancel := timeoutCtx(r.timeout)\n\tresizeCtx := context.WithValue(ctx, connection.AdditionalInfoKey, connection.AdditionalInfo{Migrated: strconv.FormatBool(migrated)})\n\n\tdefer cancel()\n\tnewSizeBytes, nodeResizeRequired, err := r.client.Expand(resizeCtx, volumeID, requestSize.Value(), secrets, capability)\n\tif err != nil {\n\t\treturn oldSize, nodeResizeRequired, err\n\t}\n\n\treturn *resource.NewQuantity(newSizeBytes, resource.BinarySI), nodeResizeRequired, err\n}", "func Convert_v2alpha1_RmdirContentsRequest_To_impl_RmdirContentsRequest(in *v2alpha1.RmdirContentsRequest, out *impl.RmdirContentsRequest) error {\n\treturn autoConvert_v2alpha1_RmdirContentsRequest_To_impl_RmdirContentsRequest(in, out)\n}", "func (o *VolumeDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Force != nil {\n\n\t\t// query param force\n\t\tvar qrForce bool\n\t\tif o.Force != nil {\n\t\t\tqrForce = *o.Force\n\t\t}\n\t\tqForce := swag.FormatBool(qrForce)\n\t\tif qForce != \"\" {\n\t\t\tif err := r.SetQueryParam(\"force\", qForce); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (r *ProjectsLocationsVolumesService) 
Resize(volume string, resizevolumerequest *ResizeVolumeRequest) *ProjectsLocationsVolumesResizeCall {\n\tc := &ProjectsLocationsVolumesResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.volume = volume\n\tc.resizevolumerequest = resizevolumerequest\n\treturn c\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in *v2alpha1.GetVolumeIDFromTargetPathRequest, out *impl.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathRequest_To_impl_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := c.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := 
ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := c.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (o *GetContainersUUIDVolumesVolumeUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param volume_uuid\n\tif err := r.SetPathParam(\"volume_uuid\", o.VolumeUUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", 
err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := r.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := r.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (b *Buffer) Resize(w, h int) (*Buffer, error) {\n\treturn nil, errors.New(\"Not implemented\")\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func NewModifySubscriptionRequest(requestHeader ExtensionObjectDefinition, 
subscriptionId uint32, requestedPublishingInterval float64, requestedLifetimeCount uint32, requestedMaxKeepAliveCount uint32, maxNotificationsPerPublish uint32, priority uint8) *_ModifySubscriptionRequest {\n\t_result := &_ModifySubscriptionRequest{\n\t\tRequestHeader: requestHeader,\n\t\tSubscriptionId: subscriptionId,\n\t\tRequestedPublishingInterval: requestedPublishingInterval,\n\t\tRequestedLifetimeCount: requestedLifetimeCount,\n\t\tRequestedMaxKeepAliveCount: requestedMaxKeepAliveCount,\n\t\tMaxNotificationsPerPublish: maxNotificationsPerPublish,\n\t\tPriority: priority,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func (o *ContainerRenameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\tif o.OpID != nil {\n\n\t\t// header param Op-ID\n\t\tif err := r.SetHeaderParam(\"Op-ID\", *o.OpID); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// path param handle\n\tif err := r.SetPathParam(\"handle\", o.Handle); err != nil {\n\t\treturn err\n\t}\n\n\t// query param name\n\tqrName := o.Name\n\tqName := qrName\n\tif qName != \"\" {\n\t\tif err := r.SetQueryParam(\"name\", qName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UHostClient) NewResizeAttachedDiskRequest() *ResizeAttachedDiskRequest {\n\treq := &ResizeAttachedDiskRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn 
autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (c *BlockVolumeClient) Resize(params *BlockVolumeParams) (*BlockVolumeResize, error) {\n\tvar result BlockVolumeResize\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/resize\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func CreateUpdateMediaStorageClassRequest() (request *UpdateMediaStorageClassRequest) {\n\trequest = &UpdateMediaStorageClassRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vod\", \"2017-03-21\", \"UpdateMediaStorageClass\", \"vod\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateChangeMediaStatusRequest() (request *ChangeMediaStatusRequest) {\n\trequest = &ChangeMediaStatusRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"UniMkt\", \"2018-12-12\", \"ChangeMediaStatus\", \"uniMkt\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *UFSClient) NewRemoveUFSVolumeRequest() *RemoveUFSVolumeRequest {\n\treq := &RemoveUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *Encrypted) Resize(ctx context.Context, size 
uint64, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceEncrypted+\".Resize\", 0, size, options).Store()\n\treturn\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *AwsVolume) resize() error {\n\tLog.Infof(\"Resizing EBS volume %s\", m.name())\n\tsnapshot, err := m.createSnapshot()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := m.Delete(); err != nil {\n\t\treturn err\n\t}\n\tif err := m.createAwsVolume(snapshot.SnapshotId); err != nil {\n\t\treturn err\n\t}\n\tif err := m.deleteSnapshot(snapshot); err != nil {\n\t\tLog.Errorf(\"Error deleting snapshot %s: %s\", *snapshot.SnapshotId, err.Error())\n\t}\n\treturn nil\n}", "func Convert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in *v1beta1.ListVolumesOnDiskRequest, out *internal.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskRequest_To_internal_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func (a *DefaultApiService) VmResizePut(ctx _context.Context, vmResize VmResize) (*_nethttp.Response, error) {\n\tvar 
(\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/vm.resize\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &vmResize\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (o *Partition) Resize(ctx context.Context, size uint64, options 
map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfacePartition+\".Resize\", 0, size, options).Store()\n\treturn\n}", "func (o *CreateIscsiLunSnapshotParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ContentLanguage != nil {\n\n\t\t// header param content-language\n\t\tif err := r.SetHeaderParam(\"content-language\", *o.ContentLanguage); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif o.RequestBody != nil {\n\t\tif err := r.SetBodyParam(o.RequestBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *VectorThumbnailParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Reload != nil {\n\n\t\t// query param reload\n\t\tvar qrReload string\n\n\t\tif o.Reload != nil {\n\t\t\tqrReload = *o.Reload\n\t\t}\n\t\tqReload := qrReload\n\t\tif qReload != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"reload\", qReload); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param resource_id\n\tif err := r.SetPathParam(\"resource_id\", o.ResourceID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param type\n\tif err := r.SetPathParam(\"type\", o.Type); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func EncodeRingbufferCapacityRequest(name string) *proto.ClientMessage {\n\tclientMessage := proto.NewClientMessageForEncode()\n\tclientMessage.SetRetryable(true)\n\n\tinitialFrame := proto.NewFrameWith(make([]byte, RingbufferCapacityCodecRequestInitialFrameSize), 
proto.UnfragmentedMessage)\n\tclientMessage.AddFrame(initialFrame)\n\tclientMessage.SetMessageType(RingbufferCapacityCodecRequestMessageType)\n\tclientMessage.SetPartitionId(-1)\n\n\tEncodeString(clientMessage, name)\n\n\treturn clientMessage\n}", "func (client *Client) ExpandVolume(name, size string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/expand/volume/size/\\\"%s\\\"/\\\"%s\\\"\", size, name)\n}", "func (o *DeleteGroupConfigurationAdminV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param configurationCode\n\tif err := r.SetPathParam(\"configurationCode\", o.ConfigurationCode); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *SizeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Parameters); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func (o *CreateVolumeBackupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CreateVolumeBackupDetails == nil {\n\t\to.CreateVolumeBackupDetails = new(models.CreateVolumeBackupDetails)\n\t}\n\n\tif err := r.SetBodyParam(o.CreateVolumeBackupDetails); err != nil {\n\t\treturn err\n\t}\n\n\tif o.OpcRetryToken != nil {\n\n\t\t// header param opc-retry-token\n\t\tif err := r.SetHeaderParam(\"opc-retry-token\", *o.OpcRetryToken); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PatchStorageVirtualDriveExtensionsMoidParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\t// path param moid\n\tif err := r.SetPathParam(\"moid\", o.Moid); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer 
httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (o *ContainerUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.Update); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *UFSClient) NewDescribeUFSVolumePriceRequest() *DescribeUFSVolumePriceRequest {\n\treq := &DescribeUFSVolumePriceRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (o *ImagePruneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filters != nil {\n\n\t\t// query param filters\n\t\tvar qrFilters string\n\t\tif o.Filters != nil {\n\t\t\tqrFilters = *o.Filters\n\t\t}\n\t\tqFilters := qrFilters\n\t\tif qFilters != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filters\", qFilters); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func 
Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func (o *GetIngredientVersionRevisionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param ingredient_id\n\tif err := r.SetPathParam(\"ingredient_id\", o.IngredientID.String()); err != nil {\n\t\treturn err\n\t}\n\n\t// path param ingredient_version_id\n\tif err := r.SetPathParam(\"ingredient_version_id\", o.IngredientVersionID.String()); err != nil {\n\t\treturn err\n\t}\n\n\t// path param revision\n\tif err := r.SetPathParam(\"revision\", swag.FormatInt64(o.Revision)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func (s *Module) DiskResize(name string, size gridtypes.Unit) (disk pkg.VDisk, err error) {\n\tpath, err := s.findDisk(name)\n\tif err != nil {\n\t\treturn disk, errors.Wrapf(os.ErrNotExist, \"disk with id '%s' does not exists\", name)\n\t}\n\n\tfile, err := os.OpenFile(path, os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn pkg.VDisk{}, err\n\t}\n\n\tdefer file.Close()\n\n\tif err = syscall.Fallocate(int(file.Fd()), 0, 0, int64(size)); err != nil {\n\t\treturn disk, errors.Wrap(err, \"failed to truncate disk to size\")\n\t}\n\n\treturn pkg.VDisk{Path: path, Size: int64(size)}, nil\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out 
*v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func (o *RemoveAPIKeyPrivilegeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param key\n\tif err := r.SetPathParam(\"key\", o.Key); err != nil {\n\t\treturn err\n\t}\n\n\tif o.RemoveAPIKeyPrivilegeRequest != nil {\n\t\tif err := r.SetBodyParam(o.RemoveAPIKeyPrivilegeRequest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *RemoveDropRequestParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param drop-request-id\n\tif err := r.SetPathParam(\"drop-request-id\", o.DropRequestID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (resizer *DeploymentConfigResizer) Resize(namespace, name string, newSize uint, preconditions *kubectl.ResizePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {\n\tif preconditions == nil {\n\t\tpreconditions = &kubectl.ResizePrecondition{-1, \"\"}\n\t}\n\tif retry == nil {\n\t\t// Make it try only once, immediately\n\t\tretry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}\n\t}\n\tcond := kubectl.ResizeCondition(resizer, preconditions, namespace, name, newSize)\n\tif err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {\n\t\treturn err\n\t}\n\tif waitForReplicas != nil {\n\t\trc := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: rcName}}\n\t\treturn 
wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,\n\t\t\tresizer.c.ControllerHasDesiredReplicas(rc))\n\t}\n\treturn nil\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *DeleteConditionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param condition-id\n\tif err := r.SetPathParam(\"condition-id\", o.ConditionID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param organization\n\tif 
err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetDeploymentPreview1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param environment\n\tif err := r.SetPathParam(\"environment\", o.Environment); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tenant\n\tif err := r.SetPathParam(\"tenant\", o.Tenant); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (o *MediaConnectionCloseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param media_connection_id\n\tif err := r.SetPathParam(\"media_connection_id\", o.MediaConnectionID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func 
Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func makeSizeDetectionVolumeSpec(pvcName string) *corev1.Volume {\n\treturn &corev1.Volume{\n\t\tName: cc.DataVolName,\n\t\tVolumeSource: corev1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: pvcName,\n\t\t\t},\n\t\t},\n\t}\n}", "func (o *GetPackageSearchActionOldSpacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param baseSpaceId\n\tif err := r.SetPathParam(\"baseSpaceId\", o.BaseSpaceID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (_BaseContentSpace *BaseContentSpaceFilterer) WatchUpdateRequest(opts *bind.WatchOpts, sink chan<- *BaseContentSpaceUpdateRequest) (event.Subscription, error) {\n\n\tlogs, sub, err := _BaseContentSpace.contract.WatchLogs(opts, \"UpdateRequest\")\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(BaseContentSpaceUpdateRequest)\n\t\t\t\tif err := _BaseContentSpace.contract.UnpackLog(event, \"UpdateRequest\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (o *AddVMParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\tif o.Body == nil {\n\t\to.Body = new(models.VM)\n\t}\n\n\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetLogicalPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param lport-id\n\tif err := r.SetPathParam(\"lport-id\", o.LportID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CacheServiceMetricsKeySizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn 
ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (o *UploadDeployFileParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param deploy_id\n\tif err := r.SetPathParam(\"deploy_id\", o.DeployID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.FileBody != nil {\n\t\tif err := r.SetBodyParam(o.FileBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param path\n\tif err := r.SetPathParam(\"path\", o.Path); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Size != nil {\n\n\t\t// query param size\n\t\tvar qrSize int64\n\t\tif o.Size != nil {\n\t\t\tqrSize = *o.Size\n\t\t}\n\t\tqSize := swag.FormatInt64(qrSize)\n\t\tif qSize != \"\" {\n\t\t\tif err := r.SetQueryParam(\"size\", qSize); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func UpdateVolumeInVolumeParameters(volParam map[string]map[string]string) map[string]map[string]string {\n\tupdatedVolumeParam := make(map[string]map[string]string)\n\tfor _, param := range volParam {\n\t\tif _, ok := param[k8s.PvcNameKey]; ok {\n\t\t\tif _, ok := param[k8s.PvcNamespaceKey]; ok {\n\t\t\t\tpvcName, pvcNamespace := param[k8s.PvcNameKey], param[k8s.PvcNamespaceKey]\n\t\t\t\tPVName, err := Inst().S.GetVolumeDriverVolumeName(pvcName, pvcNamespace)\n\t\t\t\texpect(err).NotTo(haveOccurred())\n\t\t\t\tupdatedVolumeParam[PVName] = param\n\t\t\t}\n\t\t}\n\n\t}\n\treturn updatedVolumeParam\n}", "func (o *CatalogTierPriceStorageV1ReplacePutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.CatalogTierPriceStorageV1ReplacePutBody); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in *v2alpha1.GetClosestVolumeIDFromTargetPathRequest, out *impl.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathRequest_To_impl_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (o *ValidateCreateServiceRequestNamingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutProductsNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := r.SetBodyParam(o.Product); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostHyperflexAutoSupportPoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
[ "0.73424244", "0.7137503", "0.67646444", "0.63496286", "0.63221925", "0.6248215", "0.6229517", "0.5956863", "0.5956117", "0.58054453", "0.5629652", "0.55630666", "0.5538474", "0.5433632", "0.52979827", "0.52327645", "0.51988304", "0.51064426", "0.5095173", "0.5059962", "0.5055952", "0.49765384", "0.49318993", "0.49082", "0.4903217", "0.48071548", "0.47531334", "0.47476286", "0.47393182", "0.47340786", "0.47033724", "0.46809956", "0.4654309", "0.46540424", "0.4639689", "0.4618531", "0.46069476", "0.45949244", "0.4593363", "0.45573458", "0.45268607", "0.45157856", "0.45127612", "0.45120382", "0.4495116", "0.44637161", "0.44577017", "0.4456485", "0.44436067", "0.44422165", "0.44355878", "0.44006443", "0.4397521", "0.43926623", "0.43823612", "0.4378421", "0.43767658", "0.43767658", "0.43693513", "0.4353918", "0.43356717", "0.43159577", "0.43106338", "0.42959005", "0.42946514", "0.42761463", "0.4270025", "0.42655998", "0.42598417", "0.4255671", "0.4251069", "0.4242065", "0.42359796", "0.4235837", "0.42297438", "0.42211142", "0.42045087", "0.41788635", "0.41766757", "0.4173152", "0.4170328", "0.41489255", "0.41395792", "0.41354734", "0.4133747", "0.41296437", "0.4127589", "0.4126028", "0.41185427", "0.4106471", "0.40949133", "0.40874532", "0.4086253", "0.40845534", "0.40840754", "0.4072922", "0.4071817", "0.40621567", "0.40572712", "0.40572134" ]
0.88608325
0
Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest is an autogenerated conversion function.
func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error { return autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func 
Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in 
*v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in *internal.IsVolumeFormattedRequest, out *v1beta1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_internal_IsVolumeFormattedRequest_To_v1beta1_IsVolumeFormattedRequest(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be 
empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func Convert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in *internal.VolumeDiskNumberRequest, out *v1beta1.VolumeDiskNumberRequest) error {\n\treturn autoConvert_internal_VolumeDiskNumberRequest_To_v1beta1_VolumeDiskNumberRequest(in, out)\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func ResizeVolume(vol *apis.ZFSVolume, newSize int64) error {\n\n\tvol.Spec.Capacity = strconv.FormatInt(int64(newSize), 10)\n\n\t_, err := volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(vol)\n\treturn err\n}", "func Convert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in *impl.GetDiskNumberFromVolumeIDRequest, out *v2alpha1.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDRequest_To_v2alpha1_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func CreateChangeMediaStatusRequest() (request *ChangeMediaStatusRequest) {\n\trequest = &ChangeMediaStatusRequest{\n\t\tRpcRequest: 
&requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"UniMkt\", \"2018-12-12\", \"ChangeMediaStatus\", \"uniMkt\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func Convert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in *ecskube.ECSVolumeFrom, out *ECSVolumeFrom, s conversion.Scope) error {\n\treturn autoConvert_ecskube_ECSVolumeFrom_To_v1alpha1_ECSVolumeFrom(in, out, s)\n}", "func (r *RequestAPI) CreateRequestV1(ctx context.Context, req *desc.CreateRequestV1Request) (*desc.CreateRequestV1Response, error) {\n\tlog.Printf(\"Got create request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"CreateRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.CreateEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewReq := models.NewRequest(\n\t\t0,\n\t\treq.UserId,\n\t\treq.Type,\n\t\treq.Text,\n\t)\n\tnewId, err := r.repo.Add(ctx, newReq)\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tStr(\"endpoint\", \"CreateRequestV1\").\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to create request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, newId, producer.CreateEvent, err))\n\tr.metrics.IncCreate(1, \"CreateRequestV1\")\n\treturn &desc.CreateRequestV1Response{\n\t\tRequestId: newId,\n\t}, nil\n}", "func (a *HyperflexApiService) PatchHyperflexVolume(ctx context.Context, moid string) ApiPatchHyperflexVolumeRequest {\n\treturn ApiPatchHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn 
fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (r *RequestAPI) UpdateRequestV1(ctx context.Context, req *desc.UpdateRequestV1Request) (*desc.UpdateRequestV1Response, error) {\n\tlog.Printf(\"Got update request: %v\", req)\n\tspan, 
ctx := opentracing.StartSpanFromContext(ctx, \"UpdateRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.UpdateEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := r.repo.Update(\n\t\tctx, models.NewRequest(req.RequestId, req.UserId, req.Type, req.Text),\n\t)\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, \"request does not exist\")\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tStr(\"endpoint\", \"UpdateRequestV1\").\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to update request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.UpdateEvent, err))\n\tr.metrics.IncUpdate(1, \"UpdateRequestV1\")\n\treturn &desc.UpdateRequestV1Response{}, nil\n}", "func CreateModifyHostAvailabilityRequest() (request *ModifyHostAvailabilityRequest) {\n\trequest = &ModifyHostAvailabilityRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"ModifyHostAvailability\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *UHostClient) NewResizeAttachedDiskRequest() *ResizeAttachedDiskRequest {\n\treq := &ResizeAttachedDiskRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (client *Client) ExpandVolume(name, size string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/expand/volume/size/\\\"%s\\\"/\\\"%s\\\"\", size, name)\n}", "func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information 
about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := c.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := c.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (r *EBSVolumeResizer) 
ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := r.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := r.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, 
fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func EncodeRingbufferCapacityRequest(name string) *proto.ClientMessage {\n\tclientMessage := proto.NewClientMessageForEncode()\n\tclientMessage.SetRetryable(true)\n\n\tinitialFrame := proto.NewFrameWith(make([]byte, RingbufferCapacityCodecRequestInitialFrameSize), proto.UnfragmentedMessage)\n\tclientMessage.AddFrame(initialFrame)\n\tclientMessage.SetMessageType(RingbufferCapacityCodecRequestMessageType)\n\tclientMessage.SetPartitionId(-1)\n\n\tEncodeString(clientMessage, name)\n\n\treturn clientMessage\n}", "func NewModifySubscriptionRequest(requestHeader ExtensionObjectDefinition, subscriptionId uint32, requestedPublishingInterval float64, requestedLifetimeCount uint32, requestedMaxKeepAliveCount uint32, maxNotificationsPerPublish uint32, priority uint8) *_ModifySubscriptionRequest {\n\t_result := &_ModifySubscriptionRequest{\n\t\tRequestHeader: requestHeader,\n\t\tSubscriptionId: subscriptionId,\n\t\tRequestedPublishingInterval: requestedPublishingInterval,\n\t\tRequestedLifetimeCount: requestedLifetimeCount,\n\t\tRequestedMaxKeepAliveCount: requestedMaxKeepAliveCount,\n\t\tMaxNotificationsPerPublish: maxNotificationsPerPublish,\n\t\tPriority: priority,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func InitVolumeOperationRequestInterface(ctx context.Context, cleanupInterval int,\n\tisBlockVolumeSnapshotEnabled func() bool) (VolumeOperationRequest, error) {\n\tlog := logger.GetLogger(ctx)\n\tcsiNamespace = getCSINamespace()\n\n\toperationStoreInitLock.Lock()\n\tdefer operationStoreInitLock.Unlock()\n\tif operationRequestStoreInstance == nil {\n\t\t// 
Create CnsVolumeOperationRequest definition on API server.\n\t\tlog.Info(\n\t\t\t\"Creating CnsVolumeOperationRequest definition on API server and initializing VolumeOperationRequest instance\",\n\t\t)\n\t\terr := k8s.CreateCustomResourceDefinitionFromManifest(ctx,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFile,\n\t\t\tcnsvolumeoperationrequestconfig.EmbedCnsVolumeOperationRequestFileName)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create CnsVolumeOperationRequest CRD with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get in cluster config for client to API server.\n\t\tconfig, err := k8s.GetKubeConfig(ctx)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to get kubeconfig with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create client to API server.\n\t\tk8sclient, err := k8s.NewClientForGroup(ctx, config, cnsvolumeoprequestv1alpha1.SchemeGroupVersion.Group)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to create k8sClient with error: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Initialize the operationRequestStoreOnETCD implementation of\n\t\t// VolumeOperationRequest interface.\n\t\t// NOTE: Currently there is only a single implementation of this\n\t\t// interface. 
Future implementations will need modify this step.\n\t\toperationRequestStoreInstance = &operationRequestStore{\n\t\t\tk8sclient: k8sclient,\n\t\t}\n\t\tgo operationRequestStoreInstance.cleanupStaleInstances(cleanupInterval, isBlockVolumeSnapshotEnabled)\n\t}\n\n\treturn operationRequestStoreInstance, nil\n}", "func (o *GetDeploymentPreview1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param environment\n\tif err := r.SetPathParam(\"environment\", o.Environment); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tenant\n\tif err := r.SetPathParam(\"tenant\", o.Tenant); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r ApiGetHyperflexVolumeListRequest) Expand(expand string) ApiGetHyperflexVolumeListRequest {\n\tr.expand = &expand\n\treturn r\n}", "func (o *VolumeDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Force != nil {\n\n\t\t// query param force\n\t\tvar qrForce bool\n\t\tif o.Force != nil {\n\t\t\tqrForce = *o.Force\n\t\t}\n\t\tqForce := swag.FormatBool(qrForce)\n\t\tif qForce != \"\" {\n\t\t\tif err := r.SetQueryParam(\"force\", qForce); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (client *KeyVaultClient) encryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientEncryptOptions) 
(*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/encrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsBeginDeallocateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif vmScaleSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter vmScaleSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vmScaleSetName}\", url.PathEscape(vmScaleSetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, 
http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\tif options != nil && options.VMInstanceIDs != nil {\n\t\treturn req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs)\n\t}\n\treturn req, nil\n}", "func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *KeyVaultClient) encryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientEncryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/encrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {\n\treq, err := runtime.NewRequest(ctx, 
http.MethodPut, client.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"comp\", \"lease\")\n\treqQP.Set(\"restype\", \"container\")\n\tif options != nil && options.Timeout != nil {\n\t\treqQP.Set(\"timeout\", strconv.FormatInt(int64(*options.Timeout), 10))\n\t}\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"x-ms-lease-action\"] = []string{\"change\"}\n\treq.Raw().Header[\"x-ms-lease-id\"] = []string{leaseID}\n\treq.Raw().Header[\"x-ms-proposed-lease-id\"] = []string{proposedLeaseID}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {\n\t\treq.Raw().Header[\"If-Modified-Since\"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\tif modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {\n\t\treq.Raw().Header[\"If-Unmodified-Since\"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}\n\t}\n\treq.Raw().Header[\"x-ms-version\"] = []string{\"2020-10-02\"}\n\tif options != nil && options.RequestID != nil {\n\t\treq.Raw().Header[\"x-ms-client-request-id\"] = []string{*options.RequestID}\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/xml\"}\n\treturn req, nil\n}", "func (c *BlockVolumeClient) Resize(params *BlockVolumeParams) (*BlockVolumeResize, error) {\n\tvar result BlockVolumeResize\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/resize\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func (c *UFSClient) NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) 
(volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: 
vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func Convert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in *internal.StopServiceRequest, out *v1alpha1.StopServiceRequest) error {\n\treturn autoConvert_internal_StopServiceRequest_To_v1alpha1_StopServiceRequest(in, out)\n}", "func (r *RequestAPI) RemoveRequestV1(ctx context.Context, req *desc.RemoveRequestV1Request) (*desc.RemoveRequestV1Response, error) {\n\tlog.Printf(\"Got remove request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"RemoveRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.DeleteEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := r.repo.Remove(ctx, req.RequestId)\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, \"request does not exist\")\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tStr(\"endpoint\", \"RemoveRequestV1\").\n\t\t\tMsgf(\"Failed to remove request\")\n\t\treturn nil, err\n\t}\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.DeleteEvent, err))\n\tr.metrics.IncRemove(1, \"RemoveRequestV1\")\n\treturn &desc.RemoveRequestV1Response{}, nil\n}", "func (o *CreateVolumeBackupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CreateVolumeBackupDetails == nil {\n\t\to.CreateVolumeBackupDetails = new(models.CreateVolumeBackupDetails)\n\t}\n\n\tif err := 
r.SetBodyParam(o.CreateVolumeBackupDetails); err != nil {\n\t\treturn err\n\t}\n\n\tif o.OpcRetryToken != nil {\n\n\t\t// header param opc-retry-token\n\t\tif err := r.SetHeaderParam(\"opc-retry-token\", *o.OpcRetryToken); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func NewChangeRequest() *ChangeRequest {\n\tattr := &ChangeRequest{}\n\treturn attr\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := 
volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func (r *ProjectsLocationsVolumesService) Resize(volume string, resizevolumerequest *ResizeVolumeRequest) *ProjectsLocationsVolumesResizeCall {\n\tc := &ProjectsLocationsVolumesResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.volume = volume\n\tc.resizevolumerequest = resizevolumerequest\n\treturn c\n}", "func CreateUpdateMediaStorageClassRequest() (request *UpdateMediaStorageClassRequest) {\n\trequest = &UpdateMediaStorageClassRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vod\", \"2017-03-21\", \"UpdateMediaStorageClass\", \"vod\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: 
vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Convert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in *FakeRequest, out *v1alpha2.FakeRequest, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in, out, s)\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (client *KeyVaultClient) decryptCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters, options *KeyVaultClientDecryptOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}/decrypt\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion 
== \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp 
:= &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func CreateCreateMcubeUpgradePackageRequest() (request *CreateMcubeUpgradePackageRequest) {\n\trequest = &CreateMcubeUpgradePackageRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"mPaaS\", \"2020-10-28\", \"CreateMcubeUpgradePackage\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func CreateModifyDirectoryRequest() (request *ModifyDirectoryRequest) {\n\trequest = &ModifyDirectoryRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vs\", \"2018-12-12\", \"ModifyDirectory\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (a *RequestServiceApiService) InitRequestParameters(ctx _context.Context, uuid 
string) ApiInitRequestParametersRequest {\n\treturn ApiInitRequestParametersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuuid: uuid,\n\t}\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in *v2alpha1.GetDiskNumberFromVolumeIDRequest, out *impl.GetDiskNumberFromVolumeIDRequest) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDRequest_To_impl_GetDiskNumberFromVolumeIDRequest(in, out)\n}", "func NewAlterConfigsRequest(clientID string, resources []*AlterConfigsRequestResource) *AlterConfigsRequest {\n\trequestHeader := &RequestHeader{\n\t\tApiKey: API_AlterConfigs,\n\t\tApiVersion: 0,\n\t\tClientId: clientID,\n\t}\n\treturn &AlterConfigsRequest{requestHeader, resources}\n}", "func (client Client) ChangeSizePreparer(ctx context.Context, nasVolumeInstanceNo string, volumeSize string) (*http.Request, error) {\n\tqueryParameters := map[string]interface{}{\n\t\t\"nasVolumeInstanceNo\": autorest.Encode(\"query\", nasVolumeInstanceNo),\n\t\t\"responseFormatType\": autorest.Encode(\"query\", \"json\"),\n\t\t\"volumeSize\": autorest.Encode(\"query\", volumeSize),\n\t}\n\n\tqueryParameters[\"regionCode\"] = autorest.Encode(\"query\", \"FKR\")\n\n\ttimestamp := strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10)\n\tsec := security.NewSignature(client.Secretkey, crypto.SHA256)\n\tsignature, err := sec.Signature(\"POST\", common.GetPath(DefaultBaseURI, \"/changeNasVolumeSize\")+\"?\"+common.GetQuery(queryParameters), client.AccessKey, timestamp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsPost(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPath(\"/changeNasVolumeSize\"),\n\t\tautorest.WithQueryParameters(queryParameters),\n\t\tautorest.WithHeader(\"x-ncp-apigw-timestamp\", timestamp),\n\t\tautorest.WithHeader(\"x-ncp-iam-access-key\", client.AccessKey),\n\t\tautorest.WithHeader(\"x-ncp-apigw-signature-v2\", 
signature))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client *KeyVaultClient) updateKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters, options *KeyVaultClientUpdateKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\tif keyVersion == \"\" {\n\t\treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.2\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func CreateModifyClusterServiceConfigForAdminRequest() (request *ModifyClusterServiceConfigForAdminRequest) {\n\trequest = &ModifyClusterServiceConfigForAdminRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"ModifyClusterServiceConfigForAdmin\", \"emr\", \"openAPI\")\n\treturn\n}", "func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsBeginUpdateOptions) (*azcore.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryName}\", url.PathEscape(galleryName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageName}\", url.PathEscape(galleryImageName))\n\turlPath = strings.ReplaceAll(urlPath, \"{galleryImageVersionName}\", url.PathEscape(galleryImageVersionName))\n\treq, err := azcore.NewRequest(ctx, http.MethodPatch, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-09-30\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, req.MarshalAsJSON(galleryImageVersion)\n}", "func (r *RemoteWriteClient) PrepareRequest(queue *util.EvictingQueue) ([]byte, error) {\n\t// prepare labels and samples from queue\n\terr := r.prepare(queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := cortexpb.ToWriteRequest(r.labels, r.samples, nil, cortexpb.RULE)\n\tdefer cortexpb.ReuseSlice(req.Timeseries)\n\n\treqBytes, err := req.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn snappy.Encode(nil, reqBytes), nil\n}", "func VolumeType(volumeType string) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.VolumeType = volumeType\n\t\treturn nil\n\t}\n}", "func (client *ContainerGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, containerGroupName string, resource Resource, options *ContainerGroupsClientUpdateOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerInstance/containerGroups/{containerGroupName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif containerGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter containerGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{containerGroupName}\", url.PathEscape(containerGroupName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, resource)\n}", "func Convert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in *impl.IsSymlinkRequest, out *v2alpha1.IsSymlinkRequest) error {\n\treturn autoConvert_impl_IsSymlinkRequest_To_v2alpha1_IsSymlinkRequest(in, out)\n}", "func (client *KeyVaultClient) updateKeyCreateRequest(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters, options *KeyVaultClientUpdateKeyOptions) (*policy.Request, error) {\n\thost := \"{vaultBaseUrl}\"\n\thost = strings.ReplaceAll(host, \"{vaultBaseUrl}\", vaultBaseURL)\n\turlPath := \"/keys/{key-name}/{key-version}\"\n\tif keyName == \"\" {\n\t\treturn nil, errors.New(\"parameter keyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{key-name}\", url.PathEscape(keyName))\n\t// if 
keyVersion == \"\" {\n\t// \treturn nil, errors.New(\"parameter keyVersion cannot be empty\")\n\t// }\n\turlPath = strings.ReplaceAll(urlPath, \"{key-version}\", url.PathEscape(keyVersion))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"7.3\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func Convert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in *v1beta1.IsVolumeFormattedRequest, out *internal.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedRequest_To_internal_IsVolumeFormattedRequest(in, out)\n}", "func (r *RequestAPI) ListRequestV1(ctx context.Context, req *desc.ListRequestsV1Request) (*desc.ListRequestsV1Response, error) {\n\tlog.Printf(\"Got list request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"ListRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\trequests []models.Request\n\t\terr error\n\t)\n\n\tif req.SearchQuery != \"\" { // ideally would move search to a separate endpoint, so it's easier to extend\n\t\trequests, err = r.searcher.Search(ctx, req.SearchQuery, req.Limit, req.Offset)\n\t} else {\n\t\trequests, err = r.repo.List(ctx, req.Limit, req.Offset)\n\t}\n\n\tif err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tStr(\"endpoint\", \"ListRequestV1\").\n\t\t\tUint64(\"limit\", req.Limit).\n\t\t\tUint64(\"offset\", req.Offset).\n\t\t\tMsgf(\"Failed to list requests\")\n\t\tr.producer.Send(producer.NewEvent(ctx, 0, producer.ReadEvent, err))\n\t\treturn nil, err\n\t}\n\n\tret := make([]*desc.Request, 0, len(requests))\n\teventMsgs := make([]producer.EventMsg, 0, len(requests))\n\n\tfor _, req 
:= range requests {\n\t\tret = append(ret, &desc.Request{\n\t\t\tId: req.Id,\n\t\t\tUserId: req.UserId,\n\t\t\tType: req.Type,\n\t\t\tText: req.Text,\n\t\t})\n\t\teventMsgs = append(eventMsgs, producer.NewEvent(ctx, req.Id, producer.ReadEvent, nil))\n\t\tr.producer.Send(eventMsgs...)\n\n\t}\n\tr.metrics.IncList(1, \"ListRequestV1\")\n\treturn &desc.ListRequestsV1Response{\n\t\tRequests: ret,\n\t}, nil\n}", "func (r *RequestAPI) DescribeRequestV1(ctx context.Context, req *desc.DescribeRequestV1Request) (*desc.DescribeRequestV1Response, error) {\n\tlog.Printf(\"Got describe request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"DescribeRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.ReadEvent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := r.repo.Describe(ctx, req.RequestId)\n\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tStr(\"endpoint\", \"DescribeRequestV1\").\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tErr(err).\n\t\t\tMsgf(\"Failed to read request\")\n\t\treturn nil, err\n\t}\n\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.ReadEvent, err))\n\tr.metrics.IncRead(1, \"DescribeRequestV1\")\n\n\treturn &desc.DescribeRequestV1Response{\n\t\tRequest: &desc.Request{\n\t\t\tId: ret.Id,\n\t\t\tUserId: ret.UserId,\n\t\t\tType: ret.Type,\n\t\t\tText: ret.Text,\n\t\t},\n\t}, nil\n\n}", "func (r Virtual_Guest) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getUpgradeRequest\", nil, &r.Options, &resp)\n\treturn\n}" ]
[ "0.7606813", "0.72850883", "0.70685357", "0.69904757", "0.6655676", "0.6355068", "0.6267144", "0.6249365", "0.6177558", "0.61248964", "0.5815484", "0.5763709", "0.56612617", "0.56192935", "0.55897033", "0.5541267", "0.54326737", "0.54090255", "0.53793997", "0.53580225", "0.53215194", "0.52581143", "0.52519053", "0.5239559", "0.5234612", "0.5224484", "0.5199244", "0.5153082", "0.51234406", "0.5092641", "0.50523454", "0.50205123", "0.49891445", "0.49674207", "0.4922431", "0.49010834", "0.48610535", "0.48515052", "0.4838412", "0.48319197", "0.48207214", "0.4820213", "0.48123145", "0.47625744", "0.47567236", "0.47534165", "0.47527567", "0.47039038", "0.46971428", "0.4686421", "0.46811822", "0.46676815", "0.4647914", "0.46327442", "0.45997357", "0.4558363", "0.45381632", "0.45380962", "0.45313275", "0.45274216", "0.45209208", "0.44982007", "0.44967696", "0.4483051", "0.4482062", "0.44762126", "0.44662964", "0.445854", "0.44479063", "0.44450918", "0.4441045", "0.44377002", "0.44373345", "0.44282416", "0.44251117", "0.4416976", "0.44168663", "0.44048357", "0.44018105", "0.4400514", "0.4396906", "0.43962234", "0.43934178", "0.43778357", "0.43765417", "0.43742514", "0.43601528", "0.43510026", "0.43500724", "0.4347327", "0.4347136", "0.43381712", "0.4332113", "0.43292317", "0.43279836", "0.43250757", "0.4318976", "0.43188968", "0.43030486", "0.42927676" ]
0.8801989
0
Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse is an autogenerated conversion function.
func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error { return autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func 
Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func 
Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func (client Client) ChangeSizeResponder(resp *http.Response) (result VolumeSizeResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (pbrr PageBlobsResizeResponse) Response() *http.Response {\n\treturn pbrr.rawResponse\n}", "func Convert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in *v2alpha1.GetDiskNumberFromVolumeIDResponse, out *impl.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_v2alpha1_GetDiskNumberFromVolumeIDResponse_To_impl_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in *v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func CreateUpdateMediaStorageClassResponse() (response *UpdateMediaStorageClassResponse) {\n\tresponse = &UpdateMediaStorageClassResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, 
producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in *v2alpha1.GetVolumeIDFromTargetPathResponse, out *impl.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeIDFromTargetPathResponse_To_impl_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func ResizeVolume(vol *apis.ZFSVolume, newSize int64) error 
{\n\n\tvol.Spec.Capacity = strconv.FormatInt(int64(newSize), 10)\n\n\t_, err := volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(vol)\n\treturn err\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func CreateUpdateServiceAutoScalerResponse() (response *UpdateServiceAutoScalerResponse) {\n\tresponse = &UpdateServiceAutoScalerResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func (c *EBSVolumeResizer) 
ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := c.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := c.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, 
fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := r.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := 
ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := r.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (o *GetVMVolumeBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in *v1beta1.IsVolumeFormattedResponse, out *internal.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v1beta1_IsVolumeFormattedResponse_To_internal_IsVolumeFormattedResponse(in, out)\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != 
nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyCallRatioResponse() (response *ModifyCallRatioResponse) {\n\tresponse = &ModifyCallRatioResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn 
autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in *v2alpha1.PathExistsResponse, out *impl.PathExistsResponse) error {\n\treturn autoConvert_v2alpha1_PathExistsResponse_To_impl_PathExistsResponse(in, out)\n}", "func (c *BlockVolumeClient) Resize(params *BlockVolumeParams) (*BlockVolumeResize, error) {\n\tvar result BlockVolumeResize\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/resize\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func (client *GalleryImageVersionsClient) updateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in *v2alpha1.GetClosestVolumeIDFromTargetPathResponse, out *impl.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_v2alpha1_GetClosestVolumeIDFromTargetPathResponse_To_impl_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func (o *GetVMVolumeMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch 
response.Code() {\n\tcase 200:\n\t\tresult := NewGetVMVolumeMetricsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetVMVolumeMetricsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetVMVolumeMetricsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewGetVMVolumeMetricsInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := 
o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *VolumeCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewVolumeCreateCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewVolumeCreateInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (m *AwsVolume) resize() error {\n\tLog.Infof(\"Resizing EBS volume %s\", m.name())\n\tsnapshot, err := m.createSnapshot()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := m.Delete(); err != nil {\n\t\treturn err\n\t}\n\tif err := m.createAwsVolume(snapshot.SnapshotId); err != nil {\n\t\treturn err\n\t}\n\tif err := m.deleteSnapshot(snapshot); err != nil {\n\t\tLog.Errorf(\"Error deleting snapshot %s: %s\", *snapshot.SnapshotId, err.Error())\n\t}\n\treturn 
nil\n}", "func (o *DeleteRuntimeContainerOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (*Session) HandleResponse(msg proto.Message, err error) error {\n\tif err != nil {\n\t\t// check, if it is a gRPC error\n\t\ts, ok := status.FromError(err)\n\n\t\t// otherwise, forward the error message\n\t\tif !ok {\n\t\t\treturn err\n\t\t}\n\n\t\t// create a new error with just the message\n\t\treturn errors.New(s.Message())\n\t}\n\n\topt := protojson.MarshalOptions{\n\t\tMultiline: true,\n\t\tIndent: \" \",\n\t\tEmitUnpopulated: true,\n\t}\n\n\tb, _ := opt.Marshal(msg)\n\n\t_, err = fmt.Fprintf(Output, \"%s\\n\", string(b))\n\n\treturn err\n}", "func (r *csiResizer) Resize(pv *v1.PersistentVolume, requestSize resource.Quantity) (resource.Quantity, bool, error) {\n\toldSize := pv.Spec.Capacity[v1.ResourceStorage]\n\n\tvar volumeID string\n\tvar source *v1.CSIPersistentVolumeSource\n\tvar pvSpec v1.PersistentVolumeSpec\n\tvar migrated bool\n\tif pv.Spec.CSI != nil {\n\t\t// handle CSI volume\n\t\tsource = pv.Spec.CSI\n\t\tvolumeID = source.VolumeHandle\n\t\tpvSpec = pv.Spec\n\t} else {\n\t\ttranslator := csitrans.New()\n\t\tif translator.IsMigratedCSIDriverByName(r.name) {\n\t\t\t// handle migrated in-tree volume\n\t\t\tcsiPV, err := translator.TranslateInTreePVToCSI(pv)\n\t\t\tif err != nil {\n\t\t\t\treturn oldSize, false, fmt.Errorf(\"failed to translate persistent volume: %v\", err)\n\t\t\t}\n\t\t\tmigrated = true\n\t\t\tsource = csiPV.Spec.CSI\n\t\t\tpvSpec = csiPV.Spec\n\t\t\tvolumeID = source.VolumeHandle\n\t\t} else {\n\t\t\t// non-migrated in-tree volume\n\t\t\treturn oldSize, false, fmt.Errorf(\"volume %v is not migrated to CSI\", pv.Name)\n\t\t}\n\t}\n\n\tif len(volumeID) == 0 {\n\t\treturn oldSize, false, errors.New(\"empty volume handle\")\n\t}\n\n\tvar secrets map[string]string\n\tsecreRef := 
source.ControllerExpandSecretRef\n\tif secreRef != nil {\n\t\tvar err error\n\t\tsecrets, err = getCredentials(r.k8sClient, secreRef)\n\t\tif err != nil {\n\t\t\treturn oldSize, false, err\n\t\t}\n\t}\n\n\tcapability, err := r.getVolumeCapabilities(pvSpec)\n\tif err != nil {\n\t\treturn oldSize, false, fmt.Errorf(\"failed to get capabilities of volume %s with %v\", pv.Name, err)\n\t}\n\n\tctx, cancel := timeoutCtx(r.timeout)\n\tresizeCtx := context.WithValue(ctx, connection.AdditionalInfoKey, connection.AdditionalInfo{Migrated: strconv.FormatBool(migrated)})\n\n\tdefer cancel()\n\tnewSizeBytes, nodeResizeRequired, err := r.client.Expand(resizeCtx, volumeID, requestSize.Value(), secrets, capability)\n\tif err != nil {\n\t\treturn oldSize, nodeResizeRequired, err\n\t}\n\n\treturn *resource.NewQuantity(newSizeBytes, resource.BinarySI), nodeResizeRequired, err\n}", "func (o *UpdateDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func NewRepublishResponse(responseHeader ExtensionObjectDefinition, notificationMessage ExtensionObjectDefinition) *_RepublishResponse {\n\t_result := &_RepublishResponse{\n\t\tResponseHeader: responseHeader,\n\t\tNotificationMessage: notificationMessage,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := 
NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in *v1beta1.VolumeDiskNumberResponse, out *internal.VolumeDiskNumberResponse) error {\n\treturn autoConvert_v1beta1_VolumeDiskNumberResponse_To_internal_VolumeDiskNumberResponse(in, out)\n}", "func (o *ListAutoscalingV1NamespacedHorizontalPodAutoscalerOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (o *DeleteCoreV1CollectionNamespacedLimitRangeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (bshhr BlobsSetHTTPHeadersResponse) Response() *http.Response {\n\treturn bshhr.rawResponse\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func 
(*VodUpdateMediaInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_vod_response_response_vod_proto_rawDescGZIP(), []int{10}\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func (bdr BlobsDeleteResponse) Response() *http.Response {\n\treturn bdr.rawResponse\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func (client *GalleryImageVersionsClient) createOrUpdateHandleResponse(resp *azcore.Response) (GalleryImageVersionResponse, error) {\n\tvar val *GalleryImageVersion\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn GalleryImageVersionResponse{}, err\n\t}\n\treturn GalleryImageVersionResponse{RawResponse: resp.Response, GalleryImageVersion: val}, nil\n}", "func (client *SubscriptionClient) updateHandleResponse(resp *http.Response) (SubscriptionClientUpdateResponse, error) {\n\tresult := 
SubscriptionClientUpdateResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SubscriptionContract); err != nil {\n\t\treturn SubscriptionClientUpdateResponse{}, err\n\t}\n\treturn result, nil\n}", "func (bur BlobsUndeleteResponse) Response() *http.Response {\n\treturn bur.rawResponse\n}", "func Convert_v2alpha1_CreateSymlinkResponse_To_impl_CreateSymlinkResponse(in *v2alpha1.CreateSymlinkResponse, out *impl.CreateSymlinkResponse) error {\n\treturn autoConvert_v2alpha1_CreateSymlinkResponse_To_impl_CreateSymlinkResponse(in, out)\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *PostAPI24VolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPostApi24VolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostApi24VolumesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ToPbVolume(in api.Volume) *pb.Volume {\n\treturn &pb.Volume{\n\t\tID: in.ID,\n\t\tName: in.Name,\n\t\tSize: int32(in.Size),\n\t\tSpeed: pb.VolumeSpeed(in.Speed),\n\t}\n}", "func 
FormatRenameResponse(createResp *CreateResponse) RenameResponse {\n\tnewResp := RenameResponse{}\n\tnewResp.ContentLength = createResp.ContentLength\n\tnewResp.Continuation = createResp.Continuation\n\tnewResp.Date = createResp.Date\n\tnewResp.ETag = createResp.ETag\n\tnewResp.EncryptionKeySHA256 = createResp.EncryptionKeySHA256\n\tnewResp.IsServerEncrypted = createResp.IsServerEncrypted\n\tnewResp.LastModified = createResp.LastModified\n\tnewResp.RequestID = createResp.RequestID\n\tnewResp.Version = createResp.Version\n\treturn newResp\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}" ]
[ "0.7631037", "0.7236067", "0.7139145", "0.6887091", "0.670598", "0.659009", "0.6356872", "0.6324222", "0.63145655", "0.6251719", "0.6240178", "0.6036197", "0.59368616", "0.59101564", "0.5883287", "0.57643956", "0.5587666", "0.5355317", "0.5292024", "0.5287608", "0.52770436", "0.52355087", "0.52313954", "0.5198235", "0.5189336", "0.5180418", "0.516267", "0.51288456", "0.5118303", "0.5079834", "0.5068504", "0.50398076", "0.5025468", "0.5010221", "0.49733448", "0.49683583", "0.49644616", "0.49635378", "0.49582678", "0.4932947", "0.4900344", "0.48922062", "0.4881152", "0.48445886", "0.48266098", "0.480859", "0.47853085", "0.47675902", "0.47640467", "0.47237614", "0.47025287", "0.4679288", "0.46772975", "0.46698448", "0.46639317", "0.46639007", "0.46636578", "0.4660782", "0.46590614", "0.4648676", "0.46458337", "0.46444958", "0.4632599", "0.46314985", "0.4624797", "0.46132448", "0.46059588", "0.46028596", "0.4586387", "0.45843253", "0.45789087", "0.45776513", "0.4571653", "0.45711407", "0.4568941", "0.45682728", "0.4564669", "0.4544608", "0.4536115", "0.45296386", "0.45259914", "0.4522791", "0.45041668", "0.45024028", "0.44970608", "0.4493713", "0.44865695", "0.4479368", "0.44782087", "0.4473221", "0.4470318", "0.44612435", "0.44595763", "0.4458828", "0.44575155", "0.44521448", "0.44487253", "0.44449225", "0.4444153", "0.4435521" ]
0.8637057
0
Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse is an autogenerated conversion function.
func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error { return autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func 
Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func CreateModifyHostAvailabilityResponse() (response 
*ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (o *GetVMVolumeBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func (client Client) ChangeSizeResponder(resp *http.Response) (result VolumeSizeResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in *internal.IsVolumeFormattedResponse, out *v1beta1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_internal_IsVolumeFormattedResponse_To_v1beta1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func ResizeVolume(vol *apis.ZFSVolume, newSize int64) error {\n\n\tvol.Spec.Capacity = strconv.FormatInt(int64(newSize), 10)\n\n\t_, err := volbuilder.NewKubeclient().WithNamespace(OpenEBSNamespace).Update(vol)\n\treturn err\n}", "func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is 
already of a requested size */\n\tvolumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := r.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := r.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 {\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from 
%q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {\n\t/* first check if the volume is already of a requested size */\n\tvolumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not get information about the volume: %v\", err)\n\t}\n\tvol := volumeOutput.Volumes[0]\n\tif *vol.VolumeId != volumeID {\n\t\treturn fmt.Errorf(\"describe volume %q returned information about a non-matching volume %q\", volumeID, *vol.VolumeId)\n\t}\n\tif *vol.Size == newSize {\n\t\t// nothing to do\n\t\treturn nil\n\t}\n\tinput := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID}\n\toutput, err := c.connection.ModifyVolume(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not modify persistent volume: %v\", err)\n\t}\n\n\tstate := *output.VolumeModification.ModificationState\n\tif state == constants.EBSVolumeStateFailed {\n\t\treturn fmt.Errorf(\"could not modify persistent volume %q: modification state failed\", volumeID)\n\t}\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"received empty modification status\")\n\t}\n\tif state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted {\n\t\treturn nil\n\t}\n\t// wait until the volume reaches the \"optimizing\" or \"completed\" state\n\tin := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}}\n\treturn retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tout, err := c.connection.DescribeVolumesModifications(&in)\n\t\t\tif err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"could not describe volume modification: %v\", err)\n\t\t\t}\n\t\t\tif len(out.VolumesModifications) != 1 
{\n\t\t\t\treturn false, fmt.Errorf(\"describe volume modification didn't return one record for volume %q\", volumeID)\n\t\t\t}\n\t\t\tif *out.VolumesModifications[0].VolumeId != volumeID {\n\t\t\t\treturn false, fmt.Errorf(\"non-matching volume id when describing modifications: %q is different from %q\",\n\t\t\t\t\t*out.VolumesModifications[0].VolumeId, volumeID)\n\t\t\t}\n\t\t\treturn *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil\n\t\t})\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *VolumeCreateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewVolumeCreateCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := 
NewVolumeCreateInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func Convert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in *impl.GetDiskNumberFromVolumeIDResponse, out *v2alpha1.GetDiskNumberFromVolumeIDResponse) error {\n\treturn autoConvert_impl_GetDiskNumberFromVolumeIDResponse_To_v2alpha1_GetDiskNumberFromVolumeIDResponse(in, out)\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (c *BlockVolumeClient) Resize(params *BlockVolumeParams) (*BlockVolumeResize, error) {\n\tvar result BlockVolumeResize\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/resize\", params, &result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &result, nil\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateChangeMediaStatusResponse() (response *ChangeMediaStatusResponse) {\n\tresponse = &ChangeMediaStatusResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, 
result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (pbrr PageBlobsResizeResponse) Response() *http.Response {\n\treturn pbrr.rawResponse\n}", "func CreateModifyDirectoryResponse() (response *ModifyDirectoryResponse) {\n\tresponse = &ModifyDirectoryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateUpdateMediaStorageClassResponse() (response *UpdateMediaStorageClassResponse) {\n\tresponse = &UpdateMediaStorageClassResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *PostAPI24VolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPostApi24VolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostApi24VolumesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UpdateDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o 
*PostManagementKubernetesIoV1NodesAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(202)\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateUpdateServiceAutoScalerResponse() (response *UpdateServiceAutoScalerResponse) {\n\tresponse = &UpdateServiceAutoScalerResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *Client) ExpandVolume(name, size string) (*Response, *ResponseStatus, error) {\n\treturn client.FormattedRequest(\"/expand/volume/size/\\\"%s\\\"/\\\"%s\\\"\", size, name)\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *CreateAutoscalingV2beta2NamespacedHorizontalPodAutoscalerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewCreateAutoscalingV2beta2NamespacedHorizontalPodAutoscalerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 201:\n\t\tresult := NewCreateAutoscalingV2beta2NamespacedHorizontalPodAutoscalerCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewCreateAutoscalingV2beta2NamespacedHorizontalPodAutoscalerAccepted()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewCreateAutoscalingV2beta2NamespacedHorizontalPodAutoscalerUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in *impl.CreateSymlinkResponse, out *v2alpha1.CreateSymlinkResponse) error {\n\treturn autoConvert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in, out)\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func Convert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in *internal.StopServiceResponse, out *v1alpha1.StopServiceResponse) error {\n\treturn autoConvert_internal_StopServiceResponse_To_v1alpha1_StopServiceResponse(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in *impl.GetClosestVolumeIDFromTargetPathResponse, out 
*v2alpha1.GetClosestVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathResponse_To_v2alpha1_GetClosestVolumeIDFromTargetPathResponse(in, out)\n}", "func CreateRedeployDedicatedHostResponse() (response *RedeployDedicatedHostResponse) {\n\tresponse = &RedeployDedicatedHostResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *GetEchoNameBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func (a *HyperflexApiService) UpdateHyperflexVolume(ctx context.Context, moid string) ApiUpdateHyperflexVolumeRequest {\n\treturn ApiUpdateHyperflexVolumeRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func CreateCreateMcubeUpgradePackageResponse() (response *CreateMcubeUpgradePackageResponse) {\n\tresponse = &CreateMcubeUpgradePackageResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *ListAutoscalingV1NamespacedHorizontalPodAutoscalerUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *RequestBackupUploadOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn 
autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func CreateDescribeReservedInstancesResponse() (response *DescribeReservedInstancesResponse) {\n\tresponse = &DescribeReservedInstancesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListAutoscalingV1NamespacedHorizontalPodAutoscalerOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostAudioAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(202)\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateUpgradeDBVersionResponse() (response *UpgradeDBVersionResponse) {\n\tresponse = &UpgradeDBVersionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func FormatRenameResponse(createResp 
*CreateResponse) RenameResponse {\n\tnewResp := RenameResponse{}\n\tnewResp.ContentLength = createResp.ContentLength\n\tnewResp.Continuation = createResp.Continuation\n\tnewResp.Date = createResp.Date\n\tnewResp.ETag = createResp.ETag\n\tnewResp.EncryptionKeySHA256 = createResp.EncryptionKeySHA256\n\tnewResp.IsServerEncrypted = createResp.IsServerEncrypted\n\tnewResp.LastModified = createResp.LastModified\n\tnewResp.RequestID = createResp.RequestID\n\tnewResp.Version = createResp.Version\n\treturn newResp\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func CreateActionDiskRmaResponse() (response *ActionDiskRmaResponse) {\n\tresponse = &ActionDiskRmaResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *CreateCoreV1PersistentVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewCreateCoreV1PersistentVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 201:\n\t\tresult := NewCreateCoreV1PersistentVolumeCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewCreateCoreV1PersistentVolumeAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewCreateCoreV1PersistentVolumeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for 
this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateModifyContainerAppAttributesResponse() (response *ModifyContainerAppAttributesResponse) {\n\tresponse = &ModifyContainerAppAttributesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (m MultiVersionResponse) DecodeResponse1() (resp MessagesResponse, err error) 
{\n\treturn resp, rlp.DecodeBytes(m.Response, &resp)\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateModifySkillGroupExResponse() (response *ModifySkillGroupExResponse) {\n\tresponse = &ModifySkillGroupExResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}" ]
[ "0.7811737", "0.7236402", "0.7062594", "0.6831711", "0.67614347", "0.6731284", "0.6531464", "0.64840823", "0.6303111", "0.5995277", "0.58993286", "0.58616346", "0.58404154", "0.58055323", "0.5736063", "0.57093203", "0.5596003", "0.55937433", "0.55925083", "0.5474091", "0.5400209", "0.5399851", "0.5358403", "0.53339976", "0.53279245", "0.52988034", "0.5296555", "0.5284847", "0.52446204", "0.5230532", "0.5221008", "0.5197687", "0.517437", "0.5161624", "0.51460564", "0.5122513", "0.51217836", "0.50945944", "0.5084839", "0.5084707", "0.49911487", "0.49893585", "0.49393332", "0.49153113", "0.49119654", "0.49073407", "0.48984802", "0.4875662", "0.48733026", "0.4859712", "0.48473653", "0.48281622", "0.47891364", "0.4781838", "0.4780481", "0.47756442", "0.47549742", "0.47519323", "0.4738706", "0.4729676", "0.47280282", "0.4725981", "0.47236001", "0.47200337", "0.47053248", "0.4691914", "0.4682809", "0.46783748", "0.46742886", "0.46737283", "0.4672529", "0.46632957", "0.46538037", "0.46528837", "0.4646025", "0.4628106", "0.46261257", "0.46154502", "0.46099895", "0.4608452", "0.45958528", "0.45953035", "0.45871937", "0.45860478", "0.45815694", "0.45728588", "0.4571406", "0.45583236", "0.45549512", "0.45476282", "0.4547603", "0.4547", "0.45452884", "0.4542404", "0.45411634", "0.45406774", "0.45284903", "0.4522749", "0.45226395", "0.45213082" ]
0.8602147
0
Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest is an autogenerated conversion function.
func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error { return autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func (c *UFSClient) NewRemoveUFSVolumeRequest() *RemoveUFSVolumeRequest {\n\treq := &RemoveUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (d *MinioDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Unmount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections <= 1 {\n\t\tif err := d.unmountVolume(v); err != nil {\n\t\t\tglog.Warningf(\"Unmounting %s volume failed with: %s\", v, err)\n\t\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t\t}\n\t\tv.connections = 0\n\t\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n\t}\n\tv.connections--\n\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n}", "func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Unmounting Volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tif 
d.refCounts.IsInitialized() != true {\n\t\t// if refcounting hasn't been succesful,\n\t\t// no refcounting, no unmount. All unmounts are delayed\n\t\t// until we succesfully populate the refcount map\n\t\td.refCounts.MarkDirty()\n\t\treturn volume.Response{Err: \"\"}\n\t}\n\n\tlog.Errorf(\"VolumeDriver Unmount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (c *UFSClient) NewRemoveUFSVolumeMountPointRequest() *RemoveUFSVolumeMountPointRequest {\n\treq := &RemoveUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Unmount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tmount := vol.mounts[req.ID]\n\tif !mount {\n\t\tlogrus.Debugf(\"Volume %s is not mounted by %s\", req.Name, req.ID)\n\t\treturn fmt.Errorf(\"volume %s is not mounted by %s\", req.Name, req.ID)\n\t}\n\n\tdelete(vol.mounts, req.ID)\n\n\treturn nil\n}", "func (client *Client) UnmapVolume(name, host string) (*Response, *ResponseStatus, error) {\n\tif host == \"\" {\n\t\treturn client.FormattedRequest(\"/unmap/volume/\\\"%s\\\"\", name)\n\t}\n\n\treturn client.FormattedRequest(\"/unmap/volume/host/\\\"%s\\\"/\\\"%s\\\"\", host, name)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse 
{\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. 
Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. 
Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, 
unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}", "func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {\n\tif req == nil {\n\t\treturn fmt.Errorf(\"must provide non-nil request to UnmountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Unmounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, unmountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn p.handleErrorResponse(resp, unmountPath, req.Name)\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {\n\ttargetPath := 
cs.getInternalMountPath(vol)\n\n\t// Unmount nfs server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tTargetPath: cs.getInternalMountPath(vol),\n\t})\n\treturn err\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func ParseNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) (*VolumeInfo, error) {\n\tvolumeID := req.GetVolumeId()\n\tif volumeID == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\ttargetPath := req.GetTargetPath()\n\tif targetPath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path missing in request\")\n\t}\n\n\treturn &VolumeInfo{volumeID, targetPath}, nil\n}", "func (s *VolumeListener) Detach(inctx context.Context, in *protocol.VolumeDetachmentRequest) (empty *googleprotobuf.Empty, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot detach volume\")\n\n\tempty = &googleprotobuf.Empty{}\n\tif s == nil {\n\t\treturn empty, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tvolumeRef, volumeRefLabel := 
srvutils.GetReference(in.GetVolume())\n\tif volumeRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for volume\")\n\t}\n\thostRef, hostRefLabel := srvutils.GetReference(in.GetHost())\n\tif hostRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for host\")\n\t}\n\n\tjob, xerr := PrepareJob(inctx, in.GetVolume().GetTenantId(), fmt.Sprintf(\"/volume/%s/host/%s/detach\", volumeRef, hostRef))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\thandler := VolumeHandler(job)\n\tif xerr = handler.Detach(volumeRef, hostRef); xerr != nil {\n\t\treturn empty, xerr\n\t}\n\n\tlogrus.WithContext(job.Context()).Infof(\"Volume %s successfully detached from %s.\", volumeRefLabel, hostRefLabel)\n\treturn empty, nil\n}", "func UmountVolume(vol *apis.LVMVolume, targetPath string,\n) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\tdev, ref, err := mount.GetDeviceNameFromMount(mounter, targetPath)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: umount volume: failed to get device from mnt: %s\\nError: %v\",\n\t\t\ttargetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\t// device has already been un-mounted, return successful\n\tif len(dev) == 0 || ref == 0 {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because volume %s not mounted: %v\",\n\t\t\tvol.Name, targetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil {\n\t\treturn fmt.Errorf(\"error checking if path exists: %v\", pathErr)\n\t} else if !pathExists {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because path does not exist: %v\",\n\t\t\ttargetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif err = mounter.Unmount(targetPath); err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to unmount %s: path %s err: %v\",\n\t\t\tvol.Name, targetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\tif err := 
os.Remove(targetPath); err != nil {\n\t\tklog.Errorf(\"lvm: failed to remove mount path vol %s err : %v\", vol.Name, err)\n\t}\n\n\tklog.Infof(\"umount done %s path %v\", vol.Name, targetPath)\n\n\treturn nil\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func Unmount(d Driver, vName string) error {\n\tlog.Debugf(\"Entering Unmount: name: %s\", vName)\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tv, m, err := getVolumeMount(d, vName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.GetConnections() <= 1 {\n\t\tcmd := fmt.Sprintf(\"/usr/bin/umount %s\", m.GetPath())\n\t\tif err := d.RunCmd(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tSetN(0, m, v)\n\t} else {\n\t\tAddN(-1, m, v)\n\t}\n\n\treturn d.SaveConfig()\n}", "func (d *VolumeDriver) UnmountVolume(name string) error {\n\tlog.Errorf(\"VolumeDriver UnmountVolume to be implemented\")\n\treturn nil\n}", "func (d *MinioDriver) unmountVolume(volume *minioVolume) error {\n\treturn exec.Command(\"umount\", volume.mountpoint).Run()\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}", "func (o *Filesystem) Unmount(ctx context.Context, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Unmount\", 0, options).Store()\n\treturn\n}", "func 
DecodeUnsealRequest(_ context.Context, grpcReq interface{}) (interface{}, error) {\n\treq := grpcReq.(*pb.UnsealRequest)\n\treturn &endpoints.UnsealRequest{Key: req.Key, Reset: req.Reset_}, nil\n}", "func (d ImagefsDriver) Unmount(r *volume.UnmountRequest) error {\n\tfmt.Printf(\"-> Unmount %+v\\n\", r)\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\ttimeout := time.Second * 5\n\terr = d.cli.ContainerStop(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\t&timeout,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tfmt.Printf(\"<- OK\\n\")\n\treturn nil\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Unmount(out io.Writer, logger log.FieldLogger) (err error) {\n\tdisk, err := queryPhysicalVolume(logger)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif disk == \"\" {\n\t\tlogger.Info(\"No physical volumes found.\")\n\t\treturn nil\n\t}\n\tlogger.Infof(\"Found physical volume on disk %v.\", disk)\n\tconfig := &config{\n\t\tFieldLogger: logger,\n\t\tdisk: disk,\n\t\tout: out,\n\t}\n\tif err = config.removeLingeringDevices(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeLogicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeVolumeGroup(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removePhysicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {\n\tdriver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)\n\tif err != nil || driver == nil {\n\t\tglog.Errorf(\"Failed to get portworx driver. 
Err: %v\", err)\n\t\treturn err\n\t}\n\n\terr = driver.Unmount(u.volName, mountPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Error unmounting Portworx Volume (%v) on Path (%v): %v\", u.volName, mountPath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Unmount(target string) error {\n\tlogrus.Infof(\"Unmount %s\", target)\n\terr := os.Remove(target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func (d *Driver) internalUnmount(ctx context.Context, vol *smbVolume) error {\n\ttargetPath := getInternalMountPath(d.workingMountDir, vol)\n\n\t// Unmount smb server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := d.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tStagingTargetPath: targetPath,\n\t})\n\treturn err\n}", "func (c *Controller) UnmountDevice(unmountDeviceRequest k8sresources.FlexVolumeUnmountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-UnmountDevice-start\")\n\tdefer c.logger.Println(\"controller-UnmountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (d *Driver) Unmount(mountDir string) {\n\tDebug(\"findmnt: \" + mountDir)\n\t_, err := RunCommand(\"findmnt\", \"-n\", \"-o\", \"SOURCE\", \"--target\", mountDir)\n\tif err != nil {\n\t\tDebug(err.Error())\n\t}\n\n\tDebug(\"syscall.Unmount: \" + mountDir)\n\tif err := syscall.Unmount(mountDir, 0); err != nil 
{\n\t\tFailure(err)\n\t}\n\n\tDebug(\"Detach hetzner volume from server\")\n\tvolume := GetVolume(d.client, d.options.PVOrVolumeName)\n\t_, _, errDetach := d.client.Volume.Detach(context.Background(), volume)\n\n\tif errDetach != nil {\n\t\tFailure(errDetach)\n\t}\n\n\t// Delete json file with token in it\n\t//Debug(\"os.Remove\")\n\t//if err := os.Remove(jsonOptionsFile); err != nil {\n\t//\tfailure(err)\n\t//}\n\n\tSuccess()\n}", "func (c *client) Unmount(\n\tctx types.Context,\n\tmountPoint string,\n\topts types.Store) error {\n\n\tif c.isController() {\n\t\treturn utils.NewUnsupportedForClientTypeError(\n\t\t\tc.clientType, \"Unmount\")\n\t}\n\n\tif lsxSO, _ := c.Supported(ctx, opts); !lsxSO.Umount() {\n\t\treturn errExecutorNotSupported\n\t}\n\n\tctx = context.RequireTX(ctx.Join(c.ctx))\n\n\tserviceName, ok := context.ServiceName(ctx)\n\tif !ok {\n\t\treturn goof.New(\"missing service name\")\n\t}\n\n\tsi, err := c.getServiceInfo(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverName := si.Driver.Name\n\n\tif _, err = c.runExecutor(\n\t\tctx,\n\t\tdriverName,\n\t\ttypes.LSXCmdUmount,\n\t\tmountPoint); err != nil {\n\t\treturn err\n\t}\n\n\tctx.Debug(\"xli umount success\")\n\treturn nil\n}", "func Unmount(dest string) error {\n\treturn syscall.Unmount(dest, 0)\n}", "func (d *fsStorage) Unmount(volume *Volume) error {\n\treturn nil\n}", "func (zr *ZRequest) Unmarshal(v interface{}) error {\n\tif !zr.ended {\n\t\treturn ErrRequestNotComp\n\t}\n\tif zr.err != nil {\n\t\treturn zr.err\n\t}\n\tif zr.resp == nil {\n\t\tif zr.err != nil {\n\t\t\treturn zr.err\n\t\t}\n\t\treturn ErrRequestNotComp\n\t}\n\n\t// copy to discard\n\tdefer func() {\n\t\tio.Copy(ioutil.Discard, zr.resp.Body)\n\t\tzr.resp.Body.Close()\n\t}()\n\n\tvar decoder Decoder\n\trespContentType := zr.resp.Header.Get(HdrContentType)\n\n\t// xml or json\n\tif xmlCheck.MatchString(respContentType) {\n\t\tdecoder = xml.NewDecoder(zr.resp.Body)\n\t} else {\n\t\tdecoder = 
json.NewDecoder(zr.resp.Body)\n\t}\n\n\tzr.err = decoder.Decode(v)\n\tif zr.err != nil {\n\t\tzr.err = errors.New(\"decoder.Decode: \" + zr.err.Error())\n\t\treturn zr.err\n\t}\n\treturn nil\n}", "func Unmount(mount string, flags int) error {\n\treturn ErrNotImplementOnUnix\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func Unmount(dest string) error {\n\treturn nil\n}", "func UnmapBlockVolume(\n\tblkUtil volumepathhandler.BlockVolumePathHandler,\n\tglobalUnmapPath,\n\tpodDeviceUnmapPath,\n\tvolumeMapName string,\n\tpodUID utypes.UID,\n) error {\n\t// Release file descriptor lock.\n\terr := blkUtil.DetachFileDevice(filepath.Join(globalUnmapPath, string(podUID)))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. 
globalUnmapPath:%s, podUID: %s: %v\",\n\t\t\tglobalUnmapPath, string(podUID), err)\n\t}\n\n\t// unmap devicePath from pod volume path\n\tunmapDeviceErr := blkUtil.UnmapDevice(podDeviceUnmapPath, volumeMapName, false /* bindMount */)\n\tif unmapDeviceErr != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. podDeviceUnmapPath:%s, volumeMapName: %s, bindMount: %v: %v\",\n\t\t\tpodDeviceUnmapPath, volumeMapName, false, unmapDeviceErr)\n\t}\n\n\t// unmap devicePath from global node path\n\tunmapDeviceErr = blkUtil.UnmapDevice(globalUnmapPath, string(podUID), true /* bindMount */)\n\tif unmapDeviceErr != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s, bindMount: %v: %v\",\n\t\t\tglobalUnmapPath, string(podUID), true, unmapDeviceErr)\n\t}\n\treturn nil\n}", "func (m *Mounter) Unmount(\n\tdevPath string,\n\tpath string,\n\tflags int,\n\ttimeout int,\n\topts map[string]string,\n) error {\n\tm.Lock()\n\t// device gets overwritten if opts specifies fuse mount with\n\t// options.OptionsDeviceFuseMount.\n\tdevice := devPath\n\tpath = normalizeMountPath(path)\n\tif value, ok := opts[options.OptionsDeviceFuseMount]; ok {\n\t\t// fuse mounts show-up with this key as device.\n\t\tdevice = value\n\t}\n\tinfo, ok := m.mounts[device]\n\tif !ok {\n\t\tlogrus.Warnf(\"Unable to unmount device %q path %q: %v\",\n\t\t\tdevPath, path, ErrEnoent.Error())\n\t\tlogrus.Infof(\"Found %v mounts in mounter's cache: \", len(m.mounts))\n\t\tlogrus.Infof(\"Mounter has the following mountpoints: \")\n\t\tfor dev, info := range m.mounts {\n\t\t\tlogrus.Infof(\"For Device %v: Info: %v\", dev, info)\n\t\t\tif info == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, path := range info.Mountpoint {\n\t\t\t\tlogrus.Infof(\"\\t Mountpath: %v Rootpath: %v\", path.Path, path.Root)\n\t\t\t}\n\t\t}\n\t\tm.Unlock()\n\t\treturn ErrEnoent\n\t}\n\tm.Unlock()\n\tinfo.Lock()\n\tdefer info.Unlock()\n\tfor i, p := range info.Mountpoint {\n\t\tif p.Path != path 
{\n\t\t\tcontinue\n\t\t}\n\t\terr := m.mountImpl.Unmount(path, flags, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Blow away this mountpoint.\n\t\tinfo.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]\n\t\tinfo.Mountpoint = info.Mountpoint[0 : len(info.Mountpoint)-1]\n\t\tm.maybeRemoveDevice(device)\n\t\tif options.IsBoolOptionSet(opts, options.OptionsDeleteAfterUnmount) {\n\t\t\tm.RemoveMountPath(path, opts)\n\t\t}\n\n\t\treturn nil\n\t}\n\tlogrus.Warnf(\"Device %q is not mounted at path %q\", device, path)\n\treturn ErrEnoent\n}", "func (c *UFSClient) NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (m *DefaultMounter) Unmount(target string, flags int, timeout int) error {\n\treturn syscall.Unmount(target, flags)\n}", "func Unmount(mountpoint string) (err error) {\n\tlog.Println(\"Unmounting filesystem\")\n\terr = fuse.Unmount(mountpoint)\n\treturn\n}", "func (c *UDiskClient) NewDeleteUDiskRequest() *DeleteUDiskRequest {\n\treq := &DeleteUDiskRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func CreateUnAssignPrivateIpAddressRequest() (request *UnAssignPrivateIpAddressRequest) {\n\trequest = &UnAssignPrivateIpAddressRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"eflo\", \"2022-05-30\", \"UnAssignPrivateIpAddress\", \"eflo\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *restClient) DeleteVolume(ctx context.Context, req *netapppb.DeleteVolumeRequest, opts ...gax.CallOption) (*DeleteVolumeOperation, error) {\n\tbaseUrl, err := 
url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetForce() {\n\t\tparams.Add(\"force\", fmt.Sprintf(\"%v\", req.GetForce()))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"DELETE\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &DeleteVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Unmount(h hostRunner, target string) error {\n\tout, err := h.RunSSHCommand(fmt.Sprintf(\"findmnt -T %s && sudo umount %s || true\", target, target))\n\tif err != nil {\n\t\treturn errors.Wrap(err, out)\n\t}\n\treturn 
nil\n}", "func (v *Volume) unmount(force bool) error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.state.MountCount == 0 {\n\t\tlogrus.Debugf(\"Volume %s already unmounted\", v.Name())\n\t\treturn nil\n\t}\n\n\tif !force {\n\t\tv.state.MountCount--\n\t} else {\n\t\tv.state.MountCount = 0\n\t}\n\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\n\tif v.state.MountCount == 0 {\n\t\tif v.UsesVolumeDriver() {\n\t\t\tif v.plugin == nil {\n\t\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t\t}\n\n\t\t\treq := new(pluginapi.UnmountRequest)\n\t\t\treq.Name = v.Name()\n\t\t\treq.ID = pseudoCtrID\n\t\t\tif err := v.plugin.UnmountVolume(req); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\t\tif _, err := v.runtime.storageService.UnmountContainerImage(v.config.StorageID, force); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmounting volume %s image: %w\", v.Name(), err)\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t}\n\n\t\t// Unmount the volume\n\t\tif err := detachUnmount(v.config.MountPoint); err != nil {\n\t\t\tif err == unix.EINVAL {\n\t\t\t\t// Ignore EINVAL - the mount no longer exists.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unmounting volume %s: %w\", v.Name(), err)\n\t\t}\n\t\tlogrus.Debugf(\"Unmounted volume %s\", v.Name())\n\t}\n\n\treturn v.save()\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common 
error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewUpdateUFSVolumeInfoRequest() *UpdateUFSVolumeInfoRequest {\n\treq := &UpdateUFSVolumeInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (mounter *Mounter) Unmount(target string) error {\n\treturn mounter.unmount(target, UMOUNT_COMMAND)\n}", "func (mounter *csiProxyMounterV1Beta) Unmount(target string) error {\n\tklog.V(4).Infof(\"Unmount: %s\", target)\n\treturn mounter.Rmdir(target)\n}", "func (m *DefaultMounter) Unmount() error {\n\tif m.mnt == nil {\n\t\treturn nil\n\t}\n\tm.lock.Lock()\n\th := m.mnt\n\tm.lock.Unlock()\n\treturn h.Close()\n}", "func (fs *FS) Unmount(ctx context.Context, target string) error {\n\treturn fs.unmount(ctx, target)\n}", "func Unmount(pMountPoint string) error {\n\tvUnmountError := fuse.Unmount(pMountPoint)\n\tif vUnmountError != nil {\n\t\treturn diagnostic.NewError(\"An error occurred while unmounting onedrive filesystem mounted at %s\", vUnmountError, pMountPoint)\n\t}\n\treturn nil\n}", "func NewCmdDiskDetach() *cobra.Command {\n\tvar async, yes *bool\n\tvar udiskIDs *[]string\n\treq := base.BizClient.NewDetachUDiskRequest()\n\tcmd := &cobra.Command{\n\t\tUse: \"detach\",\n\t\tShort: \"Detach udisk instances from an uhost\",\n\t\tLong: \"Detach udisk instances from an uhost\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\ttext := `Please confirm that you have already unmounted file system corresponding to this hard drive,(See \"https://docs.ucloud.cn/storage_cdn/udisk/userguide/umount\" for help), otherwise it will cause file system damage and UHost cannot be normally shut down. 
Sure to detach?`\n\t\t\tif !*yes {\n\t\t\t\tsure, err := ux.Prompt(text)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.Cxt.PrintErr(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !sure {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, id := range *udiskIDs {\n\t\t\t\tid = base.PickResourceID(id)\n\t\t\t\tany, err := describeUdiskByID(id, *req.ProjectId, *req.Region, *req.Zone)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.HandleError(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif any == nil {\n\t\t\t\t\tbase.Cxt.PrintErr(fmt.Errorf(\"udisk[%v] is not exist\", any))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tins, ok := any.(*udisk.UDiskDataSet)\n\t\t\t\tif !ok {\n\t\t\t\t\tbase.Cxt.PrintErr(fmt.Errorf(\"%#v convert to udisk failed\", any))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treq.UHostId = &ins.UHostId\n\t\t\t\treq.UDiskId = &id\n\t\t\t\t*req.UHostId = base.PickResourceID(*req.UHostId)\n\t\t\t\tresp, err := base.BizClient.DetachUDisk(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.HandleError(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttext := fmt.Sprintf(\"udisk[%s] is detaching from uhost[%s]\", resp.UDiskId, resp.UHostId)\n\t\t\t\tif *async {\n\t\t\t\t\tbase.Cxt.Println(text)\n\t\t\t\t} else {\n\t\t\t\t\tpollDisk(resp.UDiskId, *req.ProjectId, *req.Region, *req.Zone, text, []string{status.DISK_AVAILABLE, status.DISK_FAILED})\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\tudiskIDs = flags.StringSlice(\"udisk-id\", nil, \"Required. Resource ID of the udisk instances to detach\")\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\tasync = flags.Bool(\"async\", false, \"Optional. 
Do not wait for the long-running operation to finish.\")\n\tyes = flags.BoolP(\"yes\", \"y\", false, \"Optional. Do not prompt for confirmation.\")\n\n\tflags.SetFlagValuesFunc(\"udisk-id\", func() []string {\n\t\treturn getDiskList([]string{status.DISK_INUSE}, *req.ProjectId, *req.Region, *req.Zone)\n\t})\n\n\tcmd.MarkFlagRequired(\"udisk-id\")\n\treturn cmd\n}", "func unmount(target string) error {\n\tif mounted, err := mounted(target); err != nil || !mounted {\n\t\treturn err\n\t}\n\treturn forceUnmount(target)\n}", "func Unmount(ctx context.Context, mountPath string, opts ...UnmountOpt) error {\n\tuo := unmountOpts{\n\t\tfusermountPath: \"fusermount\",\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(&uo); err != nil {\n\t\t\treturn fmt.Errorf(\"%w\", err)\n\t\t}\n\t}\n\n\treturn unmountSquashFS(ctx, mountPath, uo)\n}", "func (c *CryptohomeBinary) Unmount(ctx context.Context, username string) ([]byte, error) {\n\treturn c.call(ctx, \"--action=unmount\", \"--user=\"+username)\n}", "func Unmount(mountpoint string) error {\n\treturn syscall.Unmount(mountpoint, 0)\n}", "func (o *VolumeDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Force != nil {\n\n\t\t// query param force\n\t\tvar qrForce bool\n\t\tif o.Force != nil {\n\t\t\tqrForce = *o.Force\n\t\t}\n\t\tqForce := swag.FormatBool(qrForce)\n\t\tif qForce != \"\" {\n\t\t\tif err := r.SetQueryParam(\"force\", qForce); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func UnmarshalWorkspaceStatusUpdateRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceStatusUpdateRequest)\n\terr = core.UnmarshalPrimitive(m, \"frozen\", 
&obj.Frozen)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_at\", &obj.FrozenAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_by\", &obj.FrozenBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked\", &obj.Locked)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_by\", &obj.LockedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_time\", &obj.LockedTime)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (driver *Driver) Unmount(volumeName, volumeID string) error {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\n\terr = driver.osdm.Unmount(mounts[0].Mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.sdm.DetachVolume(false, volumes[0].VolumeID, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func 
Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func validateVanillaControllerUnpublishVolumeRequest(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) error {\n\treturn common.ValidateControllerUnpublishVolumeRequest(ctx, req)\n}", "func umount(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\ts, ok := R.(apl.String)\n\tif ok == false {\n\t\treturn nil, fmt.Errorf(\"io umount: argument must be a string %T\", R)\n\t}\n\tUmount(string(s))\n\treturn apl.EmptyArray{}, nil\n}", "func DeleteVolume(req systemsproto.VolumeRequest) (*systemsproto.SystemsResponse, error) {\n\tconn, err := services.ODIMService.Client(services.Systems)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create client connection: %v\", err)\n\t}\n\tdefer conn.Close()\n\tasService := systemsproto.NewSystemsClient(conn)\n\tresp, err := asService.DeleteVolume(context.TODO(), &req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error: RPC error: %v\", err)\n\t}\n\treturn resp, nil\n}", "func (f *FakeMounter) Unmount(target string) error {\n\tf.mutex.Lock()\n\tdefer f.mutex.Unlock()\n\n\t// If target is a symlink, get its absolute path\n\tabsTarget, err := filepath.EvalSymlinks(target)\n\tif err != nil {\n\t\tabsTarget = target\n\t}\n\n\tnewMountpoints := []MountPoint{}\n\tfor _, mp := range f.MountPoints {\n\t\tif mp.Path == absTarget {\n\t\t\tif f.UnmountFunc != nil {\n\t\t\t\terr := f.UnmountFunc(absTarget)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tklog.V(5).Infof(\"Fake mounter: unmounted %s from %s\", mp.Device, absTarget)\n\t\t\t// Don't copy it to newMountpoints\n\t\t\tcontinue\n\t\t}\n\t\tnewMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type})\n\t}\n\tf.MountPoints = newMountpoints\n\tf.log = 
append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget})\n\tdelete(f.MountCheckErrors, target)\n\treturn nil\n}", "func (z *zfsctl) Umount(ctx context.Context, name string, force, all bool) *execute {\n\targs := []string{\"umount\"}\n\tif force {\n\t\targs = append(args, \"-f\")\n\t}\n\tif all {\n\t\targs = append(args, \"-a\")\n\t} else {\n\t\targs = append(args, name)\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func OptUnmountFusermountPath(path string) UnmountOpt {\n\treturn func(mo *unmountOpts) error {\n\t\tif filepath.Base(path) == path {\n\t\t\treturn errFusermountPathInvalid\n\t\t}\n\t\tmo.fusermountPath = path\n\t\treturn nil\n\t}\n}", "func (img *Image) Unmount(mountPoint string) error {\n\treturn devUnmount(img, mountPoint)\n}", "func (c *Client) Unmount(ctx context.Context, svc iaas.Service, export string) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\"Export\": export}\n\tstdout, xerr := executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to unmount remote NFS share\")\n\t}\n\treturn nil\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tparams.Add(\"updateMask\", string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif 
vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func RemoveVolume(c *check.C, name string) error {\n\tpath := \"/volumes/\" + name\n\tresp, err := request.Delete(path)\n\tdefer resp.Body.Close()\n\n\tc.Assert(err, check.IsNil)\n\tCheckRespStatus(c, resp, 204)\n\n\treturn err\n}", "func (c *Client) Unmount(export string) error {\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t}\n\tretcode, stdout, stderr, err := executeScript(*c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\treturn handleExecuteScriptReturn(retcode, stdout, stderr, err, \"Error 
executing script to unmount remote NFS share\")\n}", "func Unmount(path string, force, lazy bool) error {\n\tvar flags = unix.UMOUNT_NOFOLLOW\n\tif len(path) == 0 {\n\t\treturn errors.New(\"path cannot be empty\")\n\t}\n\tif force && lazy {\n\t\treturn errors.New(\"force and lazy unmount cannot both be set\")\n\t}\n\tif force {\n\t\tflags |= unix.MNT_FORCE\n\t}\n\tif lazy {\n\t\tflags |= unix.MNT_DETACH\n\t}\n\tif err := unix.Unmount(path, flags); err != nil {\n\t\treturn fmt.Errorf(\"umount %q flags %x: %v\", path, flags, err)\n\t}\n\treturn nil\n}", "func (m *mount) unmount() error {\n\tlog.Infof(\"Unmounting %s\", m.MountPoint())\n\n\t// try unmounting with fuse lib\n\terr := fuse.Unmount(m.MountPoint())\n\tif err == nil {\n\t\treturn nil\n\t}\n\tlog.Error(\"fuse unmount err: %s\", err)\n\n\t// try closing the fuseConn\n\terr = m.fuseConn.Close()\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\tlog.Error(\"fuse conn error: %s\", err)\n\t}\n\n\t// try mount.ForceUnmountManyTimes\n\tif err := ForceUnmountManyTimes(m, 10); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Seemingly unmounted %s\", m.MountPoint())\n\treturn nil\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func execUnmount(mountDir string) error {\n\t// CONTAINER=`docker ps --filter \"label=mountpath=${mount_dir}\" --format \"{{.ID}}\"`\n\toutput, err := exec.Command(\"docker\",\n\t\t\"ps\",\n\t\t\"--filter\",\n\t\t\"label=mountpath=\"+mountDir,\n\t\t\"--format\",\n\t\t\"{{.ID}}\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker ps failed: %v\", err)\n\t}\n\n\t// docker rm ${CONTAINER} -f\n\tstr := strings.Replace(string(output), \"\\n\", \"\", -1)\n\t_, err = exec.Command(\"docker\",\n\t\t\"rm\",\n\t\tstr,\n\t\t\"-f\").CombinedOutput()\n\tif err != 
nil {\n\t\treturn fmt.Errorf(\"docker rm failed: %v\", err)\n\t}\n\n\t// umount -l ${mount_dir}\n\t_, err = exec.Command(\"umount\",\n\t\t\"-l\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"umount failed: %v\", err)\n\t}\n\n\t// rmdir ${mount_dir}\n\t_, err = exec.Command(\"rm\",\n\t\t\"-rf\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"rmdir failed: %v\", err)\n\t}\n\n\treturn nil\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func (d *VolumeDriver) Mount(r volume.MountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Mounting volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tlog.Errorf(\"VolumeDriver Mount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func Unmount(path string) error {\n\tcmd := exec.Command(\"umount\", path)\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"umount: %s\", utils.OneLine(out))\n\t}\n\n\treturn nil\n}", "func NewUnMarshaller() *UnMarshaller {\n\treturn &UnMarshaller{\n\t\theaderMarshaller: new(headerMarshaller),\n\t\theaderUnmarshaller: new(headerUnmarshaller),\n\t}\n}", "func (c *CrosDisks) Unmount(ctx context.Context, devicePath string, options []string) error {\n\tvar status MountError\n\tif err := c.call(ctx, \"Unmount\", devicePath, options).Store(&status); err != nil {\n\t\treturn err\n\t}\n\n\tif status != MountErrorNone {\n\t\treturn status\n\t}\n\n\treturn 
nil\n}", "func UnmarshalWorkspaceStatusRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceStatusRequest)\n\terr = core.UnmarshalPrimitive(m, \"frozen\", &obj.Frozen)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_at\", &obj.FrozenAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_by\", &obj.FrozenBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked\", &obj.Locked)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_by\", &obj.LockedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_time\", &obj.LockedTime)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func UnmarshalWorkspaceVariableRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceVariableRequest)\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secure\", &obj.Secure)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"type\", &obj.Type)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_default\", &obj.UseDefault)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"value\", &obj.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func MountVolume(vol *apis.LVMVolume, mount *MountInfo, podLVInfo *PodLVInfo) error {\n\tvolume := vol.Spec.VolGroup + \"/\" + vol.Name\n\tmounted, err := verifyMountRequest(vol, mount.MountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mounted {\n\t\tklog.Infof(\"lvm : already mounted %s => %s\", volume, mount.MountPath)\n\t\treturn 
nil\n\t}\n\n\tdevicePath := DevPath + volume\n\n\terr = FormatAndMountVol(devicePath, mount)\n\tif err != nil {\n\t\treturn status.Errorf(\n\t\t\tcodes.Internal,\n\t\t\t\"failed to format and mount the volume error: %s\",\n\t\t\terr.Error(),\n\t\t)\n\t}\n\n\tklog.Infof(\"lvm: volume %v mounted %v fs %v\", volume, mount.MountPath, mount.FSType)\n\n\tif ioLimitsEnabled && podLVInfo != nil {\n\t\tif err := setIOLimits(vol, podLVInfo, devicePath); err != nil {\n\t\t\tklog.Warningf(\"lvm: error setting io limits: podUid %s, device %s, err=%v\", podLVInfo.UID, devicePath, err)\n\t\t} else {\n\t\t\tklog.Infof(\"lvm: io limits set for podUid %v, device %s\", podLVInfo.UID, devicePath)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (z *ZfsH) Unmount(d *Dataset, force bool) (*Dataset, error) {\n\tif d.Type == DatasetSnapshot {\n\t\treturn nil, errors.New(\"cannot unmount snapshots\")\n\t}\n\targs := make([]string, 1, 3)\n\targs[0] = \"umount\"\n\tif force {\n\t\targs = append(args, \"-f\")\n\t}\n\targs = append(args, d.Name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(d.Name)\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
[ "0.6793599", "0.64460963", "0.62859815", "0.5805749", "0.5740742", "0.5738976", "0.5573756", "0.55394506", "0.5485255", "0.54272425", "0.5387513", "0.52386385", "0.5211439", "0.5121333", "0.5117376", "0.51012397", "0.5056214", "0.50310487", "0.5011161", "0.49947315", "0.49851015", "0.49829596", "0.49708214", "0.49002397", "0.489143", "0.48828992", "0.48439032", "0.48244092", "0.48185095", "0.48159578", "0.48063245", "0.47992036", "0.47979137", "0.47628063", "0.4753922", "0.46984485", "0.46797135", "0.4613938", "0.46136838", "0.45906577", "0.45872045", "0.45712548", "0.45669642", "0.45659482", "0.4556051", "0.4546562", "0.45383424", "0.45340726", "0.45208532", "0.4486793", "0.44866866", "0.44785878", "0.44776845", "0.44101828", "0.4407769", "0.44036445", "0.44014487", "0.43975607", "0.43975607", "0.43776578", "0.4368971", "0.43463466", "0.4345732", "0.43268216", "0.43252903", "0.43179703", "0.42990547", "0.4298453", "0.428325", "0.42747855", "0.42674297", "0.4264385", "0.42607826", "0.42457998", "0.42392886", "0.4237189", "0.42327937", "0.42198744", "0.4219106", "0.42079547", "0.41938692", "0.41894078", "0.41836882", "0.41646612", "0.41566718", "0.41547298", "0.41432846", "0.41412094", "0.4130259", "0.41216588", "0.41202867", "0.41104165", "0.41074443", "0.41055518", "0.40931892", "0.40904763", "0.409032", "0.40853158", "0.40773255", "0.40748513" ]
0.89608026
0
Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest is an autogenerated conversion function.
func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error { return autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out 
*v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (c *UFSClient) NewRemoveUFSVolumeMountPointRequest() *RemoveUFSVolumeMountPointRequest {\n\treq := &RemoveUFSVolumeMountPointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewExtendUFSVolumeRequest() *ExtendUFSVolumeRequest {\n\treq := &ExtendUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (c *UFSClient) NewRemoveUFSVolumeRequest() *RemoveUFSVolumeRequest {\n\treq := &RemoveUFSVolumeRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Unmounting Volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tif d.refCounts.IsInitialized() != true {\n\t\t// if refcounting hasn't been succesful,\n\t\t// no refcounting, no unmount. 
All unmounts are delayed\n\t\t// until we succesfully populate the refcount map\n\t\td.refCounts.MarkDirty()\n\t\treturn volume.Response{Err: \"\"}\n\t}\n\n\tlog.Errorf(\"VolumeDriver Unmount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (d *MinioDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Unmount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections <= 1 {\n\t\tif err := d.unmountVolume(v); err != nil {\n\t\t\tglog.Warningf(\"Unmounting %s volume failed with: %s\", v, err)\n\t\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t\t}\n\t\tv.connections = 0\n\t\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n\t}\n\tv.connections--\n\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n}", "func (c *UFSClient) NewDescribeUFSVolume2Request() *DescribeUFSVolume2Request {\n\treq := &DescribeUFSVolume2Request{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in *v1beta1.MountVolumeRequest, out *internal.MountVolumeRequest) error {\n\treturn autoConvert_v1beta1_MountVolumeRequest_To_internal_MountVolumeRequest(in, out)\n}", "func Convert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in *internal.VolumeIDFromMountRequest, out *v1beta1.VolumeIDFromMountRequest) error {\n\treturn 
autoConvert_internal_VolumeIDFromMountRequest_To_v1beta1_VolumeIDFromMountRequest(in, out)\n}", "func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Unmount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tmount := vol.mounts[req.ID]\n\tif !mount {\n\t\tlogrus.Debugf(\"Volume %s is not mounted by %s\", req.Name, req.ID)\n\t\treturn fmt.Errorf(\"volume %s is not mounted by %s\", req.Name, req.ID)\n\t}\n\n\tdelete(vol.mounts, req.ID)\n\n\treturn nil\n}", "func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. 
Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. 
Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}", "func (s *VolumeListener) Detach(inctx context.Context, in *protocol.VolumeDetachmentRequest) (empty *googleprotobuf.Empty, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot detach volume\")\n\n\tempty = &googleprotobuf.Empty{}\n\tif s == nil {\n\t\treturn empty, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn empty, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tvolumeRef, volumeRefLabel := srvutils.GetReference(in.GetVolume())\n\tif volumeRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for volume\")\n\t}\n\thostRef, hostRefLabel := srvutils.GetReference(in.GetHost())\n\tif hostRef == \"\" {\n\t\treturn empty, fail.InvalidRequestError(\"neither name nor id given as reference for host\")\n\t}\n\n\tjob, xerr := PrepareJob(inctx, in.GetVolume().GetTenantId(), fmt.Sprintf(\"/volume/%s/host/%s/detach\", volumeRef, hostRef))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\thandler := VolumeHandler(job)\n\tif xerr = handler.Detach(volumeRef, hostRef); xerr != nil {\n\t\treturn empty, xerr\n\t}\n\n\tlogrus.WithContext(job.Context()).Infof(\"Volume %s successfully detached from %s.\", volumeRefLabel, hostRefLabel)\n\treturn empty, nil\n}", "func 
ParseNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) (*VolumeInfo, error) {\n\tvolumeID := req.GetVolumeId()\n\tif volumeID == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Volume ID missing in request\")\n\t}\n\n\ttargetPath := req.GetTargetPath()\n\tif targetPath == \"\" {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"Target path missing in request\")\n\t}\n\n\treturn &VolumeInfo{volumeID, targetPath}, nil\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (client *Client) UnmapVolume(name, host string) (*Response, *ResponseStatus, error) {\n\tif host == \"\" {\n\t\treturn client.FormattedRequest(\"/unmap/volume/\\\"%s\\\"\", name)\n\t}\n\n\treturn client.FormattedRequest(\"/unmap/volume/host/\\\"%s\\\"/\\\"%s\\\"\", host, name)\n}", "func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, 
err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}", "func Convert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in *impl.GetVolumeIDFromTargetPathRequest, out *v2alpha1.GetVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathRequest_To_v2alpha1_GetVolumeIDFromTargetPathRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in *v1beta1.DismountVolumeRequest, out *internal.DismountVolumeRequest) error {\n\treturn autoConvert_v1beta1_DismountVolumeRequest_To_internal_DismountVolumeRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func 
CreateUnAssignPrivateIpAddressRequest() (request *UnAssignPrivateIpAddressRequest) {\n\trequest = &UnAssignPrivateIpAddressRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"eflo\", \"2022-05-30\", \"UnAssignPrivateIpAddress\", \"eflo\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {\n\tif req == nil {\n\t\treturn fmt.Errorf(\"must provide non-nil request to UnmountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Unmounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, unmountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn p.handleErrorResponse(resp, unmountPath, req.Name)\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func Unmount(d Driver, vName string) error {\n\tlog.Debugf(\"Entering Unmount: name: %s\", vName)\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tv, m, err := getVolumeMount(d, vName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.GetConnections() <= 1 {\n\t\tcmd := fmt.Sprintf(\"/usr/bin/umount %s\", m.GetPath())\n\t\tif err := d.RunCmd(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tSetN(0, m, v)\n\t} else {\n\t\tAddN(-1, m, v)\n\t}\n\n\treturn d.SaveConfig()\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out 
*v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func ParseVolume(input string) (Volume, error) {\n\tparts := strings.Split(input, \":\")\n\tswitch len(parts) {\n\tcase 1:\n\t\treturn Volume{Type: VolumeTypeInstance, Path: input}, nil\n\tcase 2:\n\t\tif vt, mountOptions, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{Type: vt, Path: parts[1], MountOptions: mountOptions}, nil\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0]}, nil\n\tcase 3:\n\t\tif _, _, err := parseVolumeType(parts[0]); err == nil {\n\t\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t\t}\n\t\toptions, err := parseVolumeOptions(parts[2])\n\t\tif err != nil {\n\t\t\treturn Volume{}, maskAny(err)\n\t\t}\n\t\treturn Volume{Type: VolumeTypeLocal, Path: parts[1], HostPath: parts[0], Options: options}, nil\n\tdefault:\n\t\treturn Volume{}, maskAny(errgo.WithCausef(nil, ValidationError, \"not a valid volume '%s'\", input))\n\t}\n}", "func UnmarshalSuperBlockV1(superBlockV1Buf []byte) (superBlockV1 *SuperBlockV1Struct, err error) {\n\tsuperBlockV1, err = unmarshalSuperBlockV1(superBlockV1Buf)\n\treturn\n}", "func (request UpdateARoomRequest) Validate() error {\n\tif request.Name == \"\" || request.ID == nil {\n\t\treturn ErrInvalidRequest\n\t}\n\treturn nil\n}", "func Unmount(dest string) error {\n\treturn nil\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func (d *VolumeDriver) UnmountVolume(name string) error {\n\tlog.Errorf(\"VolumeDriver UnmountVolume to be implemented\")\n\treturn nil\n}", "func (zr *ZRequest) Unmarshal(v interface{}) error {\n\tif !zr.ended {\n\t\treturn 
ErrRequestNotComp\n\t}\n\tif zr.err != nil {\n\t\treturn zr.err\n\t}\n\tif zr.resp == nil {\n\t\tif zr.err != nil {\n\t\t\treturn zr.err\n\t\t}\n\t\treturn ErrRequestNotComp\n\t}\n\n\t// copy to discard\n\tdefer func() {\n\t\tio.Copy(ioutil.Discard, zr.resp.Body)\n\t\tzr.resp.Body.Close()\n\t}()\n\n\tvar decoder Decoder\n\trespContentType := zr.resp.Header.Get(HdrContentType)\n\n\t// xml or json\n\tif xmlCheck.MatchString(respContentType) {\n\t\tdecoder = xml.NewDecoder(zr.resp.Body)\n\t} else {\n\t\tdecoder = json.NewDecoder(zr.resp.Body)\n\t}\n\n\tzr.err = decoder.Decode(v)\n\tif zr.err != nil {\n\t\tzr.err = errors.New(\"decoder.Decode: \" + zr.err.Error())\n\t\treturn zr.err\n\t}\n\treturn nil\n}", "func UmountVolume(vol *apis.LVMVolume, targetPath string,\n) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\tdev, ref, err := mount.GetDeviceNameFromMount(mounter, targetPath)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: umount volume: failed to get device from mnt: %s\\nError: %v\",\n\t\t\ttargetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\t// device has already been un-mounted, return successful\n\tif len(dev) == 0 || ref == 0 {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because volume %s not mounted: %v\",\n\t\t\tvol.Name, targetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil {\n\t\treturn fmt.Errorf(\"error checking if path exists: %v\", pathErr)\n\t} else if !pathExists {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because path does not exist: %v\",\n\t\t\ttargetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif err = mounter.Unmount(targetPath); err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to unmount %s: path %s err: %v\",\n\t\t\tvol.Name, targetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\tif err := os.Remove(targetPath); err != nil {\n\t\tklog.Errorf(\"lvm: failed to remove mount path vol %s err : %v\", vol.Name, 
err)\n\t}\n\n\tklog.Infof(\"umount done %s path %v\", vol.Name, targetPath)\n\n\treturn nil\n}", "func (c *Controller) Detach(detachRequest k8sresources.FlexVolumeDetachRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-detach-start\")\n\tdefer c.logger.Println(\"controller-detach-end\")\n\tif detachRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Success\",\n\t\t}\n\t}\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (c *UFSClient) NewDescribeUFSVolumeMountpointRequest() *DescribeUFSVolumeMountpointRequest {\n\treq := &DescribeUFSVolumeMountpointRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (vol *Volume) ListRequest() (ListCommand, error) {\n\treq := &ListVolumes{\n\t\tAccount: vol.Account,\n\t\tDomainID: vol.DomainID,\n\t\tName: vol.Name,\n\t\tType: vol.Type,\n\t\tVirtualMachineID: vol.VirtualMachineID,\n\t\tZoneID: vol.ZoneID,\n\t}\n\n\treturn req, nil\n}", "func (m *RemoveDocV1Request) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif m.GetId() <= 0 {\n\t\treturn RemoveDocV1RequestValidationError{\n\t\t\tfield: \"Id\",\n\t\t\treason: \"value must be greater than 0\",\n\t\t}\n\t}\n\n\treturn nil\n}", "func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}", "func DecodeUnsealRequest(_ context.Context, grpcReq interface{}) (interface{}, error) {\n\treq := grpcReq.(*pb.UnsealRequest)\n\treturn 
&endpoints.UnsealRequest{Key: req.Key, Reset: req.Reset_}, nil\n}", "func (d *MinioDriver) unmountVolume(volume *minioVolume) error {\n\treturn exec.Command(\"umount\", volume.mountpoint).Run()\n}", "func Unmount(target string) error {\n\tlogrus.Infof(\"Unmount %s\", target)\n\terr := os.Remove(target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in *impl.GetClosestVolumeIDFromTargetPathRequest, out *v2alpha1.GetClosestVolumeIDFromTargetPathRequest) error {\n\treturn autoConvert_impl_GetClosestVolumeIDFromTargetPathRequest_To_v2alpha1_GetClosestVolumeIDFromTargetPathRequest(in, out)\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cs *controllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif 
cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\treturn nil\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cs *ControllerServer) validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid CreateVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetName() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"volume Name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn 
status.Error(codes.InvalidArgument, \"volume Capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn status.Error(codes.Unimplemented, \"block volume not supported\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func Convert_v1alpha3_OSDisk_To_v1alpha2_OSDisk(in *v1alpha3.OSDisk, out *OSDisk, s apiconversion.Scope) error {\n\treturn autoConvert_v1alpha3_OSDisk_To_v1alpha2_OSDisk(in, out, s)\n}", "func (s *Stack) CreateVolume(request resources.VolumeRequest) (volume *resources.Volume, err error) {\n\tif s == nil {\n\t\treturn nil, scerr.InvalidInstanceError()\n\t}\n\tif request.Name == \"\" {\n\t\treturn nil, scerr.InvalidParameterError(\"request.Name\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, fmt.Sprintf(\"(%s)\", request.Name), true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tvolume, err = s.GetVolume(request.Name)\n\tif err != nil {\n\t\tif _, ok := err.(scerr.ErrNotFound); !ok {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif volume != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\taz, err := s.SelectedAvailabilityZone()\n\tif err != nil {\n\t\treturn nil, resources.ResourceDuplicateError(\"volume\", request.Name)\n\t}\n\n\tvar v resources.Volume\n\tswitch s.versions[\"volume\"] {\n\tcase \"v1\":\n\t\tvar vol *volumesv1.Volume\n\t\tvol, err = volumesv1.Create(s.VolumeClient, volumesv1.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have 
succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tcase \"v2\":\n\t\tvar vol *volumesv2.Volume\n\t\tvol, err = volumesv2.Create(s.VolumeClient, volumesv2.CreateOpts{\n\t\t\tAvailabilityZone: az,\n\t\t\tName: request.Name,\n\t\t\tSize: request.Size,\n\t\t\tVolumeType: s.getVolumeType(request.Speed),\n\t\t}).Extract()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif vol == nil {\n\t\t\terr = scerr.Errorf(fmt.Sprintf(\"volume creation seems to have succeeded, but returned nil value is unexpected\"), nil)\n\t\t\tbreak\n\t\t}\n\t\tv = resources.Volume{\n\t\t\tID: vol.ID,\n\t\t\tName: vol.Name,\n\t\t\tSize: vol.Size,\n\t\t\tSpeed: s.getVolumeSpeed(vol.VolumeType),\n\t\t\tState: toVolumeState(vol.Status),\n\t\t}\n\tdefault:\n\t\terr = scerr.Errorf(fmt.Sprintf(\"unmanaged service 'volume' version '%s'\", s.versions[\"volume\"]), nil)\n\t}\n\tif err != nil {\n\t\treturn nil, scerr.Wrap(err, fmt.Sprintf(\"error creating volume : %s\", ProviderErrorToString(err)))\n\t}\n\n\treturn &v, nil\n}", "func (c *restClient) UpdateVolume(ctx context.Context, req *netapppb.UpdateVolumeRequest, opts ...gax.CallOption) (*UpdateVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tbody := req.GetVolume()\n\tjsonReq, err := m.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetVolume().GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetUpdateMask() != nil {\n\t\tupdateMask, err := protojson.Marshal(req.GetUpdateMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"updateMask\", 
string(updateMask[1:len(updateMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"volume.name\", url.QueryEscape(req.GetVolume().GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"PATCH\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &UpdateVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func Unmount(dest string) error {\n\treturn syscall.Unmount(dest, 0)\n}", "func (r *RequestAPI) RemoveRequestV1(ctx context.Context, req *desc.RemoveRequestV1Request) (*desc.RemoveRequestV1Response, error) {\n\tlog.Printf(\"Got remove request: %v\", req)\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"RemoveRequestV1\")\n\tdefer span.Finish()\n\n\tif err := r.validateAndSendErrorEvent(ctx, req, producer.DeleteEvent); err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\terr := r.repo.Remove(ctx, req.RequestId)\n\tif errors.Is(err, repository.NotFound) {\n\t\treturn nil, status.Error(codes.NotFound, \"request does not exist\")\n\t} else if err != nil {\n\t\tlog.Error().\n\t\t\tErr(err).\n\t\t\tUint64(\"request_id\", req.RequestId).\n\t\t\tStr(\"endpoint\", \"RemoveRequestV1\").\n\t\t\tMsgf(\"Failed to remove request\")\n\t\treturn nil, err\n\t}\n\tr.producer.Send(producer.NewEvent(ctx, req.RequestId, producer.DeleteEvent, err))\n\tr.metrics.IncRemove(1, \"RemoveRequestV1\")\n\treturn &desc.RemoveRequestV1Response{}, nil\n}", "func validateCreateVolumeRequest(req *csi.CreateVolumeRequest) error {\n\tif req.GetName() == \"\" {\n\t\treturn errors.New(\"volume name cannot be empty\")\n\t}\n\n\treqCaps := req.GetVolumeCapabilities()\n\tif reqCaps == nil {\n\t\treturn errors.New(\"volume capabilities cannot be empty\")\n\t}\n\n\tfor _, cap := range reqCaps {\n\t\tif cap.GetBlock() != nil {\n\t\t\treturn errors.New(\"block access type not allowed\")\n\t\t}\n\t}\n\n\tif req.GetSecrets() == nil || len(req.GetSecrets()) == 0 {\n\t\treturn errors.New(\"secrets cannot be nil or empty\")\n\t}\n\n\treturn nil\n}", "func UnmarshalAccessRequest(data []byte, opts ...MarshalOption) (types.AccessRequest, error) {\n\tcfg, err := CollectOptions(opts)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tvar req types.AccessRequestV3\n\tif err := utils.FastUnmarshal(data, &req); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tif err := ValidateAccessRequest(&req); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tif cfg.ID != 0 {\n\t\treq.SetResourceID(cfg.ID)\n\t}\n\tif !cfg.Expires.IsZero() {\n\t\treq.SetExpiry(cfg.Expires)\n\t}\n\treturn &req, nil\n}", "func (o *Filesystem) Unmount(ctx context.Context, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Unmount\", 0, options).Store()\n\treturn\n}", "func (d DobsClient) DetachVolume(ctx 
Context, volumeID string, dropletID string) (error) {\n\tdropletIDI, err := strconv.Atoi(dropletID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taction, _, err := d.GodoClient.StorageActions.DetachByDropletID(ctx, volumeID, dropletIDI)\t\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.waitForAction(ctx, volumeID, action)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func validateVanillaControllerUnpublishVolumeRequest(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) error {\n\treturn common.ValidateControllerUnpublishVolumeRequest(ctx, req)\n}", "func UnmarshalWorkspaceStatusUpdateRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceStatusUpdateRequest)\n\terr = core.UnmarshalPrimitive(m, \"frozen\", &obj.Frozen)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_at\", &obj.FrozenAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_by\", &obj.FrozenBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked\", &obj.Locked)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_by\", &obj.LockedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_time\", &obj.LockedTime)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func CreateDropPartitionRequest() (request *DropPartitionRequest) {\n\trequest = &DropPartitionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"openanalytics-open\", \"2020-09-28\", \"DropPartition\", \"openanalytics\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateUnlockBalanceRequest(clientToken string, currencyID uint16, walletID uint64, amount string) *UnlockBalanceRequest {\n\trequestFields := map[string]interface{}{\n\t\t\"client_token\": clientToken,\n\t\t\"currency_id\": 
currencyID,\n\t\t\"wallet_id\": walletID,\n\t\t\"amount\": amount,\n\t}\n\n\treq := request.Post(\"balance/unlock\", requestFields)\n\treturn &UnlockBalanceRequest{BaseRequest: req}\n}", "func (cs *ControllerServer) validateExpandVolumeRequest(req *csi.ControllerExpandVolumeRequest) error {\n\tif err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_EXPAND_VOLUME); err != nil {\n\t\treturn fmt.Errorf(\"invalid ExpandVolumeRequest: %v\", err)\n\t}\n\n\tif req.GetVolumeId() == \"\" {\n\t\treturn status.Error(codes.InvalidArgument, \"Volume ID cannot be empty\")\n\t}\n\n\tcapRange := req.GetCapacityRange()\n\tif capRange == nil {\n\t\treturn status.Error(codes.InvalidArgument, \"CapacityRange cannot be empty\")\n\t}\n\n\treturn nil\n}", "func (c *CryptohomeBinary) Unmount(ctx context.Context, username string) ([]byte, error) {\n\treturn c.call(ctx, \"--action=unmount\", \"--user=\"+username)\n}", "func Convert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in *internal.ListVolumesOnDiskRequest, out *v1beta1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_internal_ListVolumesOnDiskRequest_To_v1beta1_ListVolumesOnDiskRequest(in, out)\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (c *restClient) DeleteVolume(ctx context.Context, req *netapppb.DeleteVolumeRequest, opts ...gax.CallOption) (*DeleteVolumeOperation, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetForce() {\n\t\tparams.Add(\"force\", fmt.Sprintf(\"%v\", req.GetForce()))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = 
append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"DELETE\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &DeleteVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (c *client) Unmount(\n\tctx types.Context,\n\tmountPoint string,\n\topts types.Store) error {\n\n\tif c.isController() {\n\t\treturn utils.NewUnsupportedForClientTypeError(\n\t\t\tc.clientType, \"Unmount\")\n\t}\n\n\tif lsxSO, _ := c.Supported(ctx, opts); !lsxSO.Umount() {\n\t\treturn errExecutorNotSupported\n\t}\n\n\tctx = context.RequireTX(ctx.Join(c.ctx))\n\n\tserviceName, ok := context.ServiceName(ctx)\n\tif !ok {\n\t\treturn goof.New(\"missing service name\")\n\t}\n\n\tsi, err := c.getServiceInfo(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverName := si.Driver.Name\n\n\tif _, err = c.runExecutor(\n\t\tctx,\n\t\tdriverName,\n\t\ttypes.LSXCmdUmount,\n\t\tmountPoint); err != nil {\n\t\treturn 
err\n\t}\n\n\tctx.Debug(\"xli umount success\")\n\treturn nil\n}", "func Unmount(mountpoint string) (err error) {\n\tlog.Println(\"Unmounting filesystem\")\n\terr = fuse.Unmount(mountpoint)\n\treturn\n}", "func NewVolume(volumeRequest provider.Volume) Volume {\n\t// Build the template to send to backend\n\n\tvolume := Volume{\n\t\tID: volumeRequest.VolumeID,\n\t\tCRN: volumeRequest.CRN,\n\t\tTags: volumeRequest.VPCVolume.Tags,\n\t\tZone: &Zone{\n\t\t\tName: volumeRequest.Az,\n\t\t},\n\t\tProvider: string(volumeRequest.Provider),\n\t\tVolumeType: string(volumeRequest.VolumeType),\n\t}\n\tif volumeRequest.Name != nil {\n\t\tvolume.Name = *volumeRequest.Name\n\t}\n\tif volumeRequest.Capacity != nil {\n\t\tvolume.Capacity = int64(*volumeRequest.Capacity)\n\t}\n\tif volumeRequest.VPCVolume.Profile != nil {\n\t\tvolume.Profile = &Profile{\n\t\t\tName: volumeRequest.VPCVolume.Profile.Name,\n\t\t}\n\t}\n\tif volumeRequest.VPCVolume.ResourceGroup != nil {\n\t\tvolume.ResourceGroup = &ResourceGroup{\n\t\t\tID: volumeRequest.VPCVolume.ResourceGroup.ID,\n\t\t\tName: volumeRequest.VPCVolume.ResourceGroup.Name,\n\t\t}\n\t}\n\n\tif volumeRequest.Iops != nil {\n\t\tvalue, err := strconv.ParseInt(*volumeRequest.Iops, 10, 64)\n\t\tif err != nil {\n\t\t\tvolume.Iops = 0\n\t\t}\n\t\tvolume.Iops = value\n\t}\n\tif volumeRequest.VPCVolume.VolumeEncryptionKey != nil && len(volumeRequest.VPCVolume.VolumeEncryptionKey.CRN) > 0 {\n\t\tencryptionKeyCRN := volumeRequest.VPCVolume.VolumeEncryptionKey.CRN\n\t\tvolume.VolumeEncryptionKey = &VolumeEncryptionKey{CRN: encryptionKeyCRN}\n\t}\n\n\tvolume.Cluster = volumeRequest.Attributes[ClusterIDTagName]\n\tvolume.Status = StatusType(volumeRequest.Attributes[VolumeStatus])\n\treturn volume\n}", "func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {\n\ttargetPath := cs.getInternalMountPath(vol)\n\n\t// Unmount nfs server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := 
cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tTargetPath: cs.getInternalMountPath(vol),\n\t})\n\treturn err\n}", "func DecodeModifyBearerRequest(b []byte) (*ModifyBearerRequest, error) {\n\tlog.Println(\"DecodeModifyBearerRequest is deprecated. use ParseModifyBearerRequest instead\")\n\treturn ParseModifyBearerRequest(b)\n}", "func umount(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\ts, ok := R.(apl.String)\n\tif ok == false {\n\t\treturn nil, fmt.Errorf(\"io umount: argument must be a string %T\", R)\n\t}\n\tUmount(string(s))\n\treturn apl.EmptyArray{}, nil\n}", "func (m *RemoveTenantV1Request) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for TenantId\n\n\treturn nil\n}", "func (c *Controller) UnmountDevice(unmountDeviceRequest k8sresources.FlexVolumeUnmountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-UnmountDevice-start\")\n\tdefer c.logger.Println(\"controller-UnmountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func removeVolumeMount(volumeName string, mounts []apiv1.VolumeMount) []apiv1.VolumeMount {\n\tnewMounts := mounts[:0]\n\tfor _, v := range mounts {\n\t\tif v.Name != volumeName {\n\t\t\tnewMounts = append(newMounts, v)\n\t\t}\n\t}\n\n\treturn newMounts\n}", "func (driver *Driver) Unmount(volumeName, volumeID string) error {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn 
errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\n\terr = driver.osdm.Unmount(mounts[0].Mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.sdm.DetachVolume(false, volumes[0].VolumeID, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func (r *CallParams) NewRequestOffLedger(keyPair *ed25519.KeyPair) *request.OffLedger {\n\tret := request.NewOffLedger(r.target, r.entryPoint, r.args).WithTransfer(r.transfer)\n\tret.Sign(keyPair)\n\treturn ret\n}", "func Unmarshal(r *http.Request, o interface{}) error {\n\tr.ParseForm()\n\t// using r.Form instead of r.PostForm, since etcd seems to allow\n\t// parameters set in either\n\treturn unmarshal(mux.Vars(r), r.URL.Query(), r.Form, o)\n}", "func Unmount(mount string, flags int) error {\n\treturn ErrNotImplementOnUnix\n}", "func (d *fsStorage) Unmount(volume *Volume) error {\n\treturn nil\n}", "func (r *DownloadDiffRequest) Validate() error {\n\tif err := requireProject(r.GetProject()); err != nil {\n\t\treturn err\n\t}\n\tif err := requireCommittish(\"committish\", r.GetCommittish()); err != nil {\n\t\treturn err\n\t}\n\tif base := r.GetBase(); base != \"\" {\n\t\tif err := requireCommittish(\"base\", base); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif strings.HasPrefix(r.Path, \"/\") {\n\t\treturn errors.New(\"path must not start with /\")\n\t}\n\treturn nil\n}", "func (mounter *Mounter) Unmount(target string) error {\n\treturn mounter.unmount(target, UMOUNT_COMMAND)\n}", "func NewCmdDiskDetach() *cobra.Command {\n\tvar async, yes 
*bool\n\tvar udiskIDs *[]string\n\treq := base.BizClient.NewDetachUDiskRequest()\n\tcmd := &cobra.Command{\n\t\tUse: \"detach\",\n\t\tShort: \"Detach udisk instances from an uhost\",\n\t\tLong: \"Detach udisk instances from an uhost\",\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\ttext := `Please confirm that you have already unmounted file system corresponding to this hard drive,(See \"https://docs.ucloud.cn/storage_cdn/udisk/userguide/umount\" for help), otherwise it will cause file system damage and UHost cannot be normally shut down. Sure to detach?`\n\t\t\tif !*yes {\n\t\t\t\tsure, err := ux.Prompt(text)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.Cxt.PrintErr(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !sure {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, id := range *udiskIDs {\n\t\t\t\tid = base.PickResourceID(id)\n\t\t\t\tany, err := describeUdiskByID(id, *req.ProjectId, *req.Region, *req.Zone)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.HandleError(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif any == nil {\n\t\t\t\t\tbase.Cxt.PrintErr(fmt.Errorf(\"udisk[%v] is not exist\", any))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tins, ok := any.(*udisk.UDiskDataSet)\n\t\t\t\tif !ok {\n\t\t\t\t\tbase.Cxt.PrintErr(fmt.Errorf(\"%#v convert to udisk failed\", any))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treq.UHostId = &ins.UHostId\n\t\t\t\treq.UDiskId = &id\n\t\t\t\t*req.UHostId = base.PickResourceID(*req.UHostId)\n\t\t\t\tresp, err := base.BizClient.DetachUDisk(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbase.HandleError(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttext := fmt.Sprintf(\"udisk[%s] is detaching from uhost[%s]\", resp.UDiskId, resp.UHostId)\n\t\t\t\tif *async {\n\t\t\t\t\tbase.Cxt.Println(text)\n\t\t\t\t} else {\n\t\t\t\t\tpollDisk(resp.UDiskId, *req.ProjectId, *req.Region, *req.Zone, text, []string{status.DISK_AVAILABLE, status.DISK_FAILED})\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\tflags := cmd.Flags()\n\tflags.SortFlags = false\n\tudiskIDs = 
flags.StringSlice(\"udisk-id\", nil, \"Required. Resource ID of the udisk instances to detach\")\n\treq.ProjectId = flags.String(\"project-id\", base.ConfigInstance.ProjectID, \"Optional. Assign project-id\")\n\treq.Region = flags.String(\"region\", base.ConfigInstance.Region, \"Optional. Assign region\")\n\treq.Zone = flags.String(\"zone\", base.ConfigInstance.Zone, \"Optional. Assign availability zone\")\n\tasync = flags.Bool(\"async\", false, \"Optional. Do not wait for the long-running operation to finish.\")\n\tyes = flags.BoolP(\"yes\", \"y\", false, \"Optional. Do not prompt for confirmation.\")\n\n\tflags.SetFlagValuesFunc(\"udisk-id\", func() []string {\n\t\treturn getDiskList([]string{status.DISK_INUSE}, *req.ProjectId, *req.Region, *req.Zone)\n\t})\n\n\tcmd.MarkFlagRequired(\"udisk-id\")\n\treturn cmd\n}", "func (c *UDBClient) NewEditUDBBackupBlacklistRequest() *EditUDBBackupBlacklistRequest {\n\treq := &EditUDBBackupBlacklistRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (m *CleanupV1Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCleanupStepsToSkip(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEnvironmentCrn(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHosts(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIps(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRoles(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUsers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (d ImagefsDriver) Unmount(r *volume.UnmountRequest) error {\n\tfmt.Printf(\"-> Unmount 
%+v\\n\", r)\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\ttimeout := time.Second * 5\n\terr = d.cli.ContainerStop(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\t&timeout,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tfmt.Printf(\"<- OK\\n\")\n\treturn nil\n}", "func UnmarshalWorkspaceVariableRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceVariableRequest)\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secure\", &obj.Secure)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"type\", &obj.Type)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"use_default\", &obj.UseDefault)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"value\", &obj.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func UnmarshalWorkspaceStatusRequest(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(WorkspaceStatusRequest)\n\terr = core.UnmarshalPrimitive(m, \"frozen\", &obj.Frozen)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_at\", &obj.FrozenAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"frozen_by\", &obj.FrozenBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked\", &obj.Locked)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_by\", &obj.LockedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"locked_time\", &obj.LockedTime)\n\tif err != nil 
{\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}" ]
[ "0.7709423", "0.63488305", "0.63477135", "0.5873635", "0.554804", "0.55408627", "0.53781384", "0.5330923", "0.5313279", "0.49934", "0.49808848", "0.49561214", "0.49219385", "0.49163222", "0.49159575", "0.491378", "0.48997864", "0.4886922", "0.48458815", "0.47973657", "0.47820538", "0.4728088", "0.47255546", "0.46802732", "0.46767512", "0.4643877", "0.46144384", "0.4586807", "0.4585981", "0.45801237", "0.45460516", "0.45351717", "0.4495273", "0.4493762", "0.4431636", "0.43896532", "0.43868515", "0.4382478", "0.43237078", "0.43229833", "0.42954314", "0.42859894", "0.42810124", "0.42804885", "0.4268867", "0.42548674", "0.42466304", "0.42414474", "0.4238492", "0.42373443", "0.42306107", "0.42254132", "0.41736627", "0.41735005", "0.4166771", "0.41661456", "0.41548195", "0.41521084", "0.41351187", "0.41302457", "0.412313", "0.41132778", "0.4111102", "0.41060582", "0.41042402", "0.4102623", "0.41024405", "0.4092868", "0.40889293", "0.4080717", "0.4071652", "0.40630892", "0.40615055", "0.4055703", "0.40492713", "0.40457395", "0.40360427", "0.4031712", "0.40309697", "0.40302774", "0.40126", "0.39862087", "0.39853638", "0.3983933", "0.39817134", "0.3978885", "0.39753687", "0.39695758", "0.3967498", "0.39665046", "0.39649263", "0.39569825", "0.39546156", "0.39518407", "0.39464203", "0.39462548", "0.39462057", "0.3945483", "0.3941603", "0.39372534" ]
0.8994223
0
Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse is an autogenerated conversion function.
func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error { return autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. 
Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. 
Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}", "func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Unmounting Volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tif d.refCounts.IsInitialized() != true {\n\t\t// if refcounting hasn't been succesful,\n\t\t// no refcounting, no unmount. 
All unmounts are delayed\n\t\t// until we succesfully populate the refcount map\n\t\td.refCounts.MarkDirty()\n\t\treturn volume.Response{Err: \"\"}\n\t}\n\n\tlog.Errorf(\"VolumeDriver Unmount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func (d *MinioDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Unmount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections <= 1 {\n\t\tif err := d.unmountVolume(v); err != nil {\n\t\t\tglog.Warningf(\"Unmounting %s volume failed with: %s\", v, err)\n\t\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t\t}\n\t\tv.connections = 0\n\t\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n\t}\n\tv.connections--\n\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n}", "func (client *Client) UnmapVolume(name, host string) (*Response, *ResponseStatus, error) {\n\tif host == \"\" {\n\t\treturn client.FormattedRequest(\"/unmap/volume/\\\"%s\\\"\", name)\n\t}\n\n\treturn client.FormattedRequest(\"/unmap/volume/host/\\\"%s\\\"/\\\"%s\\\"\", host, name)\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif 
resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {\n\ttargetPath := cs.getInternalMountPath(vol)\n\n\t// Unmount nfs server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tTargetPath: cs.getInternalMountPath(vol),\n\t})\n\treturn err\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Unmount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tmount := vol.mounts[req.ID]\n\tif !mount {\n\t\tlogrus.Debugf(\"Volume %s is not mounted by %s\", req.Name, req.ID)\n\t\treturn 
fmt.Errorf(\"volume %s is not mounted by %s\", req.Name, req.ID)\n\t}\n\n\tdelete(vol.mounts, req.ID)\n\n\treturn nil\n}", "func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, 
err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}", "func (c *Controller) UnmountDevice(unmountDeviceRequest k8sresources.FlexVolumeUnmountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-UnmountDevice-start\")\n\tdefer c.logger.Println(\"controller-UnmountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func (o *Filesystem) Unmount(ctx context.Context, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Unmount\", 0, options).Store()\n\treturn\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Unmount(target string) error {\n\tlogrus.Infof(\"Unmount %s\", target)\n\terr := os.Remove(target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}", "func (d ImagefsDriver) Unmount(r *volume.UnmountRequest) error {\n\tfmt.Printf(\"-> Unmount %+v\\n\", r)\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\ttimeout := time.Second * 5\n\terr = d.cli.ContainerStop(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\t&timeout,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tfmt.Printf(\"<- OK\\n\")\n\treturn nil\n}", "func Unmount(out io.Writer, logger log.FieldLogger) (err error) {\n\tdisk, err := queryPhysicalVolume(logger)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif disk == \"\" {\n\t\tlogger.Info(\"No physical volumes found.\")\n\t\treturn nil\n\t}\n\tlogger.Infof(\"Found physical volume on disk %v.\", disk)\n\tconfig := &config{\n\t\tFieldLogger: logger,\n\t\tdisk: 
disk,\n\t\tout: out,\n\t}\n\tif err = config.removeLingeringDevices(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeLogicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeVolumeGroup(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removePhysicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func (c *Client) Unmount(ctx context.Context, svc iaas.Service, export string) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\"Export\": export}\n\tstdout, xerr := executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to unmount remote NFS share\")\n\t}\n\treturn nil\n}", "func (d *Driver) Unmount(mountDir string) {\n\tDebug(\"findmnt: \" + mountDir)\n\t_, err := RunCommand(\"findmnt\", \"-n\", \"-o\", \"SOURCE\", \"--target\", mountDir)\n\tif err != nil {\n\t\tDebug(err.Error())\n\t}\n\n\tDebug(\"syscall.Unmount: \" + mountDir)\n\tif err := syscall.Unmount(mountDir, 0); err != nil {\n\t\tFailure(err)\n\t}\n\n\tDebug(\"Detach hetzner volume from server\")\n\tvolume := GetVolume(d.client, d.options.PVOrVolumeName)\n\t_, _, errDetach := d.client.Volume.Detach(context.Background(), volume)\n\n\tif errDetach != nil {\n\t\tFailure(errDetach)\n\t}\n\n\t// Delete json file with token in it\n\t//Debug(\"os.Remove\")\n\t//if err := os.Remove(jsonOptionsFile); err != nil {\n\t//\tfailure(err)\n\t//}\n\n\tSuccess()\n}", "func DecodeUnsealResponse(_ context.Context, grpcReply interface{}) (interface{}, error) {\n\treply := grpcReply.(*pb.UnsealResponse)\n\tstatus := endpoints.UnsealResponse{\n\t\tSealed: reply.SealStatus.Sealed,\n\t\tT: int(reply.SealStatus.T),\n\t\tN: int(reply.SealStatus.N),\n\t\tProgress: int(reply.SealStatus.Progress),\n\t\tVersion: 
reply.SealStatus.Version,\n\t\tClusterName: reply.SealStatus.ClusterName,\n\t\tClusterID: reply.SealStatus.ClusterId,\n\t\tErr: service.String2Error(reply.Err),\n\t}\n\n\treturn status, nil\n}", "func Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func Unmount(d Driver, vName string) error {\n\tlog.Debugf(\"Entering Unmount: name: %s\", vName)\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tv, m, err := getVolumeMount(d, vName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.GetConnections() <= 1 {\n\t\tcmd := fmt.Sprintf(\"/usr/bin/umount %s\", m.GetPath())\n\t\tif err := d.RunCmd(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tSetN(0, m, v)\n\t} else {\n\t\tAddN(-1, m, v)\n\t}\n\n\treturn d.SaveConfig()\n}", "func (v *Volume) unmount(force bool) error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.state.MountCount == 0 {\n\t\tlogrus.Debugf(\"Volume %s already unmounted\", v.Name())\n\t\treturn nil\n\t}\n\n\tif !force {\n\t\tv.state.MountCount--\n\t} else {\n\t\tv.state.MountCount = 0\n\t}\n\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\n\tif v.state.MountCount == 0 {\n\t\tif v.UsesVolumeDriver() {\n\t\t\tif v.plugin == nil {\n\t\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t\t}\n\n\t\t\treq := new(pluginapi.UnmountRequest)\n\t\t\treq.Name = v.Name()\n\t\t\treq.ID = pseudoCtrID\n\t\t\tif err := v.plugin.UnmountVolume(req); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\t\tif _, err := 
v.runtime.storageService.UnmountContainerImage(v.config.StorageID, force); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmounting volume %s image: %w\", v.Name(), err)\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t}\n\n\t\t// Unmount the volume\n\t\tif err := detachUnmount(v.config.MountPoint); err != nil {\n\t\t\tif err == unix.EINVAL {\n\t\t\t\t// Ignore EINVAL - the mount no longer exists.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unmounting volume %s: %w\", v.Name(), err)\n\t\t}\n\t\tlogrus.Debugf(\"Unmounted volume %s\", v.Name())\n\t}\n\n\treturn v.save()\n}", "func (d *Driver) internalUnmount(ctx context.Context, vol *smbVolume) error {\n\ttargetPath := getInternalMountPath(d.workingMountDir, vol)\n\n\t// Unmount smb server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := d.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tStagingTargetPath: targetPath,\n\t})\n\treturn err\n}", "func CreateUnAssignPrivateIpAddressResponse() (response *UnAssignPrivateIpAddressResponse) {\n\tresponse = &UnAssignPrivateIpAddressResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (driver *Driver) Unmount(volumeName, volumeID string) error {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := 
driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\n\terr = driver.osdm.Unmount(mounts[0].Mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.sdm.DetachVolume(false, volumes[0].VolumeID, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (d *VolumeDriver) UnmountVolume(name string) error {\n\tlog.Errorf(\"VolumeDriver UnmountVolume to be implemented\")\n\treturn nil\n}", "func (d *MinioDriver) unmountVolume(volume *minioVolume) error {\n\treturn exec.Command(\"umount\", volume.mountpoint).Run()\n}", "func (t *ControlledShutdownResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// RemainingPartitions\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.RemainingPartitions = make([]RemainingPartition7, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item RemainingPartition7\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.RemainingPartitions[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func (c *CryptohomeBinary) Unmount(ctx context.Context, username string) ([]byte, error) {\n\treturn c.call(ctx, \"--action=unmount\", \"--user=\"+username)\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn 
autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (c *client) Unmount(\n\tctx types.Context,\n\tmountPoint string,\n\topts types.Store) error {\n\n\tif c.isController() {\n\t\treturn utils.NewUnsupportedForClientTypeError(\n\t\t\tc.clientType, \"Unmount\")\n\t}\n\n\tif lsxSO, _ := c.Supported(ctx, opts); !lsxSO.Umount() {\n\t\treturn errExecutorNotSupported\n\t}\n\n\tctx = context.RequireTX(ctx.Join(c.ctx))\n\n\tserviceName, ok := context.ServiceName(ctx)\n\tif !ok {\n\t\treturn goof.New(\"missing service name\")\n\t}\n\n\tsi, err := c.getServiceInfo(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverName := si.Driver.Name\n\n\tif _, err = c.runExecutor(\n\t\tctx,\n\t\tdriverName,\n\t\ttypes.LSXCmdUmount,\n\t\tmountPoint); err != nil {\n\t\treturn err\n\t}\n\n\tctx.Debug(\"xli umount success\")\n\treturn nil\n}", "func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {\n\tif req == nil {\n\t\treturn fmt.Errorf(\"must provide non-nil request to UnmountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Unmounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, unmountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn p.handleErrorResponse(resp, unmountPath, req.Name)\n}", "func (dfr DeleteFilesystemResponse) Response() *http.Response {\n\treturn dfr.rawResponse\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn 
autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func UmountVolume(vol *apis.LVMVolume, targetPath string,\n) error {\n\tmounter := &mount.SafeFormatAndMount{Interface: mount.New(\"\"), Exec: utilexec.New()}\n\n\tdev, ref, err := mount.GetDeviceNameFromMount(mounter, targetPath)\n\tif err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: umount volume: failed to get device from mnt: %s\\nError: %v\",\n\t\t\ttargetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\t// device has already been un-mounted, return successful\n\tif len(dev) == 0 || ref == 0 {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because volume %s not mounted: %v\",\n\t\t\tvol.Name, targetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif pathExists, pathErr := mount.PathExists(targetPath); pathErr != nil {\n\t\treturn fmt.Errorf(\"error checking if path exists: %v\", pathErr)\n\t} else if !pathExists {\n\t\tklog.Warningf(\n\t\t\t\"Warning: Unmount skipped because path does not exist: %v\",\n\t\t\ttargetPath,\n\t\t)\n\t\treturn nil\n\t}\n\n\tif err = mounter.Unmount(targetPath); err != nil {\n\t\tklog.Errorf(\n\t\t\t\"lvm: failed to unmount %s: path %s err: %v\",\n\t\t\tvol.Name, targetPath, err,\n\t\t)\n\t\treturn err\n\t}\n\n\tif err := os.Remove(targetPath); err != nil {\n\t\tklog.Errorf(\"lvm: failed to remove mount path vol %s err : %v\", vol.Name, err)\n\t}\n\n\tklog.Infof(\"umount done %s path %v\", vol.Name, targetPath)\n\n\treturn nil\n}", "func (c *Client) Unmount(export string) error {\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t}\n\tretcode, stdout, stderr, err := executeScript(*c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\treturn handleExecuteScriptReturn(retcode, stdout, stderr, err, \"Error executing script to unmount remote NFS share\")\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn 
autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func UnmarshalResponse(data []byte) (Response, error) {\n\tvar r Response\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in *internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func (t *StopReplicaResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// PartitionErrors\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.PartitionErrors = make([]StopReplicaPartitionError5, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item StopReplicaPartitionError5\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.PartitionErrors[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func execUnmount(mountDir string) error {\n\t// CONTAINER=`docker ps --filter \"label=mountpath=${mount_dir}\" --format \"{{.ID}}\"`\n\toutput, err := exec.Command(\"docker\",\n\t\t\"ps\",\n\t\t\"--filter\",\n\t\t\"label=mountpath=\"+mountDir,\n\t\t\"--format\",\n\t\t\"{{.ID}}\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker ps failed: %v\", err)\n\t}\n\n\t// docker rm ${CONTAINER} -f\n\tstr := strings.Replace(string(output), \"\\n\", \"\", -1)\n\t_, err = exec.Command(\"docker\",\n\t\t\"rm\",\n\t\tstr,\n\t\t\"-f\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker rm failed: %v\", err)\n\t}\n\n\t// umount -l ${mount_dir}\n\t_, err = exec.Command(\"umount\",\n\t\t\"-l\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"umount failed: %v\", err)\n\t}\n\n\t// rmdir ${mount_dir}\n\t_, err = exec.Command(\"rm\",\n\t\t\"-rf\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"rmdir failed: %v\", err)\n\t}\n\n\treturn nil\n}", "func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {\n\tdriver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)\n\tif err != nil || driver == nil {\n\t\tglog.Errorf(\"Failed to get portworx driver. Err: %v\", err)\n\t\treturn err\n\t}\n\n\terr = driver.Unmount(u.volName, mountPath)\n\tif err != nil {\n\t\tglog.Errorf(\"Error unmounting Portworx Volume (%v) on Path (%v): %v\", u.volName, mountPath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Unmount(h hostRunner, target string) error {\n\tout, err := h.RunSSHCommand(fmt.Sprintf(\"findmnt -T %s && sudo umount %s || true\", target, target))\n\tif err != nil {\n\t\treturn errors.Wrap(err, out)\n\t}\n\treturn nil\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in *v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func 
Convert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in *v1beta1.ListVolumesOnDiskResponse, out *internal.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v1beta1_ListVolumesOnDiskResponse_To_internal_ListVolumesOnDiskResponse(in, out)\n}", "func (mounter *csiProxyMounterV1Beta) Unmount(target string) error {\n\tklog.V(4).Infof(\"Unmount: %s\", target)\n\treturn mounter.Rmdir(target)\n}", "func UnmarshalEnvVariableResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(EnvVariableResponse)\n\terr = core.UnmarshalPrimitive(m, \"hidden\", &obj.Hidden)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secure\", &obj.Secure)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"value\", &obj.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (c *restClient) DeleteVolume(ctx context.Context, req *netapppb.DeleteVolumeRequest, opts ...gax.CallOption) (*DeleteVolumeOperation, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetForce() {\n\t\tparams.Add(\"force\", fmt.Sprintf(\"%v\", req.GetForce()))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx 
context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"DELETE\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &DeleteVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func UnmarshalDeleteChannelResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(DeleteChannelResponse)\n\terr = core.UnmarshalPrimitive(m, \"channel_id\", &obj.ChannelID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"message\", &obj.Message)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func CreateRemoveAppGroupResponse() (response *RemoveAppGroupResponse) {\n\tresponse = &RemoveAppGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := 
NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (m *Mounter) Unmount(\n\tdevPath string,\n\tpath string,\n\tflags int,\n\ttimeout int,\n\topts map[string]string,\n) error {\n\tm.Lock()\n\t// device gets overwritten if opts specifies fuse mount with\n\t// options.OptionsDeviceFuseMount.\n\tdevice := devPath\n\tpath = normalizeMountPath(path)\n\tif value, ok := opts[options.OptionsDeviceFuseMount]; ok {\n\t\t// fuse mounts show-up with this key as device.\n\t\tdevice = value\n\t}\n\tinfo, ok := m.mounts[device]\n\tif !ok {\n\t\tlogrus.Warnf(\"Unable to unmount device %q path %q: %v\",\n\t\t\tdevPath, path, ErrEnoent.Error())\n\t\tlogrus.Infof(\"Found %v mounts in mounter's cache: \", len(m.mounts))\n\t\tlogrus.Infof(\"Mounter has the following mountpoints: \")\n\t\tfor dev, info := range m.mounts {\n\t\t\tlogrus.Infof(\"For Device %v: Info: %v\", dev, info)\n\t\t\tif info == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, path := range info.Mountpoint {\n\t\t\t\tlogrus.Infof(\"\\t Mountpath: %v Rootpath: %v\", path.Path, path.Root)\n\t\t\t}\n\t\t}\n\t\tm.Unlock()\n\t\treturn ErrEnoent\n\t}\n\tm.Unlock()\n\tinfo.Lock()\n\tdefer info.Unlock()\n\tfor i, p := range info.Mountpoint {\n\t\tif 
p.Path != path {\n\t\t\tcontinue\n\t\t}\n\t\terr := m.mountImpl.Unmount(path, flags, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Blow away this mountpoint.\n\t\tinfo.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]\n\t\tinfo.Mountpoint = info.Mountpoint[0 : len(info.Mountpoint)-1]\n\t\tm.maybeRemoveDevice(device)\n\t\tif options.IsBoolOptionSet(opts, options.OptionsDeleteAfterUnmount) {\n\t\t\tm.RemoveMountPath(path, opts)\n\t\t}\n\n\t\treturn nil\n\t}\n\tlogrus.Warnf(\"Device %q is not mounted at path %q\", device, path)\n\treturn ErrEnoent\n}", "func (t *DescribeLogDirsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ThrottleTimeMs, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Results\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Results = make([]DescribeLogDirsResult35, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeLogDirsResult35\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Results[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func UnmarshalDeleteFilterResp(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(DeleteFilterResp)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalDeleteFilterRespResult)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func UnmarshalAnalyticsEngineResizeClusterResponse(m map[string]interface{}) (result *AnalyticsEngineResizeClusterResponse, err error) {\n\tobj := new(AnalyticsEngineResizeClusterResponse)\n\tobj.RequestID, err = core.UnmarshalString(m, 
\"request_id\")\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = obj\n\treturn\n}", "func (t *LeaveGroupResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tif version >= 1 {\n\t\tt.ThrottleTimeMs, err = d.Int32()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version >= 3 {\n\t\t// Members\n\t\tif n, err := d.ArrayLength(); err != nil {\n\t\t\treturn err\n\t\t} else if n >= 0 {\n\t\t\tt.Members = make([]MemberResponse13, n)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tvar item MemberResponse13\n\t\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Members[i] = item\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func NewUnlockConnectorResponse(status UnlockStatus) *UnlockConnectorResponse {\n\treturn &UnlockConnectorResponse{Status: status}\n}", "func (z *ZfsH) Unmount(d *Dataset, force bool) (*Dataset, error) {\n\tif d.Type == DatasetSnapshot {\n\t\treturn nil, errors.New(\"cannot unmount snapshots\")\n\t}\n\targs := make([]string, 1, 3)\n\targs[0] = \"umount\"\n\tif force {\n\t\targs = append(args, \"-f\")\n\t}\n\targs = append(args, d.Name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(d.Name)\n}", "func ParseUnlockDomainTransferResponse(rsp *http.Response) (*UnlockDomainTransferResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &UnlockDomainTransferResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest ScalewayDomainV2alpha2Domain\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func UnmapBlockVolume(\n\tblkUtil 
volumepathhandler.BlockVolumePathHandler,\n\tglobalUnmapPath,\n\tpodDeviceUnmapPath,\n\tvolumeMapName string,\n\tpodUID utypes.UID,\n) error {\n\t// Release file descriptor lock.\n\terr := blkUtil.DetachFileDevice(filepath.Join(globalUnmapPath, string(podUID)))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. globalUnmapPath:%s, podUID: %s: %v\",\n\t\t\tglobalUnmapPath, string(podUID), err)\n\t}\n\n\t// unmap devicePath from pod volume path\n\tunmapDeviceErr := blkUtil.UnmapDevice(podDeviceUnmapPath, volumeMapName, false /* bindMount */)\n\tif unmapDeviceErr != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. podDeviceUnmapPath:%s, volumeMapName: %s, bindMount: %v: %v\",\n\t\t\tpodDeviceUnmapPath, volumeMapName, false, unmapDeviceErr)\n\t}\n\n\t// unmap devicePath from global node path\n\tunmapDeviceErr = blkUtil.UnmapDevice(globalUnmapPath, string(podUID), true /* bindMount */)\n\tif unmapDeviceErr != nil {\n\t\treturn fmt.Errorf(\"blkUtil.DetachFileDevice failed. 
globalUnmapPath:%s, podUID: %s, bindMount: %v: %v\",\n\t\t\tglobalUnmapPath, string(podUID), true, unmapDeviceErr)\n\t}\n\treturn nil\n}", "func Unmount(path string) error {\n\tcmd := exec.Command(\"fusermount\", \"-u\", path)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"exec of fusermount -u %s failed: %v\", path, err)\n\t}\n\treturn nil\n}", "func Unmount(mountpoint string) (err error) {\n\tlog.Println(\"Unmounting filesystem\")\n\terr = fuse.Unmount(mountpoint)\n\treturn\n}", "func (d *fsStorage) Unmount(volume *Volume) error {\n\treturn nil\n}", "func DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func UnmarshalVersionResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(VersionResponse)\n\terr = core.UnmarshalPrimitive(m, \"builddate\", &obj.Builddate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"buildno\", &obj.Buildno)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"commitsha\", &obj.Commitsha)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"helm_provider_version\", &obj.HelmProviderVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = 
core.UnmarshalPrimitive(m, \"helm_version\", &obj.HelmVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"supported_template_types\", &obj.SupportedTemplateTypes)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_provider_version\", &obj.TerraformProviderVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_version\", &obj.TerraformVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func NewUnloadNode_Response() *UnloadNode_Response {\n\tself := UnloadNode_Response{}\n\tself.SetDefaults()\n\treturn &self\n}", "func DecodeRemoveResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (any, error) {\n\treturn func(resp *http.Response) (any, error) {\n\t\tif restoreBody {\n\t\t\tb, err := io.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = io.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = io.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusNoContent:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := io.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"storage\", \"remove\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Unmount(dest string) error {\n\treturn nil\n}", "func UnprocessabeEntityResponse(w http.ResponseWriter, ers validation.Errors) error {\n\tw.WriteHeader(http.StatusUnprocessableEntity)\n\n\tver := validationResponse{\n\t\tMessage: ers.Error(),\n\t\tErrors: ers,\n\t}\n\n\tdata, err := ver.MarshalJSON()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshal json\")\n\t}\n\tif _, err := w.Write(data); err != nil {\n\t\treturn errors.Wrap(err, \"write response\")\n\t}\n\n\treturn nil\n}", "func parseUnparseResponse(out []byte, err error) []byte {\n\tb := 
flatbuffers.NewBuilder(1024)\n\n\tif err != nil {\n\t\toff := stdError(b, err)\n\t\t__std.ParseUnparseResponseStart(b)\n\t\t__std.ParseUnparseResponseAddRetvalType(b, __std.ParseUnparseRetvalError)\n\t\t__std.ParseUnparseResponseAddRetval(b, off)\n\t\toff = __std.ParseUnparseResponseEnd(b)\n\t\tb.Finish(off)\n\t\treturn b.FinishedBytes()\n\t}\n\n\toff := b.CreateByteString(out)\n\t__std.ParseUnparseDataStart(b)\n\t__std.ParseUnparseDataAddData(b, off)\n\toff = __std.ParseUnparseDataEnd(b)\n\t__std.ParseUnparseResponseStart(b)\n\t__std.ParseUnparseResponseAddRetvalType(b, __std.ParseUnparseRetvalParseUnparseData)\n\t__std.ParseUnparseResponseAddRetval(b, off)\n\toff = __std.ParseUnparseResponseEnd(b)\n\tb.Finish(off)\n\treturn b.FinishedBytes()\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (t *DeleteGroupsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ThrottleTimeMs, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Results\n\tif n, err 
:= d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Results = make([]DeletableGroupResult42, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DeletableGroupResult42\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Results[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func (bur BlobsUndeleteResponse) Response() *http.Response {\n\treturn bur.rawResponse\n}", "func UnauthorizedResponse(w http.ResponseWriter) error {\n\tw.WriteHeader(http.StatusUnauthorized)\n\n\tdata, err := unauthorizedBody.MarshalJSON()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshal json\")\n\t}\n\tif _, err := w.Write(data); err != nil {\n\t\treturn errors.Wrap(err, \"write response\")\n\t}\n\treturn nil\n}", "func UnmarshalDeleteFiltersResp(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(DeleteFiltersResp)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalDeleteFiltersRespResultItem)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func unmount(target string) error {\n\tif mounted, err := mounted(target); err != nil || !mounted {\n\t\treturn err\n\t}\n\treturn forceUnmount(target)\n}", "func (d *driverInfo) Unmount(volume *Volume) error {\n\tif err := volume.CheckMounted(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.storage.Unmount(volume); err != nil {\n\t\treturn err\n\t}\n\n\tif err := fs.RemoveDir(volume.MountPath, true); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": volume.Name,\n\t\t\t\"mountPath\": volume.MountPath,\n\t\t\t\"error\": err,\n\t\t}).Warning(\"error removing mount 
path\")\n\t}\n\n\tvolume.MountPath = \"\"\n\treturn nil\n}", "func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {\n\tv := reflect.Indirect(reflect.ValueOf(data))\n\treturn unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)\n}", "func (t *AlterReplicaLogDirsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ThrottleTimeMs, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Results\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Results = make([]AlterReplicaLogDirTopicResult34, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item AlterReplicaLogDirTopicResult34\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Results[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func UnmarshalResponse(response *http.Response, v interface{}) error {\n\n\t// get the body as []byte\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read the body: %s\", err.Error())\n\t}\n\n\t// try to Unmarshal to struct\n\terr = json.Unmarshal(body, v)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not unmarshal: %s\", err.Error())\n\t}\n\n\treturn nil\n}", "func UnmarshalDeleteMonitorResp(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(DeleteMonitorResp)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalDeleteMonitorRespResult)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (o *GetStoragePureVolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetStoragePureVolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetStoragePureVolumesDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}" ]
[ "0.74237365", "0.7037084", "0.6924371", "0.6280317", "0.60890293", "0.6087793", "0.6059698", "0.588862", "0.57926816", "0.5747039", "0.57276744", "0.5697792", "0.55828136", "0.5547294", "0.5472159", "0.5465805", "0.52910244", "0.5277258", "0.5266905", "0.5248221", "0.5192354", "0.51801866", "0.5154929", "0.51437986", "0.5140662", "0.5133698", "0.5129881", "0.51124674", "0.509713", "0.5094935", "0.50778025", "0.50495404", "0.5033391", "0.500813", "0.50065875", "0.4981908", "0.49795422", "0.49549764", "0.49527156", "0.49402767", "0.49345237", "0.48783028", "0.4870201", "0.48594233", "0.484186", "0.48360276", "0.48352697", "0.48319992", "0.48221305", "0.4789633", "0.47891843", "0.47886467", "0.47866133", "0.4774723", "0.47689512", "0.47655872", "0.47273564", "0.47137785", "0.47121593", "0.47031128", "0.4699354", "0.4693335", "0.46813038", "0.4678458", "0.46776035", "0.46697435", "0.46657804", "0.4662587", "0.4659919", "0.46516946", "0.46441767", "0.46328726", "0.4628609", "0.46204162", "0.46070758", "0.46031126", "0.46001256", "0.45879036", "0.45858186", "0.45842764", "0.45833886", "0.45823988", "0.45805943", "0.45737585", "0.45737016", "0.45704296", "0.45699644", "0.4566822", "0.4553071", "0.454646", "0.45432824", "0.45308554", "0.45300138", "0.4529558", "0.4523297", "0.4521269", "0.4521197", "0.45190167", "0.45132592", "0.45088542" ]
0.8930982
0
Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse is an autogenerated conversion function.
func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error { return autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in 
*internal.MountVolumeResponse, out *v1beta1.MountVolumeResponse) error {\n\treturn autoConvert_internal_MountVolumeResponse_To_v1beta1_MountVolumeResponse(in, out)\n}", "func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. 
Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. 
Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}", "func Convert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in *internal.DismountVolumeResponse, out *v1beta1.DismountVolumeResponse) error {\n\treturn autoConvert_internal_DismountVolumeResponse_To_v1beta1_DismountVolumeResponse(in, out)\n}", "func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\tlog.WithFields(log.Fields{\"name\": r.Name}).Info(\"Unmounting Volume \")\n\n\t// lock the state\n\td.refCounts.StateMtx.Lock()\n\tdefer d.refCounts.StateMtx.Unlock()\n\n\tif d.refCounts.IsInitialized() != true {\n\t\t// if refcounting hasn't been succesful,\n\t\t// no refcounting, no unmount. 
All unmounts are delayed\n\t\t// until we succesfully populate the refcount map\n\t\td.refCounts.MarkDirty()\n\t\treturn volume.Response{Err: \"\"}\n\t}\n\n\tlog.Errorf(\"VolumeDriver Unmount to be implemented\")\n\treturn volume.Response{Err: \"\"}\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (d *MinioDriver) Unmount(r volume.UnmountRequest) volume.Response {\n\td.m.Lock()\n\tdefer d.m.Unlock()\n\n\tglog.V(1).Infof(\"Unmount request is: %#v\", r)\n\n\tv, exists := d.volumes[r.Name]\n\tif !exists {\n\t\treturn volumeResp(\"\", \"\", nil, capability, newErrVolNotFound(r.Name).Error())\n\t}\n\n\tif v.connections <= 1 {\n\t\tif err := d.unmountVolume(v); err != nil {\n\t\t\tglog.Warningf(\"Unmounting %s volume failed with: %s\", v, err)\n\t\t\treturn volumeResp(\"\", \"\", nil, capability, err.Error())\n\t\t}\n\t\tv.connections = 0\n\t\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n\t}\n\tv.connections--\n\treturn volumeResp(\"\", \"\", nil, capability, \"\")\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}", "func (client *Client) UnmapVolume(name, host string) (*Response, *ResponseStatus, error) {\n\tif host == \"\" {\n\t\treturn 
client.FormattedRequest(\"/unmap/volume/\\\"%s\\\"\", name)\n\t}\n\n\treturn client.FormattedRequest(\"/unmap/volume/host/\\\"%s\\\"/\\\"%s\\\"\", host, name)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func Convert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in *v1beta1.MountVolumeResponse, out *internal.MountVolumeResponse) error {\n\treturn autoConvert_v1beta1_MountVolumeResponse_To_internal_MountVolumeResponse(in, out)\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in *impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func (o *RemoveVolumeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewRemoveVolumeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewRemoveVolumeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 409:\n\t\tresult := NewRemoveVolumeConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewRemoveVolumeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (c *Controller) UnmountDevice(unmountDeviceRequest k8sresources.FlexVolumeUnmountDeviceRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-UnmountDevice-start\")\n\tdefer c.logger.Println(\"controller-UnmountDevice-end\")\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM 
logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}", "func CreateUnAssignPrivateIpAddressResponse() (response *UnAssignPrivateIpAddressResponse) {\n\tresponse = &UnAssignPrivateIpAddressResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDropPartitionResponse() (response *DropPartitionResponse) {\n\tresponse = &DropPartitionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *CryptohomeBinary) Unmount(ctx context.Context, username string) ([]byte, error) {\n\treturn c.call(ctx, \"--action=unmount\", \"--user=\"+username)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn 
autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func (o *DeleteStorageV1VolumeAttachmentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewDeleteStorageV1VolumeAttachmentUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (d *DirDriver) Unmount(req *volume.UnmountRequest) error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tlogrus.Infof(\"Hit Unmount() endpoint\")\n\n\tvol, exists := d.volumes[req.Name]\n\tif !exists {\n\t\tlogrus.Debugf(\"Cannot locate volume %s\", req.Name)\n\t\treturn fmt.Errorf(\"no volume with name %s found\", req.Name)\n\t}\n\n\tmount := vol.mounts[req.ID]\n\tif !mount {\n\t\tlogrus.Debugf(\"Volume %s is not mounted by %s\", req.Name, req.ID)\n\t\treturn fmt.Errorf(\"volume %s is not mounted by %s\", req.Name, req.ID)\n\t}\n\n\tdelete(vol.mounts, req.ID)\n\n\treturn nil\n}", "func Unmount(target string) error {\n\tlogrus.Infof(\"Unmount %s\", target)\n\terr := os.Remove(target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (driver *Driver) Unmount(volumeName, volumeID string) error {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\n\terr = driver.osdm.Unmount(mounts[0].Mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.sdm.DetachVolume(false, volumes[0].VolumeID, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func ParseUnlockDomainTransferResponse(rsp *http.Response) (*UnlockDomainTransferResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &UnlockDomainTransferResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch 
{\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest ScalewayDomainV2alpha2Domain\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func (t *ListOffsetPartitionResponse2) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.PartitionIndex, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version >= 0 && version <= 0 {\n\t\tt.OldStyleOffsets, err = d.Int64Array()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif version >= 1 {\n\t\tt.Timestamp, err = d.Int64()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif version >= 1 {\n\t\tt.Offset, err = d.Int64()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif version >= 4 {\n\t\tt.LeaderEpoch, err = d.Int32()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func Unmount(d Driver, vName string) error {\n\tlog.Debugf(\"Entering Unmount: name: %s\", vName)\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tv, m, err := getVolumeMount(d, vName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.GetConnections() <= 1 {\n\t\tcmd := fmt.Sprintf(\"/usr/bin/umount %s\", m.GetPath())\n\t\tif err := d.RunCmd(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tSetN(0, m, v)\n\t} else {\n\t\tAddN(-1, m, v)\n\t}\n\n\treturn d.SaveConfig()\n}", "func Convert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in *internal.ListVolumesOnDiskResponse, out *v1beta1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_internal_ListVolumesOnDiskResponse_To_v1beta1_ListVolumesOnDiskResponse(in, out)\n}", "func 
DecodeStorageVolumesCreateResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_create\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func Convert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in *v1beta1.VolumeIDFromMountResponse, out *internal.VolumeIDFromMountResponse) error {\n\treturn autoConvert_v1beta1_VolumeIDFromMountResponse_To_internal_VolumeIDFromMountResponse(in, out)\n}", "func (c *Client) Unmount(export string) error {\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t}\n\tretcode, stdout, stderr, err := executeScript(*c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\treturn handleExecuteScriptReturn(retcode, stdout, stderr, err, \"Error executing script to unmount remote NFS share\")\n}", "func Convert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in *impl.GetVolumeIDFromTargetPathResponse, out *v2alpha1.GetVolumeIDFromTargetPathResponse) error {\n\treturn autoConvert_impl_GetVolumeIDFromTargetPathResponse_To_v2alpha1_GetVolumeIDFromTargetPathResponse(in, out)\n}", "func (c *Client) Unmount(ctx context.Context, svc iaas.Service, export string) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\"Export\": export}\n\tstdout, xerr := 
executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to unmount remote NFS share\")\n\t}\n\treturn nil\n}", "func UnmarshalCreateEnterpriseResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(CreateEnterpriseResponse)\n\terr = core.UnmarshalPrimitive(m, \"enterprise_id\", &obj.EnterpriseID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"enterprise_account_id\", &obj.EnterpriseAccountID)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (d *Driver) Unmount(mountDir string) {\n\tDebug(\"findmnt: \" + mountDir)\n\t_, err := RunCommand(\"findmnt\", \"-n\", \"-o\", \"SOURCE\", \"--target\", mountDir)\n\tif err != nil {\n\t\tDebug(err.Error())\n\t}\n\n\tDebug(\"syscall.Unmount: \" + mountDir)\n\tif err := syscall.Unmount(mountDir, 0); err != nil {\n\t\tFailure(err)\n\t}\n\n\tDebug(\"Detach hetzner volume from server\")\n\tvolume := GetVolume(d.client, d.options.PVOrVolumeName)\n\t_, _, errDetach := d.client.Volume.Detach(context.Background(), volume)\n\n\tif errDetach != nil {\n\t\tFailure(errDetach)\n\t}\n\n\t// Delete json file with token in it\n\t//Debug(\"os.Remove\")\n\t//if err := os.Remove(jsonOptionsFile); err != nil {\n\t//\tfailure(err)\n\t//}\n\n\tSuccess()\n}", "func DecodeStorageVolumesDeleteResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch 
resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_delete\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (o *Filesystem) Unmount(ctx context.Context, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Unmount\", 0, options).Store()\n\treturn\n}", "func DecodeUnsealResponse(_ context.Context, grpcReply interface{}) (interface{}, error) {\n\treply := grpcReply.(*pb.UnsealResponse)\n\tstatus := endpoints.UnsealResponse{\n\t\tSealed: reply.SealStatus.Sealed,\n\t\tT: int(reply.SealStatus.T),\n\t\tN: int(reply.SealStatus.N),\n\t\tProgress: int(reply.SealStatus.Progress),\n\t\tVersion: reply.SealStatus.Version,\n\t\tClusterName: reply.SealStatus.ClusterName,\n\t\tClusterID: reply.SealStatus.ClusterId,\n\t\tErr: service.String2Error(reply.Err),\n\t}\n\n\treturn status, nil\n}", "func (v *Volume) unmount(force bool) error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.state.MountCount == 0 {\n\t\tlogrus.Debugf(\"Volume %s already unmounted\", v.Name())\n\t\treturn nil\n\t}\n\n\tif !force {\n\t\tv.state.MountCount--\n\t} else {\n\t\tv.state.MountCount = 0\n\t}\n\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\n\tif v.state.MountCount == 0 {\n\t\tif v.UsesVolumeDriver() {\n\t\t\tif v.plugin == nil {\n\t\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t\t}\n\n\t\t\treq := new(pluginapi.UnmountRequest)\n\t\t\treq.Name = v.Name()\n\t\t\treq.ID = pseudoCtrID\n\t\t\tif err := v.plugin.UnmountVolume(req); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn 
v.save()\n\t\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\t\tif _, err := v.runtime.storageService.UnmountContainerImage(v.config.StorageID, force); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmounting volume %s image: %w\", v.Name(), err)\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t}\n\n\t\t// Unmount the volume\n\t\tif err := detachUnmount(v.config.MountPoint); err != nil {\n\t\t\tif err == unix.EINVAL {\n\t\t\t\t// Ignore EINVAL - the mount no longer exists.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unmounting volume %s: %w\", v.Name(), err)\n\t\t}\n\t\tlogrus.Debugf(\"Unmounted volume %s\", v.Name())\n\t}\n\n\treturn v.save()\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (t *LeaveGroupResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tif version >= 1 {\n\t\tt.ThrottleTimeMs, err = d.Int32()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif version >= 3 {\n\t\t// Members\n\t\tif n, err := d.ArrayLength(); err != nil {\n\t\t\treturn err\n\t\t} else if n >= 0 {\n\t\t\tt.Members = make([]MemberResponse13, n)\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tvar item MemberResponse13\n\t\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Members[i] = item\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {\n\ttargetPath := cs.getInternalMountPath(vol)\n\n\t// Unmount nfs server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tTargetPath: cs.getInternalMountPath(vol),\n\t})\n\treturn err\n}", "func 
(d *VolumeDriver) UnmountVolume(name string) error {\n\tlog.Errorf(\"VolumeDriver UnmountVolume to be implemented\")\n\treturn nil\n}", "func (m MultiVersionResponse) DecodeResponse1() (resp MessagesResponse, err error) {\n\treturn resp, rlp.DecodeBytes(m.Response, &resp)\n}", "func (t *ControlledShutdownResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// RemainingPartitions\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.RemainingPartitions = make([]RemainingPartition7, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item RemainingPartition7\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.RemainingPartitions[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {\n\tif req == nil {\n\t\treturn fmt.Errorf(\"must provide non-nil request to UnmountVolume: %w\", define.ErrInvalidArg)\n\t}\n\n\tif err := p.verifyReachable(); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Unmounting volume %s using plugin %s for container %s\", req.Name, p.Name, req.ID)\n\n\tresp, err := p.sendRequest(req, unmountPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn p.handleErrorResponse(resp, unmountPath, req.Name)\n}", "func Convert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in *v1beta1.DismountVolumeResponse, out *internal.DismountVolumeResponse) error {\n\treturn autoConvert_v1beta1_DismountVolumeResponse_To_internal_DismountVolumeResponse(in, out)\n}", "func (c *client) Unmount(\n\tctx types.Context,\n\tmountPoint string,\n\topts types.Store) error {\n\n\tif c.isController() {\n\t\treturn 
utils.NewUnsupportedForClientTypeError(\n\t\t\tc.clientType, \"Unmount\")\n\t}\n\n\tif lsxSO, _ := c.Supported(ctx, opts); !lsxSO.Umount() {\n\t\treturn errExecutorNotSupported\n\t}\n\n\tctx = context.RequireTX(ctx.Join(c.ctx))\n\n\tserviceName, ok := context.ServiceName(ctx)\n\tif !ok {\n\t\treturn goof.New(\"missing service name\")\n\t}\n\n\tsi, err := c.getServiceInfo(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverName := si.Driver.Name\n\n\tif _, err = c.runExecutor(\n\t\tctx,\n\t\tdriverName,\n\t\ttypes.LSXCmdUmount,\n\t\tmountPoint); err != nil {\n\t\treturn err\n\t}\n\n\tctx.Debug(\"xli umount success\")\n\treturn nil\n}", "func (t *StopReplicaResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ErrorCode, err = d.Int16()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// PartitionErrors\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.PartitionErrors = make([]StopReplicaPartitionError5, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item StopReplicaPartitionError5\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.PartitionErrors[i] = item\n\t\t}\n\t}\n\treturn err\n}", "func UnmarshalResponse(data []byte) (Response, error) {\n\tvar r Response\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}", "func Unmount(dest string) error {\n\treturn nil\n}", "func Unmount(out io.Writer, logger log.FieldLogger) (err error) {\n\tdisk, err := queryPhysicalVolume(logger)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif disk == \"\" {\n\t\tlogger.Info(\"No physical volumes found.\")\n\t\treturn nil\n\t}\n\tlogger.Infof(\"Found physical volume on disk %v.\", disk)\n\tconfig := &config{\n\t\tFieldLogger: logger,\n\t\tdisk: disk,\n\t\tout: out,\n\t}\n\tif err = config.removeLingeringDevices(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeLogicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = 
config.removeVolumeGroup(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removePhysicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func CreateListAvailableFileSystemTypesResponse() (response *ListAvailableFileSystemTypesResponse) {\n\tresponse = &ListAvailableFileSystemTypesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (d *MinioDriver) unmountVolume(volume *minioVolume) error {\n\treturn exec.Command(\"umount\", volume.mountpoint).Run()\n}", "func unpackResponse(ackMessage []byte) *messages.Response {\n\tbuf := bytes.NewBuffer(ackMessage)\n\tcapMsg, err := capn.ReadFromStream(buf, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error unpacking message - %s\\n\", err.Error())\n\t}\n\ts := messages.ReadRootResponse(capMsg)\n\treturn &s\n}", "func (d ImagefsDriver) Unmount(r *volume.UnmountRequest) error {\n\tfmt.Printf(\"-> Unmount %+v\\n\", r)\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\ttimeout := time.Second * 5\n\terr = d.cli.ContainerStop(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\t&timeout,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tfmt.Printf(\"<- OK\\n\")\n\treturn nil\n}", "func (c *Controller) Detach(detachRequest k8sresources.FlexVolumeDetachRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"controller-detach-start\")\n\tdefer 
c.logger.Println(\"controller-detach-end\")\n\tif detachRequest.Version == k8sresources.KubernetesVersion_1_5 {\n\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\tStatus: \"Success\",\n\t\t}\n\t}\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Not supported\",\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetStoragePureVolumesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetStoragePureVolumesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetStoragePureVolumesDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func DecodeStorageVolumesListResponse(decoder func(*http.Response) goahttp.Decoder, restoreBody bool) func(*http.Response) (interface{}, error) {\n\treturn func(resp *http.Response) (interface{}, error) {\n\t\tif restoreBody {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\tdefer func() {\n\t\t\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(b))\n\t\t\t}()\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\t\t}\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tvar (\n\t\t\t\tbody map[string]string\n\t\t\t\terr error\n\t\t\t)\n\t\t\terr = decoder(resp).Decode(&body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
goahttp.ErrDecodingError(\"spin-registry\", \"storage_volumes_list\", err)\n\t\t\t}\n\t\t\treturn body, nil\n\t\tdefault:\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\treturn nil, goahttp.ErrInvalidResponse(\"spin-registry\", \"storage_volumes_list\", resp.StatusCode, string(body))\n\t\t}\n\t}\n}", "func (c *restClient) DeleteVolume(ctx context.Context, req *netapppb.DeleteVolumeRequest, opts ...gax.CallOption) (*DeleteVolumeOperation, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetForce() {\n\t\tparams.Add(\"force\", fmt.Sprintf(\"%v\", req.GetForce()))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"DELETE\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn 
nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &DeleteVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func NewUnloadNode_Response() *UnloadNode_Response {\n\tself := UnloadNode_Response{}\n\tself.SetDefaults()\n\treturn &self\n}", "func (dfr DeleteFilesystemResponse) Response() *http.Response {\n\treturn dfr.rawResponse\n}", "func (o *GetVMVolumeBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(400)\n}", "func ParseGetDNSZoneVersionDiffResponse(rsp *http.Response) (*GetDNSZoneVersionDiffResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &GetDNSZoneVersionDiffResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest ScalewayDomainV2alpha2GetDNSZoneVersionDiffResponse\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (c *restClient) RevertVolume(ctx context.Context, req *netapppb.RevertVolumeRequest, opts ...gax.CallOption) (*RevertVolumeOperation, error) {\n\tm := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}\n\tjsonReq, err := m.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v:revert\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds 
:= []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &longrunningpb.Operation{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"POST\", baseUrl.String(), bytes.NewReader(jsonReq))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\toverride := fmt.Sprintf(\"/v1/%s\", resp.GetName())\n\treturn &RevertVolumeOperation{\n\t\tlro: longrunning.InternalNewOperation(*c.LROClient, resp),\n\t\tpollPath: override,\n\t}, nil\n}", "func (c *Client) UnmarshalResponse(response *http.Response, resType interface{}) error {\n\t// Read all the response body\n\tdefer response.Body.Close()\n\tbody, err := io.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// < 200 && >= 300 : API error\n\tif response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusMultipleChoices {\n\t\tapiError := &APIError{\n\t\t\tCode: fmt.Sprintf(\"HTTPStatus: %d\", response.StatusCode),\n\t\t}\n\n\t\tif err = json.Unmarshal(body, apiError); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn apiError\n\t}\n\n\t// Nothing to 
unmarshal\n\tif len(body) == 0 || resType == nil {\n\t\treturn nil\n\t}\n\n\treturn json.Unmarshal(body, &resType)\n}", "func UnmarshalEnvVariableResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(EnvVariableResponse)\n\terr = core.UnmarshalPrimitive(m, \"hidden\", &obj.Hidden)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"secure\", &obj.Secure)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"value\", &obj.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (d DobsClient) DetachVolume(ctx Context, volumeID string, dropletID string) (error) {\n\tdropletIDI, err := strconv.Atoi(dropletID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taction, _, err := d.GodoClient.StorageActions.DetachByDropletID(ctx, volumeID, dropletIDI)\t\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = d.waitForAction(ctx, volumeID, action)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Convert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in *internal.ResizeVolumeResponse, out *v1beta1.ResizeVolumeResponse) error {\n\treturn autoConvert_internal_ResizeVolumeResponse_To_v1beta1_ResizeVolumeResponse(in, out)\n}", "func UnmarshalVersionResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(VersionResponse)\n\terr = core.UnmarshalPrimitive(m, \"builddate\", &obj.Builddate)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"buildno\", &obj.Buildno)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"commitsha\", &obj.Commitsha)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"helm_provider_version\", &obj.HelmProviderVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"helm_version\", 
&obj.HelmVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"supported_template_types\", &obj.SupportedTemplateTypes)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_provider_version\", &obj.TerraformProviderVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"terraform_version\", &obj.TerraformVersion)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func GetLunsFromVolumeNameV2(name string) (LunV2, error) {\n\tquery := \"/api/datacenter/storage/luns?volume.name=\" + name\n\treturn getLunsInfoV2(query)\n}", "func (c *IloClient) UnMountImageDell() (string, error) {\n\turl := c.Hostname + \"/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.EjectMedia\"\n\tpayload := \"{}\"\n\t_, _, _, err := queryData(c, \"POST\", url, []byte(payload))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"Image Unmounted\", nil\n}", "func Unmount(h hostRunner, target string) error {\n\tout, err := h.RunSSHCommand(fmt.Sprintf(\"findmnt -T %s && sudo umount %s || true\", target, target))\n\tif err != nil {\n\t\treturn errors.Wrap(err, out)\n\t}\n\treturn nil\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func (m *Response) Unmarshal(v *Loader) error {\n\treturn m.Output.Unmarshal(v)\n}", "func (m *IndexSpaceResponse) UnmarshalJSON(b []byte) error {\n\treturn IndexSpaceResponseJSONUnmarshaler.Unmarshal(bytes.NewReader(b), m)\n}", "func (mounter *csiProxyMounterV1Beta) 
Unmount(target string) error {\n\tklog.V(4).Infof(\"Unmount: %s\", target)\n\treturn mounter.Rmdir(target)\n}", "func (d *lvm) MountVolume(vol Volume, op *operations.Operation) error {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t// Activate LVM volume if needed.\n\tactivated, err := d.activateVolume(vol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif activated {\n\t\trevert.Add(func() { _, _ = d.deactivateVolume(vol) })\n\t}\n\n\tif vol.contentType == ContentTypeFS {\n\t\t// Check if already mounted.\n\t\tmountPath := vol.MountPath()\n\t\tif !filesystem.IsMountPoint(mountPath) {\n\t\t\tfsType := vol.ConfigBlockFilesystem()\n\t\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\n\t\t\tif vol.mountFilesystemProbe {\n\t\t\t\tfsType, err = fsProbe(volDevPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed probing filesystem: %w\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = vol.EnsureMountPath()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmountFlags, mountOptions := filesystem.ResolveMountOptions(strings.Split(vol.ConfigBlockMountOptions(), \",\"))\n\t\t\terr = TryMount(volDevPath, mountPath, fsType, mountFlags, mountOptions)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to mount LVM logical volume: %w\", err)\n\t\t\t}\n\n\t\t\td.logger.Debug(\"Mounted logical volume\", logger.Ctx{\"volName\": vol.name, \"dev\": volDevPath, \"path\": mountPath, \"options\": mountOptions})\n\t\t}\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, mount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\terr = d.MountVolume(fsVol, op)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvol.MountRefCountIncrement() // From here on it is up to caller to call UnmountVolume() when done.\n\trevert.Success()\n\treturn nil\n}", "func (m *RemoveDocV1Response) 
Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Found\n\n\treturn nil\n}", "func CreateRemoveAppGroupResponse() (response *RemoveAppGroupResponse) {\n\tresponse = &RemoveAppGroupResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (t *DescribeLogDirsResponse) Decode(d *Decoder, version int16) error {\n\tvar err error\n\tt.ThrottleTimeMs, err = d.Int32()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Results\n\tif n, err := d.ArrayLength(); err != nil {\n\t\treturn err\n\t} else if n >= 0 {\n\t\tt.Results = make([]DescribeLogDirsResult35, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tvar item DescribeLogDirsResult35\n\t\t\tif err := (&item).Decode(d, version); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Results[i] = item\n\t\t}\n\t}\n\treturn err\n}" ]
[ "0.76744556", "0.6982524", "0.6688593", "0.65940976", "0.6138354", "0.60918856", "0.60380083", "0.5727786", "0.57149637", "0.5697249", "0.5672207", "0.5536542", "0.5524165", "0.546702", "0.54152715", "0.53063315", "0.53048056", "0.530014", "0.5275261", "0.5078158", "0.50516564", "0.50474024", "0.49951884", "0.49734005", "0.49545497", "0.49394467", "0.4896237", "0.4885684", "0.48834118", "0.48489138", "0.483906", "0.48371875", "0.48117566", "0.4785605", "0.47770903", "0.4749523", "0.47484988", "0.473604", "0.47186956", "0.47088718", "0.46996224", "0.46870902", "0.46783912", "0.46714675", "0.4665907", "0.46500307", "0.4645414", "0.4645221", "0.4640182", "0.46398345", "0.46390942", "0.46088552", "0.45930472", "0.458325", "0.4582815", "0.45776233", "0.45752114", "0.45571658", "0.45560902", "0.4554286", "0.45540795", "0.4536627", "0.45337", "0.4521608", "0.4521502", "0.45168403", "0.45110983", "0.45043546", "0.44931012", "0.44840717", "0.44801334", "0.4471324", "0.44704607", "0.44682038", "0.44660595", "0.4463242", "0.4462911", "0.44622353", "0.44562292", "0.44474673", "0.4438094", "0.44240648", "0.44105822", "0.440083", "0.43982682", "0.43955752", "0.43869203", "0.43860045", "0.43845528", "0.43791327", "0.43781218", "0.4377102", "0.43649462", "0.43631303", "0.43603224", "0.43548173", "0.43538606", "0.4349238", "0.4345237", "0.43428478" ]
0.8945839
0
Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest is an autogenerated conversion function.
func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error { return autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in *v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {\n\t// If runtime os is windows, execute Write-VolumeCache powershell command on the disk\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd := fmt.Sprintf(\"Get-Volume -FilePath %s | Write-Volumecache\", deviceMountPath)\n\t\toutput, err := exec.Command(\"powershell\", \"/c\", cmd).CombinedOutput()\n\t\tklog.Infof(\"command (%q) execeuted: %v, output: %q\", cmd, err, string(output))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"command (%q) failed: %v, output: %q\", cmd, err, string(output))\n\t\t}\n\t}\n\t// For linux runtime, it skips because unmount will automatically flush disk data\n\treturn nil\n}", "func 
Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func WriteCacheUpdate(ctx context.Context, dbConn *db.DB, contractHash *bitcoin.Hash20,\r\n\tassetCode *bitcoin.Hash20, addressHash *bitcoin.Hash20) error {\r\n\r\n\tcacheLock.Lock()\r\n\tdefer cacheLock.Unlock()\r\n\r\n\tif cache == nil {\r\n\t\tcache = make(map[bitcoin.Hash20]*map[bitcoin.Hash20]*map[bitcoin.Hash20]*cacheUpdate)\r\n\t}\r\n\tcontract, exists := cache[*contractHash]\r\n\tif !exists {\r\n\t\tnc := make(map[bitcoin.Hash20]*map[bitcoin.Hash20]*cacheUpdate)\r\n\t\tcache[*contractHash] = &nc\r\n\t\tcontract = &nc\r\n\t}\r\n\tasset, exists := (*contract)[*assetCode]\r\n\tif !exists {\r\n\t\tna := make(map[bitcoin.Hash20]*cacheUpdate)\r\n\t\t(*contract)[*assetCode] = &na\r\n\t\tasset = &na\r\n\t}\r\n\tcu, exists := (*asset)[*addressHash]\r\n\tif !exists {\r\n\t\treturn 
ErrNotInCache\r\n\t}\r\n\r\n\tcu.lock.Lock()\r\n\tdefer cu.lock.Unlock()\r\n\r\n\tif !cu.modified {\r\n\t\treturn nil\r\n\t}\r\n\r\n\tif err := write(ctx, dbConn, contractHash, assetCode, addressHash, cu.h); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\tcu.modified = false\r\n\treturn nil\r\n}", "func (o *GetFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Cidr != nil {\n\n\t\t// query param cidr\n\t\tvar qrCidr string\n\n\t\tif o.Cidr != nil {\n\t\t\tqrCidr = *o.Cidr\n\t\t}\n\t\tqCidr := qrCidr\n\t\tif qCidr != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"cidr\", qCidr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Matchpattern != nil {\n\n\t\t// query param matchpattern\n\t\tvar qrMatchpattern string\n\n\t\tif o.Matchpattern != nil {\n\t\t\tqrMatchpattern = *o.Matchpattern\n\t\t}\n\t\tqMatchpattern := qrMatchpattern\n\t\tif qMatchpattern != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"matchpattern\", qMatchpattern); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Source != nil {\n\n\t\t// query param source\n\t\tvar qrSource string\n\n\t\tif o.Source != nil {\n\t\t\tqrSource = *o.Source\n\t\t}\n\t\tqSource := qrSource\n\t\tif qSource != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"source\", qSource); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (d *dataUsageCache) serializeTo(dst io.Writer) error {\n\t// Add version and compress.\n\t_, err := dst.Write([]byte{dataUsageCacheVerCurrent})\n\tif err != nil {\n\t\treturn err\n\t}\n\tenc, err := zstd.NewWriter(dst,\n\t\tzstd.WithEncoderLevel(zstd.SpeedFastest),\n\t\tzstd.WithWindowSize(1<<20),\n\t\tzstd.WithEncoderConcurrency(2))\n\tif err != nil {\n\t\treturn err\n\t}\n\tmEnc := msgp.NewWriter(enc)\n\terr = d.EncodeMsg(mEnc)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\terr = mEnc.Flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = enc.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (o *NetworkPruneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filters != nil {\n\n\t\t// query param filters\n\t\tvar qrFilters string\n\t\tif o.Filters != nil {\n\t\t\tqrFilters = *o.Filters\n\t\t}\n\t\tqFilters := qrFilters\n\t\tif qFilters != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filters\", qFilters); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cache *Cache) Write () {\n err := Config.Check()\n if err != nil { return }\n \n f, err := os.Create(cache.path)\n if err == nil {\n defer f.Close()\n\n if Opt.Verbose { log.Println(\"Writing new cache file\") }\n\n enc := gob.NewEncoder(f)\n err = enc.Encode(cache)\n }\n \n if err != nil {\n qMain.showError(\"Write cache\", err)\n }\n}", "func (o *V2UpdateClusterManifestParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.UpdateManifestParams != nil {\n\t\tif err := r.SetBodyParam(o.UpdateManifestParams); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param cluster_id\n\tif err := r.SetPathParam(\"cluster_id\", o.ClusterID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o 
*ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateChannelSpacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ChannelResource != nil {\n\t\tif err := r.SetBodyParam(o.ChannelResource); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param baseSpaceId\n\tif err := r.SetPathParam(\"baseSpaceId\", o.BaseSpaceID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in *v2alpha1.RmdirRequest, out *impl.RmdirRequest) error {\n\treturn autoConvert_v2alpha1_RmdirRequest_To_impl_RmdirRequest(in, out)\n}", "func (o *GetPrivateOrderstateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.OrderID != nil {\n\n\t\t// query param orderId\n\t\tvar qrOrderID float64\n\t\tif o.OrderID != nil {\n\t\t\tqrOrderID = *o.OrderID\n\t\t}\n\t\tqOrderID := swag.FormatFloat64(qrOrderID)\n\t\tif qOrderID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"orderId\", qOrderID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateNetworkHTTPServerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res 
[]error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.UpdateNetworkHTTPServer != nil {\n\t\tif err := r.SetBodyParam(o.UpdateNetworkHTTPServer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateUserIssueSearchOptionsOfProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param parentId\n\tif err := r.SetPathParam(\"parentId\", swag.FormatInt64(o.ParentID)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Resource != nil {\n\t\tif err := r.SetBodyParam(o.Resource); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetLogicalPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param lport-id\n\tif err := r.SetPathParam(\"lport-id\", o.LportID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (se *shellExecutor) writeCache() {\n\tif !se.inCmd.HasCache() {\n\t\treturn\n\t}\n\n\tdefer util.RecoverPanic(func(e error) {\n\t\tutil.LogWarn(e.Error())\n\t})\n\n\tdir, err := ioutil.TempDir(\"\", \"_cache_output_\")\n\tif err != nil {\n\t\tutil.LogWarn(err.Error())\n\t\treturn\n\t}\n\n\tse.cacheOutputDir = dir\n\tcache := se.inCmd.Cache\n\n\tfor _, path := range cache.Paths {\n\t\tpath = filepath.Clean(path)\n\t\tfullPath := filepath.Join(se.jobDir, path)\n\n\t\tinfo, exist := util.IsFileExistsAndReturnFileInfo(fullPath)\n\t\tif 
!exist {\n\t\t\tcontinue\n\t\t}\n\n\t\tnewPath := filepath.Join(dir, path)\n\n\t\tif info.IsDir() {\n\t\t\terr := util.CopyDir(fullPath, newPath)\n\t\t\tutil.PanicIfErr(err)\n\n\t\t\tutil.LogDebug(\"dir %s write back to cache dir\", newPath)\n\t\t\tcontinue\n\t\t}\n\n\t\terr := util.CopyFile(fullPath, newPath)\n\t\tutil.PanicIfErr(err)\n\t\tutil.LogDebug(\"file %s write back to cache dir\", newPath)\n\t}\n}", "func (o *GetClusterTemplateByNameInWorkspaceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ContainerUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.Update); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutClusterForAutoscaleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param crn\n\tif err := r.SetPathParam(\"crn\", o.Crn); err != nil {\n\t\treturn err\n\t}\n\n\t// path param userId\n\tif err := r.SetPathParam(\"userId\", o.UserID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func cacheWriter(cache map[float64]string, in <-chan result, readWG sync.WaitGroup, mutex *sync.Mutex) {\n\tfor r := range in {\n\t\treadWG.Wait()\n\t\tmutex.Lock()\n\n\t\tcache[r.zoom] = r.output\n\n\t\tmutex.Unlock()\n\t}\n}", "func (o *GetVrackServiceNameDedicatedCloudDedicatedCloudParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param dedicatedCloud\n\tif err := r.SetPathParam(\"dedicatedCloud\", o.DedicatedCloud); err != nil {\n\t\treturn err\n\t}\n\n\t// path param serviceName\n\tif err := r.SetPathParam(\"serviceName\", o.ServiceName); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (g *Goproxy) putCache(\n\tctx context.Context,\n\tname string,\n\tcontent io.ReadSeeker,\n) error {\n\tif g.Cacher == nil {\n\t\treturn nil\n\t}\n\n\tif g.CacherMaxCacheBytes != 0 {\n\t\tif size, err := content.Seek(0, io.SeekEnd); err != nil {\n\t\t\treturn err\n\t\t} else if size > int64(g.CacherMaxCacheBytes) {\n\t\t\treturn nil\n\t\t} else if _, err := content.Seek(0, io.SeekStart); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn g.Cacher.Put(ctx, name, content)\n}", "func (o *GetClockParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Killbill-ApiKey\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiKey\", o.XKillbillAPIKey); err != nil {\n\t\treturn err\n\t}\n\n\t// header param X-Killbill-ApiSecret\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiSecret\", o.XKillbillAPISecret); err != nil {\n\t\treturn err\n\t}\n\n\t// header param WithProfilingInfo\n\tif o.WithProfilingInfo != nil && len(*o.WithProfilingInfo) > 0 {\n\t\tif err := 
r.SetHeaderParam(\"X-Killbill-Profiling-Req\", *o.WithProfilingInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param withStackTrace\n\tif o.WithStackTrace != nil && *o.WithStackTrace {\n\t\tif err := r.SetQueryParam(\"withStackTrace\", \"true\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CloudNFSExportAddParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param param\n\tif err := r.SetPathParam(\"param\", o.Param); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostConditionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Data != nil {\n\t\tif err := r.SetBodyParam(o.Data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateBlueprintInWorkspaceInternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID string\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID\n\t\tif qAccountID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CacheServiceMetricsKeySizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error {\n\tstructuredWriter := (*httpRequestWriter)(httpRequest)\n\tbinaryWriter := (*httpRequestWriter)(httpRequest)\n\n\t_, err := binding.Write(\n\t\tctx,\n\t\tm,\n\t\tstructuredWriter,\n\t\tbinaryWriter,\n\t\ttransformers...,\n\t)\n\treturn err\n}", "func (m *MapDisk) NewWriteRequest(fname string) Request {\n\treturn Request{\n\t\treqType: reqWrite,\n\t\tinChan: m.inChan,\n\t\tfname: fname,\n\t\tresChan: make(chan reply),\n\t}\n}", "func (o *GetNetworkAppliancePortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param appliancePortId\n\tif err := r.SetPathParam(\"appliancePortId\", o.AppliancePortID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateVolumeBackupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res 
[]error\n\n\tif o.CreateVolumeBackupDetails == nil {\n\t\to.CreateVolumeBackupDetails = new(models.CreateVolumeBackupDetails)\n\t}\n\n\tif err := r.SetBodyParam(o.CreateVolumeBackupDetails); err != nil {\n\t\treturn err\n\t}\n\n\tif o.OpcRetryToken != nil {\n\n\t\t// header param opc-retry-token\n\t\tif err := r.SetHeaderParam(\"opc-retry-token\", *o.OpcRetryToken); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetUserIssueSearchOptionsOfProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param parentId\n\tif err := r.SetPathParam(\"parentId\", swag.FormatInt64(o.ParentID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PatchStorageVirtualDriveExtensionsMoidParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param moid\n\tif err := r.SetPathParam(\"moid\", o.Moid); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *SessionDataUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res 
[]error\n\n\tif o.SessionDataInput != nil {\n\t\tif err := r.SetBodyParam(o.SessionDataInput); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in *v1beta1.ResizeVolumeRequest, out *internal.ResizeVolumeRequest) error {\n\treturn autoConvert_v1beta1_ResizeVolumeRequest_To_internal_ResizeVolumeRequest(in, out)\n}", "func (o *SyncCmOnDatalakeClusterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ImagePruneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filters != nil {\n\n\t\t// query param filters\n\t\tvar qrFilters string\n\t\tif o.Filters != nil {\n\t\t\tqrFilters = *o.Filters\n\t\t}\n\t\tqFilters := qrFilters\n\t\tif qFilters != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filters\", qFilters); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in *v1beta1.FormatVolumeRequest, out *internal.FormatVolumeRequest) error {\n\treturn autoConvert_v1beta1_FormatVolumeRequest_To_internal_FormatVolumeRequest(in, out)\n}", "func (o *AddVMParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\tif o.Body == nil {\n\t\to.Body = new(models.VM)\n\t}\n\n\tif err := r.SetBodyParam(o.Body); err != nil 
{\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *MonitorUpdateMaintenancePeriodForMonitorParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.MaintenancePeriod != nil {\n\t\tif err := r.SetBodyParam(o.MaintenancePeriod); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param maintenancePeriodId\n\tif err := r.SetPathParam(\"maintenancePeriodId\", swag.FormatInt32(o.MaintenancePeriodID)); err != nil {\n\t\treturn err\n\t}\n\n\t// path param monitorGuid\n\tif err := r.SetPathParam(\"monitorGuid\", o.MonitorGUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetLogicalSwitchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param lswitch-id\n\tif err := r.SetPathParam(\"lswitch-id\", o.LswitchID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateFolderParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param folder_id\n\tif err := r.SetPathParam(\"folder_id\", o.FolderID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ShowPackageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// 
path param media_type\n\tif err := r.SetPathParam(\"media_type\", o.MediaType); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\t// path param package\n\tif err := r.SetPathParam(\"package\", o.Package); err != nil {\n\t\treturn err\n\t}\n\n\t// path param release\n\tif err := r.SetPathParam(\"release\", o.Release); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetComponentByNameParams) WriteToRequest(r client.Request, reg strfmt.Registry) error {\n\n\tvar res []error\n\n\t// path param component\n\tif err := r.SetPathParam(\"component\", o.Component); err != nil {\n\t\treturn err\n\t}\n\n\t// path param train\n\tif err := r.SetPathParam(\"train\", o.Train); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *VolumeDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Force != nil {\n\n\t\t// query param force\n\t\tvar qrForce bool\n\t\tif o.Force != nil {\n\t\t\tqrForce = *o.Force\n\t\t}\n\t\tqForce := swag.FormatBool(qrForce)\n\t\tif qForce != \"\" {\n\t\t\tif err := r.SetQueryParam(\"force\", qForce); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ListSourceFileOfProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields 
string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param parentId\n\tif err := r.SetPathParam(\"parentId\", swag.FormatInt64(o.ParentID)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetDeploymentPreview1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param environment\n\tif err := r.SetPathParam(\"environment\", o.Environment); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tenant\n\tif err := r.SetPathParam(\"tenant\", o.Tenant); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetPackageSearchActionOldSpacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param baseSpaceId\n\tif err := r.SetPathParam(\"baseSpaceId\", o.BaseSpaceID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *TestProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err 
:= r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ProjectVersionTestRequest != nil {\n\t\tif err := r.SetBodyParam(o.ProjectVersionTestRequest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_RmdirContentsRequest_To_impl_RmdirContentsRequest(in *v2alpha1.RmdirContentsRequest, out *impl.RmdirContentsRequest) error {\n\treturn autoConvert_v2alpha1_RmdirContentsRequest_To_impl_RmdirContentsRequest(in, out)\n}", "func (o *UpdateNetworkSwitchAccessControlListsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.UpdateNetworkSwitchAccessControlLists != nil {\n\t\tif err := r.SetBodyParam(o.UpdateNetworkSwitchAccessControlLists); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateWidgetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Accept != nil {\n\n\t\t// header param Accept\n\t\tif err := r.SetHeaderParam(\"Accept\", *o.Accept); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.ContentType != nil {\n\n\t\t// header param Content-Type\n\t\tif err := r.SetHeaderParam(\"Content-Type\", *o.ContentType); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.WidgetBody); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", 
"func (o *GetPackageSearchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ValidateUpdateSymfilePinningParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param applicationId\n\tif err := r.SetPathParam(\"applicationId\", o.ApplicationID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param os\n\tif err := r.SetPathParam(\"os\", o.Os); err != nil {\n\t\treturn err\n\t}\n\n\t// path param packageName\n\tif err := r.SetPathParam(\"packageName\", o.PackageName); err != nil {\n\t\treturn err\n\t}\n\n\t// path param versionCode\n\tif err := r.SetPathParam(\"versionCode\", o.VersionCode); err != nil {\n\t\treturn err\n\t}\n\n\t// path param versionName\n\tif err := r.SetPathParam(\"versionName\", o.VersionName); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *pbClientCodec) WriteRequest(r *rpc.Request, body interface{}) (err error) {\n\t// Use a mutex to guarantee the header/body are written in the correct order.\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// This is protobuf, of course we copy it.\n\tpbr := &Request{ServiceMethod: &r.ServiceMethod, Seq: &r.Seq}\n\tdata, err := proto.Marshal(pbr)\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = WriteNetString(c.rwc, data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Of course this is a protobuf! 
Trust me or detonate the program.\n\tdata, err = proto.Marshal(body.(proto.Message))\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = WriteNetString(c.rwc, data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif flusher, ok := c.rwc.(flusher); ok {\n\t\terr = flusher.Flush()\n\t}\n\treturn\n}", "func (o *CreateNetworkGroupPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CreateNetworkGroupPolicy != nil {\n\t\tif err := r.SetBodyParam(o.CreateNetworkGroupPolicy); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GroupV2AddOptionalConversationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param groupId\n\tif err := r.SetPathParam(\"groupId\", swag.FormatInt64(o.GroupID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutLolPerksV1CurrentpageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetDistroXStatusInternalV1ByCrnsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif 
len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s3p *S3Proxy) WriteToCache(key string, content string) error {\n\n\tcontentSize := len(content)\n\tdestname := fmt.Sprintf(\"s3://%s/%s\", s3p.bucketName, key)\n\tlog.Printf(\"INFO: uploading to %s (%d bytes)\", destname, contentSize)\n\n\tupParams := s3manager.UploadInput{\n\t\tBucket: &s3p.bucketName,\n\t\tKey: &key,\n\t\tBody: bytes.NewReader([]byte(content)),\n\t}\n\n\t// Perform an upload.\n\tstart := time.Now()\n\t_, err := s3p.uploader.Upload(&upParams)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: uploading to %s (%s)\", destname, err.Error())\n\t\treturn err\n\t}\n\n\tduration := time.Since(start)\n\tlog.Printf(\"INFO: upload of %s complete in %0.2f seconds\", destname, duration.Seconds())\n\n\treturn nil\n}", "func (o *GetContainersUUIDVolumesVolumeUUIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param volume_uuid\n\tif err := r.SetPathParam(\"volume_uuid\", o.VolumeUUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *DecryptParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Parameters); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostContextsAddPhpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param name\n\tqrName := o.Name\n\tqName := qrName\n\tif qName != \"\" {\n\n\t\tif err := 
r.SetQueryParam(\"name\", qName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Private != nil {\n\n\t\t// query param private\n\t\tvar qrPrivate int64\n\n\t\tif o.Private != nil {\n\t\t\tqrPrivate = *o.Private\n\t\t}\n\t\tqPrivate := swag.FormatInt64(qrPrivate)\n\t\tif qPrivate != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"private\", qPrivate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *MoveDirectoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param DirectoryPath\n\tif err := r.SetPathParam(\"DirectoryPath\", o.DirectoryPath); err != nil {\n\t\treturn err\n\t}\n\n\t// header param x-isi-ifs-set-location\n\tif err := r.SetHeaderParam(\"x-isi-ifs-set-location\", o.XIsiIfsSetLocation); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostIPLoadbalancingServiceNameHTTPFrontendParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.IPLBHTTPFrontendPost != nil {\n\t\tif err := r.SetBodyParam(o.IPLBHTTPFrontendPost); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param serviceName\n\tif err := r.SetPathParam(\"serviceName\", o.ServiceName); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutCwfNetworkIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CwfNetwork != nil {\n\t\tif err := r.SetBodyParam(o.CwfNetwork); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// 
path param network_id\n\tif err := r.SetPathParam(\"network_id\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *SetBuildQueuePositionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param queuePosition\n\tif err := r.SetPathParam(\"queuePosition\", o.QueuePosition); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetFileSystemParametersInternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID string\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID\n\t\tif qAccountID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.AccountName != nil {\n\n\t\t// query param accountName\n\t\tvar qrAccountName string\n\t\tif o.AccountName != nil {\n\t\t\tqrAccountName = *o.AccountName\n\t\t}\n\t\tqAccountName := qrAccountName\n\t\tif qAccountName != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountName\", qAccountName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.AttachedCluster != nil 
{\n\n\t\t// query param attachedCluster\n\t\tvar qrAttachedCluster bool\n\t\tif o.AttachedCluster != nil {\n\t\t\tqrAttachedCluster = *o.AttachedCluster\n\t\t}\n\t\tqAttachedCluster := swag.FormatBool(qrAttachedCluster)\n\t\tif qAttachedCluster != \"\" {\n\t\t\tif err := r.SetQueryParam(\"attachedCluster\", qAttachedCluster); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param blueprintName\n\tqrBlueprintName := o.BlueprintName\n\tqBlueprintName := qrBlueprintName\n\tif qBlueprintName != \"\" {\n\t\tif err := r.SetQueryParam(\"blueprintName\", qBlueprintName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// query param clusterName\n\tqrClusterName := o.ClusterName\n\tqClusterName := qrClusterName\n\tif qClusterName != \"\" {\n\t\tif err := r.SetQueryParam(\"clusterName\", qClusterName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// query param fileSystemType\n\tqrFileSystemType := o.FileSystemType\n\tqFileSystemType := qrFileSystemType\n\tif qFileSystemType != \"\" {\n\t\tif err := r.SetQueryParam(\"fileSystemType\", qFileSystemType); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Secure != nil {\n\n\t\t// query param secure\n\t\tvar qrSecure bool\n\t\tif o.Secure != nil {\n\t\t\tqrSecure = *o.Secure\n\t\t}\n\t\tqSecure := swag.FormatBool(qrSecure)\n\t\tif qSecure != \"\" {\n\t\t\tif err := r.SetQueryParam(\"secure\", qSecure); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param storageName\n\tqrStorageName := o.StorageName\n\tqStorageName := qrStorageName\n\tif qStorageName != \"\" {\n\t\tif err := r.SetQueryParam(\"storageName\", qStorageName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateRuntimeMapParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.FileUpload != nil {\n\n\t\tif o.FileUpload != nil {\n\n\t\t\t// form file param file_upload\n\t\t\tif err := r.SetFileParam(\"file_upload\", o.FileUpload); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateRoomParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif o.CreateRoomRequest != nil {\n\t\tif err := r.SetBodyParam(o.CreateRoomRequest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param locationId\n\tif err := r.SetPathParam(\"locationId\", o.LocationID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetPortInfoUsingGET2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func WriteCache(data WeatherData) {\n\tt := time.Now()\n\tvar strdata []byte\n\tvar cache map[string]string = make(map[string]string)\n\tstrdata, _ = json.Marshal(data)\n\tcache[\"timestamp\"] = strconv.FormatInt(t.Unix(), 10)\n\tcache[\"weatherData\"] = string(strdata)\n\tgodotenv.Write(cache, \".env.cache\")\n}", "func (o *GetCharacterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := 
r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.APIKey != nil {\n\n\t\t// query param apiKey\n\t\tvar qrAPIKey string\n\t\tif o.APIKey != nil {\n\t\t\tqrAPIKey = *o.APIKey\n\t\t}\n\t\tqAPIKey := qrAPIKey\n\t\tif qAPIKey != \"\" {\n\t\t\tif err := r.SetQueryParam(\"apiKey\", qAPIKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param uid\n\tqrUID := o.UID\n\tqUID := qrUID\n\tif qUID != \"\" {\n\t\tif err := r.SetQueryParam(\"uid\", qUID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetRuntimeServersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param backend\n\tqrBackend := o.Backend\n\tqBackend := qrBackend\n\tif qBackend != \"\" {\n\t\tif err := r.SetQueryParam(\"backend\", qBackend); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ActionDeploymentRequestUsingPOST2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param action\n\tqrAction := o.Action\n\tqAction := qrAction\n\tif qAction != \"\" {\n\n\t\tif err := r.SetQueryParam(\"action\", qAction); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.APIVersion != nil {\n\n\t\t// query param apiVersion\n\t\tvar qrAPIVersion string\n\n\t\tif o.APIVersion != nil {\n\t\t\tqrAPIVersion = *o.APIVersion\n\t\t}\n\t\tqAPIVersion := qrAPIVersion\n\t\tif qAPIVersion != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"apiVersion\", qAPIVersion); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param requestId\n\tif err := r.SetPathParam(\"requestId\", 
o.RequestID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ListAllKeyspacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Cassandra-Token\n\tif err := r.SetHeaderParam(\"X-Cassandra-Token\", o.XCassandraToken); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutClientConfigV2NamespaceChangesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Namespaces != nil {\n\t\tif err := r.SetBodyParam(o.Namespaces); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ServeFieldParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param field\n\tif err := r.SetPathParam(\"field\", o.Field); err != nil {\n\t\treturn err\n\t}\n\n\t// path param vcsRootLocator\n\tif err := r.SetPathParam(\"vcsRootLocator\", o.VcsRootLocator); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *TicketStatisticParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif o.ExtensionCategory != nil {\n\n\t\t// query param extensionCategory\n\t\tvar qrExtensionCategory string\n\t\tif 
o.ExtensionCategory != nil {\n\t\t\tqrExtensionCategory = *o.ExtensionCategory\n\t\t}\n\t\tqExtensionCategory := qrExtensionCategory\n\t\tif qExtensionCategory != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extensionCategory\", qExtensionCategory); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param category\n\tqrCategory := o.Category\n\tqCategory := qrCategory\n\tif qCategory != \"\" {\n\t\tif err := r.SetQueryParam(\"category\", qCategory); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// setting the default header value\n\tif err := r.SetHeaderParam(\"User-Agent\", utils.UserAgentGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetHeaderParam(\"X-Amzn-Trace-Id\", utils.AmazonTraceIDGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\n\treturn nil\n}", "func (o *PutFlagSettingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param environmentKey\n\tif err := r.SetPathParam(\"environmentKey\", o.EnvironmentKey); err != nil {\n\t\treturn err\n\t}\n\n\t// path param featureFlagKey\n\tif err := r.SetPathParam(\"featureFlagKey\", o.FeatureFlagKey); err != nil {\n\t\treturn err\n\t}\n\n\t// path param projectKey\n\tif err := r.SetPathParam(\"projectKey\", o.ProjectKey); err != nil {\n\t\treturn err\n\t}\n\n\t// path param userKey\n\tif err := r.SetPathParam(\"userKey\", o.UserKey); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.UserSettingsBody); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateMTOServiceItemStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param If-Match\n\tif 
err := r.SetHeaderParam(\"If-Match\", o.IfMatch); err != nil {\n\t\treturn err\n\t}\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param mtoServiceItemID\n\tif err := r.SetPathParam(\"mtoServiceItemID\", o.MtoServiceItemID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *LogRequestDownloadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Request != nil {\n\t\tif err := r.SetBodyParam(o.Request); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostHyperflexAutoSupportPoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetNetworkExternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *cache) Save(path string) (err error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tvar b bytes.Buffer\n\n\tw := wrapper{\n\t\tChannel: c.channel,\n\t\tLastCheckedAt: c.lastCheckedAt,\n\t\tLatestRelease: c.latestRelease,\n\t\tInvalidVer: c.invalidVer,\n\t}\n\tif c.invalidVer != nil && 
c.IsCurrentVersionInvalid() == \"\" {\n\t\tw.InvalidVer = nil\n\t}\n\n\tif err = yaml.NewEncoder(&b).Encode(w); err != nil {\n\t\treturn\n\t}\n\n\tvar unlock filemu.UnlockFunc\n\tif unlock, err = filemu.Lock(context.Background(), lockPath()); err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := unlock(); err == nil {\n\t\t\terr = e\n\t\t}\n\t}()\n\n\t// TODO: os.WriteFile does NOT flush\n\terr = os.WriteFile(path, b.Bytes(), 0o600)\n\n\treturn\n}", "func (o *SearchWorkspacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetUsersCurrentPermissionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Relative != nil {\n\n\t\t// query param relative\n\t\tvar qrRelative bool\n\t\tif o.Relative != nil {\n\t\t\tqrRelative = *o.Relative\n\t\t}\n\t\tqRelative := swag.FormatBool(qrRelative)\n\t\tif qRelative != \"\" {\n\t\t\tif err := r.SetQueryParam(\"relative\", qRelative); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Scope != nil {\n\n\t\t// query param scope\n\t\tvar qrScope string\n\t\tif o.Scope != nil {\n\t\t\tqrScope = *o.Scope\n\t\t}\n\t\tqScope := qrScope\n\t\tif qScope != \"\" {\n\t\t\tif err := r.SetQueryParam(\"scope\", qScope); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateMemberRoleAdminV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res 
[]error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param memberRoleId\n\tif err := r.SetPathParam(\"memberRoleId\", o.MemberRoleID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cass *WriterBase) CacheRender(ctx context.Context, path string, from int64, to int64, tags repr.SortingTags) ([]*metrics.RawRenderItem, error) {\n\treturn nil, ErrNotYetimplemented\n}", "func (f *WriteRequest) Serialize(buffer []byte) []byte {\n\tbuffer[0] = byte(f.Flags)\n\tcopy(buffer[1:], f.Data)\n\treturn buffer\n}" ]
[ "0.6856427", "0.59458834", "0.58403116", "0.5713071", "0.5615489", "0.52745515", "0.52193725", "0.49859717", "0.49586096", "0.48472455", "0.47740433", "0.4663155", "0.46558547", "0.46514115", "0.45606583", "0.45490405", "0.4545567", "0.45145956", "0.4509981", "0.45094764", "0.44993392", "0.44832096", "0.44563872", "0.440763", "0.44042236", "0.43992895", "0.43850017", "0.43840438", "0.43802398", "0.43669713", "0.4347799", "0.43316713", "0.43292707", "0.43199533", "0.43137187", "0.4312404", "0.43116617", "0.43050098", "0.43038404", "0.430112", "0.42928526", "0.42790923", "0.427233", "0.4271686", "0.42708734", "0.4266816", "0.4256691", "0.42481935", "0.42324594", "0.42317292", "0.42213893", "0.4212173", "0.42081115", "0.4207312", "0.42064834", "0.41977808", "0.4183702", "0.41810292", "0.41769743", "0.4176864", "0.41759115", "0.4174236", "0.41703427", "0.41633973", "0.41591173", "0.4157957", "0.41510624", "0.41467613", "0.41434303", "0.41336632", "0.41336477", "0.41333315", "0.41327977", "0.41326088", "0.41310358", "0.41265434", "0.41262624", "0.4124171", "0.41216904", "0.41162634", "0.41150227", "0.4105117", "0.40975675", "0.40969992", "0.40962273", "0.40915474", "0.40858933", "0.40845197", "0.4082552", "0.40795866", "0.40793926", "0.40776172", "0.4070172", "0.40673074", "0.40670058", "0.40669805", "0.4063764", "0.4059047", "0.4051934", "0.40518522" ]
0.8846225
0
Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest is an autogenerated conversion function.
func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error { return autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in *impl.FormatVolumeRequest, out *v2alpha1.FormatVolumeRequest) error {\n\treturn autoConvert_impl_FormatVolumeRequest_To_v2alpha1_FormatVolumeRequest(in, out)\n}", "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in *impl.ResizeVolumeRequest, out *v2alpha1.ResizeVolumeRequest) error {\n\treturn autoConvert_impl_ResizeVolumeRequest_To_v2alpha1_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in *impl.UnmountVolumeRequest, out *v2alpha1.UnmountVolumeRequest) error {\n\treturn autoConvert_impl_UnmountVolumeRequest_To_v2alpha1_UnmountVolumeRequest(in, out)\n}", "func Convert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in *impl.MountVolumeRequest, out *v2alpha1.MountVolumeRequest) error {\n\treturn autoConvert_impl_MountVolumeRequest_To_v2alpha1_MountVolumeRequest(in, out)\n}", "func Convert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in *impl.MkdirRequest, out *v2alpha1.MkdirRequest) error {\n\treturn autoConvert_impl_MkdirRequest_To_v2alpha1_MkdirRequest(in, out)\n}", "func Convert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in *impl.GetVolumeStatsRequest, out *v2alpha1.GetVolumeStatsRequest) error {\n\treturn autoConvert_impl_GetVolumeStatsRequest_To_v2alpha1_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in 
*v2alpha1.FormatVolumeRequest, out *impl.FormatVolumeRequest) error {\n\treturn autoConvert_v2alpha1_FormatVolumeRequest_To_impl_FormatVolumeRequest(in, out)\n}", "func Convert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in *impl.RmdirRequest, out *v2alpha1.RmdirRequest) error {\n\treturn autoConvert_impl_RmdirRequest_To_v2alpha1_RmdirRequest(in, out)\n}", "func Convert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in *impl.IsVolumeFormattedRequest, out *v2alpha1.IsVolumeFormattedRequest) error {\n\treturn autoConvert_impl_IsVolumeFormattedRequest_To_v2alpha1_IsVolumeFormattedRequest(in, out)\n}", "func (o *V2UpdateClusterManifestParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.UpdateManifestParams != nil {\n\t\tif err := r.SetBodyParam(o.UpdateManifestParams); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param cluster_id\n\tif err := r.SetPathParam(\"cluster_id\", o.ClusterID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in *internal.FormatVolumeRequest, out *v1beta1.FormatVolumeRequest) error {\n\treturn autoConvert_internal_FormatVolumeRequest_To_v1beta1_FormatVolumeRequest(in, out)\n}", "func (o *GetDeploymentPreview1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param environment\n\tif err := r.SetPathParam(\"environment\", o.Environment); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tenant\n\tif err := r.SetPathParam(\"tenant\", o.Tenant); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 
{\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in *v2alpha1.MountVolumeRequest, out *impl.MountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_MountVolumeRequest_To_impl_MountVolumeRequest(in, out)\n}", "func (o *ReadStorageV1alpha1VolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Exact != nil {\n\n\t\t// query param exact\n\t\tvar qrExact bool\n\t\tif o.Exact != nil {\n\t\t\tqrExact = *o.Exact\n\t\t}\n\t\tqExact := swag.FormatBool(qrExact)\n\t\tif qExact != \"\" {\n\t\t\tif err := r.SetQueryParam(\"exact\", qExact); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Export != nil {\n\n\t\t// query param export\n\t\tvar qrExport bool\n\t\tif o.Export != nil {\n\t\t\tqrExport = *o.Export\n\t\t}\n\t\tqExport := swag.FormatBool(qrExport)\n\t\tif qExport != \"\" {\n\t\t\tif err := r.SetQueryParam(\"export\", qExport); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Pretty != nil {\n\n\t\t// query param pretty\n\t\tvar qrPretty string\n\t\tif o.Pretty != nil {\n\t\t\tqrPretty = *o.Pretty\n\t\t}\n\t\tqPretty := qrPretty\n\t\tif qPretty != \"\" {\n\t\t\tif err := r.SetQueryParam(\"pretty\", qPretty); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in *impl.ListVolumesOnDiskRequest, out *v2alpha1.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_impl_ListVolumesOnDiskRequest_To_v2alpha1_ListVolumesOnDiskRequest(in, out)\n}", "func 
Convert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in *v2alpha1.GetVolumeStatsRequest, out *impl.GetVolumeStatsRequest) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsRequest_To_impl_GetVolumeStatsRequest(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in *v2alpha1.ResizeVolumeRequest, out *impl.ResizeVolumeRequest) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeRequest_To_impl_ResizeVolumeRequest(in, out)\n}", "func Convert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in *impl.RmdirContentsRequest, out *v2alpha1.RmdirContentsRequest) error {\n\treturn autoConvert_impl_RmdirContentsRequest_To_v2alpha1_RmdirContentsRequest(in, out)\n}", "func (o *ActionDeploymentRequestUsingPOST2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param action\n\tqrAction := o.Action\n\tqAction := qrAction\n\tif qAction != \"\" {\n\n\t\tif err := r.SetQueryParam(\"action\", qAction); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.APIVersion != nil {\n\n\t\t// query param apiVersion\n\t\tvar qrAPIVersion string\n\n\t\tif o.APIVersion != nil {\n\t\t\tqrAPIVersion = *o.APIVersion\n\t\t}\n\t\tqAPIVersion := qrAPIVersion\n\t\tif qAPIVersion != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"apiVersion\", qAPIVersion); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param requestId\n\tif err := r.SetPathParam(\"requestId\", o.RequestID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateVolumeBackupParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CreateVolumeBackupDetails == nil {\n\t\to.CreateVolumeBackupDetails = 
new(models.CreateVolumeBackupDetails)\n\t}\n\n\tif err := r.SetBodyParam(o.CreateVolumeBackupDetails); err != nil {\n\t\treturn err\n\t}\n\n\tif o.OpcRetryToken != nil {\n\n\t\t// header param opc-retry-token\n\t\tif err := r.SetHeaderParam(\"opc-retry-token\", *o.OpcRetryToken); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in *v2alpha1.UnmountVolumeRequest, out *impl.UnmountVolumeRequest) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeRequest_To_impl_UnmountVolumeRequest(in, out)\n}", "func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {\n\t// If runtime os is windows, execute Write-VolumeCache powershell command on the disk\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd := fmt.Sprintf(\"Get-Volume -FilePath %s | Write-Volumecache\", deviceMountPath)\n\t\toutput, err := exec.Command(\"powershell\", \"/c\", cmd).CombinedOutput()\n\t\tklog.Infof(\"command (%q) execeuted: %v, output: %q\", cmd, err, string(output))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"command (%q) failed: %v, output: %q\", cmd, err, string(output))\n\t\t}\n\t}\n\t// For linux runtime, it skips because unmount will automatically flush disk data\n\treturn nil\n}", "func (o *GetFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Cidr != nil {\n\n\t\t// query param cidr\n\t\tvar qrCidr string\n\n\t\tif o.Cidr != nil {\n\t\t\tqrCidr = *o.Cidr\n\t\t}\n\t\tqCidr := qrCidr\n\t\tif qCidr != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"cidr\", qCidr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Matchpattern != nil {\n\n\t\t// query param matchpattern\n\t\tvar qrMatchpattern string\n\n\t\tif o.Matchpattern != nil {\n\t\t\tqrMatchpattern = 
*o.Matchpattern\n\t\t}\n\t\tqMatchpattern := qrMatchpattern\n\t\tif qMatchpattern != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"matchpattern\", qMatchpattern); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Source != nil {\n\n\t\t// query param source\n\t\tvar qrSource string\n\n\t\tif o.Source != nil {\n\t\t\tqrSource = *o.Source\n\t\t}\n\t\tqSource := qrSource\n\t\tif qSource != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"source\", qSource); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (server *Server) WriteLockRequest(args *Args, response *string) error {\n\tobj, found := server.Accounts[args.AccountName]\n\n\t// Granting access to newly created account; return success\n\tif !found {\n\t\t*response = \"NOT FOUND\"\n\t\treturn nil\n\t}\n\n\tobj.AccountLock.Lock()\n\t//There is no writer for this account\n\tif obj.Writer == \"\" {\n\t\t// Grant access when there is no reader and writer\n\t\tif obj.Readers.Size() == 0 {\n\t\t\tobj.Writer = args.TransactionID\n\t\t\t*response = \"SUCCESS \" + server.Name + \".\" + args.AccountName + \" \" + obj.AccountBalance\n\t\t\tobj.AccountLock.Unlock()\n\t\t} else {\n\t\t\t// Requester is the only reader\n\t\t\tif obj.Readers.SetHas(args.TransactionID) {\n\t\t\t\tif obj.Readers.Size() == 1 {\n\t\t\t\t\t// Same Transaction ID promotion (for only one reader no writer)\n\t\t\t\t\tobj.Writer = args.TransactionID\n\t\t\t\t\tobj.Readers.SetDelete(args.TransactionID)\n\t\t\t\t\t*response = \"SUCCESS \" + server.Name + \".\" + args.AccountName + \" \" + obj.AccountBalance\n\t\t\t\t\tobj.AccountLock.Unlock()\n\t\t\t\t} else {\n\t\t\t\t\t// More than one reader for the account\n\t\t\t\t\t// Wait until transaction is the only reader, then promote the reader to writer\n\t\t\t\t\treq := NewLockStat(\"promote\", args.TransactionID)\n\t\t\t\t\tobj.RequestQueue = append([]*LockStat{req}, obj.RequestQueue...) 
// Put the request to the beginning of the queue\n\t\t\t\t\tobj.AccountLock.Unlock()\n\n\t\t\t\t\t// Waiting for access\n\t\t\t\t\tok := <-req.Status\n\t\t\t\t\tif ok {\n\t\t\t\t\t\t*response = \"SUCCESS \" + server.Name + \".\" + args.AccountName + \" \" + obj.AccountBalance\n\t\t\t\t\t} else {\n\t\t\t\t\t\t*response = \"ABORT\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Requester is not the reader in the queue\n\t\t\t\treq := NewLockStat(\"write\", args.TransactionID)\n\n\t\t\t\t// Append the request at the end of the queue\n\t\t\t\tobj.RequestQueue = append(obj.RequestQueue, req)\n\n\t\t\t\tobj.AccountLock.Unlock()\n\n\t\t\t\t// Wait for access\n\t\t\t\tok := <-req.Status\n\t\t\t\tif ok {\n\t\t\t\t\t*response = \"SUCCESS \" + server.Name + \".\" + args.AccountName + \" \" + obj.AccountBalance\n\t\t\t\t} else {\n\t\t\t\t\t*response = \"ABORT\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Some other client is holding the write lock\n\t\t// return reader-writer conflict when there are still readers (Technically this should never happen)\n\t\tif obj.Readers.Size() != 0 {\n\t\t\tfmt.Println(\"Reader-Writer Conflict!\")\n\t\t\tobj.AccountLock.Unlock()\n\t\t\treturn errors.New(\"Write: Account accountName=\" + args.AccountName + \", Transaction=\" + args.TransactionID + \". 
Reader-writer conflict.\")\n\t\t}\n\t\treq := NewLockStat(\"write\", args.TransactionID)\n\n\t\t//Append the request at the end of RequestQueue\n\t\tobj.RequestQueue = append(obj.RequestQueue, req)\n\t\tobj.AccountLock.Unlock()\n\n\t\t// Wait for access\n\t\tok := <-req.Status\n\t\tif ok {\n\t\t\t*response = \"SUCCESS \" + server.Name + \".\" + args.AccountName + \" \" + obj.AccountBalance\n\t\t} else {\n\t\t\t*response = \"ABORT\"\n\t\t}\n\t}\n\treturn nil\n}", "func Convert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in *internal.GetServiceRequest, out *v1alpha1.GetServiceRequest) error {\n\treturn autoConvert_internal_GetServiceRequest_To_v1alpha1_GetServiceRequest(in, out)\n}", "func (o *GetPortInfoUsingGET2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (c *APIGateway) FlushStageCacheRequest(input *FlushStageCacheInput) (req *request.Request, output *FlushStageCacheOutput) {\n\top := &request.Operation{\n\t\tName: opFlushStageCache,\n\t\tHTTPMethod: \"DELETE\",\n\t\tHTTPPath: \"/restapis/{restapi_id}/stages/{stage_name}/cache/data\",\n\t}\n\n\tif input == nil {\n\t\tinput = &FlushStageCacheInput{}\n\t}\n\n\treq = c.newRequest(op, input, output)\n\treq.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)\n\treq.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)\n\toutput = &FlushStageCacheOutput{}\n\treq.Data = output\n\treturn\n}", "func Convert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in *internal.DismountVolumeRequest, out *v1beta1.DismountVolumeRequest) error {\n\treturn 
autoConvert_internal_DismountVolumeRequest_To_v1beta1_DismountVolumeRequest(in, out)\n}", "func Convert_v1alpha1_Memcached_To_v1alpha2_Memcached(in *Memcached, out *v1alpha2.Memcached, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_Memcached_To_v1alpha2_Memcached(in, out, s)\n}", "func (client *RedisClient) checkNameAvailabilityCreateRequest(ctx context.Context, parameters CheckNameAvailabilityParameters, options *RedisCheckNameAvailabilityOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Cache/CheckNameAvailability\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (o *UpdateNetworkSwitchAccessControlListsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.UpdateNetworkSwitchAccessControlLists != nil {\n\t\tif err := r.SetBodyParam(o.UpdateNetworkSwitchAccessControlLists); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetPackageSearchActionOldSpacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn 
err\n\t}\n\tvar res []error\n\n\t// path param baseSpaceId\n\tif err := r.SetPathParam(\"baseSpaceId\", o.BaseSpaceID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in *internal.ResizeVolumeRequest, out *v1beta1.ResizeVolumeRequest) error {\n\treturn autoConvert_internal_ResizeVolumeRequest_To_v1beta1_ResizeVolumeRequest(in, out)\n}", "func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in *impl.CreateSymlinkRequest, out *v2alpha1.CreateSymlinkRequest) error {\n\treturn autoConvert_impl_CreateSymlinkRequest_To_v2alpha1_CreateSymlinkRequest(in, out)\n}", "func (o *PutClusterForAutoscaleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param crn\n\tif err := r.SetPathParam(\"crn\", o.Crn); err != nil {\n\t\treturn err\n\t}\n\n\t// path param userId\n\tif err := r.SetPathParam(\"userId\", o.UserID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *UpdateNetworkHTTPServerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error 
{\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.UpdateNetworkHTTPServer != nil {\n\t\tif err := r.SetBodyParam(o.UpdateNetworkHTTPServer); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetBootVolumeAttachmentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param bootVolumeAttachmentId\n\tif err := r.SetPathParam(\"bootVolumeAttachmentId\", o.BootVolumeAttachmentID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *StartV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Environment != nil {\n\n\t\t// query param environment\n\t\tvar qrEnvironment string\n\t\tif o.Environment != nil {\n\t\t\tqrEnvironment = *o.Environment\n\t\t}\n\t\tqEnvironment := qrEnvironment\n\t\tif qEnvironment != \"\" {\n\t\t\tif err := r.SetQueryParam(\"environment\", qEnvironment); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateChannelSpacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ChannelResource != nil {\n\t\tif err := r.SetBodyParam(o.ChannelResource); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// 
path param baseSpaceId\n\tif err := r.SetPathParam(\"baseSpaceId\", o.BaseSpaceID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in *internal.MountVolumeRequest, out *v1beta1.MountVolumeRequest) error {\n\treturn autoConvert_internal_MountVolumeRequest_To_v1beta1_MountVolumeRequest(in, out)\n}", "func (o *UpdateMemberRoleAdminV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param memberRoleId\n\tif err := r.SetPathParam(\"memberRoleId\", o.MemberRoleID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *MapDisk) NewWriteRequest(fname string) Request {\n\treturn Request{\n\t\treqType: reqWrite,\n\t\tinChan: m.inChan,\n\t\tfname: fname,\n\t\tresChan: make(chan reply),\n\t}\n}", "func (r *Search) RequestCache(requestcache bool) *Search {\n\tr.values.Set(\"request_cache\", strconv.FormatBool(requestcache))\n\n\treturn r\n}", "func Convert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in *v2alpha1.ListVolumesOnDiskRequest, out *impl.ListVolumesOnDiskRequest) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskRequest_To_impl_ListVolumesOnDiskRequest(in, out)\n}", "func (o *PostMeAccessRestrictionBackupCodeDisableParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.MeAccessRestrictionBackupCodeDisablePost != nil 
{\n\t\tif err := r.SetBodyParam(o.MeAccessRestrictionBackupCodeDisablePost); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetOneColumnParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Cassandra-Token\n\tif err := r.SetHeaderParam(\"X-Cassandra-Token\", o.XCassandraToken); err != nil {\n\t\treturn err\n\t}\n\n\t// path param columnName\n\tif err := r.SetPathParam(\"columnName\", o.ColumnName); err != nil {\n\t\treturn err\n\t}\n\n\t// path param keyspaceName\n\tif err := r.SetPathParam(\"keyspaceName\", o.KeyspaceName); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tableName\n\tif err := r.SetPathParam(\"tableName\", o.TableName); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PutClientConfigV2NamespaceChangesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Namespaces != nil {\n\t\tif err := r.SetBodyParam(o.Namespaces); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (client *StorageTargetsClient) listByCacheCreateRequest(ctx context.Context, resourceGroupName string, cacheName string, options *StorageTargetsClientListByCacheOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif cacheName == \"\" {\n\t\treturn nil, errors.New(\"parameter cacheName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cacheName}\", url.PathEscape(cacheName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (o *CloudNFSExportAddParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param param\n\tif err := r.SetPathParam(\"param\", o.Param); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *SyncCmOnDatalakeClusterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v1alpha3_OSDisk_To_v1alpha2_OSDisk(in *v1alpha3.OSDisk, out *OSDisk, s apiconversion.Scope) error {\n\treturn autoConvert_v1alpha3_OSDisk_To_v1alpha2_OSDisk(in, out, s)\n}", "func (o *GetExampleNewProjectDescriptionCompatibilityVersion1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != 
nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ID != nil {\n\n\t\t// query param id\n\t\tvar qrID string\n\t\tif o.ID != nil {\n\t\t\tqrID = *o.ID\n\t\t}\n\t\tqID := qrID\n\t\tif qID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"id\", qID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param projectLocator\n\tif err := r.SetPathParam(\"projectLocator\", o.ProjectLocator); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ShowPackageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param media_type\n\tif err := r.SetPathParam(\"media_type\", o.MediaType); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\t// path param package\n\tif err := r.SetPathParam(\"package\", o.Package); err != nil {\n\t\treturn err\n\t}\n\n\t// path param release\n\tif err := r.SetPathParam(\"release\", o.Release); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *PostApplyManifestParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.requestTimeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Content-Type\n\tif err := r.SetHeaderParam(\"Content-Type\", o.ContentType); err != nil {\n\t\treturn err\n\t}\n\n\tif o.XAuthToken != nil {\n\n\t\t// header param X-Auth-Token\n\t\tif err := r.SetHeaderParam(\"X-Auth-Token\", *o.XAuthToken); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.DisableUpdatePost != nil {\n\n\t\t// query param disable_update_post\n\t\tvar qrDisableUpdatePost bool\n\t\tif o.DisableUpdatePost != nil {\n\t\t\tqrDisableUpdatePost = 
*o.DisableUpdatePost\n\t\t}\n\t\tqDisableUpdatePost := swag.FormatBool(qrDisableUpdatePost)\n\t\tif qDisableUpdatePost != \"\" {\n\t\t\tif err := r.SetQueryParam(\"disable_update_post\", qDisableUpdatePost); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.DisableUpdatePre != nil {\n\n\t\t// query param disable_update_pre\n\t\tvar qrDisableUpdatePre bool\n\t\tif o.DisableUpdatePre != nil {\n\t\t\tqrDisableUpdatePre = *o.DisableUpdatePre\n\t\t}\n\t\tqDisableUpdatePre := swag.FormatBool(qrDisableUpdatePre)\n\t\tif qDisableUpdatePre != \"\" {\n\t\t\tif err := r.SetQueryParam(\"disable_update_pre\", qDisableUpdatePre); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.DryRun != nil {\n\n\t\t// query param dry_run\n\t\tvar qrDryRun bool\n\t\tif o.DryRun != nil {\n\t\t\tqrDryRun = *o.DryRun\n\t\t}\n\t\tqDryRun := swag.FormatBool(qrDryRun)\n\t\tif qDryRun != \"\" {\n\t\t\tif err := r.SetQueryParam(\"dry_run\", qDryRun); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.EnableChartCleanup != nil {\n\n\t\t// query param enable_chart_cleanup\n\t\tvar qrEnableChartCleanup bool\n\t\tif o.EnableChartCleanup != nil {\n\t\t\tqrEnableChartCleanup = *o.EnableChartCleanup\n\t\t}\n\t\tqEnableChartCleanup := swag.FormatBool(qrEnableChartCleanup)\n\t\tif qEnableChartCleanup != \"\" {\n\t\t\tif err := r.SetQueryParam(\"enable_chart_cleanup\", qEnableChartCleanup); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif err := r.SetBodyParam(o.RequestBody); err != nil {\n\t\treturn err\n\t}\n\n\tif o.TargetManifest != nil {\n\n\t\t// query param target_manifest\n\t\tvar qrTargetManifest string\n\t\tif o.TargetManifest != nil {\n\t\t\tqrTargetManifest = *o.TargetManifest\n\t\t}\n\t\tqTargetManifest := qrTargetManifest\n\t\tif qTargetManifest != \"\" {\n\t\t\tif err := r.SetQueryParam(\"target_manifest\", qTargetManifest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.TillerHost != nil {\n\n\t\t// query param 
tiller_host\n\t\tvar qrTillerHost string\n\t\tif o.TillerHost != nil {\n\t\t\tqrTillerHost = *o.TillerHost\n\t\t}\n\t\tqTillerHost := qrTillerHost\n\t\tif qTillerHost != \"\" {\n\t\t\tif err := r.SetQueryParam(\"tiller_host\", qTillerHost); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.TillerNamespace != nil {\n\n\t\t// query param tiller_namespace\n\t\tvar qrTillerNamespace string\n\t\tif o.TillerNamespace != nil {\n\t\t\tqrTillerNamespace = *o.TillerNamespace\n\t\t}\n\t\tqTillerNamespace := qrTillerNamespace\n\t\tif qTillerNamespace != \"\" {\n\t\t\tif err := r.SetQueryParam(\"tiller_namespace\", qTillerNamespace); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.TillerPort != nil {\n\n\t\t// query param tiller_port\n\t\tvar qrTillerPort int64\n\t\tif o.TillerPort != nil {\n\t\t\tqrTillerPort = *o.TillerPort\n\t\t}\n\t\tqTillerPort := swag.FormatInt64(qrTillerPort)\n\t\tif qTillerPort != \"\" {\n\t\t\tif err := r.SetQueryParam(\"tiller_port\", qTillerPort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Timeout != nil {\n\n\t\t// query param timeout\n\t\tvar qrTimeout int64\n\t\tif o.Timeout != nil {\n\t\t\tqrTimeout = *o.Timeout\n\t\t}\n\t\tqTimeout := swag.FormatInt64(qrTimeout)\n\t\tif qTimeout != \"\" {\n\t\t\tif err := r.SetQueryParam(\"timeout\", qTimeout); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Wait != nil {\n\n\t\t// query param wait\n\t\tvar qrWait bool\n\t\tif o.Wait != nil {\n\t\t\tqrWait = *o.Wait\n\t\t}\n\t\tqWait := swag.FormatBool(qrWait)\n\t\tif qWait != \"\" {\n\t\t\tif err := r.SetQueryParam(\"wait\", qWait); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn 
autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (o *GetNetworkAppliancePortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param appliancePortId\n\tif err := r.SetPathParam(\"appliancePortId\", o.AppliancePortID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param networkId\n\tif err := r.SetPathParam(\"networkId\", o.NetworkID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GroupV2AddOptionalConversationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param groupId\n\tif err := r.SetPathParam(\"groupId\", swag.FormatInt64(o.GroupID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateSubscriptionV2Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tvar res []error\n\n\tif o.Request == nil {\n\t\to.Request = new(models.CreateSubscriptionRequest)\n\t}\n\n\tif err := r.SetBodyParam(o.Request); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetClusterTemplateByNameInWorkspaceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn 
errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *CreateAccessPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func CreateModifyHostAvailabilityRequest() (request *ModifyHostAvailabilityRequest) {\n\trequest = &ModifyHostAvailabilityRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2019-01-01\", \"ModifyHostAvailability\", \"cms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (o *NetworkPruneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filters != nil {\n\n\t\t// query param filters\n\t\tvar qrFilters string\n\t\tif o.Filters != nil {\n\t\t\tqrFilters = *o.Filters\n\t\t}\n\t\tqFilters := qrFilters\n\t\tif qFilters != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filters\", qFilters); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func CreateModifyDirectoryRequest() (request *ModifyDirectoryRequest) {\n\trequest = &ModifyDirectoryRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"vs\", \"2018-12-12\", \"ModifyDirectory\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (f *WriteRequest) Serialize(buffer []byte) []byte {\n\tbuffer[0] = byte(f.Flags)\n\tcopy(buffer[1:], f.Data)\n\treturn buffer\n}", "func (o *UpdateSingleGroupPublicV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil 
{\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param groupId\n\tif err := r.SetPathParam(\"groupId\", o.GroupID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ContainerUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.Update); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in *v2alpha1.MkdirRequest, out *impl.MkdirRequest) error {\n\treturn autoConvert_v2alpha1_MkdirRequest_To_impl_MkdirRequest(in, out)\n}", "func Convert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in *FakeRequest, out *v1alpha2.FakeRequest, s conversion.Scope) error {\n\treturn autoConvert_v1alpha1_FakeRequest_To_v1alpha2_FakeRequest(in, out, s)\n}", "func (s *CacheServer) Append(ctx context.Context, in *pb.CacheRequest) (*pb.CacheResponse, error) {\n\tin.Operation = pb.CacheRequest_APPEND\n\treturn s.Call(ctx, in)\n}", "func (o *ConfigurationBackupModifyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Info != nil {\n\t\tif err := r.SetBodyParam(o.Info); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func 
Convert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in *v2alpha1.IsVolumeFormattedRequest, out *impl.IsVolumeFormattedRequest) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedRequest_To_impl_IsVolumeFormattedRequest(in, out)\n}", "func (o *GetAOrderStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Accept\n\tif err := r.SetHeaderParam(\"Accept\", o.Accept); err != nil {\n\t\treturn err\n\t}\n\n\t// header param Content-Type\n\tif err := r.SetHeaderParam(\"Content-Type\", o.ContentType); err != nil {\n\t\treturn err\n\t}\n\n\t// path param status_id\n\tif err := r.SetPathParam(\"status_id\", swag.FormatInt32(o.StatusID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *SetUniverseBackupFlagParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param cUUID\n\tif err := r.SetPathParam(\"cUUID\", o.CUUID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif o.MarkActive != nil {\n\n\t\t// query param markActive\n\t\tvar qrMarkActive bool\n\n\t\tif o.MarkActive != nil {\n\t\t\tqrMarkActive = *o.MarkActive\n\t\t}\n\t\tqMarkActive := swag.FormatBool(qrMarkActive)\n\t\tif qMarkActive != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"markActive\", qMarkActive); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param uniUUID\n\tif err := r.SetPathParam(\"uniUUID\", o.UniUUID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in *impl.PathExistsRequest, out *v2alpha1.PathExistsRequest) error {\n\treturn 
autoConvert_impl_PathExistsRequest_To_v2alpha1_PathExistsRequest(in, out)\n}", "func EncodeRingbufferCapacityRequest(name string) *proto.ClientMessage {\n\tclientMessage := proto.NewClientMessageForEncode()\n\tclientMessage.SetRetryable(true)\n\n\tinitialFrame := proto.NewFrameWith(make([]byte, RingbufferCapacityCodecRequestInitialFrameSize), proto.UnfragmentedMessage)\n\tclientMessage.AddFrame(initialFrame)\n\tclientMessage.SetMessageType(RingbufferCapacityCodecRequestMessageType)\n\tclientMessage.SetPartitionId(-1)\n\n\tEncodeString(clientMessage, name)\n\n\treturn clientMessage\n}", "func (o *CreateBlueprintInWorkspaceInternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID string\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID\n\t\tif qAccountID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *DecryptParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Parameters); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Convert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in *internal.VolumeStatsRequest, out *v1beta1.VolumeStatsRequest) 
error {\n\treturn autoConvert_internal_VolumeStatsRequest_To_v1beta1_VolumeStatsRequest(in, out)\n}", "func (o *EavAttributeSetRepositoryV1GetGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param attributeSetId\n\tif err := r.SetPathParam(\"attributeSetId\", swag.FormatInt64(o.AttributeSetID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetPrivateOrderstateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.OrderID != nil {\n\n\t\t// query param orderId\n\t\tvar qrOrderID float64\n\t\tif o.OrderID != nil {\n\t\t\tqrOrderID = *o.OrderID\n\t\t}\n\t\tqOrderID := swag.FormatFloat64(qrOrderID)\n\t\tif qOrderID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"orderId\", qOrderID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *RemoteWriteClient) PrepareRequest(queue *util.EvictingQueue) ([]byte, error) {\n\t// prepare labels and samples from queue\n\terr := r.prepare(queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := cortexpb.ToWriteRequest(r.labels, r.samples, nil, cortexpb.RULE)\n\tdefer cortexpb.ReuseSlice(req.Timeseries)\n\n\treqBytes, err := req.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn snappy.Encode(nil, reqBytes), nil\n}", "func (request WriteARoomRequest) Validate() error {\n\tif request.Name == \"\" {\n\t\treturn ErrInvalidRequest\n\t}\n\treturn nil\n}", "func (o *TestProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res 
[]error\n\n\tif o.ProjectVersionTestRequest != nil {\n\t\tif err := r.SetBodyParam(o.ProjectVersionTestRequest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetClockParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Killbill-ApiKey\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiKey\", o.XKillbillAPIKey); err != nil {\n\t\treturn err\n\t}\n\n\t// header param X-Killbill-ApiSecret\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiSecret\", o.XKillbillAPISecret); err != nil {\n\t\treturn err\n\t}\n\n\t// header param WithProfilingInfo\n\tif o.WithProfilingInfo != nil && len(*o.WithProfilingInfo) > 0 {\n\t\tif err := r.SetHeaderParam(\"X-Killbill-Profiling-Req\", *o.WithProfilingInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param withStackTrace\n\tif o.WithStackTrace != nil && *o.WithStackTrace {\n\t\tif err := r.SetQueryParam(\"withStackTrace\", \"true\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ImportStore1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.File != nil {\n\n\t\tif o.File != nil {\n\n\t\t\t// form file param file\n\t\t\tif err := r.SetFileParam(\"file\", o.File); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif o.StoreID != nil {\n\n\t\t// query param storeId\n\t\tvar qrStoreID string\n\t\tif o.StoreID != nil {\n\t\t\tqrStoreID = *o.StoreID\n\t\t}\n\t\tqStoreID := qrStoreID\n\t\tif qStoreID != \"\" {\n\t\t\tif err := 
r.SetQueryParam(\"storeId\", qStoreID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.StrictMode != nil {\n\n\t\t// query param strictMode\n\t\tvar qrStrictMode bool\n\t\tif o.StrictMode != nil {\n\t\t\tqrStrictMode = *o.StrictMode\n\t\t}\n\t\tqStrictMode := swag.FormatBool(qrStrictMode)\n\t\tif qStrictMode != \"\" {\n\t\t\tif err := r.SetQueryParam(\"strictMode\", qStrictMode); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// setting the default header value\n\tif err := r.SetHeaderParam(\"User-Agent\", utils.UserAgentGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetHeaderParam(\"X-Amzn-Trace-Id\", utils.AmazonTraceIDGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\n\treturn nil\n}", "func (s *CacheServer) Set(ctx context.Context, in *pb.CacheRequest) (*pb.CacheResponse, error) {\n\tin.Operation = pb.CacheRequest_SET\n\treturn s.Call(ctx, in)\n}", "func (client *RedisClient) updateCreateRequest(ctx context.Context, resourceGroupName string, name string, parameters RedisUpdateParameters, options *RedisUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{name}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-12-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}", "func (o *AssignUserToCustomerGroupUsingPATCH1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param baseSiteId\n\tif err := r.SetPathParam(\"baseSiteId\", o.BaseSiteID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param groupId\n\tif err := r.SetPathParam(\"groupId\", o.GroupID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Members != nil {\n\t\tif err := r.SetBodyParam(o.Members); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetPackageSearchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsBeginUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif 
resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif diskEncryptionSetName == \"\" {\n\t\treturn nil, errors.New(\"parameter diskEncryptionSetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{diskEncryptionSetName}\", url.PathEscape(diskEncryptionSetName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, diskEncryptionSet)\n}", "func (o *UpdateStockReceiptParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param koronaAccountId\n\tif err := r.SetPathParam(\"koronaAccountId\", o.KoronaAccountID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param stockReceiptId\n\tif err := r.SetPathParam(\"stockReceiptId\", o.StockReceiptID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *ListAllKeyspacesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Cassandra-Token\n\tif err := r.SetHeaderParam(\"X-Cassandra-Token\", o.XCassandraToken); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o 
*GetVrackServiceNameDedicatedCloudDedicatedCloudParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param dedicatedCloud\n\tif err := r.SetPathParam(\"dedicatedCloud\", o.DedicatedCloud); err != nil {\n\t\treturn err\n\t}\n\n\t// path param serviceName\n\tif err := r.SetPathParam(\"serviceName\", o.ServiceName); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *GetLogicalSwitchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param lswitch-id\n\tif err := r.SetPathParam(\"lswitch-id\", o.LswitchID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func marshalUpdatePrivateCloudUpdatePrivateCloudRequest(c *Client, m map[string]interface{}) ([]byte, error) {\n\n\treturn json.Marshal(m)\n}", "func EncodeClusterLegacyAbacUpdateRequest(m map[string]interface{}) map[string]interface{} {\n\treq := make(map[string]interface{})\n\t// TODO(b/150883761): Check and return the error on the GetMapEntry() call.\n\ti, _ := dcl.GetMapEntry(req, []string{\"legacy_abac\", \"enabled\"})\n\tdcl.PutMapEntry(req, []string{\"enabled\"}, i)\n\treturn req\n}", "func EncodeClusterUpdateRequest(m map[string]interface{}) map[string]interface{} {\n\tupdate := make(map[string]interface{})\n\tfor k, v := range m {\n\t\tif strings.HasPrefix(k, \"desired\") {\n\t\t\tupdate[k] = v\n\t\t}\n\t}\n\treturn map[string]interface{}{\"update\": update}\n}" ]
[ "0.69193834", "0.61824894", "0.6113064", "0.5818244", "0.5635364", "0.54148304", "0.53449637", "0.5153387", "0.5063319", "0.49686766", "0.48736355", "0.4862342", "0.46447748", "0.4625127", "0.46123683", "0.46051776", "0.451407", "0.4512947", "0.45010355", "0.44656947", "0.44312382", "0.44111127", "0.4363269", "0.43464637", "0.43402037", "0.42972875", "0.4286554", "0.42851818", "0.42779398", "0.42651024", "0.4260685", "0.4258964", "0.42541194", "0.42379183", "0.42176774", "0.42086276", "0.42074296", "0.42004615", "0.4188463", "0.4180192", "0.41787797", "0.4161339", "0.41601774", "0.41404969", "0.4121808", "0.41165346", "0.41130847", "0.4106258", "0.4101282", "0.40975305", "0.40930542", "0.40797776", "0.40783748", "0.4076284", "0.40555853", "0.40451974", "0.40355936", "0.40310314", "0.4021825", "0.4017079", "0.40114278", "0.40023136", "0.39983025", "0.3997837", "0.39974183", "0.3992034", "0.39918458", "0.39909372", "0.39873987", "0.3985837", "0.3983632", "0.39817253", "0.3980154", "0.397516", "0.39741227", "0.397068", "0.39664757", "0.39587912", "0.3953361", "0.3951939", "0.39509186", "0.39422026", "0.39397356", "0.39389652", "0.39317703", "0.39294687", "0.3929249", "0.39287", "0.39275372", "0.39269668", "0.3924426", "0.39183003", "0.39145377", "0.39115697", "0.39064774", "0.38990825", "0.389743", "0.3893947", "0.38890907", "0.38836178" ]
0.8724024
0
Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse is an autogenerated conversion function.
func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error { return autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error {\n\treturn autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func Convert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in *v2alpha1.GetVolumeStatsResponse, out *impl.GetVolumeStatsResponse) error {\n\treturn autoConvert_v2alpha1_GetVolumeStatsResponse_To_impl_GetVolumeStatsResponse(in, out)\n}", "func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {\n\t// If runtime os is windows, execute Write-VolumeCache powershell command on the disk\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd := fmt.Sprintf(\"Get-Volume -FilePath %s | Write-Volumecache\", deviceMountPath)\n\t\toutput, err := exec.Command(\"powershell\", \"/c\", cmd).CombinedOutput()\n\t\tklog.Infof(\"command (%q) execeuted: %v, output: %q\", cmd, err, string(output))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"command (%q) failed: %v, output: %q\", cmd, err, string(output))\n\t\t}\n\t}\n\t// For linux runtime, it skips because unmount will automatically flush disk data\n\treturn nil\n}", "func 
Convert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in *v2alpha1.UnmountVolumeResponse, out *impl.UnmountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_UnmountVolumeResponse_To_impl_UnmountVolumeResponse(in, out)\n}", "func Convert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in *v2alpha1.MountVolumeResponse, out *impl.MountVolumeResponse) error {\n\treturn autoConvert_v2alpha1_MountVolumeResponse_To_impl_MountVolumeResponse(in, out)\n}", "func Convert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in *v1beta1.FormatVolumeResponse, out *internal.FormatVolumeResponse) error {\n\treturn autoConvert_v1beta1_FormatVolumeResponse_To_internal_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in *v2alpha1.IsVolumeFormattedResponse, out *impl.IsVolumeFormattedResponse) error {\n\treturn autoConvert_v2alpha1_IsVolumeFormattedResponse_To_impl_IsVolumeFormattedResponse(in, out)\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload 
!= nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetMarketsGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]int32, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetUniverseGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]int32, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, 
payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func Convert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in *v2alpha1.ListVolumesOnDiskResponse, out *impl.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_v2alpha1_ListVolumesOnDiskResponse_To_impl_ListVolumesOnDiskResponse(in, out)\n}", "func Convert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in *v2alpha1.MkdirResponse, out *impl.MkdirResponse) error {\n\treturn autoConvert_v2alpha1_MkdirResponse_To_impl_MkdirResponse(in, out)\n}", "func (m *ResponseCache) ToJSON() (string, error) {\n\treturn codec.ToJSON(m)\n}", "func (s *ServerCodec) WriteResponse(resp *rpc.Response, obj interface{}) error {\n\tpb, ok := obj.(proto.Message)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%T does not implement proto.Message\", obj)\n\t}\n\n\t// Write the header\n\theader := wire.Header{\n\t\tMethod: &resp.ServiceMethod,\n\t\tSeq: &resp.Seq,\n\t}\n\tif resp.Error != \"\" {\n\t\theader.Error = &resp.Error\n\t}\n\tif err := WriteProto(s.w, &header); err != nil {\n\t\treturn nil\n\t}\n\n\t// Write the proto\n\treturn WriteProto(s.w, pb)\n}", "func (c *CodecRequest) WriteResponse(w http.ResponseWriter, reply interface{}) {\n\tif c.request.Id != nil {\n\t\t// Id is null for notifications and they don't have a response.\n\t\tres := &serverResponse{\n\t\t\tResult: reply,\n\t\t\tError: &null,\n\t\t\tId: c.request.Id,\n\t\t}\n\t\tc.writeServerResponse(w, 200, res)\n\t}\n}", "func Convert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in *v2alpha1.RmdirResponse, out *impl.RmdirResponse) error {\n\treturn autoConvert_v2alpha1_RmdirResponse_To_impl_RmdirResponse(in, out)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", 
\"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func WriteResponse(r *Response, w io.Writer) error {\n\tif r.Type == InvalidResponse || r.Type >= lastResponse {\n\t\treturn ErrInvalidResponse\n\t}\n\n\tif err := bin.WriteUint16(w, uint16(r.Type)); err != nil {\n\t\treturn err\n\t}\n\n\tswitch r.Type {\n\tcase Ok, Error:\n\t\tif err := bin.WriteUint32(w, uint32(len(r.Data))); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := w.Write(r.Data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func Convert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in *v1beta1.ResizeVolumeResponse, out *internal.ResizeVolumeResponse) error {\n\treturn autoConvert_v1beta1_ResizeVolumeResponse_To_internal_ResizeVolumeResponse(in, out)\n}", "func (o *PutWorkpaceByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(w io.Writer, r *Response) (err error) {\n\tdefer 
essentials.AddCtxTo(\"write response\", &err)\n\tdata, err := bson.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsize := uint32(len(data))\n\tif err := binary.Write(w, binary.LittleEndian, size); err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(data)\n\treturn err\n}", "func (o *GetHealthzNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *PatchReposOwnerRepoReleasesIDForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil 
{\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *UpdateDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func (o *ArtifactListerPartialContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Next\n\n\tnext := o.Next\n\tif next != \"\" {\n\t\trw.Header().Set(\"Next\", next)\n\t}\n\n\t// response header Previous\n\n\tprevious := o.Previous\n\tif previous != \"\" {\n\t\trw.Header().Set(\"Previous\", previous)\n\t}\n\n\t// response header RemainingRecords\n\n\tremainingRecords := swag.FormatUint64(o.RemainingRecords)\n\tif remainingRecords != \"\" {\n\t\trw.Header().Set(\"RemainingRecords\", remainingRecords)\n\t}\n\n\t// response header TotalRecords\n\n\ttotalRecords := swag.FormatUint64(o.TotalRecords)\n\tif totalRecords != \"\" {\n\t\trw.Header().Set(\"TotalRecords\", totalRecords)\n\t}\n\n\trw.WriteHeader(206)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*weles.ArtifactInfo, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func cacheResponse(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Response().Writer = cache.NewWriter(c.Response().Writer, c.Request())\n\t\treturn next(c)\n\t}\n}", "func (o *PatchReposOwnerRepoReleasesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil 
{\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetReposOwnerRepoStatsCommitActivityForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (w bodyCacheWriter) Write(b []byte) (int, error) {\n\n\texceptionPaths := []string{\"search\"}\n\thasException := funk.Contains(exceptionPaths, w.model.Slug)\n\thasException = funk.Contains(exceptionPaths, w.model.Code)\n\n\tcacheIgnoredPaths := []string{\"search\"}\n\tisIgnored := funk.Contains(cacheIgnoredPaths, w.model.Slug)\n\tisIgnored = funk.Contains(cacheIgnoredPaths, w.model.Code)\n\n\t// Write the response to the cache only if a success code\n\tstatus := w.Status()\n\tif 200 <= status && status <= 299 && !isIgnored {\n\t\tswitch w.store {\n\t\tcase \"redis\":\n\t\t\tgo w.redis.Set(RedisResponsePrefix, w.itemKey, string(b), RedisResponseDefaultKeyExpirationTime)\n\n\t\t\tvar setKey string\n\t\t\tsetKey = w.groupKey\n\t\t\tif w.hasRequestParams && !hasException {\n\t\t\t\tsetKey = fmt.Sprint(w.groupKey, \":with_params\")\n\t\t\t}\n\n\t\t\titemKeyIndex := w.requestURI\n\t\t\tauthRole := *w.authUserRole\n\t\t\tauthUserID := *w.authUserID\n\n\t\t\tif authRole != \"\" && authUserID != 0 {\n\t\t\t\titemKeyIndex = fmt.Sprint(w.requestURI, \":user_role:\", authRole, \":user_id:\", authUserID)\n\n\t\t\t\tif w.model.ID != \"\" || w.model.Code != \"\" || (w.model.Slug != \"\" && !hasException) {\n\t\t\t\t\tgo w.redis.SAdd(RedisResponsePrefix, fmt.Sprint(w.groupKey, \":\", w.requestURI), itemKeyIndex)\n\t\t\t\t} else {\n\t\t\t\t\tif !w.hasRequestParams {\n\t\t\t\t\t\tgo w.redis.SAdd(RedisResponsePrefix, setKey, 
itemKeyIndex)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tgo w.redis.SAdd(RedisResponsePrefix, setKey, w.requestURI)\n\t\t\tgo w.redis.SAdd(RedisResponsePrefix, fmt.Sprint(w.groupKey, \":all\"), w.requestURI)\n\t\t}\n\n\t}\n\n\t// Then write the response to gin\n\treturn w.ResponseWriter.Write(b)\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *GetReposOwnerRepoStatsCommitActivityOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.CommitActivityStats, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (r *GenericResponse) WriteResponse(w http.ResponseWriter) {\n\tif r.Headers == nil {\n\t\tr.Headers = make(map[string]string)\n\t}\n\t// Headers\n\tif r.MimeContent != nil {\n\t\tr.Headers[\"Content-Type\"] = r.MimeContent.MimeType\n\t\tif len(r.MimeContent.Filename) > 0 {\n\t\t\t// Does not support UTF-8 or spaces in filename\n\t\t\tr.Headers[\"Content-Disposition\"] = \"attachment; filename=\" + 
r.MimeContent.Filename\n\t\t}\n\t}\n\tfor k, v := range r.Headers {\n\t\tw.Header().Set(k, v)\n\t}\n\n\t// Body\n\tif r.MimeContent != nil {\n\t\tw.WriteHeader(r.StatusCode)\n\t\t_, _ = w.Write(r.MimeContent.Content)\n\t} else if r.JSONableResponse != nil {\n\t\twriteJSON(r.JSONableResponse, w, r.StatusCode)\n\t} else {\n\t\tw.WriteHeader(r.StatusCode)\n\t}\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in *v1beta1.VolumeStatsResponse, out *internal.VolumeStatsResponse) error {\n\treturn autoConvert_v1beta1_VolumeStatsResponse_To_internal_VolumeStatsResponse(in, out)\n}", "func Convert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in *v2alpha1.RmdirContentsResponse, out *impl.RmdirContentsResponse) error {\n\treturn autoConvert_v2alpha1_RmdirContentsResponse_To_impl_RmdirContentsResponse(in, out)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func (o *PatchFoldersIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := 
producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutSlideLikeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WrapResponse(w http.ResponseWriter, request types.InterxRequest, response types.ProxyResponse, statusCode int, saveToCache bool) {\n\tif statusCode == 0 {\n\t\tstatusCode = 503 // Service Unavailable Error\n\t}\n\tif saveToCache {\n\t\t// GetLogger().Info(\"[gateway] Saving in the cache\")\n\n\t\tchainIDHash := GetBlake2bHash(response.Chainid)\n\t\tendpointHash := GetBlake2bHash(request.Endpoint)\n\t\trequestHash := GetBlake2bHash(request)\n\t\tif conf, ok := RPCMethods[request.Method][request.Endpoint]; ok {\n\t\t\terr := PutCache(chainIDHash, endpointHash, requestHash, types.InterxResponse{\n\t\t\t\tResponse: response,\n\t\t\t\tStatus: statusCode,\n\t\t\t\tExpireAt: time.Now().Add(time.Duration(conf.CachingDuration) * time.Second),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t// GetLogger().Error(\"[gateway] Failed to save in the cache: \", err.Error())\n\t\t\t}\n\t\t\t// GetLogger().Info(\"[gateway] Save finished\")\n\t\t}\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Header().Add(\"Interx_chain_id\", response.Chainid)\n\tw.Header().Add(\"Interx_block\", strconv.FormatInt(response.Block, 10))\n\tw.Header().Add(\"Interx_blocktime\", response.Blocktime)\n\tw.Header().Add(\"Interx_timestamp\", strconv.FormatInt(response.Timestamp, 10))\n\tw.Header().Add(\"Interx_request_hash\", response.RequestHash)\n\tif request.Endpoint == config.QueryDataReference {\n\t\treference, err := database.GetReference(string(request.Params))\n\t\tif err == nil {\n\t\t\tw.Header().Add(\"Interx_ref\", 
\"/download/\"+reference.FilePath)\n\t\t}\n\t}\n\n\tif response.Response != nil {\n\t\tresponse.Signature, response.Hash = GetResponseSignature(response)\n\n\t\tw.Header().Add(\"Interx_signature\", response.Signature)\n\t\tw.Header().Add(\"Interx_hash\", response.Hash)\n\t\tw.WriteHeader(statusCode)\n\n\t\tjson.NewEncoder(w).Encode(response.Response)\n\t} else {\n\t\tw.WriteHeader(statusCode)\n\n\t\tif response.Error == nil {\n\t\t\tresponse.Error = \"service not available\"\n\t\t}\n\t\tjson.NewEncoder(w).Encode(response.Error)\n\t}\n}", "func UnmarshalCacheLevelResponse(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(CacheLevelResponse)\n\terr = core.UnmarshalPrimitive(m, \"success\", &obj.Success)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"errors\", &obj.Errors)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"messages\", &obj.Messages)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"result\", &obj.Result, UnmarshalCacheLevelResponseResult)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *PutUserSubscriptionsOwnerRepoForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (o *PutWorkpaceByIDForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (r *Response) Write(w io.Writer) error", "func (o 
*GetChartsInfoOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.ChartsData, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (cw *CacheWriter) Write(p []byte) (int, error) {\n\tvar (\n\t\tn int\n\t\terr error\n\t)\n\n\twriters := []io.Writer{\n\t\tcw.cacheBuff,\n\t}\n\n\tif !cw.useStale || !includesStaleStatus(cw.statusCode, cw.staleStatuses) {\n\t\twriters = append(writers, cw.ResponseWriter)\n\t}\n\n\tfor _, w := range writers {\n\t\tn, err = w.Write(p)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tif n != len(p) {\n\t\t\terr = io.ErrShortWrite\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\treturn len(p), nil\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WatchNetworkingV1NetworkPolicyListForAllNamespacesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", 
\"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func (o *PutReposOwnerRepoContentsPathForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1CSINodeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) 
//Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetReposOwnerRepoLanguagesForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (r *Response) cacheable() (rv cacheobject.ObjectResults) {\n\n\trespHeader := r.Response.Headers.(http.Header)\n\treqHeader := r.Request.Headers.(http.Header)\n\t//\trespHeader := r.Response.castHeaders()\n\t//\treqHeader := r.Request.castHeaders()\n\n\treqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get(\"Cache-Control\"))\n\tif err != nil {\n\t\tlogger.Printf(err.Error())\n\t}\n\tresDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get(\"Cache-Control\"))\n\tif err != nil {\n\t\tlogger.Printf(err.Error())\n\t}\n\t//logger.Println(respHeader)\n\texpiresHeader, _ := http.ParseTime(respHeader.Get(\"Expires\"))\n\tdateHeader, _ := http.ParseTime(respHeader.Get(\"Date\"))\n\tlastModifiedHeader, _ := http.ParseTime(respHeader.Get(\"Last-Modified\"))\n\tobj := cacheobject.Object{\n\t\t//\tCacheIsPrivate: false,\n\t\tRespDirectives: resDir,\n\t\tRespHeaders: respHeader,\n\t\tRespStatusCode: r.Response.Status,\n\t\tRespExpiresHeader: expiresHeader,\n\t\tRespDateHeader: dateHeader,\n\t\tRespLastModifiedHeader: lastModifiedHeader,\n\n\t\tReqDirectives: reqDir,\n\t\tReqHeaders: reqHeader,\n\t\tReqMethod: r.Request.Method,\n\t\tNowUTC: time.Now().UTC(),\n\t}\n\n\trv = cacheobject.ObjectResults{}\n\tcacheobject.CachableObject(&obj, &rv)\n\tcacheobject.ExpirationObject(&obj, &rv)\n\t//Check if it is cacheable\n\n\texpTime := rv.OutExpirationTime.Unix()\n\tif rv.OutExpirationTime.IsZero() {\n\t\texpTime = 0\n\t}\n\tr.CacheExpirationTime = expTime\n\tdebug := false\n\tif debug {\n\t\tif rv.OutErr != nil {\n\t\t\tlogger.Println(\"Errors: \", rv.OutErr)\n\t\t}\n\t\tif rv.OutReasons != nil {\n\t\t\tlogger.Println(\"Reasons to not cache: \", 
rv.OutReasons)\n\t\t}\n\t\tif rv.OutWarnings != nil {\n\t\t\tlogger.Println(\"Warning headers to add: \", rv.OutWarnings)\n\t\t}\n\t\tlogger.Println(\"Expiration: \", rv.OutExpirationTime.String())\n\t}\n\treturn rv\n}", "func (o *ShowPackageReleasesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.PackageManifest, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateResourceUsageOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutSlideSuperlikeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutReposOwnerRepoContentsPathOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := 
producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *UpdateOfficeUserForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (cbcr *CardBinCheckResponse) WriteResponse(w io.Writer, c ContentType) error {\n\tvar err error\n\tswitch c {\n\tcase JSONContentType:\n\t\t_, err = io.WriteString(w, cbcr.toJSON())\n\tcase TextContentType:\n\t\t_, err = io.WriteString(w, cbcr.toText())\n\tdefault:\n\t\terr = errors.New(\"No supporting content type\")\n\t}\n\treturn err\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WatchApiregistrationV1APIServiceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty 
responses\n\n\trw.WriteHeader(200)\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutPerformancesForbidden) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(403)\n}", "func (o *PartialUpdateAppDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListAppsV1NamespacedDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ArtifactListerOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Next\n\n\tnext := o.Next\n\tif next != \"\" {\n\t\trw.Header().Set(\"Next\", next)\n\t}\n\n\t// response header Previous\n\n\tprevious := o.Previous\n\tif previous != \"\" {\n\t\trw.Header().Set(\"Previous\", 
previous)\n\t}\n\n\t// response header TotalRecords\n\n\ttotalRecords := swag.FormatUint64(o.TotalRecords)\n\tif totalRecords != \"\" {\n\t\trw.Header().Set(\"TotalRecords\", totalRecords)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*weles.ArtifactInfo, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetReposOwnerRepoLanguagesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *ReplaceApiextensionsV1beta1CustomResourceDefinitionOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateStorageV1CSINodeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPresignedForClusterFilesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}" ]
[ "0.72245854", "0.6971003", "0.65156394", "0.618123", "0.5680038", "0.56698704", "0.55675393", "0.53499454", "0.52156985", "0.52085894", "0.5184892", "0.51843864", "0.51680243", "0.51427984", "0.5129716", "0.51226175", "0.5092983", "0.509064", "0.5066222", "0.50655913", "0.5061326", "0.5028817", "0.5023232", "0.4978348", "0.4964308", "0.49392855", "0.49271667", "0.49250364", "0.49248698", "0.49045855", "0.49010336", "0.48959878", "0.48810238", "0.48807034", "0.4873257", "0.4863631", "0.48629603", "0.48595873", "0.4853249", "0.48474643", "0.48446062", "0.48418394", "0.48292646", "0.48221546", "0.48144966", "0.4814092", "0.48096022", "0.48084357", "0.480723", "0.4804581", "0.48033395", "0.47868267", "0.4784163", "0.47764683", "0.4775783", "0.47754037", "0.47728017", "0.47724387", "0.47688407", "0.47649163", "0.47648534", "0.47583917", "0.4756097", "0.47424704", "0.4732517", "0.4731631", "0.47287613", "0.47267812", "0.47255808", "0.47203287", "0.4719444", "0.4712815", "0.47122473", "0.4710074", "0.47065535", "0.4701212", "0.46931377", "0.4688194", "0.4686556", "0.46857506", "0.4681576", "0.4678697", "0.46758264", "0.46711957", "0.46693996", "0.46655992", "0.46631968", "0.4662381", "0.46622464", "0.46563825", "0.4656153", "0.4656136", "0.46498284", "0.46468508", "0.4644684", "0.46426198", "0.46401188", "0.46382967", "0.46350044", "0.46327597" ]
0.83519584
0
Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse is an autogenerated conversion function.
func Convert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in *impl.WriteVolumeCacheResponse, out *v2alpha1.WriteVolumeCacheResponse) error { return autoConvert_impl_WriteVolumeCacheResponse_To_v2alpha1_WriteVolumeCacheResponse(in, out) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Convert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in *impl.WriteVolumeCacheRequest, out *v2alpha1.WriteVolumeCacheRequest) error {\n\treturn autoConvert_impl_WriteVolumeCacheRequest_To_v2alpha1_WriteVolumeCacheRequest(in, out)\n}", "func Convert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in *impl.FormatVolumeResponse, out *v2alpha1.FormatVolumeResponse) error {\n\treturn autoConvert_impl_FormatVolumeResponse_To_v2alpha1_FormatVolumeResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in *v2alpha1.WriteVolumeCacheResponse, out *impl.WriteVolumeCacheResponse) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheResponse_To_impl_WriteVolumeCacheResponse(in, out)\n}", "func Convert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in *impl.ResizeVolumeResponse, out *v2alpha1.ResizeVolumeResponse) error {\n\treturn autoConvert_impl_ResizeVolumeResponse_To_v2alpha1_ResizeVolumeResponse(in, out)\n}", "func Convert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in *impl.UnmountVolumeResponse, out *v2alpha1.UnmountVolumeResponse) error {\n\treturn autoConvert_impl_UnmountVolumeResponse_To_v2alpha1_UnmountVolumeResponse(in, out)\n}", "func Convert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in *impl.MountVolumeResponse, out *v2alpha1.MountVolumeResponse) error {\n\treturn autoConvert_impl_MountVolumeResponse_To_v2alpha1_MountVolumeResponse(in, out)\n}", "func Convert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in *impl.MkdirResponse, out *v2alpha1.MkdirResponse) error {\n\treturn autoConvert_impl_MkdirResponse_To_v2alpha1_MkdirResponse(in, out)\n}", "func Convert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in *v2alpha1.WriteVolumeCacheRequest, out *impl.WriteVolumeCacheRequest) error {\n\treturn autoConvert_v2alpha1_WriteVolumeCacheRequest_To_impl_WriteVolumeCacheRequest(in, out)\n}", "func 
Convert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in *v2alpha1.FormatVolumeResponse, out *impl.FormatVolumeResponse) error {\n\treturn autoConvert_v2alpha1_FormatVolumeResponse_To_impl_FormatVolumeResponse(in, out)\n}", "func Convert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in *impl.RmdirResponse, out *v2alpha1.RmdirResponse) error {\n\treturn autoConvert_impl_RmdirResponse_To_v2alpha1_RmdirResponse(in, out)\n}", "func Convert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in *internal.FormatVolumeResponse, out *v1beta1.FormatVolumeResponse) error {\n\treturn autoConvert_internal_FormatVolumeResponse_To_v1beta1_FormatVolumeResponse(in, out)\n}", "func Convert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in *impl.IsVolumeFormattedResponse, out *v2alpha1.IsVolumeFormattedResponse) error {\n\treturn autoConvert_impl_IsVolumeFormattedResponse_To_v2alpha1_IsVolumeFormattedResponse(in, out)\n}", "func (o *GetVMVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in *impl.GetVolumeStatsResponse, out *v2alpha1.GetVolumeStatsResponse) error {\n\treturn autoConvert_impl_GetVolumeStatsResponse_To_v2alpha1_GetVolumeStatsResponse(in, out)\n}", "func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {\n\t// If runtime os is windows, execute Write-VolumeCache powershell command on the disk\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd := fmt.Sprintf(\"Get-Volume -FilePath %s | Write-Volumecache\", deviceMountPath)\n\t\toutput, err := exec.Command(\"powershell\", \"/c\", cmd).CombinedOutput()\n\t\tklog.Infof(\"command (%q) execeuted: %v, output: %q\", cmd, err, string(output))\n\t\tif err 
!= nil {\n\t\t\treturn fmt.Errorf(\"command (%q) failed: %v, output: %q\", cmd, err, string(output))\n\t\t}\n\t}\n\t// For linux runtime, it skips because unmount will automatically flush disk data\n\treturn nil\n}", "func cacheResponse(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Response().Writer = cache.NewWriter(c.Response().Writer, c.Request())\n\t\treturn next(c)\n\t}\n}", "func (o *GetMarketsGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]int32, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetVMVolumeDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateAuthorizationV1beta1NamespacedLocalSubjectAccessReviewOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateAuthorizationV1beta1NamespacedLocalSubjectAccessReviewAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.WriteHeader(200)\n}", "func (o *GetUniverseGroupsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]int32, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateAuthorizationV1beta1NamespacedLocalSubjectAccessReviewCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *V2UploadClusterIngressCertCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", \"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func Convert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in 
*impl.RmdirContentsResponse, out *v2alpha1.RmdirContentsResponse) error {\n\treturn autoConvert_impl_RmdirContentsResponse_To_v2alpha1_RmdirContentsResponse(in, out)\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func CreateModifyHostAvailabilityResponse() (response *ModifyHostAvailabilityResponse) {\n\tresponse = &ModifyHostAvailabilityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1beta1VolumeAttachmentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *CreateAuthorizationV1beta1NamespacedLocalSubjectAccessReviewUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) 
{\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateACLDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceExtensionsV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *PutWorkpaceByIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, 
producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WatchNetworkingV1NetworkPolicyListForAllNamespacesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateExtensionsV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *AddPayloadRuntimeACLCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = models.ACLFilesEntries{}\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *PatchCoreV1PersistentVolumeUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ListStorageV1alpha1VolumeAttachmentUnauthorized) WriteResponse(rw 
http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *UpdateProviderTypeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *AddPayloadRuntimeACLDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" 
{\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVMVolumeNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(404)\n}", "func Convert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in *impl.CreateSymlinkResponse, out *v2alpha1.CreateSymlinkResponse) error {\n\treturn autoConvert_impl_CreateSymlinkResponse_To_v2alpha1_CreateSymlinkResponse(in, out)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PatchCoreV1PersistentVolumeOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *WatchPolicyV1beta1PodSecurityPolicyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetVersionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload 
:= o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func Convert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in *v2alpha1.ResizeVolumeResponse, out *impl.ResizeVolumeResponse) error {\n\treturn autoConvert_v2alpha1_ResizeVolumeResponse_To_impl_ResizeVolumeResponse(in, out)\n}", "func (o *GetAssetsListOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header X-Total-Count\n\n\txTotalCount := swag.FormatInt64(o.XTotalCount)\n\tif xTotalCount != \"\" {\n\t\trw.Header().Set(\"X-Total-Count\", xTotalCount)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.TokenAssetRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func Convert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in *impl.ListVolumesOnDiskResponse, out *v2alpha1.ListVolumesOnDiskResponse) error {\n\treturn autoConvert_impl_ListVolumesOnDiskResponse_To_v2alpha1_ListVolumesOnDiskResponse(in, out)\n}", "func (o *ShowPackageReleasesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.PackageManifest, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetHealthzOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil 
{\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *UpdateResourceUsageOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WrapResponse(w http.ResponseWriter, request types.InterxRequest, response types.ProxyResponse, statusCode int, saveToCache bool) {\n\tif statusCode == 0 {\n\t\tstatusCode = 503 // Service Unavailable Error\n\t}\n\tif saveToCache {\n\t\t// GetLogger().Info(\"[gateway] Saving in the cache\")\n\n\t\tchainIDHash := GetBlake2bHash(response.Chainid)\n\t\tendpointHash := GetBlake2bHash(request.Endpoint)\n\t\trequestHash := GetBlake2bHash(request)\n\t\tif conf, ok := RPCMethods[request.Method][request.Endpoint]; ok {\n\t\t\terr := PutCache(chainIDHash, endpointHash, requestHash, types.InterxResponse{\n\t\t\t\tResponse: response,\n\t\t\t\tStatus: statusCode,\n\t\t\t\tExpireAt: time.Now().Add(time.Duration(conf.CachingDuration) * time.Second),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t// GetLogger().Error(\"[gateway] Failed to save in the cache: \", err.Error())\n\t\t\t}\n\t\t\t// GetLogger().Info(\"[gateway] Save finished\")\n\t\t}\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Header().Add(\"Interx_chain_id\", response.Chainid)\n\tw.Header().Add(\"Interx_block\", strconv.FormatInt(response.Block, 10))\n\tw.Header().Add(\"Interx_blocktime\", response.Blocktime)\n\tw.Header().Add(\"Interx_timestamp\", strconv.FormatInt(response.Timestamp, 10))\n\tw.Header().Add(\"Interx_request_hash\", response.RequestHash)\n\tif request.Endpoint == config.QueryDataReference {\n\t\treference, err := database.GetReference(string(request.Params))\n\t\tif err == nil {\n\t\t\tw.Header().Add(\"Interx_ref\", \"/download/\"+reference.FilePath)\n\t\t}\n\t}\n\n\tif response.Response != nil 
{\n\t\tresponse.Signature, response.Hash = GetResponseSignature(response)\n\n\t\tw.Header().Add(\"Interx_signature\", response.Signature)\n\t\tw.Header().Add(\"Interx_hash\", response.Hash)\n\t\tw.WriteHeader(statusCode)\n\n\t\tjson.NewEncoder(w).Encode(response.Response)\n\t} else {\n\t\tw.WriteHeader(statusCode)\n\n\t\tif response.Error == nil {\n\t\t\tresponse.Error = \"service not available\"\n\t\t}\n\t\tjson.NewEncoder(w).Encode(response.Error)\n\t}\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *WatchApiregistrationV1APIServiceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PutWorkpaceByIDUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ObjectsPatchUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty 
responses\n\n\trw.WriteHeader(401)\n}", "func (o *PostManagementKubernetesIoV1NodesAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(202)\n}", "func (o *CreateNetworkingV1beta1NamespacedIngressUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *CreateStorageV1CSINodeAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ReplaceStorageV1CSINodeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateDiscoveryV1beta1NamespacedEndpointSliceAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateVMTempOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PatchFoldersIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := 
o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ListBatchV1NamespacedJobUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *CreateBatchV1NamespacedJobAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateACLAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *ClientPermissionCreateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateBatchV1NamespacedJobUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *PatchApiextensionsV1beta1CustomResourceDefinitionStatusUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o 
*GetBlockBakingRightsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header X-Total-Count\n\n\txTotalCount := swag.FormatInt64(o.XTotalCount)\n\tif xTotalCount != \"\" {\n\t\trw.Header().Set(\"X-Total-Count\", xTotalCount)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.BakingRightsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *ListAppsV1NamespacedDeploymentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetSchedulingV1APIResourcesUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *ListAppsV1NamespacedDeploymentUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(401)\n}", "func (o *PartialUpdateAppDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetSchedulingV1APIResourcesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r 
*Response) cacheable() (rv cacheobject.ObjectResults) {\n\n\trespHeader := r.Response.Headers.(http.Header)\n\treqHeader := r.Request.Headers.(http.Header)\n\t//\trespHeader := r.Response.castHeaders()\n\t//\treqHeader := r.Request.castHeaders()\n\n\treqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get(\"Cache-Control\"))\n\tif err != nil {\n\t\tlogger.Printf(err.Error())\n\t}\n\tresDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get(\"Cache-Control\"))\n\tif err != nil {\n\t\tlogger.Printf(err.Error())\n\t}\n\t//logger.Println(respHeader)\n\texpiresHeader, _ := http.ParseTime(respHeader.Get(\"Expires\"))\n\tdateHeader, _ := http.ParseTime(respHeader.Get(\"Date\"))\n\tlastModifiedHeader, _ := http.ParseTime(respHeader.Get(\"Last-Modified\"))\n\tobj := cacheobject.Object{\n\t\t//\tCacheIsPrivate: false,\n\t\tRespDirectives: resDir,\n\t\tRespHeaders: respHeader,\n\t\tRespStatusCode: r.Response.Status,\n\t\tRespExpiresHeader: expiresHeader,\n\t\tRespDateHeader: dateHeader,\n\t\tRespLastModifiedHeader: lastModifiedHeader,\n\n\t\tReqDirectives: reqDir,\n\t\tReqHeaders: reqHeader,\n\t\tReqMethod: r.Request.Method,\n\t\tNowUTC: time.Now().UTC(),\n\t}\n\n\trv = cacheobject.ObjectResults{}\n\tcacheobject.CachableObject(&obj, &rv)\n\tcacheobject.ExpirationObject(&obj, &rv)\n\t//Check if it is cacheable\n\n\texpTime := rv.OutExpirationTime.Unix()\n\tif rv.OutExpirationTime.IsZero() {\n\t\texpTime = 0\n\t}\n\tr.CacheExpirationTime = expTime\n\tdebug := false\n\tif debug {\n\t\tif rv.OutErr != nil {\n\t\t\tlogger.Println(\"Errors: \", rv.OutErr)\n\t\t}\n\t\tif rv.OutReasons != nil {\n\t\t\tlogger.Println(\"Reasons to not cache: \", rv.OutReasons)\n\t\t}\n\t\tif rv.OutWarnings != nil {\n\t\t\tlogger.Println(\"Warning headers to add: \", rv.OutWarnings)\n\t\t}\n\t\tlogger.Println(\"Expiration: \", rv.OutExpirationTime.String())\n\t}\n\treturn rv\n}" ]
[ "0.6693271", "0.6638265", "0.6307173", "0.6201272", "0.5958316", "0.5685052", "0.5651426", "0.5459383", "0.5358529", "0.5343374", "0.5241995", "0.5220932", "0.5206139", "0.5183032", "0.5086676", "0.50307286", "0.5018072", "0.50122184", "0.49841598", "0.49632716", "0.49610013", "0.49452865", "0.4940482", "0.4936042", "0.492635", "0.4917186", "0.49139005", "0.49133512", "0.48898277", "0.48768333", "0.48754194", "0.48650998", "0.4856937", "0.48550463", "0.48471642", "0.48210227", "0.48146832", "0.48055834", "0.48035356", "0.47961628", "0.47840148", "0.4772788", "0.47717747", "0.47419092", "0.47321865", "0.4729969", "0.47278813", "0.4721909", "0.47217196", "0.47130558", "0.47119236", "0.4708489", "0.46955365", "0.46941954", "0.46911278", "0.4688736", "0.468697", "0.46849024", "0.4682956", "0.468035", "0.46651307", "0.466454", "0.46591905", "0.46497446", "0.46460825", "0.46434444", "0.4636815", "0.4636077", "0.46300787", "0.4628697", "0.46200627", "0.4616987", "0.46113873", "0.4607791", "0.46066874", "0.46031657", "0.45965034", "0.45906764", "0.45808145", "0.457863", "0.45777175", "0.45717907", "0.4567777", "0.45661578", "0.45659778", "0.45653835", "0.4563227", "0.45620802", "0.456192", "0.45618802", "0.45580727", "0.45566654", "0.45565528", "0.45536444", "0.45412317", "0.4536899", "0.45368952", "0.45338988", "0.4531435", "0.4529595" ]
0.8577996
0
NewGetSearchOK creates GetSearchOK with default headers values
func NewGetSearchOK() *GetSearchOK { return &GetSearchOK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetSearchDefault(code int) *GetSearchDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetSearchDefault{\n\t\t_statusCode: code,\n\t}\n}", "func New(tp elastictransport.Interface) *Search {\n\tr := &Search{\n\t\ttransport: tp,\n\t\tvalues: make(url.Values),\n\t\theaders: make(http.Header),\n\t\tbuf: gobytes.NewBuffer(nil),\n\n\t\treq: NewRequest(),\n\t}\n\n\treturn r\n}", "func (a SearchApi) GetSearch(q64 string, expand []string, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/search\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, 
v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewSearchBucket()(*SearchBucket) {\n m := &SearchBucket{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewSearchReq(addr net.Addr) (*SearchReq, error) {\n\treq := &SearchReq{}\n\n\thostinfo, err := HostInfoFromAddress(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.HostInfo = hostinfo\n\n\treturn req, nil\n}", "func (c *Client) NewSearch(searchType string, params *SearchParams) *Search {\n\treturn 
&Search{\n\t\tclient: c,\n\t\tType: searchType,\n\t\tParams: params,\n\t\tDeleted: false,\n\t}\n}", "func Search(terms []string) Params {\n\treturn Params{make(url.Values), SearchURL}.Country(CN).Terms(terms)\n}", "func PrepSearchRequest(r SearchRequest) (*http.Request, error) {\n\turl, err := url.Parse(r.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalues := url.Query()\n\t// required\n\tvalues.Add(\"Class\", r.Class)\n\tvalues.Add(\"SearchType\", r.SearchType)\n\n\t// optional\n\toptionalString := OptionalStringValue(values)\n\toptionalString(\"Format\", r.Format)\n\toptionalString(\"Select\", r.Select)\n\toptionalString(\"Payload\", r.Payload)\n\toptionalString(\"Query\", r.Query)\n\toptionalString(\"QueryType\", r.QueryType)\n\toptionalString(\"RestrictedIndicator\", r.RestrictedIndicator)\n\n\toptionalInt := OptionalIntValue(values)\n\tif r.Count > 0 {\n\t\toptionalInt(\"Count\", r.Count)\n\t}\n\tif r.Offset > 0 {\n\t\toptionalInt(\"Offset\", r.Offset)\n\t}\n\tif r.StandardNames > 0 {\n\t\toptionalInt(\"StandardNames\", r.StandardNames)\n\t}\n\t// limit is unique in that it can send a value of \"NONE\"\n\tswitch {\n\tcase r.Limit > 0:\n\t\toptionalInt(\"Limit\", r.Limit)\n\tcase r.Limit < 0:\n\t\tvalues.Add(\"Limit\", \"NONE\")\n\t}\n\n\tmethod := \"GET\"\n\tif r.HTTPMethod != \"\" {\n\t\tmethod = r.HTTPMethod\n\t}\n\n\turl.RawQuery = values.Encode()\n\n\treturn http.NewRequest(method, url.String(), nil)\n}", "func NewSearchRequest(src string, budget uint64, keywords []string) *SearchRequest {\n\treturn &SearchRequest{\n\t\tOrigin: src,\n\t\tBudget: budget,\n\t\tKeywords: keywords,\n\t}\n}", "func (c Client) Search(opts SearchInitOpts) SearchClient {\n\tvar searchClient SearchClient\n\tsearchClient.Client = c\n\tsearchClient.PerPage = 25\n\n\tif opts.FilterID > 0 {\n\t\tsearchClient.FilterID = opts.FilterID\n\t}\n\n\tif len(c.Key) > 0 {\n\t\tsearchClient.Key = c.Key\n\t}\n\n\tif len(opts.Key) > 0 {\n\t\tsearchClient.Key = opts.Key\n\t}\n\n\tif 
opts.PerPage > 0 {\n\t\tsearchClient.PerPage = opts.PerPage\n\t}\n\n\tif len(opts.SortDirection) > 0 {\n\t\tsearchClient.SortDirection = opts.SortDirection\n\t}\n\n\tif len(opts.SortField) > 0 {\n\t\tsearchClient.SortField = opts.SortField\n\t}\n\n\treturn searchClient\n}", "func newSearch(inst *Instagram) *Search {\n\tsearch := &Search{\n\t\tinst: inst,\n\t}\n\treturn search\n}", "func (client *Client) newGETRequest(url string) (*http.Request, error) {\n\trequest, err := http.NewRequest(\n\t\thttp.MethodGet,\n\t\turl,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequest.Header = http.Header{}\n\trequest.Header.Set(\"Accept\", \"application/json\")\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\trequest.Header.Set(\"User-Agent\", client.userAgent)\n\n\treturn request, nil\n}", "func (o *GetSearchDefault) WithStatusCode(code int) *GetSearchDefault {\n\to._statusCode = code\n\treturn o\n}", "func (a SearchApi) GetVoicemailSearch(q64 string, expand []string) (*Voicemailssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/voicemail/search\"\n\tdefaultReturn := new(Voicemailssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetVoicemailSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add 
default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Voicemailssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Voicemailssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, 
err\n}", "func (client *Client) newGetRequest(urlString string) (*http.Request, error) {\n\trelevantUrl, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfinalUrl := client.baseURL.ResolveReference(relevantUrl)\n\n\treq, err := http.NewRequest(\"GET\", finalUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(apiKeyHeader, client.apiKey)\n\n\tif client.userAgent != \"\" {\n\t\treq.Header.Set(userAgentHeader, client.userAgent)\n\t}\n\n\treturn req, nil\n}", "func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {\n\tq := r.URL.Query()\n\n\tsetParamInURL(redirectTo, \"tab\", \"Everything\")\n\tsetParamInURL(redirectTo, \"search_scope\", \"MyInst_and_CI\")\n\n\tif q.Get(\"searchArg\") != \"\" {\n\t\tswitch q.Get(\"searchCode\") {\n\t\tcase \"TKEY^\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"TALL\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"NAME\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"author\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"CALL\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"callnumber.0\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"JALL\":\n\t\t\tredirectTo.Path = \"/discovery/jsearch\"\n\t\t\tsetParamInURL(redirectTo, \"tab\", \"jsearch_slot\")\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\tdefault:\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\t}\n\t} else if q.Get(\"SEARCH\") != \"\" {\n\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"SEARCH\")))\n\t}\n}", "func (s *scraper) GetSearch(ctx 
context.Context, isin string) (*http.Request, error) {\n\turl := fmt.Sprintf(\"https://www.fondidoc.it/Ricerca/Res?txt=%s&tipi=&societa=&pag=0&sort=&sortDir=&fldis=&nview=20&viewMode=anls&filters=&pir=0'\", isin)\n\treturn http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n}", "func New(t opentracing.Tracer, geoconn, rateconn *grpc.ClientConn) *Search {\n\treturn &Search{\n\t\tgeoClient: geo.NewGeoClient(geoconn),\n\t\trateClient: rate.NewRateClient(rateconn),\n\t\ttracer: t,\n\t}\n}", "func (c *Client) newGetRequest(URLStr string) (*http.Request, error) {\n\trel, err := url.Parse(URLStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := c.baseURL.ResolveReference(rel)\n\n\t// Create a new get request with the url provided\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set the api key on the request\n\treq.Header.Set(apiKeyHeader, c.apiKey)\n\n\t// If we specify a user agent we override the current one\n\tif c.userAgent != \"\" {\n\t\treq.Header.Set(userAgentHeader, c.userAgent)\n\t}\n\treturn req, nil\n}", "func NewSearchHandler(s registry.Searchable) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tp := &registry.SearchParams{}\n\t\tswitch r.Header.Get(\"Content-Type\") {\n\t\tcase \"application/json\":\n\t\t\tif err := json.NewDecoder(r.Body).Decode(p); err != nil {\n\t\t\t\tapiutil.WriteErrResponse(w, http.StatusBadRequest, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif p.Limit == 0 {\n\t\t\t\tp.Limit = defaultLimit\n\t\t\t}\n\t\tdefault:\n\t\t\t// read form values\n\t\t\tvar err error\n\t\t\tif p.Limit, err = apiutil.ReqParamInt(\"limit\", r); err != nil {\n\t\t\t\tp.Limit = defaultLimit\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tif p.Offset, err = apiutil.ReqParamInt(\"offset\", r); err != nil {\n\t\t\t\tp.Offset = defaultOffset\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tp.Q = r.FormValue(\"q\")\n\t\t}\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\tresults, err := 
s.Search(*p)\n\t\t\tif err != nil {\n\t\t\t\tapiutil.WriteErrResponse(w, http.StatusBadRequest, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tapiutil.WriteResponse(w, results)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (a SearchApi) GetLocationsSearch(q64 string, expand []string) (*Locationssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/locations/search\"\n\tdefaultReturn := new(Locationssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetLocationsSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := 
[]string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Locationssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Locationssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGet(url string) *Request { return NewRequest(\"GET\", url) }", "func (r *Requester) newRequest(endpoint string) (*http.Request, error) {\n req, err := http.NewRequest(\"GET\", endpoint, nil)\n if err != nil {\n return nil, err\n }\n\tbearer := fmt.Sprintf(\"Bearer %s\", r.bearer)\n req.Header.Add(\"Authorization\", bearer)\n\treq.Header.Add(\"Ocp-Apim-Subscription-Key\", apimKey)\n req.Header.Set(\"User-Agent\", \"hackacraic\")\n\treturn req, nil\n}", "func (r *SearchREST) New() runtime.Object {\n\treturn &searchapis.Search{}\n}", "func NewSearch() *Search {\n\ts := &Search{}\n\tdefaults.SetDefaults(s)\n\treturn s\n}", "func NewSearchApi() *SearchApi {\n\tfmt.Sprintf(strings.Title(\"\"), \"\")\n\tconfig := 
GetDefaultConfiguration()\n\treturn &SearchApi{\n\t\tConfiguration: config,\n\t}\n}", "func NewGetIndexSearchOK() *GetIndexSearchOK {\n\n\treturn &GetIndexSearchOK{}\n}", "func (a SearchApi) GetDocumentationSearch(q64 string) (*Documentationsearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/documentation/search\"\n\tdefaultReturn := new(Documentationsearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetDocumentationSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept 
header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Documentationsearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Documentationsearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func newSearchService(sling *sling.Sling) *SearchService {\n\treturn &SearchService{\n\t\tsling: sling.Path(\"search/\"),\n\t}\n}", "func creatAPIMSearchHTTPRequest(endpoint, resourceName string) (*client.HTTPRequest, error) {\n\taT, err := tokenManager.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := client.CreateHTTPGETRequest(aT, endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq := url.Values{}\n\tq.Add(\"query\", resourceName)\n\treq.HTTPRequest().URL.RawQuery = q.Encode()\n\treturn req, err\n}", "func NewHeaders()(*Headers) {\n m := &Headers{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (client *HearthstoneAPI) newCardCollectionSearch() *cardCollectionSearch {\n\treturn &cardCollectionSearch{\n\t\turl: client.apiURL,\n\t\tlocale: client.locale,\n\t\toptionalString: make(map[string]string),\n\t\toptionalInt: make(map[string]int),\n\t}\n}", "func 
(c *baseClient) New() *baseClient {\n\t// Copy headers\n\theader := make(http.Header)\n\tfor k, v := range c.header {\n\t\theader[k] = v\n\t}\n\n\treturn &baseClient{\n\t\thttpClient: c.httpClient,\n\t\tmethod: c.method,\n\t\turl: c.url,\n\t\theader: header,\n\t}\n}", "func (c *Client) newRequest(url string) (*http.Request, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"user-key\", c.key)\n\treq.Header.Add(\"Accept\", \"application/json\")\n\n\treturn req, nil\n}", "func (a *Client) CreateSearch(params *CreateSearchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateSearchOK, *CreateSearchNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateSearchParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"CreateSearch\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/api/v1/orgs/{owner}/searches\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &CreateSearchReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *CreateSearchOK:\n\t\treturn value, nil, nil\n\tcase *CreateSearchNoContent:\n\t\treturn nil, value, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*CreateSearchDefault)\n\treturn nil, nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func NewGetOK() *GetOK {\n\treturn &GetOK{}\n}", "func (m *GraphBaseServiceClient) 
Search()(*i286f3babd79fe9ec3b0f52b6ed5910842c0adaeff02be1843d0e01c56d9ba6d9.SearchRequestBuilder) {\n return i286f3babd79fe9ec3b0f52b6ed5910842c0adaeff02be1843d0e01c56d9ba6d9.NewSearchRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Search()(*i286f3babd79fe9ec3b0f52b6ed5910842c0adaeff02be1843d0e01c56d9ba6d9.SearchRequestBuilder) {\n return i286f3babd79fe9ec3b0f52b6ed5910842c0adaeff02be1843d0e01c56d9ba6d9.NewSearchRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (s *HighAvailabilityService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func newRequest(method, url string, body io.Reader, headers http.Header) (*http.Request, error) {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor key, value := range headers {\n\t\treq.Header.Add(key, value[0])\n\t}\n\treturn req, nil\n}", "func (c *Client) newRequest(method, path string, v interface{}, ctype string) (req *http.Request, err error) {\n\t// Build request JSON.\n\tbody, err := writeJson(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err = http.NewRequest(method, c.pathToEndPoint(path), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Kii-AppID\", c.AppId)\n\treq.Header.Add(\"X-Kii-AppKey\", c.AppKey)\n\tif ctype != \"\" {\n\t\treq.Header.Add(\"Content-Type\", ctype)\n\t}\n\tif c.Authorization != \"\" {\n\t\treq.Header.Add(\"Authorization\", c.Authorization)\n\t}\n\treturn\n}", "func (a SearchApi) GetDocumentationGknSearch(q64 string) (*Gkndocumentationsearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/documentation/gkn/search\"\n\tdefaultReturn := new(Gkndocumentationsearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to 
you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetDocumentationGknSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Gkndocumentationsearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but 
do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Gkndocumentationsearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func (c *Client) newRequest(method, urlStr string, body io.Reader) (*http.Request, error) {\n\treq, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"api-name\", c.apiName)\n\treq.Header.Set(\"api-key\", c.apiKey)\n\treq.Header.Set(c.userHeader, c.user)\n\treturn req, nil\n}", "func (c *Client) newRequest(ctx context.Context, method, url string, body io.Reader) (*http.Request, error) {\n\t// Build new request with base URL.\n\treq, err := http.NewRequest(method, c.URL+url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set API key in header.\n\tif user := wtf.UserFromContext(ctx); user != nil && user.APIKey != \"\" {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+user.APIKey)\n\t}\n\n\t// Default to JSON format.\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\n\treturn req, nil\n}", "func (c *Client) Search(ctx context.Context, searchPerson *Person) (*Response, error) {\n\n\t// Do we meet the minimum requirements for searching?\n\tif !SearchMeetsMinimumCriteria(searchPerson) {\n\t\treturn nil, ErrDoesNotMeetMinimumCriteria\n\t}\n\n\t// Start the post data\n\tpostData := url.Values{}\n\n\t// Add the API key (always - API is required by default)\n\tpostData.Add(fieldAPIKey, c.options.apiKey)\n\n\t// Option for pretty response\n\tif !c.options.searchOptions.Search.Pretty {\n\t\tpostData.Add(fieldPretty, valueFalse)\n\t}\n\n\t// Should we show sources?\n\tif 
c.options.searchOptions.Search.ShowSources != ShowSourcesNone {\n\t\tpostData.Add(fieldShowSources, string(c.options.searchOptions.Search.ShowSources))\n\t}\n\n\t// Add match requirements?\n\tif c.options.searchOptions.Search.MatchRequirements != MatchRequirementsNone {\n\t\tpostData.Add(fieldMatchRequirements, string(c.options.searchOptions.Search.MatchRequirements))\n\t}\n\n\t// Add source category requirements?\n\tif c.options.searchOptions.Search.SourceCategoryRequirements != SourceCategoryRequirementsNone {\n\t\tpostData.Add(fieldSourceCategoryRequirements, string(c.options.searchOptions.Search.SourceCategoryRequirements))\n\t}\n\n\t// Custom minimum match\n\tif c.options.searchOptions.Search.MinimumMatch != MinimumMatch {\n\t\tpostData.Add(fieldMinimumMatch, fmt.Sprintf(\"%v\", c.options.searchOptions.Search.MinimumMatch))\n\t}\n\n\t// Set the \"hide sponsors\" flag (default is false)\n\tif c.options.searchOptions.Search.HideSponsored {\n\t\tpostData.Add(fieldHideSponsored, valueTrue)\n\t}\n\n\t// Set the \"infer persons\" flag (default is false)\n\tif c.options.searchOptions.Search.InferPersons {\n\t\tpostData.Add(fieldInferPersons, valueTrue)\n\t}\n\n\t// Ask for the top match?\n\tif c.options.searchOptions.Search.TopMatch {\n\t\tpostData.Add(fieldTopMatch, valueTrue)\n\t}\n\n\t// Set the live feeds flag (default is true)\n\tif !c.options.searchOptions.Search.LiveFeeds {\n\t\tpostData.Add(fieldLiveFeeds, valueFalse)\n\t}\n\n\t// Parse the search object\n\tpersonJSON, err := json.Marshal(searchPerson)\n\tif err != nil { // This should NEVER error out since the struct is being generated\n\t\treturn nil, err\n\t}\n\n\t// Add the person to the request\n\tpostData.Add(fieldPerson, string(personJSON))\n\n\t// Fire the request\n\tvar response *Response\n\tresponse, err = httpRequest(ctx, c, searchAPIEndpoint, &postData)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(response.Error) > 0 {\n\t\treturn nil, errors.New(response.Error)\n\t}\n\treturn 
response, nil\n}", "func (a SearchApi) GetGroupsSearch(q64 string, expand []string) (*Groupssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/groups/search\"\n\tdefaultReturn := new(Groupssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetGroupsSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType 
!= \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Groupssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Groupssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func SearchOne(term string) Params {\n\treturn Params{make(url.Values), SearchURL}.Country(CN).Term(term)\n}", "func getSearch(searchString string, params map[string]string) {\n\tnetwork.InitSearch(params)\n\n\tresults := network.GetSearch(searchString)\n\tview.RenderTable(results)\n}", "func (a SearchApi) GetSearchSuggest(q64 string, expand []string, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/search/suggest\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling 
SearchApi->GetSearchSuggest\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, 
httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetIPAMSearchOK() *GetIPAMSearchOK {\n\treturn &GetIPAMSearchOK{}\n}", "func (api *NominatimAPI) buildSearchURL(req *NominatimSearchRequest) (string, error) {\n\turls := fmt.Sprintf(\"%s%s/search.php\", api.c.BaseURL(), NominatimPathPrefix)\n\tu, err := url.Parse(urls)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Add key and other parameters to the query string\n\tq := u.Query()\n\tq.Set(\"format\", \"json\")\n\tif req.Query != \"\" {\n\t\tq.Set(\"q\", req.Query)\n\t} else {\n\t\tif req.Street != \"\" {\n\t\t\tq.Set(\"street\", req.Street)\n\t\t}\n\t\tif req.City != \"\" {\n\t\t\tq.Set(\"city\", req.City)\n\t\t}\n\t\tif req.County != \"\" {\n\t\t\tq.Set(\"county\", req.County)\n\t\t}\n\t\tif req.State != \"\" {\n\t\t\tq.Set(\"state\", req.State)\n\t\t}\n\t\tif req.Country != \"\" {\n\t\t\tq.Set(\"country\", req.Country)\n\t\t}\n\t\tif req.PostalCode != \"\" {\n\t\t\tq.Set(\"postalcode\", req.PostalCode)\n\t\t}\n\t}\n\tq.Set(\"addressdetails\", \"1\")\n\tif req.Limit > 0 {\n\t\tq.Set(\"limit\", fmt.Sprintf(\"%d\", req.Limit))\n\t}\n\tif len(req.CountryCodes) > 0 {\n\t\tq.Set(\"countrycodes\", strings.Join(req.CountryCodes, \",\"))\n\t}\n\tif len(req.ViewBox) == 4 {\n\t\tq.Set(\"viewbox\", fmt.Sprintf(\"%f,%f,%f,%f\", req.ViewBox[0], req.ViewBox[1], req.ViewBox[2], req.ViewBox[3]))\n\t}\n\tif len(req.ExcludePlaceIds) > 0 {\n\t\tq.Set(\"exclude_place_ids\", strings.Join(req.ExcludePlaceIds, \",\"))\n\t}\n\tif req.Bounded 
!= nil {\n\t\tif *req.Bounded {\n\t\t\tq.Set(\"bounded\", \"1\")\n\t\t} else {\n\t\t\tq.Set(\"bounded\", \"0\")\n\t\t}\n\t}\n\t// TODO(oe): routewidth\n\tif req.RouteWidth != nil {\n\t\tq.Set(\"routewidth\", fmt.Sprintf(\"%f\", *req.RouteWidth))\n\t}\n\tif req.OSMType != \"\" {\n\t\tq.Set(\"osm_type\", req.OSMType)\n\t}\n\tif req.OSMId != \"\" {\n\t\tq.Set(\"osm_id\", req.OSMId)\n\t}\n\n\t// No key here!\n\tu.RawQuery = q.Encode()\n\treturn u.String(), nil\n}", "func New(c *http.Client, r regions.Region, l locale.Locale, k, v string) (*Client, error) {\n\tif \"\" == k {\n\t\treturn nil, errors.ErrNoKeySupplied\n\t}\n\n\tif \"\" == v {\n\t\treturn nil, errors.ErrNoVersionSupplied\n\t}\n\n\tac := &Client{\n\t\tclient: &http.Client{Timeout: (10 * time.Second)},\n\t\tlocale: locale.AmericanEnglish,\n\t\tregion: regions.US,\n\t\tuserAgent: \"GoBattleNet/\" + v,\n\t\tkey: k,\n\t}\n\n\tif r.Int() > 5 {\n\t\treturn nil, errors.ErrUnsupportedArgument\n\t}\n\tif nil != c {\n\t\tac.client = c\n\t}\n\tif ac.locale != l {\n\t\tac.locale = l\n\t}\n\tif ac.region != r {\n\t\tac.region = r\n\t}\n\n\treturn ac, nil\n}", "func (qiwi *PersonalAPI) newRequest(apiKey, method, spath string, data map[string]interface{}) (req *http.Request, err error) {\n\n\tvar path = APIURL + spath\n\n\tvar body io.Reader\n\n\tif len(data) > 0 {\n\n\t\ts, err := json.Marshal(data)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbody = bytes.NewBuffer(s)\n\n\t}\n\n\treq, err = http.NewRequest(method, path, body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+apiKey)\n\n\treturn req, err\n}", "func (a SearchApi) PostSearch(body Searchrequest, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"POST\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + 
\"/api/v2/search\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'body' is set\n\tif &body == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'body' when calling SearchApi->PostSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif 
localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tpostBody = &body\n\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewSearch() *Search {\n\treturn &Search{}\n}", "func newHandleGetOrHeader(\n\tcore core.Core,\n) handleGetOrHeader {\n\treturn _handleGetOrHeader{\n\t\tcore: core,\n\t\thttp: ihttp.New(),\n\t}\n}", "func (a *Client) GetSearch(params *GetSearchParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*GetSearchOK, *GetSearchNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetSearchParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetSearch\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/orgs/{owner}/searches/{uuid}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetSearchReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetSearchOK:\n\t\treturn value, nil, nil\n\tcase *GetSearchNoContent:\n\t\treturn nil, value, 
nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*GetSearchDefault)\n\treturn nil, nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func CreateGetFaceSearchUserRequest() (request *GetFaceSearchUserRequest) {\n\trequest = &GetFaceSearchUserRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"imm\", \"2017-09-06\", \"GetFaceSearchUser\", \"imm\", \"openAPI\")\n\treturn\n}", "func (g *GoFlickr) newRequest(apiMethod string) ApiRequest {\n\n\treq := ApiRequest{\n\t\tMethodName: apiMethod,\n\t}\n\treq.addParam(\"api_key\", g.ApiKey)\n\treq.addParam(\"format\", \"json\")\n\treq.addParam(\"nojsoncallback\", \"1\")\n\treturn req\n\n}", "func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func FakeSearch(w http.ResponseWriter, r *http.Request) {\n\tLogRequest(r, \"attack\")\n\tresponse := fmt.Sprintf(`\n\t{\n \"took\" : 6,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 6,\n \"successful\" : 6,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \".kibana\",\n \"_type\" : \"index-pattern\",\n \"_id\" : \"logstash-*\",\n \"_score\" : 1.0,\n 
\"_source\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\",\"customFormats\":\"{}\",\"fields\":\"[{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"host\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_source\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"message.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_index\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@version\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"message\\\",\\\"count\\\":0},{\\\"type\\\":\\\"date\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@timestamp\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_type\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_id\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"host.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"geo_point\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"geoip.location\\\",\\\"count\\\":0}]\"}\n }]\n }\n }`)\n\tWriteResponse(w, response)\n\treturn\n}", "func (self *SearchService) Search(params *SearchRequest) (*Search, *http.Response, error) {\n\tsearch := new(Search)\n\tapiError := new(APIError)\n\n\tresp, err := self.api.New().QueryStruct(params).Receive(search, apiError)\n\treturn search, resp, relevantError(err, *apiError)\n}", "func (a SearchApi) GetUsersSearch(q64 string, 
expand []string, integrationPresenceSource string) (*Userssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/users/search\"\n\tdefaultReturn := new(Userssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetUsersSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"integrationPresenceSource\"] = a.Configuration.APIClient.ParameterToString(integrationPresenceSource, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := 
a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Userssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Userssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func MetadataSearch(w http.ResponseWriter, r *http.Request) *appError {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n decoder := json.NewDecoder(r.Body)\n var query Query\n err = decoder.Decode(&query)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n path := \"/?query=\" + query.Query\n if query.Marker != \"\" {\n path += \"&marker=\" + query.Marker\n }\n if query.MaxKeys != \"\" {\n path += \"&max-keys=\" + query.MaxKeys\n }\n if query.SortedBy != \"\" {\n path += \"&sorted=\" + query.SortedBy\n }\n if query.ReturnAllMetadata {\n path += \"&attributes=ALL\"\n }\n bucketQueryResponse, err := s3Request(s3, query.Bucket, \"GET\", path, 
make(map[string][]string), \"\")\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n if bucketQueryResponse.Code == 200 {\n bucketQueryResult := &BucketQueryResult{}\n xml.NewDecoder(strings.NewReader(bucketQueryResponse.Body)).Decode(bucketQueryResult)\n // Generate a shared URL for each object returned by the metadata search\n if len(bucketQueryResult.EntryLists) > 0 {\n expires := time.Now().Add(time.Second*24*3600)\n for i, item := range bucketQueryResult.EntryLists {\n if item.ObjectName[len(item.ObjectName)-1:] != \"/\" {\n headers := make(map[string][]string)\n preparedS3Request, _ := prepareS3Request(s3, query.Bucket, \"GET\", query.Bucket + \"/\" + item.ObjectName + \"?Expires=\" + strconv.FormatInt(expires.Unix(), 10), headers, true)\n values := url.Values{}\n values = preparedS3Request.Params\n bucketQueryResult.EntryLists[i].Url = strings.Split(preparedS3Request.Url, \"?\")[0] + \"?\" + values.Encode()\n }\n }\n }\n rendering.JSON(w, http.StatusOK, bucketQueryResult)\n } else {\n return &appError{err: err, status: http.StatusInternalServerError, xml: bucketQueryResponse.Body}\n }\n\n return nil\n}", "func (conn Connection) newRequest(method, cmd string, body io.Reader) *http.Request {\n\treq, err := http.NewRequest(method, conn.ServiceURL+cmd, body)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Coulnd't generate HTTP request - %s\\n\", err.Error()))\n\t}\n\n\tfor k, v := range conn.Headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\treturn req\n}", "func NewGetConstructorOK() *GetConstructorOK {\n\treturn &GetConstructorOK{}\n}", "func (l License) AsSearchResponse() (*SearchResponse, bool) {\n\treturn nil, false\n}", "func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewGetPackageSearchParams() *GetPackageSearchParams {\n\tvar ()\n\treturn 
&GetPackageSearchParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (i Intangible) AsSearchResponse() (*SearchResponse, bool) {\n\treturn nil, false\n}", "func New() *Request {\n\treturn &Request{\n\t\tclient: &http.Client{Timeout: time.Second * 3},\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}", "func (s *VirtualhostsService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func (rb ResponseBase) AsSearchResponse() (*SearchResponse, bool) {\n\treturn nil, false\n}", "func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func buildSearchInput(qs url.Values) (*hub.SearchPackageInput, error) {\n\t// Limit\n\tvar limit int\n\tif qs.Get(\"limit\") != \"\" {\n\t\tvar err error\n\t\tlimit, err = strconv.Atoi(qs.Get(\"limit\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid limit: %s\", qs.Get(\"limit\"))\n\t\t}\n\t}\n\n\t// Offset\n\tvar offset int\n\tif qs.Get(\"offset\") != \"\" {\n\t\tvar err error\n\t\toffset, err = strconv.Atoi(qs.Get(\"offset\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid offset: %s\", qs.Get(\"offset\"))\n\t\t}\n\t}\n\n\t// Facets\n\tvar facets bool\n\tif qs.Get(\"facets\") != \"\" {\n\t\tvar err error\n\t\tfacets, err = strconv.ParseBool(qs.Get(\"facets\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid facets: %s\", qs.Get(\"facets\"))\n\t\t}\n\t}\n\n\t// Kinds\n\tkinds := make([]hub.PackageKind, 0, len(qs[\"kind\"]))\n\tfor _, kindStr := range qs[\"kind\"] {\n\t\tkind, err := strconv.Atoi(kindStr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid kind: %s\", kindStr)\n\t\t}\n\t\tkinds = 
append(kinds, hub.PackageKind(kind))\n\t}\n\n\t// Include deprecated packages\n\tvar deprecated bool\n\tif qs.Get(\"deprecated\") != \"\" {\n\t\tvar err error\n\t\tdeprecated, err = strconv.ParseBool(qs.Get(\"deprecated\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid deprecated: %s\", qs.Get(\"deprecated\"))\n\t\t}\n\t}\n\n\treturn &hub.SearchPackageInput{\n\t\tLimit: limit,\n\t\tOffset: offset,\n\t\tFacets: facets,\n\t\tText: qs.Get(\"text\"),\n\t\tPackageKinds: kinds,\n\t\tUsers: qs[\"user\"],\n\t\tOrgs: qs[\"org\"],\n\t\tChartRepositories: qs[\"repo\"],\n\t\tDeprecated: deprecated,\n\t}, nil\n}", "func (cs CivicStructure) AsSearchResponse() (*SearchResponse, bool) {\n\treturn nil, false\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewSearchAclsOK() *SearchAclsOK {\n\treturn &SearchAclsOK{}\n}", "func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (fe FoodEstablishment) AsSearchResponse() (*SearchResponse, bool) {\n\treturn nil, false\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s *HsmProvidersService) newRequest(op *request.Operation, params, data 
interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func parseSearchOptions() (*searchOptions, error) {\n\toptions := &searchOptions{}\n\toptions.projectID = os.Getenv(\"PROJECT_ID\")\n\tif options.projectID == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid configuration, PROJECT_ID variable not found\")\n\t}\n\toptions.oauthClientID = os.Getenv(\"OAUTH_CLIENT_ID\")\n\toptions.serviceName = os.Getenv(\"SERVICE_NAME\")\n\toptions.serviceGkeNamespace = os.Getenv(\"SERVICE_GKE_NAMESPACE\")\n\tif options.serviceGkeNamespace == \"\" {\n\t\toptions.serviceGkeNamespace = \"default\"\n\t}\n\toptions.serviceGkeName = os.Getenv(\"SERVICE_GKE_NAME\")\n\toptions.serviceGkePortName = os.Getenv(\"SERVICE_GKE_PORT_NAME\")\n\tportNumber := os.Getenv(\"SERVICE_GKE_PORT_NUMBER\")\n\tif portNumber != \"\" {\n\t\tportNumberValue, err := strconv.Atoi(portNumber)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid configuration, SERVICE_GKE_PORT_NUMBER must a number, found %q\", portNumber)\n\t\t}\n\t\toptions.serviceGkePortNumber = portNumberValue\n\t}\n\n\t// vaidate search options - at least one search options\n\tif options.oauthClientID == \"\" &&\n\t\toptions.serviceName == \"\" &&\n\t\toptions.serviceGkeName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid configuration, at least one search criteria must be specified. 
Set one of the variables: OAUTH_CLIENT_ID, SERVICE_NAME, SERVICE_GKE_NAME\")\n\t}\n\n\t// validate search options - service number with service name\n\tif options.serviceGkeName == \"\" && options.serviceGkePortName != \"\" {\n\t\tklog.Warning(\"[warning] SERVICE_GKE_PORT_NAME without SERVICE_GKE_NAME, value will be ignored\")\n\t}\n\tif options.serviceGkeName == \"\" && options.serviceGkePortNumber != 0 {\n\t\tklog.Warning(\"[warning] SERVICE_GKE_PORT_NUMBER without SERVICE_GKE_NAME, value will be ignored\")\n\t}\n\n\tklog.Infof(\"search options: %+v\", options)\n\treturn options, nil\n}", "func NewSearchAPI(client *scw.Client) *SearchAPI {\n\treturn &SearchAPI{\n\t\tclient: client,\n\t}\n}", "func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func readFEDSearchRequest(u *url.URL) fedSearchRequest {\n\treturn fedSearchRequest{\n\t\tName: strings.ToUpper(strings.TrimSpace(u.Query().Get(\"name\"))),\n\t\tRoutingNumber: strings.ToUpper(strings.TrimSpace(u.Query().Get(\"routingNumber\"))),\n\t\tCity: strings.ToUpper(strings.TrimSpace(u.Query().Get(\"city\"))),\n\t\tState: strings.ToUpper(strings.TrimSpace(u.Query().Get(\"state\"))),\n\t\tPostalCode: 
strings.ToUpper(strings.TrimSpace(u.Query().Get(\"postalCode\"))),\n\t}\n}", "func newRequest(method, url string, body string) *http.Request {\n\treq, err := http.NewRequest(method, url, strings.NewReader(body))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"X-API-Token\", \"token1\")\n\treturn req\n}", "func NewSearchRequestFromFormat(format string, a ...interface{}) SearchRequest {\n\treturn SearchRequest{\n\t\tFilter: fmt.Sprintf(format, a...),\n\t}\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}" ]
[ "0.62544066", "0.6075037", "0.5774859", "0.57147956", "0.5699216", "0.5533846", "0.54850787", "0.54613936", "0.5431669", "0.542688", "0.53875977", "0.53590226", "0.5358159", "0.53361887", "0.5309192", "0.52980083", "0.5287437", "0.52792513", "0.5272613", "0.52666026", "0.5264822", "0.52438796", "0.52345276", "0.52158356", "0.5213163", "0.5209449", "0.5207047", "0.5202807", "0.5199571", "0.5189245", "0.5174067", "0.51606834", "0.51546085", "0.51483893", "0.512689", "0.5113567", "0.5107403", "0.5107403", "0.5093271", "0.50909346", "0.5083708", "0.50667787", "0.5063594", "0.5053096", "0.5045592", "0.5043429", "0.50430304", "0.5041855", "0.5037699", "0.50350565", "0.50268143", "0.50239575", "0.50224", "0.49866933", "0.49680746", "0.4966825", "0.4962817", "0.4959056", "0.49525806", "0.49507165", "0.49427634", "0.4942214", "0.4929343", "0.4924367", "0.49189425", "0.49161863", "0.49118167", "0.48815146", "0.4878074", "0.48745564", "0.48713058", "0.48678973", "0.4864802", "0.4863038", "0.48574507", "0.48574507", "0.48539123", "0.4852656", "0.485168", "0.485168", "0.48490238", "0.4842176", "0.48408207", "0.48401105", "0.48401105", "0.48374614", "0.483233", "0.4831311", "0.48285457", "0.48261264", "0.48235792", "0.48204064", "0.48158786", "0.48158786", "0.4813345", "0.4808816", "0.47974738", "0.4789058", "0.4789058" ]
0.61387986
1
WithPayload adds the payload to the get search o k response
func (o *GetSearchOK) WithPayload(payload *models.User) *GetSearchOK { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchDefault) WithPayload(payload *models.Error) *GetSearchDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchOK) SetPayload(payload *GetIndexSearchOKBody) {\n\to.Payload = payload\n}", "func (o *GetSearchDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetSearchOK) SetPayload(payload *models.User) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchInternalServerError) SetPayload(payload *GetIndexSearchInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *SearchTournamentsOK) WithPayload(payload []*models.Tournament) *SearchTournamentsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingsGetOK) WithPayload(payload *models.ThingGetResponse) *WeaviateThingsGetOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetLegacyUserSearchKeywordOK) WithPayload(payload *models.SearchUsersByKeyword) *GetLegacyUserSearchKeywordOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchNotFound) SetPayload(payload *GetIndexSearchNotFoundBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchInternalServerError) WithPayload(payload *GetIndexSearchInternalServerErrorBody) *GetIndexSearchInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetPaymentRequestEDINotFound) WithPayload(payload *supportmessages.ClientError) *GetPaymentRequestEDINotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetLegacyUserSearchKeywordOK) SetPayload(payload *models.SearchUsersByKeyword) {\n\to.Payload = payload\n}", "func (o *GetPrefilterOK) WithPayload(payload *models.Prefilter) *GetPrefilterOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetfeedsOK) WithPayload(payload []string) *GetfeedsOK {\n\to.Payload = payload\n\treturn o\n}", "func TestSearchCorrectPayload(t *testing.T) {\n\tdb := DBSession()\n\tdefer db.Close() // clean up when we’re done\n\n\tSetupData(db)\n\ta := assert.New(t)\n\trouter := 
mux.NewRouter()\n\trouter.HandleFunc(\"/cb_service/contact_book/search/{query}\", http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\t// save it in the request context\n\t\tctx := context.WithValue(req.Context(), dbSessionKey, db)\n\t\treq.Header.Set(\"Content-Type\", contentType)\n\t\treq.Header.Set(\"Authorization\", encodedAuthToken)\n\t\treq = req.WithContext(ctx)\n\t\tsearchH(res, req)\n\t}))\n\n\tserver := httptest.NewServer(router)\n\tdefer server.Close()\n\treqURL := server.URL + \"/cb_service/contact_book/search/Yog\"\n\tres, err := http.Get(reqURL)\n\tif err != nil {\n\t\tl.Printf(\"Cannot Make Request :%v \", err)\n\t\ta.Error(err)\n\t}\n\n\ta.Equal(res.StatusCode, http.StatusOK)\n\tClearData(db)\n}", "func Search(w http.ResponseWriter, r *http.Request) {\n\tq := r.URL.Query()\n\n\tif keywords, ok := q[\"keyword\"]; ok {\n\t\tsearch := keywords[0]\n\n\t\tproducts, err := lib.Search(search)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tbytes, err := helpers.JSONMarshal(products, true)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"%s\", bytes)\n\t}\n}", "func (o *GetIndexSearchBadRequest) SetPayload(payload *GetIndexSearchBadRequestBody) {\n\to.Payload = payload\n}", "func (o *GetTagOK) WithPayload(payload *models.Tag) *GetTagOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetApisOK) WithPayload(payload *models.APIMeta) *GetApisOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *FindRecommendationForLearningResourceOK) WithPayload(payload *models.Recommendations) *FindRecommendationForLearningResourceOK {\n\to.Payload = payload\n\treturn o\n}", "func FakeSearch(w http.ResponseWriter, r *http.Request) {\n\tLogRequest(r, \"attack\")\n\tresponse := fmt.Sprintf(`\n\t{\n \"took\" : 6,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 6,\n \"successful\" : 6,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \".kibana\",\n 
\"_type\" : \"index-pattern\",\n \"_id\" : \"logstash-*\",\n \"_score\" : 1.0,\n \"_source\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\",\"customFormats\":\"{}\",\"fields\":\"[{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"host\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_source\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"message.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_index\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@version\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"message\\\",\\\"count\\\":0},{\\\"type\\\":\\\"date\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@timestamp\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_type\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_id\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"host.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"geo_point\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"geoip.location\\\",\\\"count\\\":0}]\"}\n }]\n }\n }`)\n\tWriteResponse(w, response)\n\treturn\n}", "func (o *GetDocumentNotFound) WithPayload(payload *ghcmessages.Error) *GetDocumentNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetTournamentOK) WithPayload(payload *models.Tournament) *GetTournamentOK {\n\to.Payload = payload\n\treturn 
o\n}", "func (o *GetGistsOK) WithPayload(payload models.Gists) *GetGistsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetmoviesinfoDefault) WithPayload(payload *models.Error) *GetmoviesinfoDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetApisOK) SetPayload(payload *models.APIMeta) {\n\to.Payload = payload\n}", "func (o *GetAppsOK) SetPayload(payload *models.GetAppsOKBody) {\n\to.Payload = payload\n}", "func (o *GetDocumentOK) SetPayload(payload *ghcmessages.Document) {\n\to.Payload = payload\n}", "func (o *GetWhaleTranfersOK) WithPayload(payload []*models.OperationsRow) *GetWhaleTranfersOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingsGetOK) SetPayload(payload *models.ThingGetResponse) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoInternalServerError) WithPayload(payload *models.Error) *GetmoviesinfoInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (s *svc) Search(ctx context.Context, req *api.SearchRequest) (*api.SearchResponse, error) {\n\tvar resp api.SearchResponse\n\tresp.Results = &api.Series{\n\t\tKey: req.Key,\n\t}\n\n\telts, err := s.searcher.Search(req.Key, req.Oldest, req.Newest)\n\tswitch err.(type) {\n\tcase storage.KeyNotFound:\n\t\tresp.Status = api.SearchResponse_NOT_FOUND\n\t\treturn &resp, nil\n\tcase storage.InvalidSearch:\n\t\tresp.Status = api.SearchResponse_INVALID_ARGUMENTS\n\t\treturn &resp, nil\n\tcase nil:\n\tdefault:\n\t\treturn nil, err\n\t}\n\tfor _, elt := range elts {\n\t\tresp.Results.Elements = append(resp.Results.Elements, &elt)\n\t}\n\n\treturn &resp, nil\n}", "func (o *GetDocumentOK) WithPayload(payload *ghcmessages.Document) *GetDocumentOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetPrefilterOK) SetPayload(payload *models.Prefilter) {\n\to.Payload = payload\n}", "func (o *GetApisInternalServerError) WithPayload(payload *models.Error) *GetApisInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetDocumentNotFound) SetPayload(payload 
*ghcmessages.Error) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksOK) SetPayload(payload *models.GetNotebooksResponse) {\r\n\to.Payload = payload\r\n}", "func (o *ServiceInstanceLastOperationGetNotFound) WithPayload(payload *models.Error) *ServiceInstanceLastOperationGetNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetV1RdssOK) WithPayload(payload models.RDSS) *GetV1RdssOK {\n\to.Payload = payload\n\treturn o\n}", "func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {\n\tq := r.URL.Query()\n\n\tsetParamInURL(redirectTo, \"tab\", \"Everything\")\n\tsetParamInURL(redirectTo, \"search_scope\", \"MyInst_and_CI\")\n\n\tif q.Get(\"searchArg\") != \"\" {\n\t\tswitch q.Get(\"searchCode\") {\n\t\tcase \"TKEY^\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"TALL\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"NAME\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"author\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"CALL\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"callnumber.0\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"JALL\":\n\t\t\tredirectTo.Path = \"/discovery/jsearch\"\n\t\t\tsetParamInURL(redirectTo, \"tab\", \"jsearch_slot\")\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\tdefault:\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\t}\n\t} else if q.Get(\"SEARCH\") != \"\" {\n\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"SEARCH\")))\n\t}\n}", "func (o *GetServicesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetGistsOK) 
SetPayload(payload models.Gists) {\n\to.Payload = payload\n}", "func (o *GetDocumentInternalServerError) WithPayload(payload *ghcmessages.Error) *GetDocumentInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *ServiceInstanceLastOperationGetNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDetailOK) WithPayload(payload *models.Detail) *GetDetailOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetInteractionsOK) WithPayload(payload models.ConsoleInteractions) *GetInteractionsOK {\n\to.Payload = payload\n\treturn o\n}", "func productSearch(ctx *iris.Context) {\n\tkeyword := ctx.FormValue(\"keyword\")\n\tresponseGroup := string(ctx.FormValue(\"responseGroup\"))\n\tsearchIndex := string(ctx.FormValue(\"searchIndex\"))\n\tpageIndexStr := string(ctx.FormValue(\"pageIndex\"))\n\n\tpageIndex := 1\n\n\tif pageIndexConv, err := strconv.Atoi(pageIndexStr); err == nil {\n\t\tpageIndex = pageIndexConv\n\t}\n\n\tif string(keyword) == \"\" {\n\t\tctx.Error(\"Invalid Keyword\", 400)\n\t\treturn\n\t}\n\n\t//Images,ItemAttributes,Small,EditorialReview\n\n\tif string(responseGroup) == \"\" {\n\t\tresponseGroup = \"Images,ItemAttributes,Small,EditorialReview\"\n\t}\n\n\t// Search Index - Defaults to 'All' ( Case sensitive )\n\tif searchIndex == \"\" {\n\t\tsearchIndex = \"All\"\n\t}\n\n\tresponseGroup = strings.Replace(responseGroup, \" \", \"\", -1)\n\n\tapi := getAPIHandler()\n\tresult, err := api.ItemSearchByKeywordWithResponseGroupWithSearchIndex(string(keyword), string(responseGroup), searchIndex, pageIndex)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\txml := strings.NewReader(result)\n\n\tjson, err := xj.Convert(xml)\n\tif err != nil {\n\t\tpanic(\"That's embarrassing...\")\n\t}\n\n\t//fmt.Println(result)\n\tctx.SetHeader(\"Content-Type\", \"application/json\")\n\tctx.Write(json.String())\n}", "func (o *GetNamespacedNotebooksOK) WithPayload(payload *models.GetNotebooksResponse) *GetNamespacedNotebooksOK 
{\r\n\to.Payload = payload\r\n\treturn o\r\n}", "func (o *GetPracticesOK) WithPayload(payload *models.GotPractices) *GetPracticesOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetAppsInternalServerError) WithPayload(payload *models.Error) *GetAppsInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetPaymentRequestEDINotFound) SetPayload(payload *supportmessages.ClientError) {\n\to.Payload = payload\n}", "func (a SearchApi) GetSearch(q64 string, expand []string, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/search\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords 
\n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func (o *SearchTournamentsOK) SetPayload(payload []*models.Tournament) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoOK) SetPayload(payload *GetmoviesinfoOKBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchBadRequest) WithPayload(payload *GetIndexSearchBadRequestBody) *GetIndexSearchBadRequest {\n\to.Payload = payload\n\treturn o\n}", "func (o *ServiceInstanceLastOperationGetOK) 
SetPayload(payload *models.LastOperationResource) {\n\to.Payload = payload\n}", "func (o *WeaviateActionsPatchOK) WithPayload(payload *models.ActionGetResponse) *WeaviateActionsPatchOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetSectionOK) WithPayload(payload *models.SectionResponse) *GetSectionOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchOK) WithPayload(payload *GetIndexSearchOKBody) *GetIndexSearchOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ArtifactListerOK) WithPayload(payload []*weles.ArtifactInfo) *ArtifactListerOK {\n\to.Payload = payload\n\treturn o\n}", "func (c *Client) Search(ctx context.Context, searchPerson *Person) (*Response, error) {\n\n\t// Do we meet the minimum requirements for searching?\n\tif !SearchMeetsMinimumCriteria(searchPerson) {\n\t\treturn nil, ErrDoesNotMeetMinimumCriteria\n\t}\n\n\t// Start the post data\n\tpostData := url.Values{}\n\n\t// Add the API key (always - API is required by default)\n\tpostData.Add(fieldAPIKey, c.options.apiKey)\n\n\t// Option for pretty response\n\tif !c.options.searchOptions.Search.Pretty {\n\t\tpostData.Add(fieldPretty, valueFalse)\n\t}\n\n\t// Should we show sources?\n\tif c.options.searchOptions.Search.ShowSources != ShowSourcesNone {\n\t\tpostData.Add(fieldShowSources, string(c.options.searchOptions.Search.ShowSources))\n\t}\n\n\t// Add match requirements?\n\tif c.options.searchOptions.Search.MatchRequirements != MatchRequirementsNone {\n\t\tpostData.Add(fieldMatchRequirements, string(c.options.searchOptions.Search.MatchRequirements))\n\t}\n\n\t// Add source category requirements?\n\tif c.options.searchOptions.Search.SourceCategoryRequirements != SourceCategoryRequirementsNone {\n\t\tpostData.Add(fieldSourceCategoryRequirements, string(c.options.searchOptions.Search.SourceCategoryRequirements))\n\t}\n\n\t// Custom minimum match\n\tif c.options.searchOptions.Search.MinimumMatch != MinimumMatch {\n\t\tpostData.Add(fieldMinimumMatch, fmt.Sprintf(\"%v\", 
c.options.searchOptions.Search.MinimumMatch))\n\t}\n\n\t// Set the \"hide sponsors\" flag (default is false)\n\tif c.options.searchOptions.Search.HideSponsored {\n\t\tpostData.Add(fieldHideSponsored, valueTrue)\n\t}\n\n\t// Set the \"infer persons\" flag (default is false)\n\tif c.options.searchOptions.Search.InferPersons {\n\t\tpostData.Add(fieldInferPersons, valueTrue)\n\t}\n\n\t// Ask for the top match?\n\tif c.options.searchOptions.Search.TopMatch {\n\t\tpostData.Add(fieldTopMatch, valueTrue)\n\t}\n\n\t// Set the live feeds flag (default is true)\n\tif !c.options.searchOptions.Search.LiveFeeds {\n\t\tpostData.Add(fieldLiveFeeds, valueFalse)\n\t}\n\n\t// Parse the search object\n\tpersonJSON, err := json.Marshal(searchPerson)\n\tif err != nil { // This should NEVER error out since the struct is being generated\n\t\treturn nil, err\n\t}\n\n\t// Add the person to the request\n\tpostData.Add(fieldPerson, string(personJSON))\n\n\t// Fire the request\n\tvar response *Response\n\tresponse, err = httpRequest(ctx, c, searchAPIEndpoint, &postData)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(response.Error) > 0 {\n\t\treturn nil, errors.New(response.Error)\n\t}\n\treturn response, nil\n}", "func (o *GetmoviesinfoOK) WithPayload(payload *GetmoviesinfoOKBody) *GetmoviesinfoOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ArtifactListerNotFound) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *GetHealthzOK) WithPayload(payload string) *GetHealthzOK {\n\to.Payload = payload\n\treturn o\n}", "func (as *API) Search(ctx context.Context, req *pbreq.Search) (*pbresp.Results, error) {\n\tobjects, err := as.lens.KeywordSearch(req.Keywords)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar objs = make([]*pbresp.Object, len(objects))\n\tfor _, v := range objects {\n\t\tobjs = append(objs, &pbresp.Object{\n\t\t\tName: v.Name,\n\t\t\tMimeType: v.MetaData.MimeType,\n\t\t\tCategory: v.MetaData.Category,\n\t\t})\n\t}\n\n\treturn 
&pbresp.Results{\n\t\tObjects: objs,\n\t}, nil\n}", "func (o *GetSectionNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func searchHandler(w http.ResponseWriter, r *http.Request) {\r\n\tresultJson, _ := json.Marshal(grafanaItemList)\r\n\tfmt.Fprintf(w, string(resultJson))\r\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetSectionOK) SetPayload(payload *models.SectionResponse) {\n\to.Payload = payload\n}", "func (o *GetServicesOK) SetPayload(payload *models.ServicesWithStageInfo) {\n\to.Payload = payload\n}", "func searchSpecificStore(w http.ResponseWriter, req *http.Request) {\n\tvar sStore SpecificStore\n\t_ = json.NewDecoder(req.Body).Decode(&sStore)\n\tfmt.Println(\"$$$Buscando tienda con los parametros especificados\")\n\tvar store Store\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i].Department == sStore.Departament && array[i].Rating == sStore.Rating {\n\t\t\tfor j := 0; j < array[i].List.lenght; j++ {\n\t\t\t\ttempNode, _ := array[i].List.GetNodeAt(j)\n\t\t\t\ttempName := tempNode.data.Name\n\t\t\t\t//fmt.Println(tempName)\n\t\t\t\tif tempName == sStore.Name {\n\t\t\t\t\tstore = tempNode.data\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Println(\"$$$ Retornando datos obtenidos\")\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(store)\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func BuildAdminSearchPayload(stationAdminSearchQuery string, stationAdminSearchAuth string) (*station.AdminSearchPayload, error) {\n\tvar query string\n\t{\n\t\tquery = stationAdminSearchQuery\n\t}\n\tvar auth string\n\t{\n\t\tauth = stationAdminSearchAuth\n\t}\n\tv := &station.AdminSearchPayload{}\n\tv.Query = query\n\tv.Auth = auth\n\n\treturn v, nil\n}", "func (o *GetPaymentRequestEDIOK) WithPayload(payload 
*supportmessages.PaymentRequestEDI) *GetPaymentRequestEDIOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetInteractionsNotFound) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *ArtifactListerNotFound) WithPayload(payload *weles.ErrResponse) *ArtifactListerNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *DescribeOK) WithPayload(payload *models.DescribeStack) *DescribeOK {\n\to.Payload = payload\n\treturn o\n}", "func SearchMember(w http.ResponseWriter, r *http.Request) {\n\tvar data []SearchRequest\n var payload ResponseValues\n\n enc := json.NewEncoder(w)\n enc.SetIndent(\"\", \" \")\n\n // Decodes the request body\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, \"Invalid request data format\")\n return\n\t}\n\n // Validates the incoming request data\n validated := validateRequestData(data)\n if !validated {\n writeError(w, &enc, &payload, http.StatusBadRequest, \"Invalid request data format\")\n return\n }\n\n // Uses request body data to build the elastic query body\n\telasticQueryBody, err := buildElasticQuery(data)\n\tif err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n // Queries the elastic service\n payload, err = elasticService.QueryElasticService(elasticQueryBody)\n if err != nil {\n writeError(w, &enc, &payload, http.StatusBadRequest, payload.Error)\n return\n }\n\n w.Header().Set(\"Content-Type\", \"application/json\")\n w.WriteHeader(http.StatusOK)\n enc.Encode(payload)\n\n\treturn\n}", "func (o *GetDetailsOK) WithPayload(payload models.Details) *GetDetailsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetRepositoryInfoOK) SetPayload(payload *models.RepositoryInfo) {\n\to.Payload = payload\n}", "func (o *GetTagOK) WithPayload(payload *GetTagOKBody) *GetTagOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetModelNotFound) SetPayload(payload *restmodels.Error) 
{\n\to.Payload = payload\n}", "func (p *NoteStoreClient) GetSearch(ctx context.Context, authenticationToken string, guid GUID) (r *SavedSearch, err error) {\n var _args87 NoteStoreGetSearchArgs\n _args87.AuthenticationToken = authenticationToken\n _args87.GUID = guid\n var _result88 NoteStoreGetSearchResult\n if err = p.Client_().Call(ctx, \"getSearch\", &_args87, &_result88); err != nil {\n return\n }\n switch {\n case _result88.UserException!= nil:\n return r, _result88.UserException\n case _result88.SystemException!= nil:\n return r, _result88.SystemException\n case _result88.NotFoundException!= nil:\n return r, _result88.NotFoundException\n }\n\n return _result88.GetSuccess(), nil\n}", "func (o *ServiceInstanceLastOperationGetUnauthorized) WithPayload(payload *models.Error) *ServiceInstanceLastOperationGetUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetDistrictForSchoolNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *RetrieveCopyOK) WithPayload(payload models.OutputStream) *RetrieveCopyOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetEventsEventIDOK) SetPayload(payload *GetEventsEventIDOKBody) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksUnauthorized) WithPayload(payload *models.Error) *GetNamespacedNotebooksUnauthorized {\r\n\to.Payload = payload\r\n\treturn o\r\n}", "func (o *GetTweetsOK) WithPayload(payload *models.TweetDetailsListResponse) *GetTweetsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetProbeOK) WithPayload(payload *models.Response) *GetProbeOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *UpdateClusterNotFound) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (business *Business) Search(ctx context.Context, req *bs.BusinessRequest, rsp *bs.BusinessResponse) error {\n\tlog.Print(\"Received Business.Search request\")\n\tif 
len(req.Req.Q) == 0 {\n\t\treturn errors.BadRequest(\"business\", \"please enter mandatory fields\")\n\t}\n\n\td := &ds.DatastoreRequest{}\n\td.Req = req.Req\n\tres, err := dsClient.Search(context.TODO(), d)\n\tif nil != err {\n\t\tcommon.PrintError(err)\n\t}\n\n\tqRsp := &query.Response{}\n\n\tif res.Rsp == \"\" {\n\t\t//\t\tpost message to trigger external API request\n\t\treqBytes, err := common.EncByteArray(req.Req)\n\t\tif err != nil {\n\t\t\tcommon.PrintError(err)\n\t\t}\n\t\tcommon.Publish(\"sendExternalApiReq\", reqBytes)\n\t\t//\t\twait for response from external api\n\t\tqRsp.Response = <-msg\n\t\t//\t\tsame response in DB\n\t\tsaveReq := &ds.SaveRequest{}\n\t\tsaveReq.Req = req.Req\n\t\tsaveReq.Recipe = qRsp.Response\n\t\tdsClient.Save(context.TODO(), saveReq)\n\t} else {\n\t\tfmt.Println(\"got ds.search reply \", res)\n\t\tqRsp.Response = res.Rsp\n\t}\n\trsp.Rsp = qRsp\n\treturn nil\n}", "func (o *GetApisUnauthorized) WithPayload(payload *models.Error) *GetApisUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetReposOwnerRepoTagsOK) SetPayload(payload *models.Tags) {\n\to.Payload = payload\n}", "func (o *GetDocumentUnauthorized) WithPayload(payload *ghcmessages.Error) *GetDocumentUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *AddConsumptionNotFound) WithPayload(payload *models.ErrorResponse) *AddConsumptionNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetModelOK) SetPayload(payload *restmodels.Model) {\n\to.Payload = payload\n}", "func (req *QueryRequest) RunSearch(\n\tsearch esapi.Search,\n\to ...func(*esapi.SearchRequest),\n) (res *esapi.Response, err error) {\n\tvar b bytes.Buffer\n\terr = json.NewEncoder(&b).Encode(req.Map())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\topts := append([]func(*esapi.SearchRequest){search.WithBody(&b)}, o...)\n\n\treturn search(opts...)\n}", "func Search() *SearchDsl {\n\treturn &SearchDsl{\n\t\tsimplejson.New(),\n\t\tnil, nil, nil,\n\t}\n}" ]
[ "0.68638563", "0.6374733", "0.6226127", "0.6121761", "0.6096384", "0.57941866", "0.57626396", "0.5745161", "0.5615595", "0.55488575", "0.5514837", "0.54362196", "0.5394538", "0.53849316", "0.53676116", "0.5317164", "0.53123075", "0.527722", "0.52268726", "0.5221842", "0.52190834", "0.5169125", "0.51339453", "0.5126016", "0.511854", "0.51180494", "0.5092185", "0.50697666", "0.50654846", "0.50532883", "0.50467235", "0.5045855", "0.5045087", "0.50346583", "0.50327146", "0.50184906", "0.5013114", "0.5005508", "0.49998555", "0.49958283", "0.49920702", "0.49854752", "0.49754912", "0.4958013", "0.49462256", "0.49429366", "0.49392304", "0.49378034", "0.49359325", "0.49326545", "0.4927989", "0.49205756", "0.49166998", "0.4910476", "0.49055237", "0.49046153", "0.49024916", "0.49012408", "0.4886727", "0.48861977", "0.48859197", "0.4884237", "0.4880583", "0.48800597", "0.4869914", "0.48679894", "0.4865484", "0.48621538", "0.486163", "0.4848148", "0.4837231", "0.48324674", "0.48272395", "0.48270613", "0.4821689", "0.48199025", "0.48139784", "0.4813415", "0.48088914", "0.48075023", "0.48024926", "0.48023304", "0.4801051", "0.47996658", "0.47988057", "0.47951242", "0.4791674", "0.47846818", "0.47824222", "0.47815403", "0.47814146", "0.4779465", "0.47754583", "0.4773279", "0.4769621", "0.47675353", "0.47622287", "0.47587276", "0.47583517", "0.47557926" ]
0.7391278
0
SetPayload sets the payload to the get search o k response
func (o *GetSearchOK) SetPayload(payload *models.User) { o.Payload = payload }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchInternalServerError) SetPayload(payload *GetIndexSearchInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchOK) SetPayload(payload *GetIndexSearchOKBody) {\n\to.Payload = payload\n}", "func (o *GetLegacyUserSearchKeywordOK) SetPayload(payload *models.SearchUsersByKeyword) {\n\to.Payload = payload\n}", "func (o *GetPrefilterOK) SetPayload(payload *models.Prefilter) {\n\to.Payload = payload\n}", "func (o *GetApisOK) SetPayload(payload *models.APIMeta) {\n\to.Payload = payload\n}", "func (o *WeaviateThingsGetOK) SetPayload(payload *models.ThingGetResponse) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchNotFound) SetPayload(payload *GetIndexSearchNotFoundBody) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchBadRequest) SetPayload(payload *GetIndexSearchBadRequestBody) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetSectionOK) SetPayload(payload *models.SectionResponse) {\n\to.Payload = payload\n}", "func (o *SearchTournamentsOK) SetPayload(payload []*models.Tournament) {\n\to.Payload = payload\n}", "func (o *GetInteractionsNotFound) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPaymentRequestEDINotFound) SetPayload(payload *supportmessages.ClientError) {\n\to.Payload = payload\n}", "func (o *ArtifactListerNotFound) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *AddKeypairInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetServicesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = 
payload\n}", "func (o *GetDocumentOK) SetPayload(payload *ghcmessages.Document) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *UpdateClusterNotFound) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetProbeOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoOK) SetPayload(payload *models.RepositoryInfo) {\n\to.Payload = payload\n}", "func (o *GetTournamentOK) SetPayload(payload *models.Tournament) {\n\to.Payload = payload\n}", "func (o *GetInteractionsInternalServerError) SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *GetGistsOK) SetPayload(payload models.Gists) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresOK) SetPayload(payload []*models.VSphereDatastore) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetTweetsOK) SetPayload(payload *models.TweetDetailsListResponse) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksOK) SetPayload(payload *models.GetNotebooksResponse) {\r\n\to.Payload = payload\r\n}", "func (o *GetServicesDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetHealthzInternalServerError) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetIdentityIDOK) SetPayload(payload *models.Identity) {\n\to.Payload = payload\n}", "func (o *WeaviateActionsPatchOK) SetPayload(payload *models.ActionGetResponse) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetOK) SetPayload(payload *models.LastOperationResource) {\n\to.Payload = 
payload\n}", "func (o *GetHealthzOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetApisInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetPaymentNotFound) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GraphqlPostOK) SetPayload(payload *models.GraphQLResponse) {\n\to.Payload = payload\n}", "func (o *GetDocumentNotFound) SetPayload(payload *ghcmessages.Error) {\n\to.Payload = payload\n}", "func (o *GetInteractionsOK) SetPayload(payload models.ConsoleInteractions) {\n\to.Payload = payload\n}", "func (o *GetModelOK) SetPayload(payload *restmodels.Model) {\n\to.Payload = payload\n}", "func (o *GetAllReposOK) SetPayload(payload *models.ResourceArrayData) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoOK) SetPayload(payload *GetmoviesinfoOKBody) {\n\to.Payload = payload\n}", "func (o *GetTaskSyncNotFound) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetTaskSyncInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetQuestionFromTeamOK) SetPayload(payload *models.Question) {\n\to.Payload = payload\n}", "func (o *GetServicesOK) SetPayload(payload *models.ServicesWithStageInfo) {\n\to.Payload = payload\n}", "func (o *ShopGetProductOK) SetPayload(payload *models.ShopProductResponse) {\n\to.Payload = payload\n}", "func (o *GetRefreshTokenNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetModelNotFound) SetPayload(payload *restmodels.Error) {\n\to.Payload = payload\n}", "func (o *GetEventsEventIDInternalServerError) SetPayload(payload *GetEventsEventIDInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *UpdateMovieNotFound) SetPayload(payload *models.Result) {\n\to.Payload = payload\n}", "func (o *GetTaskTaskIDOK) 
SetPayload(payload *models.ResponseTask) {\n\to.Payload = payload\n}", "func (o *GetRepositoryInfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *AddRegionAZInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetSectionNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o *GetGateSourceByGateNameAndMntNotFound) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksNotFound) SetPayload(payload *models.Error) {\r\n\to.Payload = payload\r\n}", "func (o *GetRefreshTokenInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetClusterOK) SetPayload(payload *models.Cluster) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *ArtifactListerInternalServerError) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *AddRegionAZOK) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetResetPasswordRequestEmailNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func (o *GetPaymentRequestEDIOK) SetPayload(payload *supportmessages.PaymentRequestEDI) {\n\to.Payload = payload\n}", "func (o *GetResetPasswordRequestEmailInternalServerError) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationNotFound) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *AddReleasesInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", 
"func (o *UpdateClusterInternalServerError) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetProviderRegionByIDOK) SetPayload(payload *models.ProviderRegion) {\n\to.Payload = payload\n}", "func (o *GetClusterNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetChartsInfoOK) SetPayload(payload []*models.ChartsData) {\n\to.Payload = payload\n}", "func (o *GetQuestionFromTeamGone) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetReposOwnerRepoTagsOK) SetPayload(payload *models.Tags) {\n\to.Payload = payload\n}", "func (o *GetClusterInstallConfigOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *GetPetByIDOK) SetPayload(payload *models.Pet) {\n\to.Payload = payload\n}", "func (o *GetVSphereDatastoresUnauthorized) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetV1RdssOK) SetPayload(payload models.RDSS) {\n\to.Payload = payload\n}", "func (o *AddKeypairConflict) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *FetchTodoItemsInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesHaproxyRuntimeAclsIDNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetS3BackupOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (r *Request) setPayload(res Result) {\n\treflect.ValueOf(r.resultPayload).Elem().Set(reflect.ValueOf(res).Elem())\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationInternalServerError) SetPayload(payload interface{}) {\n\to.Payload = payload\n}", "func (o *UpdateClusterUnauthorized) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetNamespacedNotebooksUnauthorized) SetPayload(payload *models.Error) {\r\n\to.Payload = 
payload\r\n}", "func (o *GetAppsInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetBackendNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVisiblePruebasFromQuestionTestGone) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetVSphereComputeResourcesInternalServerError) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetTagsFromQuestionGone) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDistrictForSchoolNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}" ]
[ "0.7735776", "0.7393954", "0.72819275", "0.6931681", "0.6840269", "0.68224037", "0.67992747", "0.67884505", "0.67632055", "0.67573553", "0.6756289", "0.67104745", "0.66696805", "0.6660098", "0.66547406", "0.6612361", "0.6595401", "0.6590472", "0.6584048", "0.658069", "0.6577633", "0.6573933", "0.65738165", "0.6567231", "0.65630066", "0.65565956", "0.6556071", "0.6548068", "0.6547874", "0.6546001", "0.65439236", "0.65415317", "0.6539655", "0.65343374", "0.6525994", "0.6518991", "0.65167695", "0.6515205", "0.65069693", "0.6501597", "0.64967823", "0.64960223", "0.64821625", "0.64789414", "0.6466363", "0.6464341", "0.64641196", "0.64564484", "0.64545774", "0.6450439", "0.6449257", "0.6447141", "0.64439577", "0.6440866", "0.6440823", "0.6432783", "0.64267945", "0.642604", "0.6419629", "0.64175993", "0.6417103", "0.64157426", "0.64118505", "0.6411481", "0.6409368", "0.64084053", "0.64038855", "0.6402175", "0.64016956", "0.6400048", "0.6389544", "0.6389072", "0.6386707", "0.63777465", "0.6371804", "0.6367344", "0.6364649", "0.6364348", "0.6360021", "0.6356393", "0.63541496", "0.6353011", "0.6351917", "0.63516176", "0.634963", "0.6349597", "0.6342815", "0.6337229", "0.6336176", "0.6331622", "0.6331397", "0.6329311", "0.6328258", "0.6326639", "0.63250476", "0.63209236", "0.6320017", "0.6318883", "0.631626", "0.63142943" ]
0.7539656
1
WriteResponse to the client
func (o *GetSearchOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(200) if o.Payload != nil { payload := o.Payload if err := producer.Produce(rw, payload); err != nil { panic(err) // let the recovery middleware deal with this } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Response) Write(w io.Writer) error", "func (c *Operation) writeResponse(rw http.ResponseWriter, status int, data []byte) { // nolint: unparam\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write(data); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}", "func WriteResponse(w http.ResponseWriter, mensaje string, code int) {\n\tmessage := myTypes.Respuesta{\n\t\tMessage: mensaje,\n\t}\n\tresponse, _ := json.Marshal(message)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(response)\n}", "func WriteResponse(w http.ResponseWriter, object interface{}, rerr *irma.RemoteError) {\n\tstatus, bts := JsonResponse(object, rerr)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\t_, err := w.Write(bts)\n\tif err != nil {\n\t\tLogWarning(errors.WrapPrefix(err, \"failed to write response\", 0))\n\t}\n}", "func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, v interface{}, statusCode int) {\n\tresBody, err := json.Marshal(v)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\t_, _ = w.Write(resBody)\n}", "func WriteResponse(w http.ResponseWriter, code int, object interface{}) {\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil 
{\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func WriteResponse(w http.ResponseWriter, code int, resp interface{}) error {\n\tj, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(j)\n\treturn err\n}", "func writeResponse(w *http.ResponseWriter, res responseData, status int) {\n\tresJSON, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(*w, \"Failed to parse struct `responseData` into JSON object\", http.StatusInternalServerError)\n\t}\n\n\t(*w).Header().Set(\"Content-Type\", \"application/json\")\n\t(*w).WriteHeader(status)\n\t(*w).Write(resJSON)\n}", "func WriteResponse(w http.ResponseWriter, d string) {\n\tw.WriteHeader(200)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.Write([]byte(d))\n\treturn\n}", "func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(w http.ResponseWriter, response Response) {\n\tjson, err := json.Marshal(&response)\n\n\tif err != nil {\n\t\tfmt.Fprint(w, \"There was an error processing the request.\")\n\t}\n\n\tcommon.Log(fmt.Sprintf(\"Returning response %s\", json))\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMedicineOK) WriteResponse(rw http.ResponseWriter, 
producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {\n\n\t// Deal with CORS\n\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"DELETE, GET, HEAD, OPTIONS, POST, PUT\")\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\t// Allow any headers\n\t\tif wantedHeaders := r.Header.Get(\"Access-Control-Request-Headers\"); wantedHeaders != \"\" {\n\t\t\tw.Header().Set(\"Access-Control-Allow-Headers\", wantedHeaders)\n\t\t}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\tb, err := json.Marshal(resp)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(w, `{\"error\":\"failed to marshal json\"}`)\n\t\treturn\n\t}\n\n\tw.WriteHeader(code)\n\tfmt.Fprintln(w, string(b))\n}", "func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... 
and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}", "func (o *UpdateCatalogOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (c *SwitchVersion) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postSwitchVersion(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *PutRecordingsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, code int, err error, data interface{}, t0 time.Time) {\n\tw.WriteHeader(code)\n\tresp := &Response{Data: data, Dur: fmt.Sprint(time.Since(t0)), OK: false}\n\tif code < 300 {\n\t\tresp.OK = true\n\t}\n\tif err != nil {\n\t\tresp.Err = err.Error()\n\t}\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tlog.Infof(\"failed to json encode response: %v\", err)\n\t\tif _, err = w.Write([]byte(spew.Sdump(resp))); err != nil {\n\t\t\tlog.Infof(\"failed to write dump of response: %v\", err)\n\t\t}\n\t}\n}", "func (o *NewDiscoveryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func writeResponse(data []byte, size int64, ctype string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", ctype)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", size))\n\tw.Header().Set(\"Cache-Control\", 
\"no-transform,public,max-age=86400,s-maxage=2592000\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, code int, object interface{}) {\n\tfmt.Println(\"writing response:\", code, object)\n\tdata, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}", "func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (o *GetCharactersCharacterIDOpportunitiesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetCharactersCharacterIDOpportunitiesOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *WeaviateThingsGetNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func 
(c *UpdateSwitch) WriteResponse(rw http.ResponseWriter, rp runtime.Producer) {\n\tswitch c.Request.Method {\n\tcase http.MethodPost:\n\t\tc.postUpdateSwitch(rw, rp)\n\tdefault:\n\t\tc.notSupported(rw, rp)\n\t}\n}", "func (o *UpdateLinkInPostOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetChatroomsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetEchoNameOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetUIContentOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *ListVsphereResourceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func ResponseWrite(w http.ResponseWriter, responseCode int, responseData interface{}) {\n\t// Write Response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(responseCode)\n\n\t// Write JSON to Response\n\tjson.NewEncoder(w).Encode(responseData)\n}", "func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}", "func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func writeResponse(data interface{}, w http.ResponseWriter) error {\n\tvar (\n\t\tenc []byte\n\t\terr error\n\t)\n\tenc, err = json.Marshal(data)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to marshal, err = %s\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tn, err := w.Write(enc)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Failure to write, err = %s\", err)\n\t}\n\tif n != len(enc) {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn fmt.Errorf(\"Short write sent = %d, wrote = %d\", len(enc), n)\n\t}\n\treturn nil\n}", "func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateMoveTaskOrderPostCounselingInformationOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func WriteResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, %s\", err)\n\t}\n}", "func (o *PutQuestionOK) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (r *response) Write(b []byte) (n int, err error) {\n\tif !r.headersSend {\n\t\tif r.status == 0 {\n\t\t\tr.status = http.StatusOK\n\t\t}\n\t\tr.WriteHeader(r.status)\n\t}\n\tn, err = r.ResponseWriter.Write(b)\n\tr.size += int64(n)\n\treturn\n}", "func (o *PostOperationsDeleteP2PPathCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *HealthGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsPatchNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviateThingsGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *DeleteServiceIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *Operation) writeResponse(rw io.Writer, v interface{}) {\n\terr := json.NewEncoder(rw).Encode(v)\n\t// as of now, just log errors for writing response\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to send error response, 
%s\", err)\n\t}\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *UserEditOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *WeaviatePeersAnnounceOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}", "func (o *CertifyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func writeResponse(writer http.ResponseWriter, response *http.Response) (int64, error) {\n\tdefer response.Body.Close()\n\twriteResponseHeaders(writer, response, false)\n\treturn io.Copy(writer, response.Body)\n}", "func (o *PutMeetupDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *FingerPathsPostCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *PostPlaybookOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UpdateHostIgnitionCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *GetCharactersCharacterIDLocationOK) WriteResponse(rw http.ResponseWriter, producer 
runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPingDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostManagementKubernetesIoV1NodesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *PutPerformancesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *StopAppAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(202)\n}", "func (o *GetFleetsFleetIDMembersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Cache-Control\n\n\tcacheControl := o.CacheControl\n\tif cacheControl != \"\" {\n\t\trw.Header().Set(\"Cache-Control\", cacheControl)\n\t}\n\n\t// response header Content-Language\n\n\tcontentLanguage := o.ContentLanguage\n\tif contentLanguage != \"\" {\n\t\trw.Header().Set(\"Content-Language\", contentLanguage)\n\t}\n\n\t// response header Expires\n\n\texpires := o.Expires\n\tif expires != \"\" {\n\t\trw.Header().Set(\"Expires\", expires)\n\t}\n\n\t// response header 
Last-Modified\n\n\tlastModified := o.LastModified\n\tif lastModified != \"\" {\n\t\trw.Header().Set(\"Last-Modified\", lastModified)\n\t}\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make(models.GetFleetsFleetIDMembersOKBody, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *GetMeetupsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(o._statusCode)\n}", "func (o *PostEventCreated) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(201)\n}", "func (o *GetTaskTaskIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateTCPCheckAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *PostOperationsGetNetworkElementListCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ServiceInstanceLastOperationGetOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header RetryAfter\n\n\tretryAfter := o.RetryAfter\n\tif retryAfter != \"\" 
{\n\t\trw.Header().Set(\"RetryAfter\", retryAfter)\n\t}\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetPiecesIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetTaskDetailsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *GetDetailOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\trw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetServicesHaproxyRuntimeAclsIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (r *responseInfoRecorder) Write(b []byte) (int, error) {\n\tr.ContentLength += int64(len(b))\n\tif r.statusCode == 0 {\n\t\tr.statusCode = http.StatusOK\n\t}\n\treturn r.ResponseWriter.Write(b)\n}", 
"func (o *LogoutOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func (o *UploadFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}", "func WriteResponse(w http.ResponseWriter, data interface{}) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": http.StatusOK,\n\t\t},\n\t\t\"data\": data,\n\t}\n\treturn jsonResponse(w, env)\n}", "func (o *WeaviateThingTemplatesCreateNotImplemented) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(501)\n}", "func (r *Responder) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\tfor k, v := range r.headers {\n\t\tfor _, val := range v {\n\t\t\trw.Header().Add(k, val)\n\t\t}\n\t}\n\n\trw.WriteHeader(r.code)\n\n\tif r.response != nil {\n\t\tif err := producer.Produce(rw, r.response); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func (o *GetGateSourceByGateNameAndMntOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *CreateSpoeCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *Output) writeResponse(response string) error {\r\n\t// write the response\r\n\tif _, err := o.writer.WriteString(response + \"\\n\"); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn nil\r\n}", "func (o *GetTransportByIDOK) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *TransferOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n\tif err := producer.Produce(rw, o.Payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n\n}", "func (o *CreateUserCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (o *ViewOneOrderOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (o *GetVisiblePruebasFromQuestionTestInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(500)\n}", "func (o *GetWhaleTranfersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = make([]*models.OperationsRow, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}", "func (o *SearchTournamentsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\tpayload = make([]*models.Tournament, 0, 50)\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the 
recovery middleware deal with this\n\t}\n\n}", "func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}" ]
[ "0.81304365", "0.78822106", "0.7772603", "0.77724785", "0.7753003", "0.7741224", "0.76676315", "0.7638531", "0.7610215", "0.7580745", "0.75792986", "0.75681144", "0.7560947", "0.7558793", "0.75451237", "0.7542909", "0.7541853", "0.75351036", "0.75317055", "0.7520023", "0.75197107", "0.7512948", "0.75119436", "0.75060153", "0.75032663", "0.7498435", "0.7488388", "0.7483949", "0.7477941", "0.7468687", "0.7467289", "0.7466921", "0.7464827", "0.7463887", "0.7463887", "0.7461539", "0.74607104", "0.74594444", "0.7445936", "0.74437296", "0.74364424", "0.7428169", "0.742627", "0.74193496", "0.7414609", "0.7407497", "0.740679", "0.7405893", "0.7399214", "0.7389537", "0.73864824", "0.7380773", "0.73607856", "0.7360597", "0.7355258", "0.7355082", "0.7353997", "0.73482996", "0.7345686", "0.7328176", "0.7325791", "0.7318597", "0.73169374", "0.73163897", "0.7315758", "0.73130983", "0.7312643", "0.7310174", "0.73093194", "0.73014235", "0.7296487", "0.7291982", "0.7291501", "0.72891283", "0.7285318", "0.72836924", "0.7282427", "0.7280994", "0.7275351", "0.72748315", "0.7273309", "0.7272943", "0.7269458", "0.7269213", "0.72688186", "0.7266069", "0.7261708", "0.7253967", "0.7251768", "0.7249987", "0.72485304", "0.724809", "0.7241035", "0.7239367", "0.7237185", "0.72348326", "0.7228545", "0.72232014", "0.72160393", "0.7215001", "0.7212855" ]
0.0
-1
NewGetSearchDefault creates GetSearchDefault with default headers values
func NewGetSearchDefault(code int) *GetSearchDefault { if code <= 0 { code = 500 } return &GetSearchDefault{ _statusCode: code, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchDefault) WithStatusCode(code int) *GetSearchDefault {\n\to._statusCode = code\n\treturn o\n}", "func Default(u []*url.URL, s Handler, verbose bool) *GGet {\n\treturn &GGet{URLS: u, Strategy: s, r: &httpRetriever{}, Verbose: verbose}\n}", "func NewGetSearchSearchItemsMoidDefault(code int) *GetSearchSearchItemsMoidDefault {\n\treturn &GetSearchSearchItemsMoidDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (o *GetSearchDefault) WithPayload(payload *models.Error) *GetSearchDefault {\n\to.Payload = payload\n\treturn o\n}", "func NewGetModelRegistryDefault(code int) *GetModelRegistryDefault {\n\treturn &GetModelRegistryDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (a SearchApi) GetSearch(q64 string, expand []string, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/search\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = 
a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetServicesDefault(code int) *GetServicesDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetServicesDefault{\n\t\t_statusCode: 
code,\n\t}\n}", "func New(tp elastictransport.Interface) *Search {\n\tr := &Search{\n\t\ttransport: tp,\n\t\tvalues: make(url.Values),\n\t\theaders: make(http.Header),\n\t\tbuf: gobytes.NewBuffer(nil),\n\n\t\treq: NewRequest(),\n\t}\n\n\treturn r\n}", "func DefaultHeader(k, v string) Opt {\n\treturn func(c *Client) Opt {\n\t\told, found := c.header[k]\n\t\told = append([]string{}, old...) // clone\n\t\tc.header.Add(k, v)\n\t\treturn func(c *Client) Opt {\n\t\t\tif found {\n\t\t\t\tc.header[k] = old\n\t\t\t} else {\n\t\t\t\tc.header.Del(k)\n\t\t\t}\n\t\t\treturn DefaultHeader(k, v)\n\t\t}\n\t}\n}", "func NewGetVersionDefault(code int) *GetVersionDefault {\n\treturn &GetVersionDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewDefault() *instance {\n\treturn New(\n\t\t[]factory.Contract{\n\t\t\texampleMetadataFactory.New(),\n\t\t\tiotaMetadataFactory.New(),\n\t\t\tipfsMetadataFactory.New(),\n\t\t},\n\t)\n}", "func NewGetLibcVersionDefault(code int) *GetLibcVersionDefault {\n\treturn &GetLibcVersionDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewDefaultSearcher(\n\tpeerSigner client.Signer,\n\torgSigner client.Signer,\n\trec comm.QueryRecorder,\n\tdoc comm.Doctor,\n\tclients client.Pool,\n) Searcher {\n\treturn NewSearcher(\n\t\tpeerSigner,\n\t\torgSigner,\n\t\trec,\n\t\tdoc,\n\t\tclient.NewFinderCreator(clients),\n\t\tNewResponseProcessor(peer.NewFromer(), doc),\n\t)\n}", "func newRestConfigForDefaultSearchPath(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (*rest.Config, error) {\n\tif kubeconfigPath == \"\" {\n\t\tconfig, err := rest.InClusterConfig()\n\t\t// if there is no err, continue because InClusterConfig is only expected to succeed if running inside of a pod.\n\t\tif err == nil {\n\t\t\treturn config, nil\n\t\t}\n\t}\n\tapiConfig, err := newAPIConfigForDefaultSearchPath(kubeconfigPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newRestConfig(apiConfig, overrides)\n}", "func newDefaultMerger() *Merge {\n\treturn 
&Merge{\n\t\tmergedMeta: make([]MergedMeta, 0),\n\t}\n}", "func NewGetMeDefault(code int) *GetMeDefault {\n\treturn &GetMeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (c MethodsCollection) DefaultGet() pDefaultGet {\n\treturn pDefaultGet{\n\t\tMethod: c.MustGet(\"DefaultGet\"),\n\t}\n}", "func NewSearchLogQueryDefault(code int) *SearchLogQueryDefault {\n\treturn &SearchLogQueryDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetSearchTagItemsMoidDefault(code int) *GetSearchTagItemsMoidDefault {\n\treturn &GetSearchTagItemsMoidDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewSearch() *Search {\n\ts := &Search{}\n\tdefaults.SetDefaults(s)\n\treturn s\n}", "func DefaultParams(service string) *QueryParam {\n\treturn &QueryParam{\n\t\tService: service,\n\t\tDomain: \"local\",\n\t\tTimeout: time.Second,\n\t\tEntries: make(chan *ServiceEntry),\n\t\tWantUnicastResponse: false, // TODO(reddaly): Change this default.\n\t}\n}", "func (o *GetContentSourceUsingGETParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func Default() *WebClient {\n\t// From a zapcore.Core, it's easy to construct a Logger.\n\tlog := logger.Console()\n\n\treturn &WebClient{\n\t\tAuthentication: false,\n\t\tTransactionID: time.Now().String(),\n\t\tUserAgent: \"testAgent\",\n\t\tContentType: \"application/json; charset=utf-8\",\n\t\tAccept: AcceptJson,\n\t\tDebug: true,\n\t\tlog: log,\n\t}\n}", "func GetDefaultHeaders() map[string]string {\n\treturn map[string]string{\n\t\t\"content-type\": \"application/json\",\n\t}\n}", "func NewGetPingDefault(code int) *GetPingDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetPingDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (a SearchApi) GetDocumentationSearch(q64 string) (*Documentationsearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/documentation/search\"\n\tdefaultReturn := 
new(Documentationsearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetDocumentationSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Documentationsearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, 
headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Documentationsearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func (o *HandleGetAboutUsingGETParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func NewGetHostGroupsDefault(code int) *GetHostGroupsDefault {\n\treturn &GetHostGroupsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewDescribeDefault(code int) *DescribeDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &DescribeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func newSearcher() *defaultSearcher {\n\treturn &defaultSearcher{\n\t\tpathStringer: new(defaultPathStringer),\n\t}\n}", "func newAPIConfigForDefaultSearchPath(kubeconfigPath string) (*api.Config, error) {\n\tconfigLoader := clientcmd.NewDefaultClientConfigLoadingRules()\n\tconfigLoader.ExplicitPath = kubeconfigPath\n\treturn configLoader.Load()\n}", "func (r *SearchREST) New() runtime.Object {\n\treturn &searchapis.Search{}\n}", "func MakeDefaultCommonResult() IResult {\n\treturn &commonResult{\n\t\tdebug: true,\n\t\tbeautify_logs: false,\n\t}\n}", "func (a SearchApi) GetDocumentationGknSearch(q64 string) (*Gkndocumentationsearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/documentation/gkn/search\"\n\tdefaultReturn := new(Gkndocumentationsearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil 
{\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetDocumentationGknSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Gkndocumentationsearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = 
errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Gkndocumentationsearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetResultTopFileDefault(code int) *GetResultTopFileDefault {\n\treturn &GetResultTopFileDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (o *ExportUsingGETParams) SetDefaults() {\n\tvar (\n\t\tendpointsDefault = string(\"Jenkins, Jira\")\n\n\t\tpipelineDefault = string(\"Deploy Production\")\n\n\t\tpipelinesDefault = string(\"Deploy Production, Dev\")\n\n\t\tprojectDefault = string(\"Project-1\")\n\t)\n\n\tval := ExportUsingGETParams{\n\t\tEndpoints: &endpointsDefault,\n\t\tPipeline: &pipelineDefault,\n\t\tPipelines: &pipelinesDefault,\n\t\tProject: &projectDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func DefaultHeadersEnforcer() *HeadersEnforcer {\n\treturn &HeadersEnforcer{\n\t\tCheckers: []*HeaderChecker{XGoogClientHeaderChecker},\n\t}\n}", "func (req *Request) DefaultHeaders(mKey string) (err error) {\n\treq.Header.Add(HeaderXDate, time.Now().UTC().Format(\"Mon, 02 Jan 2006 15:04:05 GMT\"))\n\treq.Header.Add(HeaderVersion, SupportedAPIVersion)\n\treq.Header.Add(HeaderUserAgent, UserAgent)\n\n\t// Auth\n\tparts := req.Method + \"\\n\" +\n\t\treq.rType + \"\\n\" +\n\t\treq.rLink + \"\\n\" +\n\t\treq.Header.Get(HeaderXDate) + \"\\n\" +\n\t\treq.Header.Get(\"Date\") + \"\\n\"\n\n\tpartsLower := strings.ToLower(parts)\n\n\tsign, err := authorize(partsLower, mKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmasterToken := \"master\"\n\ttokenVersion := \"1.0\"\n\treq.Header.Add(HeaderAuth, url.QueryEscape(\"type=\"+masterToken+\"&ver=\"+tokenVersion+\"&sig=\"+sign))\n\treturn\n}", "func NewGetmoviesinfoDefault(code int) *GetmoviesinfoDefault {\n\tif code <= 0 {\n\t\tcode = 
500\n\t}\n\n\treturn &GetmoviesinfoDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewFetcherDefault(l *logrusx.Logger, cancelAfter time.Duration, ttl time.Duration, opts ...FetcherOption) *FetcherDefault {\n\tf := &FetcherDefault{\n\t\tcancelAfter: cancelAfter,\n\t\tl: l,\n\t\tttl: ttl,\n\t\tkeys: make(map[string]jose.JSONWebKeySet),\n\t\tfetchedAt: make(map[string]time.Time),\n\t\tclient: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),\n\t\tmux: cloudstorage.NewURLMux(),\n\t}\n\tfor _, o := range opts {\n\t\to(f)\n\t}\n\treturn f\n}", "func newDefaultHTTPClient() (cev2.Client, error) {\n\tp, err := cev2.NewHTTP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cev2.NewClientObserved(p,\n\t\tcev2.WithUUIDs(),\n\t\tcev2.WithTimeNow(),\n\t\tcev2.WithTracePropagation,\n\t)\n}", "func (c *Client) NewSearch(searchType string, params *SearchParams) *Search {\n\treturn &Search{\n\t\tclient: c,\n\t\tType: searchType,\n\t\tParams: params,\n\t\tDeleted: false,\n\t}\n}", "func (a SearchApi) GetSearchSuggest(q64 string, expand []string, profile bool) (*Jsonnodesearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/search/suggest\"\n\tdefaultReturn := new(Jsonnodesearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetSearchSuggest\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif 
a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"profile\"] = a.Configuration.APIClient.ParameterToString(profile, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Jsonnodesearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody 
{\n\t\tif \"Jsonnodesearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetProviderRegistersDefault(code int) *GetProviderRegistersDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetProviderRegistersDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetZippedDefault(code int) *GetZippedDefault {\n\treturn &GetZippedDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetRepositoryInfoDefault(code int) *GetRepositoryInfoDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetRepositoryInfoDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (client IdentityClient) getTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/tagDefaults/{tagDefaultId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func NewGetUserDefault(code int) *GetUserDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetUserDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (a SearchApi) GetLocationsSearch(q64 string, expand []string) (*Locationssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/locations/search\"\n\tdefaultReturn := new(Locationssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by 
the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetLocationsSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Locationssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Locationssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func getSearch(searchString string, params map[string]string) {\n\tnetwork.InitSearch(params)\n\n\tresults := network.GetSearch(searchString)\n\tview.RenderTable(results)\n}", "func (s *scraper) GetSearch(ctx context.Context, isin string) (*http.Request, error) {\n\turl := fmt.Sprintf(\"https://www.fondidoc.it/Ricerca/Res?txt=%s&tipi=&societa=&pag=0&sort=&sortDir=&fldis=&nview=20&viewMode=anls&filters=&pir=0'\", isin)\n\treturn http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n}", "func newSearchService(sling *sling.Sling) *SearchService {\n\treturn &SearchService{\n\t\tsling: sling.Path(\"search/\"),\n\t}\n}", "func (o *SearchIngredientsParams) SetDefaults() {\n\tvar (\n\t\tallowDeletedDefault = bool(false)\n\n\t\tallowUnstableDefault = bool(false)\n\n\t\texactOnlyDefault = bool(false)\n\n\t\tlimitDefault = int64(50)\n\n\t\toffsetDefault = int64(0)\n\n\t\tqDefault = string(\"\")\n\t)\n\n\tval := SearchIngredientsParams{\n\t\tAllowDeleted: &allowDeletedDefault,\n\t\tAllowUnstable: &allowUnstableDefault,\n\t\tExactOnly: &exactOnlyDefault,\n\t\tLimit: &limitDefault,\n\t\tOffset: &offsetDefault,\n\t\tQ: &qDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func Search(terms []string) Params {\n\treturn Params{make(url.Values), 
SearchURL}.Country(CN).Terms(terms)\n}", "func NewGetHardwareDefault(code int) *GetHardwareDefault {\n\treturn &GetHardwareDefault{\n\t\t_statusCode: code,\n\t}\n}", "func newHandleGetOrHeader(\n\tcore core.Core,\n) handleGetOrHeader {\n\treturn _handleGetOrHeader{\n\t\tcore: core,\n\t\thttp: ihttp.New(),\n\t}\n}", "func Default() *API {\n\tapi := New()\n\tapi.Use(\n\t\ttimeout.Default(\"12s\"),\n\t\tlogging.Default(\"TONIC\"))\n\n\t// adding default encoders..\n\tapi.Encoder(\n\t\tencoding.JSON,\n\t\tencoding.XML,\n\t\tencoding.YAML,\n\t\tencoding.FORM)\n\n\treturn api\n}", "func NewGetProjectDefault(code int) *GetProjectDefault {\n\treturn &GetProjectDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewSearchBucket()(*SearchBucket) {\n m := &SearchBucket{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func NewSearchApi() *SearchApi {\n\tfmt.Sprintf(strings.Title(\"\"), \"\")\n\tconfig := GetDefaultConfiguration()\n\treturn &SearchApi{\n\t\tConfiguration: config,\n\t}\n}", "func NewSearchHandler(s registry.Searchable) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tp := &registry.SearchParams{}\n\t\tswitch r.Header.Get(\"Content-Type\") {\n\t\tcase \"application/json\":\n\t\t\tif err := json.NewDecoder(r.Body).Decode(p); err != nil {\n\t\t\t\tapiutil.WriteErrResponse(w, http.StatusBadRequest, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif p.Limit == 0 {\n\t\t\t\tp.Limit = defaultLimit\n\t\t\t}\n\t\tdefault:\n\t\t\t// read form values\n\t\t\tvar err error\n\t\t\tif p.Limit, err = apiutil.ReqParamInt(\"limit\", r); err != nil {\n\t\t\t\tp.Limit = defaultLimit\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tif p.Offset, err = apiutil.ReqParamInt(\"offset\", r); err != nil {\n\t\t\t\tp.Offset = defaultOffset\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tp.Q = r.FormValue(\"q\")\n\t\t}\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\tresults, err := s.Search(*p)\n\t\t\tif err != nil {\n\t\t\t\tapiutil.WriteErrResponse(w, 
http.StatusBadRequest, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tapiutil.WriteResponse(w, results)\n\t\t\treturn\n\t\t}\n\t}\n}", "func newSearch(inst *Instagram) *Search {\n\tsearch := &Search{\n\t\tinst: inst,\n\t}\n\treturn search\n}", "func DefaultHTTPGet(url string) (*http.Response, error) {\n\treturn GetHTTPClient().Get(url)\n}", "func Default() *Hodor {\n\th := NewHodor(NewRouter())\n\tlogger := log.New(os.Stdout, \"[Hodor] \", log.LstdFlags)\n\th.AddFilters(\n\t\tLogFilter(logger),\n\t\tRecoveryFilter(logger),\n\t)\n\treturn h\n}", "func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search {\n\tr.values.Set(\"default_operator\", defaultoperator.String())\n\n\treturn r\n}", "func NewGetV0AuthCallbackDefault(code int) *GetV0AuthCallbackDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetV0AuthCallbackDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (o PCSearchResultsList) DefaultOrder() []string {\n\n\treturn []string{}\n}", "func NewGetTagDefault(code int) *GetTagDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetTagDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (a SearchApi) GetVoicemailSearch(q64 string, expand []string) (*Voicemailssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/voicemail/search\"\n\tdefaultReturn := new(Voicemailssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetVoicemailSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication 
(PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Voicemailssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Voicemailssearchresponse\" 
== \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetSearchOK() *GetSearchOK {\n\treturn &GetSearchOK{}\n}", "func NewGetSearchOK() *GetSearchOK {\n\treturn &GetSearchOK{}\n}", "func NewGetfeedsDefault(code int) *GetfeedsDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetfeedsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetS3BackupDefault(code int) *GetS3BackupDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetS3BackupDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewDefaultClient(_ *http.Request) (Client, http.Header, error) {\n\treturn newClientWithOptions()\n}", "func SearchOne(term string) Params {\n\treturn Params{make(url.Values), SearchURL}.Country(CN).Term(term)\n}", "func (a SearchApi) GetGroupsSearch(q64 string, expand []string) (*Groupssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/groups/search\"\n\tdefaultReturn := new(Groupssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetGroupsSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader 
{\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Groupssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Groupssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func NewGetPracticesDefault(code int) *GetPracticesDefault {\n\tif code 
<= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetPracticesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetOrganizationsDefault(code int) *GetOrganizationsDefault {\n\treturn &GetOrganizationsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func WithDefaultHeaders() Option {\n\treturn func(r *RequestClient) {\n\t\tfor key, value := range defaultHeaders {\n\t\t\tr.headers.Add(key, value)\n\t\t}\n\t}\n}", "func (o ApiOperationResponseHeaderOutput) DefaultValue() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApiOperationResponseHeader) *string { return v.DefaultValue }).(pulumi.StringPtrOutput)\n}", "func DefaultParams() Params {\n\treturn Params{\n\t\tNickname: DefaultNicknameParams(),\n\t\tDTag: DefaultDTagParams(),\n\t\tBio: DefaultBioParams(),\n\t\tOracle: DefaultOracleParams(),\n\t}\n}", "func (o *PCSearchResults) DefaultOrder() []string {\n\n\treturn []string{}\n}", "func NewGetMeetupsDefault(code int) *GetMeetupsDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetMeetupsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetVulnerabilitiesDefault(code int) *GetVulnerabilitiesDefault {\n\treturn &GetVulnerabilitiesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetSummarySystemDefault(code int) *GetSummarySystemDefault {\n\treturn &GetSummarySystemDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}", "func NewGetServiceInstanceByNameDefault(code int) 
*GetServiceInstanceByNameDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetServiceInstanceByNameDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetRenterDownloadsDefault(code int) *GetRenterDownloadsDefault {\n\treturn &GetRenterDownloadsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetNodesDefault(code int) *GetNodesDefault {\n\treturn &GetNodesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (client *HearthstoneAPI) newCardCollectionSearch() *cardCollectionSearch {\n\treturn &cardCollectionSearch{\n\t\turl: client.apiURL,\n\t\tlocale: client.locale,\n\t\toptionalString: make(map[string]string),\n\t\toptionalInt: make(map[string]int),\n\t}\n}", "func newDefaultOptions() *Options {\n\treturn &Options{\n\t\tParseEnv: true,\n\n\t\tVarOpen: \"%(\",\n\t\tVarClose: \")s\",\n\t\tTagName: DefTagName,\n\n\t\tDefSection: parser.DefSection,\n\t\tSectionSep: SepSection,\n\t}\n}", "func (o SparsePCSearchResultsList) DefaultOrder() []string {\n\n\treturn []string{}\n}", "func NewGetHardwareFcportsDefault(code int) *GetHardwareFcportsDefault {\n\treturn &GetHardwareFcportsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (directLinkProvider *DirectLinkProviderV2) SetDefaultHeaders(headers http.Header) {\n\tdirectLinkProvider.Service.SetDefaultHeaders(headers)\n}", "func GetQueryDefault(values url.Values, key, _default string) string {\n\tif v := GetQuery(values, key); v != \"\" {\n\t\treturn v\n\t}\n\treturn _default\n}", "func (a SearchApi) GetUsersSearch(q64 string, expand []string, integrationPresenceSource string) (*Userssearchresponse, *APIResponse, error) {\n\tvar httpMethod = \"GET\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/api/v2/users/search\"\n\tdefaultReturn := new(Userssearchresponse)\n\tif true == false {\n\t\treturn defaultReturn, nil, errors.New(\"This message brought to you by the laws of physics being broken\")\n\t}\n\n\t// verify the required parameter 'q64' is set\n\tif &q64 == nil {\n\t\t// 
false\n\t\treturn defaultReturn, nil, errors.New(\"Missing required parameter 'q64' when calling SearchApi->GetUsersSearch\")\n\t}\n\n\theaderParams := make(map[string]string)\n\tqueryParams := make(map[string]string)\n\tformParams := url.Values{}\n\tvar postBody interface{}\n\tvar postFileName string\n\tvar fileBytes []byte\n\t// authentication (PureCloud OAuth) required\n\n\t// oauth required\n\tif a.Configuration.AccessToken != \"\"{\n\t\theaderParams[\"Authorization\"] = \"Bearer \" + a.Configuration.AccessToken\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\t\n\tqueryParams[\"q64\"] = a.Configuration.APIClient.ParameterToString(q64, \"\")\n\t\n\tqueryParams[\"expand\"] = a.Configuration.APIClient.ParameterToString(expand, \"multi\")\n\t\n\tqueryParams[\"integrationPresenceSource\"] = a.Configuration.APIClient.ParameterToString(integrationPresenceSource, \"\")\n\t\n\n\t// Find an replace keys that were altered to avoid clashes with go keywords \n\tcorrectedQueryParams := make(map[string]string)\n\tfor k, v := range queryParams {\n\t\tif k == \"varType\" {\n\t\t\tcorrectedQueryParams[\"type\"] = v\n\t\t\tcontinue\n\t\t}\n\t\tcorrectedQueryParams[k] = v\n\t}\n\tqueryParams = correctedQueryParams\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = 
localVarHttpHeaderAccept\n\t}\n\tvar successPayload *Userssearchresponse\n\tresponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, postFileName, fileBytes)\n\tif err != nil {\n\t\t// Nothing special to do here, but do avoid processing the response\n\t} else if err == nil && response.Error != nil {\n\t\terr = errors.New(response.ErrorMessage)\n\t} else if response.HasBody {\n\t\tif \"Userssearchresponse\" == \"string\" {\n\t\t\tcopy(response.RawBody, &successPayload)\n\t\t} else {\n\t\t\terr = json.Unmarshal(response.RawBody, &successPayload)\n\t\t}\n\t}\n\treturn successPayload, response, err\n}", "func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {\n\tq := r.URL.Query()\n\n\tsetParamInURL(redirectTo, \"tab\", \"Everything\")\n\tsetParamInURL(redirectTo, \"search_scope\", \"MyInst_and_CI\")\n\n\tif q.Get(\"searchArg\") != \"\" {\n\t\tswitch q.Get(\"searchCode\") {\n\t\tcase \"TKEY^\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"TALL\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"NAME\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"author\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"CALL\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"callnumber.0\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"JALL\":\n\t\t\tredirectTo.Path = \"/discovery/jsearch\"\n\t\t\tsetParamInURL(redirectTo, \"tab\", \"jsearch_slot\")\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\tdefault:\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\t}\n\t} else if q.Get(\"SEARCH\") != \"\" 
{\n\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"SEARCH\")))\n\t}\n}", "func NewGetLocationsDefault(code int) *GetLocationsDefault {\n\treturn &GetLocationsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (o *ExportUsingGETParams) WithDefaults() *ExportUsingGETParams {\n\to.SetDefaults()\n\treturn o\n}", "func New(t opentracing.Tracer, geoconn, rateconn *grpc.ClientConn) *Search {\n\treturn &Search{\n\t\tgeoClient: geo.NewGeoClient(geoconn),\n\t\trateClient: rate.NewRateClient(rateconn),\n\t\ttracer: t,\n\t}\n}" ]
[ "0.6124286", "0.5984633", "0.5685322", "0.5670665", "0.5597977", "0.5481749", "0.54756683", "0.54599804", "0.5451325", "0.54285806", "0.53983486", "0.5374962", "0.5373182", "0.53702193", "0.53571707", "0.52928585", "0.52792096", "0.52537185", "0.5209282", "0.520806", "0.51980287", "0.51791275", "0.5174424", "0.5163341", "0.5139421", "0.5133244", "0.5123488", "0.5117148", "0.50962746", "0.5088114", "0.50825405", "0.506278", "0.5056409", "0.5052282", "0.5028781", "0.501943", "0.50192076", "0.5008327", "0.5007131", "0.5005283", "0.5003789", "0.50010204", "0.49857873", "0.4981603", "0.49786493", "0.4974549", "0.497372", "0.4969512", "0.4966887", "0.49619725", "0.49553436", "0.49542657", "0.49529845", "0.49520314", "0.49512565", "0.49453115", "0.4938097", "0.49363333", "0.4934044", "0.49247965", "0.49127126", "0.4911299", "0.49057856", "0.49040356", "0.49022084", "0.48862174", "0.48856807", "0.48847872", "0.4878491", "0.48747155", "0.48747155", "0.48716813", "0.48655593", "0.48522383", "0.48517382", "0.48508844", "0.4846926", "0.48319095", "0.48205358", "0.48183614", "0.4809759", "0.48095843", "0.48082295", "0.4804192", "0.47966367", "0.47904003", "0.4785175", "0.47657645", "0.47642905", "0.476073", "0.47603738", "0.47572523", "0.47568324", "0.47557536", "0.47541296", "0.47533408", "0.47521794", "0.47482345", "0.47472426", "0.47319615" ]
0.71142584
0
WithStatusCode adds the status to the get search default response
func (o *GetSearchDefault) WithStatusCode(code int) *GetSearchDefault { o._statusCode = code return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetBackendDefault) WithStatusCode(code int) *GetBackendDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *AddItemDefault) WithStatusCode(code int) *AddItemDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetmoviesinfoDefault) WithStatusCode(code int) *GetmoviesinfoDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetServicesDefault) WithStatusCode(code int) *GetServicesDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetAllReposDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServicesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetPracticesDefault) WithStatusCode(code int) *GetPracticesDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetRepositoryInfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetmoviesinfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (e *expectation) WithStatusCode(code int) { e.code = code }", "func (o *NewDiscoveryDefault) WithStatusCode(code int) *NewDiscoveryDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetfeedsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutSlideSuperlikeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutSlideLikeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetPracticesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServiceInstanceByNameDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func NewGetSearchDefault(code int) *GetSearchDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetSearchDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (o *GetProviderRegistersDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetVMVolumeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o 
*GetTagDefault) WithStatusCode(code int) *GetTagDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetRepositoryInfoDefault) WithStatusCode(code int) *GetRepositoryInfoDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetPingDefault) WithStatusCode(code int) *GetPingDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetAllReposDefault) WithStatusCode(code int) *GetAllReposDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetImagesListDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetBackendDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *AddItemDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetPingDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetfeedsDefault) WithStatusCode(code int) *GetfeedsDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetSearchSearchItemsMoidDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *NewDiscoveryDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetProviderRegistersDefault) WithStatusCode(code int) *GetProviderRegistersDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *ListOfDevicesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DescribeDefault) WithStatusCode(code int) *DescribeDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetMeetupsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ShopGetProductDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListSuggestDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (r *Response) Status(code int) JResponseWriter {\n\tr.code = code\n\treturn r\n}", "func (o *PutMeetupDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetCardsDefault) 
SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetCardsDefault) WithStatusCode(code int) *GetCardsDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *RetrieveCopyDefault) WithStatusCode(code int) *RetrieveCopyDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetS3BackupDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetReadyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetTaskDetailsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *RetrieveCopyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetV0AuthCallbackDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetTaskDetailsDefault) WithStatusCode(code int) *GetTaskDetailsDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *PutSlideSuperlikeDefault) WithStatusCode(code int) *PutSlideSuperlikeDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *FindRecommendationForLearningResourceDefault) WithStatusCode(code int) *FindRecommendationForLearningResourceDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *PartialUpdateAppDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServiceInstanceByNameDefault) WithStatusCode(code int) *GetServiceInstanceByNameDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *PostAddDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ViewOneOrderDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetUserDefault) WithStatusCode(code int) *GetUserDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *HealthGetDefault) WithStatusCode(code int) *HealthGetDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetReadyDefault) WithStatusCode(code int) *GetReadyDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *RegisterPluginDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *BookChapListDefault) 
SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetImagesListDefault) WithStatusCode(code int) *GetImagesListDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetTagDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *SearchLogQueryDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *CreateSpoeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *BookBuyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ReplaceHTTPErrorRuleDefault) WithStatusCode(code int) *ReplaceHTTPErrorRuleDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *ListUsersDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DescribeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (h *Headers) SetStatusCode(code int) { h.statusCode = code }", "func (o *DeleteImageDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PostWordDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (c *SearchCall) Status(status string) *SearchCall {\n\tc.urlParams_.Set(\"status\", status)\n\treturn c\n}", "func (o *PutSlideLikeDefault) WithStatusCode(code int) *PutSlideLikeDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetSearchTagItemsMoidDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *BackFlipDroneDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (r *GowebHTTPResponder) WithStatus(ctx context.Context, httpStatus int) error {\n\n\t// check for always200\n\tif len(ctx.FormValue(Always200ParamName)) > 0 {\n\t\t// always return OK\n\t\thttpStatus = http.StatusOK\n\t}\n\n\tctx.HttpResponseWriter().WriteHeader(httpStatus)\n\treturn nil\n}", "func (o *NrActivityListJoinedDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListSuggestDefault) WithStatusCode(code int) *NrActivityListSuggestDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o 
*AddAttendeeToTalkDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ReplaceSpoeMessageDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DescribeClustersDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (*NotFoundError) StatusCode() int {\n\treturn http.StatusNotFound\n}", "func (o *GetS3BackupDefault) WithStatusCode(code int) *GetS3BackupDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *BackFlipDroneDefault) WithStatusCode(code int) *BackFlipDroneDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *CreateMailerEntryDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateClusterDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetUserDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetV0AuthCallbackDefault) WithStatusCode(code int) *GetV0AuthCallbackDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetTradesByAccountDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *UserInfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListFavDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *HealthGetDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateMailersSectionDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ListOfDevicesDefault) WithStatusCode(code int) *ListOfDevicesDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *SearchLogQueryOK) Code() int {\n\treturn 200\n}", "func (o *RemoveOneDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PartialUpdateAppDefault) WithStatusCode(code int) 
*PartialUpdateAppDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *ReplaceSpoeMessageDefault) WithStatusCode(code int) *ReplaceSpoeMessageDefault {\n\to._statusCode = code\n\treturn o\n}" ]
[ "0.73464197", "0.64579624", "0.64554936", "0.64125574", "0.63541776", "0.6325178", "0.6320506", "0.6317493", "0.6317194", "0.63071406", "0.62859255", "0.6273145", "0.6252006", "0.6243317", "0.62240434", "0.622158", "0.62189525", "0.620479", "0.62031937", "0.6199969", "0.6175299", "0.6150171", "0.6135145", "0.61259437", "0.6125265", "0.6124976", "0.6103586", "0.6093679", "0.60935766", "0.6085785", "0.6065695", "0.60601115", "0.6045634", "0.6026329", "0.60231864", "0.60222137", "0.60141253", "0.60131675", "0.5999838", "0.5998961", "0.5998238", "0.5993823", "0.59735954", "0.59706914", "0.5951368", "0.59454", "0.5941719", "0.5929744", "0.59267807", "0.5921533", "0.5921147", "0.5911315", "0.5890571", "0.58836114", "0.58813846", "0.5878011", "0.5869101", "0.58666116", "0.58529615", "0.58513355", "0.5845529", "0.5844789", "0.58447456", "0.5839064", "0.5834206", "0.58323914", "0.582673", "0.58212054", "0.5810221", "0.5803673", "0.57977235", "0.5795662", "0.5782025", "0.57795465", "0.57765037", "0.57750434", "0.5754457", "0.57444584", "0.57357943", "0.573579", "0.57357645", "0.5728253", "0.5723313", "0.57219887", "0.57215697", "0.57201004", "0.5711957", "0.57108575", "0.57096165", "0.5709158", "0.57073754", "0.5707126", "0.570693", "0.5706369", "0.5704749", "0.57025766", "0.56875736", "0.5678677", "0.56701374", "0.56698185" ]
0.7871633
0
SetStatusCode sets the status to the get search default response
func (o *GetSearchDefault) SetStatusCode(code int) { o._statusCode = code }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetServicesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetmoviesinfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetVMVolumeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutSlideSuperlikeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetPracticesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetBackendDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServiceInstanceByNameDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetAllReposDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetRepositoryInfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutSlideLikeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetProviderRegistersDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetfeedsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetReadyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NewDiscoveryDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetPingDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetV0AuthCallbackDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetS3BackupDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetCardsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *RetrieveCopyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetMeetupsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *AddItemDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ListOfDevicesDefault) SetStatusCode(code int) 
{\n\to._statusCode = code\n}", "func (o *GetTaskDetailsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *BackFlipDroneDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutMeetupDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetImagesListDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *RegisterPluginDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ShopGetProductDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ViewOneOrderDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateSpoeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *BookBuyDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DescribeDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetServicesHaproxyRuntimeAclsIDDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListSuggestDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PartialUpdateAppDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateBackendSwitchingRuleDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *BookChapListDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (h *Headers) SetStatusCode(code int) { h.statusCode = code }", "func (o *CreateACLDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ReplaceSpoeMessageDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateMailerEntryDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *HealthGetDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateClusterDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetTagDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DescribeClustersDefault) SetStatusCode(code 
int) {\n\to._statusCode = code\n}", "func (o *PostWordDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DeleteImageDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateFileDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ReplaceHTTPErrorRuleDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetTradesByAccountDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *UserInfoDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ProductDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetUserDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreatePeerDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateTCPCheckDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *UploadFileDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ServiceInstanceLastOperationGetDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PostAddDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ListUsersDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateTaskDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *AddPayloadRuntimeACLDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *RemoveOneDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutProjectProjectNameStageStageNameResourceResourceURIDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateStorageSSLCertificateDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutUserDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PutProjectProjectNameStageStageNameServiceServiceNameResourceDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *AddAttendeeToTalkDefault) 
SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PostUserIDF2aDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListJoinedDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *GetSearchDefault) WithStatusCode(code int) *GetSearchDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *CreateUploadSessionDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *ShipPackageDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *RegisterUserDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DeleteAddressesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *LogoutDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateMailersSectionDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *DeleteConsulDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *AbortUploadSessionDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *CreateUserGardenDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *NrActivityListFavDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (_BaseContent *BaseContentTransactor) SetStatusCode(opts *bind.TransactOpts, status_code *big.Int) (*types.Transaction, error) {\n\treturn _BaseContent.contract.Transact(opts, \"setStatusCode\", status_code)\n}", "func (o *PostcommentsDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (o *PostAttendeesDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (r *Response) SetStatusCode(code int) *Response {\n\tr.statusCode = code\n\n\tmsg, err := r.getDefaultStatusMessage(r.statusCode)\n\tif nil != err {\n\t\tr.statusMessage = \"\"\n\t} else {\n\t\tr.statusMessage = msg\n\t}\n\treturn r\n}", "func (o *PostDialogIDCommentDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (w TemplateWriter) 
SetStatusCode(code int) TemplateWriter {\n\tw.StatusCode = code\n\n\treturn w\n}", "func (o *PostUserRoleDefault) SetStatusCode(code int) {\n\to._statusCode = code\n}", "func (r *Response) SetStatusCode(statusCode int) {\n\tr.statusCode = statusCode\n}", "func (o *GetSearchSearchItemsMoidDefault) Code() int {\n\treturn o._statusCode\n}", "func (e *expectation) WithStatusCode(code int) { e.code = code }", "func SetStatusCode(index, status int) {\n\tC.hepevt_set_status_code(C.int(index+1), C.int(status))\n}", "func (ctx *Context) SetStatusCode(code int) {\n\tctx.Response.StatusCode = code\n}", "func (h *ResponseHeader) SetStatusCode(statusCode int) {\n\th.statusCode = statusCode\n}", "func (this *SIPResponse) SetStatusCode(statusCode int) { //throws ParseException {\n\t// if (statusCode < 100 || statusCode > 800)\n\t// throw new ParseException(\"bad status code\",0);\n\tif this.statusLine == nil {\n\t\tthis.statusLine = header.NewStatusLine()\n\t}\n\tthis.statusLine.SetStatusCode(statusCode)\n}", "func (resp *Response) SetStatusCode(s int) {\n\tresp.Resp.StatusCode = s\n}", "func (o *SearchLogQueryDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *GetSearchTagItemsMoidDefault) Code() int {\n\treturn o._statusCode\n}", "func (o *FindVmsByFiltersDefault) Code() int {\n\treturn o._statusCode\n}", "func (self *Response) SetStatusCode(responseStatusCode interface{}) error {\n\tswitch responseStatusCode.(type) {\n\tcase func(string, http.Request) int:\n\t\t//\n\t\t// Mock Response should have a Status Code Generator function registered?\n\t\t//\n\n\t\tstatusCodeGenerator := responseStatusCode.(func(string, http.Request) int)\n\t\tself.statusCodeGenerator = &statusCodeGenerator\n\n\tcase int:\n\t\t//\n\t\t// Mock Response should have a static Status Code registered?\n\t\t//\n\n\t\tself.statusCode = responseStatusCode.(int)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported status code type. 
SetStatusCode() can receive either a static integer status code or a Status Code Generator Callback with the signature 'func(string, http.Request) int', for a more detailed description kindly check Go Mock Yourself documentation\")\n\t}\n\n\treturn nil\n}" ]
[ "0.7818761", "0.7716831", "0.7708346", "0.7704794", "0.770282", "0.7682419", "0.7668853", "0.76673067", "0.7661198", "0.7651367", "0.76446575", "0.76419425", "0.7630878", "0.7612454", "0.7598704", "0.75901496", "0.7577819", "0.75438464", "0.7519618", "0.74971324", "0.7492259", "0.74675083", "0.7463903", "0.74546176", "0.74495274", "0.7423545", "0.7417265", "0.74058306", "0.7376909", "0.7372969", "0.73663133", "0.7358385", "0.7353855", "0.73463774", "0.7332931", "0.7304686", "0.73021805", "0.72999823", "0.7296245", "0.7283296", "0.72756594", "0.72715306", "0.72689426", "0.7266081", "0.72658455", "0.726337", "0.7239216", "0.72381073", "0.7221608", "0.7205095", "0.71904975", "0.718993", "0.7183131", "0.7166287", "0.7163332", "0.7161174", "0.71610624", "0.71495724", "0.71367234", "0.7119498", "0.71156704", "0.70991224", "0.70959353", "0.7085733", "0.70680773", "0.7057811", "0.703577", "0.7029347", "0.7029129", "0.70276195", "0.70201826", "0.6999203", "0.6995158", "0.698722", "0.69857734", "0.6983839", "0.6967771", "0.69311976", "0.6920612", "0.6902424", "0.6876648", "0.6869876", "0.6847514", "0.6820807", "0.680126", "0.6777904", "0.67291653", "0.66941893", "0.6687734", "0.66801935", "0.6644654", "0.65740085", "0.6531122", "0.6447154", "0.64402485", "0.63962066", "0.63726234", "0.6347455", "0.6307551", "0.6306157" ]
0.8563357
0
WithPayload adds the payload to the get search default response
func (o *GetSearchDefault) WithPayload(payload *models.Error) *GetSearchDefault { o.Payload = payload return o }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *GetSearchOK) WithPayload(payload *models.User) *GetSearchOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetSearchDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchOK) SetPayload(payload *GetIndexSearchOKBody) {\n\to.Payload = payload\n}", "func (o *GetSearchOK) SetPayload(payload *models.User) {\n\to.Payload = payload\n}", "func (o *GetIndexSearchInternalServerError) SetPayload(payload *GetIndexSearchInternalServerErrorBody) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoDefault) WithPayload(payload *models.Error) *GetmoviesinfoDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchNotFound) SetPayload(payload *GetIndexSearchNotFoundBody) {\n\to.Payload = payload\n}", "func (o *NewDiscoveryDefault) WithPayload(payload *models.Error) *NewDiscoveryDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetfeedsDefault) WithPayload(payload *models.Error) *GetfeedsDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *AddItemDefault) WithPayload(payload models.ErrorResponse) *AddItemDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetTagDefault) WithPayload(payload models.Error) *GetTagDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetServicesDefault) WithPayload(payload *models.Error) *GetServicesDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchBadRequest) SetPayload(payload *GetIndexSearchBadRequestBody) {\n\to.Payload = payload\n}", "func (o *FindRecommendationForLearningResourceDefault) WithPayload(payload *models.ErrorModel) *FindRecommendationForLearningResourceDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetLegacyUserSearchKeywordOK) SetPayload(payload *models.SearchUsersByKeyword) {\n\to.Payload = payload\n}", "func (o *GetPracticesDefault) WithPayload(payload *models.Error) *GetPracticesDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetRepositoryInfoDefault) WithPayload(payload 
*models.Error) *GetRepositoryInfoDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetAllReposDefault) WithPayload(payload *models.Error) *GetAllReposDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetProviderRegistersDefault) WithPayload(payload *models.Error) *GetProviderRegistersDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *ShopGetProductDefault) WithPayload(payload *models.RuntimeError) *ShopGetProductDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetLegacyUserSearchKeywordOK) WithPayload(payload *models.SearchUsersByKeyword) *GetLegacyUserSearchKeywordOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchInternalServerError) WithPayload(payload *GetIndexSearchInternalServerErrorBody) *GetIndexSearchInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *SearchTournamentsOK) WithPayload(payload []*models.Tournament) *SearchTournamentsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetServicesDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *DescribeDefault) WithPayload(payload *models.Error) *DescribeDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetfeedsOK) WithPayload(payload []string) *GetfeedsOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetSearchDefault) WithStatusCode(code int) *GetSearchDefault {\n\to._statusCode = code\n\treturn o\n}", "func (o *GetPaymentRequestEDINotFound) WithPayload(payload *supportmessages.ClientError) *GetPaymentRequestEDINotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetBackendDefault) WithPayload(payload *models.Error) *GetBackendDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetAllReposDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetImagesListDefault) WithPayload(payload *models.Error) *GetImagesListDefault {\n\to.Payload = payload\n\treturn o\n}", "func FakeSearch(w http.ResponseWriter, r *http.Request) {\n\tLogRequest(r, 
\"attack\")\n\tresponse := fmt.Sprintf(`\n\t{\n \"took\" : 6,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 6,\n \"successful\" : 6,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \".kibana\",\n \"_type\" : \"index-pattern\",\n \"_id\" : \"logstash-*\",\n \"_score\" : 1.0,\n \"_source\":{\"title\":\"logstash-*\",\"timeFieldName\":\"@timestamp\",\"customFormats\":\"{}\",\"fields\":\"[{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"host\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_source\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"message.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":false,\\\"analyzed\\\":false,\\\"name\\\":\\\"_index\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@version\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":true,\\\"doc_values\\\":false,\\\"name\\\":\\\"message\\\",\\\"count\\\":0},{\\\"type\\\":\\\"date\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"@timestamp\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_type\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"name\\\":\\\"_id\\\",\\\"count\\\":0},{\\\"type\\\":\\\"string\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"host.raw\\\",\\\"count\\\":0},{\\\"type\\\":\\\"geo_point\\\",\\\"indexed\\\":true,\\\"analyzed\\\":false,\\\"doc_values\\\":false,\\\"name\\\":\\\"geoip.location\\\",\\\"count\\\":0}]\"}\n }]\n }\n }`)\n\tWriteResponse(w, 
response)\n\treturn\n}", "func (o *GetRepositoryInfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetmoviesinfoDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetApisOK) SetPayload(payload *models.APIMeta) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *WeaviateThingsGetOK) WithPayload(payload *models.ThingGetResponse) *WeaviateThingsGetOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetDocumentNotFound) SetPayload(payload *ghcmessages.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDocumentOK) SetPayload(payload *ghcmessages.Document) {\n\to.Payload = payload\n}", "func (o *GetDocumentNotFound) WithPayload(payload *ghcmessages.Error) *GetDocumentNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetNamespacedNotebooksOK) SetPayload(payload *models.GetNotebooksResponse) {\r\n\to.Payload = payload\r\n}", "func (o *ArtifactListerNotFound) SetPayload(payload *weles.ErrResponse) {\n\to.Payload = payload\n}", "func (o *GetPrefilterOK) WithPayload(payload *models.Prefilter) *GetPrefilterOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetSectionNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o *GetPrefilterOK) SetPayload(payload *models.Prefilter) {\n\to.Payload = payload\n}", "func (o *GetServiceInstanceByNameDefault) SetPayload(payload *v1.Error) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetImagesListDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetDistrictForSchoolNotFound) SetPayload(payload *models.NotFound) {\n\to.Payload = payload\n}", "func (o 
*AddItemDefault) SetPayload(payload models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *ServiceInstanceLastOperationGetNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *RetrieveCopyDefault) WithPayload(payload *models.Error) *RetrieveCopyDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetUserDefault) WithPayload(payload *models.Error) *GetUserDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetRepositoryInfoNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetProviderRegistersDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetAllStorageNotFound) WithPayload(payload *models.Error) *GetAllStorageNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetApisOK) WithPayload(payload *models.APIMeta) *GetApisOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetModelNotFound) SetPayload(payload *restmodels.Error) {\n\to.Payload = payload\n}", "func (o *GetProjectProjectNameServiceServiceNameResourceOK) SetPayload(payload *models.Resources) {\n\to.Payload = payload\n}", "func (o *GetGistsOK) SetPayload(payload models.Gists) {\n\to.Payload = payload\n}", "func (o *GetfeedsDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetCardsDefault) WithPayload(payload *models.Error) *GetCardsDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetRepositoryInfoOK) SetPayload(payload *models.RepositoryInfo) {\n\to.Payload = payload\n}", "func (o *GetAppsOK) SetPayload(payload *models.GetAppsOKBody) {\n\to.Payload = payload\n}", "func (o *FindRecommendationForLearningResourceOK) WithPayload(payload *models.Recommendations) *FindRecommendationForLearningResourceOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetIndexSearchBadRequest) WithPayload(payload *GetIndexSearchBadRequestBody) *GetIndexSearchBadRequest {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetInteractionsNotFound) 
SetPayload(payload *models.APIError) {\n\to.Payload = payload\n}", "func (o *SearchTournamentsOK) SetPayload(payload []*models.Tournament) {\n\to.Payload = payload\n}", "func (o *NewDiscoveryDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetS3BackupDefault) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *GetPracticesDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *NrActivityListSuggestDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetS3BackupDefault) WithPayload(payload *models.Response) *GetS3BackupDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateThingsGetOK) SetPayload(payload *models.ThingGetResponse) {\n\to.Payload = payload\n}", "func buildSearchRedirect(redirectTo *url.URL, r *http.Request) {\n\tq := r.URL.Query()\n\n\tsetParamInURL(redirectTo, \"tab\", \"Everything\")\n\tsetParamInURL(redirectTo, \"search_scope\", \"MyInst_and_CI\")\n\n\tif q.Get(\"searchArg\") != \"\" {\n\t\tswitch q.Get(\"searchCode\") {\n\t\tcase \"TKEY^\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"TALL\":\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"title,contains,%v\", q.Get(\"searchArg\")))\n\t\tcase \"NAME\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"author\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"CALL\":\n\t\t\tredirectTo.Path = \"/discovery/browse\"\n\t\t\tsetParamInURL(redirectTo, \"browseScope\", \"callnumber.0\")\n\t\t\tsetParamInURL(redirectTo, \"browseQuery\", q.Get(\"searchArg\"))\n\t\tcase \"JALL\":\n\t\t\tredirectTo.Path = \"/discovery/jsearch\"\n\t\t\tsetParamInURL(redirectTo, \"tab\", \"jsearch_slot\")\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", 
q.Get(\"searchArg\")))\n\t\tdefault:\n\t\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"searchArg\")))\n\t\t}\n\t} else if q.Get(\"SEARCH\") != \"\" {\n\t\tsetParamInURL(redirectTo, \"query\", fmt.Sprintf(\"any,contains,%v\", q.Get(\"SEARCH\")))\n\t}\n}", "func (o *ServiceInstanceLastOperationGetOK) SetPayload(payload *models.LastOperationResource) {\n\to.Payload = payload\n}", "func (o *GetSectionOK) SetPayload(payload *models.SectionResponse) {\n\to.Payload = payload\n}", "func (o *GetRefreshTokenNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *ArtifactListerNotFound) WithPayload(payload *weles.ErrResponse) *ArtifactListerNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *NrActivityListSuggestDefault) WithPayload(payload *models.Error) *NrActivityListSuggestDefault {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteOrganizationNotFound) SetPayload(payload *models.MissingResponse) {\n\to.Payload = payload\n}", "func (o *GetResetPasswordRequestEmailNotFound) SetPayload(payload *models.GeneralResponse) {\n\to.Payload = payload\n}", "func (o *GetPaymentRequestEDINotFound) SetPayload(payload *supportmessages.ClientError) {\n\to.Payload = payload\n}", "func (o *GetV0AuthCallbackDefault) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *UpdateClusterNotFound) SetPayload(payload *models.APIResponse) {\n\to.Payload = payload\n}", "func (o *GetWhaleTranfersOK) WithPayload(payload []*models.OperationsRow) *GetWhaleTranfersOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetAllReposOK) SetPayload(payload *models.ResourceArrayData) {\n\to.Payload = payload\n}", "func TestSearchCorrectPayload(t *testing.T) {\n\tdb := DBSession()\n\tdefer db.Close() // clean up when we’re done\n\n\tSetupData(db)\n\ta := assert.New(t)\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/cb_service/contact_book/search/{query}\", http.HandlerFunc(func(res 
http.ResponseWriter, req *http.Request) {\n\t\t// save it in the request context\n\t\tctx := context.WithValue(req.Context(), dbSessionKey, db)\n\t\treq.Header.Set(\"Content-Type\", contentType)\n\t\treq.Header.Set(\"Authorization\", encodedAuthToken)\n\t\treq = req.WithContext(ctx)\n\t\tsearchH(res, req)\n\t}))\n\n\tserver := httptest.NewServer(router)\n\tdefer server.Close()\n\treqURL := server.URL + \"/cb_service/contact_book/search/Yog\"\n\tres, err := http.Get(reqURL)\n\tif err != nil {\n\t\tl.Printf(\"Cannot Make Request :%v \", err)\n\t\ta.Error(err)\n\t}\n\n\ta.Equal(res.StatusCode, http.StatusOK)\n\tClearData(db)\n}", "func (o *ServiceInstanceLastOperationGetNotFound) WithPayload(payload *models.Error) *ServiceInstanceLastOperationGetNotFound {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetAllStorageOK) WithPayload(payload *models.PageStorageList) *GetAllStorageOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetReposOwnerRepoTagsOK) SetPayload(payload *models.Tags) {\n\to.Payload = payload\n}", "func Search() *SearchDsl {\n\treturn &SearchDsl{\n\t\tsimplejson.New(),\n\t\tnil, nil, nil,\n\t}\n}", "func (o *GetApisInternalServerError) WithPayload(payload *models.Error) *GetApisInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetTagOK) WithPayload(payload *models.Tag) *GetTagOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *GetNamespacedNotebooksNotFound) SetPayload(payload *models.Error) {\r\n\to.Payload = payload\r\n}", "func (o *ShopGetProductDefault) SetPayload(payload *models.RuntimeError) {\n\to.Payload = payload\n}", "func (o *GetTagDefault) SetPayload(payload models.Error) {\n\to.Payload = payload\n}", "func (o *GetAllStorageNotFound) SetPayload(payload *models.Error) {\n\to.Payload = payload\n}", "func (o *GetServicesOK) SetPayload(payload *models.ServicesWithStageInfo) {\n\to.Payload = payload\n}" ]
[ "0.70385116", "0.69089067", "0.646442", "0.6227656", "0.62183255", "0.59562975", "0.5850788", "0.5763576", "0.57290655", "0.5705652", "0.56733704", "0.5608187", "0.5560555", "0.55532426", "0.55529666", "0.551911", "0.55013007", "0.5495938", "0.54892355", "0.5488883", "0.54675514", "0.5456084", "0.5454418", "0.5453384", "0.5445326", "0.5375321", "0.5344957", "0.5321389", "0.5301512", "0.5274189", "0.5270779", "0.52639997", "0.5261926", "0.52533346", "0.5226782", "0.5219026", "0.52101904", "0.5197496", "0.51862687", "0.51781577", "0.51645714", "0.51459116", "0.51434386", "0.5128421", "0.5122969", "0.5112136", "0.51046485", "0.5104332", "0.5093286", "0.5086572", "0.5083744", "0.5082458", "0.50737333", "0.5057904", "0.5057683", "0.50547785", "0.5050913", "0.50489026", "0.5025078", "0.50234485", "0.50229377", "0.50074047", "0.5006217", "0.49903598", "0.4989789", "0.4985894", "0.49855378", "0.49821624", "0.49808404", "0.49724862", "0.49710485", "0.49668235", "0.496331", "0.4954777", "0.4949645", "0.49402246", "0.49288487", "0.49194232", "0.49167302", "0.49164346", "0.4910039", "0.49098963", "0.4908022", "0.4907284", "0.4905484", "0.490498", "0.49003208", "0.49003053", "0.48969325", "0.4895787", "0.4895156", "0.48949608", "0.48946518", "0.48934898", "0.4881987", "0.48782662", "0.48766792", "0.4874988", "0.48690835", "0.48689678" ]
0.7802895
0