hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
1d8dd7e83e8b4ccede85f1a8d787e972a25f7e4b | 4,047 | module ResponseFixtures
# Canned payload for the accessible-users endpoint: one user record.
def accessible_users_response
  user = {
    "id" => 100,
    "email" => "[email protected]",
    "first_name" => "Bob",
    "last_name" => "Anderson"
  }
  { "users" => [user] }
end

# Sign-on token payload: token value, expiry timestamp, and sign-on URL.
def sign_on_tokens_response
  token = {
    "token" => "15ad86b2ede6",
    "expires_at" => "2015-04-16T23:56:30.321Z",
    "url" => "https://app.rallio.com/api/internal/sign_on_tokens/15ad86b2ede6"
  }
  { "sign_on_token" => token }
end

# Access-token grant for user 100; expires_at is nil (non-expiring).
def access_tokens_response
  {
    "access_token" => "4a25dd89e50bd0a0db1eeae65864fe6b",
    "user_id" => 100,
    "expires_at" => nil,
    "scopes" => "user_info basic_access"
  }
end

# Convenience accessor: the first (and only) accessible user.
def user_response
  accessible_users_response['users'].first
end

# Dashboard payload: current user ("me") plus visible accounts/franchisors.
def dashboard_response
  me = { "id" => 2, "email" => "[email protected]", "name" => "John Q User" }
  account = {
    "id" => 100,
    "name" => "Rally-O Tires NYC",
    "franchisor_id" => 200,
    "franchisor_name" => "Rally-O Tires",
    "facebook_connected" => true,
    "yelp_connected" => false,
    "google_connected" => false
  }
  franchisor = { "id" => 200, "name" => "Rally-O Tires" }
  { "me" => me, "accounts" => [account], "franchisors" => [franchisor] }
end

# One five-star Facebook review carrying a single owner comment.
def reviews_response
  comment = {
    "user_name" => "Rally-O Tires New York",
    "user_image" => "https://graph.facebook.com/113397275345614/picture",
    "message" => "Thanks for the 5 star review!",
    "created_at" => "2017-02-22T00:49:53.000+00:00"
  }
  review = {
    "id" => 227704,
    "account_id" => 9397,
    "account_name" => "Rally-O Tires New York",
    "network" => "facebook",
    "posted_at" => "2017-02-21T23:12:33.000Z",
    "user_name" => "Andy Bobson",
    "user_image" => "https://graph.facebook.com/100009872044695/picture",
    "rating" => 5,
    "message" => "This is my favourite place to buy tires!",
    "comments" => [comment],
    "liked" => true,
    "url" => "https://www.facebook.com/123123123",
    "can_reply" => true,
    "location_name" => "Visiting Angels Newburyport MA",
    "location_image_url" => "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16266055_1428821143803214_8378119243787669723_n.jpg?oh=3268e6e30474a0aa488cfd896a6d6c06&oe=59357742",
    "review_reply" => nil,
    "review_reply_at" => nil
  }
  { "reviews" => [review] }
end

# Single-franchisor listing.
def franchisors_response
  { "franchisors" => [{ "id" => 100, "name" => "Awesome Haircuts" }] }
end

# Single-account listing with location/branding fields.
def accounts_response
  account = {
    "id" => 100,
    "name" => "Awesome Haircuts New York City",
    "short_name" => "AH-NYC",
    "url" => "https://awesomehaircuts.fake",
    "city" => "New York",
    "country_code" => "US",
    "time_zone" => "Eastern Time (US & Canada)"
  }
  { "accounts" => [account] }
end

# User 100 owning account 100 under franchisor 300.
def account_ownerships_response
  ownership = {
    "user_id" => 100,
    "account_id" => 100,
    "account_name" => "Awesome Haircuts New York City",
    "account_franchisor_id" => 300,
    "account_franchisor_name" => "Awesome Haircuts Franchise"
  }
  { "account_ownerships" => [ownership] }
end

# Wraps the first account ownership under a singular key.
def account_ownership_response
  { "account_ownership" => account_ownerships_response["account_ownerships"].first }
end

# User 100 owning franchisor 300.
def franchisor_ownerships_response
  ownership = {
    "user_id" => 100,
    "franchisor_id" => 300,
    "franchisor_name" => "Awesome Haircuts Franchise"
  }
  { "franchisor_ownerships" => [ownership] }
end

# Wraps the first franchisor ownership under a singular key.
def franchisor_ownership_response
  { "franchisor_ownership" => franchisor_ownerships_response["franchisor_ownerships"].first }
end
end
| 24.828221 | 184 | 0.51149 |
1a770a4d8c70f5fb3abb5a7f81eb53ec9c7987cb | 4,305 | # Copyright (C) 2019 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module Mongo
  module Crypt
    # A class that implements I/O methods between the driver and
    # the MongoDB server or mongocryptd.
    #
    # @api private
    class EncryptionIO
      # Creates a new EncryptionIO object with information about how to connect
      # to the key vault.
      #
      # @param [ Mongo::Client ] client: The client used to connect to the collection
      #   that stores the encrypted documents, defaults to nil
      # @param [ Mongo::Client ] mongocryptd_client: The client connected to mongocryptd,
      #   defaults to nil
      # @param [ Mongo::Collection ] key_vault_collection: The Collection object
      #   representing the database collection storing the encryption data keys
      #
      # @note This class expects that the key_vault_client and key_vault_namespace
      #   options are not nil and are in the correct format
      def initialize(client: nil, mongocryptd_client: nil, key_vault_collection:)
        @client = client
        @mongocryptd_client = mongocryptd_client
        @key_vault_collection = key_vault_collection
      end

      # Query for keys in the key vault collection using the provided
      # filter.
      #
      # @param [ Hash ] filter
      #
      # @return [ Array<BSON::Document> ] The query results
      def find_keys(filter)
        @key_vault_collection.find(filter).to_a
      end

      # Insert a document into the key vault collection.
      #
      # @param [ Hash ] document
      #
      # @return [ Mongo::Operation::Insert::Result ] The insertion result
      def insert(document)
        @key_vault_collection.insert_one(document)
      end

      # Get collection info for a collection matching the provided filter.
      #
      # @param [ Hash ] filter Only the 'name' key of the filter is used.
      #
      # @return [ Hash ] The collection information, or nil when no
      #   collection with that name exists
      def collection_info(filter)
        # Lists all collections and filters client-side by name.
        result = @client.database.list_collections
        name = filter['name']
        result.find { |r| r['name'] == name }
      end

      # Send the command to mongocryptd to be marked with intent-to-encrypt
      # markings.
      #
      # @param [ Hash ] cmd
      #
      # @return [ Hash ] The marked command (first document of the reply)
      def mark_command(cmd)
        begin
          response = @mongocryptd_client.database.command(cmd)
        rescue Error::NoServerAvailable => e
          # When spawning is bypassed, surface the connection failure as-is;
          # otherwise spawn mongocryptd once and retry the command.
          raise e if @client.encryption_options[:mongocryptd_bypass_spawn]
          @client.spawn_mongocryptd
          response = @mongocryptd_client.database.command(cmd)
        end
        return response.first
      end

      # Get information about the AWS encryption key and feed it to the the
      # KmsContext object.
      #
      # @param [ Mongo::Crypt::KmsContext ] kms_context A KmsContext object
      #   corresponding to one AWS KMS data key. Contains information about
      #   the endpoint at which to establish a TLS connection and the message
      #   to send on that connection.
      def feed_kms(kms_context)
        endpoint = kms_context.endpoint
        message = kms_context.message
        # There is no specific timeout written in the spec. See SPEC-1394
        # for a discussion and updates on what this timeout should be.
        socket_timeout = 10
        # NOTE(review): port is a String when present in the endpoint and an
        # Integer (443) when defaulted — confirm Socket::SSL accepts both.
        host, port = endpoint.split(':')
        port ||= 443
        ssl_socket = Socket::SSL.new(host, port, host, socket_timeout, Socket::PF_INET)
        ssl_socket.write(message)
        num_bytes_needed = kms_context.bytes_needed
        # Keep feeding response bytes to libmongocrypt until it reports it
        # has everything it needs.
        # NOTE(review): the inner loop drains the socket to EOF each pass;
        # a server that holds the connection open would block here — confirm
        # against the driver's SSL socket semantics.
        while num_bytes_needed > 0
          bytes = []
          while !ssl_socket.eof?
            bytes << ssl_socket.readbyte
          end
          kms_context.feed(bytes.pack('C*'))
          num_bytes_needed = kms_context.bytes_needed
        end
      end
    end
  end
end
| 34.166667 | 89 | 0.656214 |
5d8fd9f6f6bd8b4f3ac8d8b6c2f44dfc8693dbd1 | 15,967 | require 'rubygems'
require 'rest_client'
require 'nokogiri'
#require '/var/www/openshift/broker/config/environment'
require 'logger'
require 'parseconfig'
require 'rspec'
# Default API host; overridden from the OpenShift node config when present.
$hostname = "localhost"
begin
  if File.exists?("/etc/openshift/node.conf")
    config = ParseConfig.new("/etc/openshift/node.conf")
    # Strip trailing comments and surrounding double quotes from the value.
    val = config["PUBLIC_HOSTNAME"].gsub(/[ \t]*#[^\n]*/,"")
    val = val[1..-2] if val.start_with? "\""
    $hostname = val
  end
rescue
  puts "Unable to determine hostname. Defaulting to #{$hostname}\n"
end

# Default cloud domain; also read from node.conf when available.
$cloud_domain = "example.com"
begin
  if File.exists?("/etc/openshift/node.conf")
    config = ParseConfig.new("/etc/openshift/node.conf")
    val = config["CLOUD_DOMAIN"].gsub(/[ \t]*#[^\n]*/,"")
    val = val[1..-2] if val.start_with? "\""
    $cloud_domain = val
  end
rescue
  puts "Unable to determine cloud domain. Defaulting to #{$cloud_domain}\n"
end

# Per-scenario random suffix consumed by sub_random to keep names unique.
@random = nil

Before do
  @base_url = "https://#{$hostname}/broker/rest"
end

After do |scenario|
  #domains = ["api#{@random}", "apix#{@random}", "apiY#{@random}", "app-api#{@random}"]
  @random = nil
  # Restore any Rails configuration values overridden during the scenario
  # (recorded by the "the Rails ... configuration key ..." step).
  (@undo_config || []).each do |(main, secondary, value)|
    Rails.configuration[main.to_sym][secondary.to_sym] = value
  end
end
# Composite step: a domain containing an application cannot be renamed (422)
# until the application is deleted, after which the rename succeeds.
Given /^a new user, verify updating a domain with an php-([^ ]+) application in it over ([^ ]+) format$/ do |php_version, format|
  steps %{
    Given a new user
    And I accept "#{format}"
    When I send a POST request to "/domains" with the following:"name=api<random>"
    Then the response should be "201"
    When I send a POST request to "/domains/api<random>/applications" with the following:"name=app&cartridge=php-#{php_version}"
    Then the response should be "201"
    When I send a PUT request to "/domains/api<random>" with the following:"name=apix<random>"
    Then the response should be "422"
    And the error message should have "severity=error&exit_code=128"
    When I send a DELETE request to "/domains/api<random>/applications/app"
    Then the response should be "200"
    When I send a PUT request to "/domains/api<random>" with the following:"name=apix<random>"
    Then the response should be "200"
    And the response should be a "domain" with attributes "name=apix<random>"
  }
end

# Composite step: deleting a domain that still holds an application fails
# (422) until the application is removed first.
Given /^a new user, verify deleting a domain with an php-([^ ]+) application in it over ([^ ]+) format$/ do |php_version, format|
  steps %{
    Given a new user
    And I accept "#{format}"
    When I send a POST request to "/domains" with the following:"name=api<random>"
    Then the response should be "201"
    When I send a POST request to "/domains/api<random>/applications" with the following:"name=app&cartridge=php-#{php_version}"
    Then the response should be "201"
    When I send a DELETE request to "/domains/api<random>"
    Then the response should be "422"
    And the error message should have "severity=error&exit_code=128"
    When I send a DELETE request to "/domains/api<random>/applications/app"
    Then the response should be "200"
  }
end

# Composite step: force-delete removes a domain even when it still holds an
# application.
Given /^a new user, verify force deleting a domain with an php-([^ ]+) application in it over ([^ ]+) format$/ do |php_version, format|
  steps %{
    Given a new user
    And I accept "#{format}"
    When I send a POST request to "/domains" with the following:"name=api<random>"
    Then the response should be "201"
    When I send a POST request to "/domains/api<random>/applications" with the following:"name=app&cartridge=php-#{php_version}"
    Then the response should be "201"
    When I send a DELETE request to "/domains/api<random>?force=true"
    Then the response should be "200"
  }
end

# Composite step: drives an application through stop/start/restart events and
# checks the reported gear state after each transition.
Given /^a new user, create a ([^ ]+) application using ([^ ]+) format and verify application state on gear$/ do |cart_name, format|
  steps %{
    Given a new user
    And I accept "#{format}"
    When I send a POST request to "/domains" with the following:"name=api<random>"
    Then the response should be "201"
    When I send a POST request to "/domains/api<random>/applications" with the following:"name=app&cartridge=#{cart_name}"
    Then the response should be "201"
    When I send a GET request to "/domains/api<random>/applications/app/gear_groups"
    Then the response should be a "gear-group/gears/gear" with attributes "state=started"
    When I send a POST request to "/domains/api<random>/applications/app/events" with the following:"event=stop"
    Then the response should be "200"
    When I send a GET request to "/domains/api<random>/applications/app/gear_groups"
    Then the response should be a "gear-group/gears/gear" with attributes "state=stopped"
    When I send a POST request to "/domains/api<random>/applications/app/events" with the following:"event=start"
    Then the response should be "200"
    When I send a GET request to "/domains/api<random>/applications/app/gear_groups"
    Then the response should be a "gear-group/gears/gear" with attributes "state=started"
    When I send a POST request to "/domains/api<random>/applications/app/events" with the following:"event=restart"
    Then the response should be "200"
    When I send a GET request to "/domains/api<random>/applications/app/gear_groups"
    Then the response should be a "gear-group/gears/gear" with attributes "state=started"
    When I send a DELETE request to "/domains/api<random>/applications/app"
    Then the response should be "200"
  }
end
# Creates (and, when required, registers) a fresh random user for the
# scenario; credentials are reused by every request step.
Given /^a new user$/ do
  @random = rand(99999999)
  @username = "rest-test-#{@random}"
  @password = "xyz123"
  # NOTE(review): $registration_required and register_user are expected to be
  # provided by the wider test environment — confirm.
  register_user(@username, @password) if $registration_required
  #TODO authenticate user
end

# Temporarily overrides a Rails configuration key, recording the previous
# value so the After hook can restore it.
Given /^the Rails ([^\s]+) configuration key ([^\s]+) is "([^\"]*)"$/ do |main, secondary, value|
  (@undo_config ||= []) << [main, secondary, Rails.configuration.config[main.to_sym][secondary.to_sym]]
  Rails.configuration.config[main.to_sym][secondary.to_sym] = value
end

# Sets both Accept and Content-Type request headers.
Given /^I send and accept "([^\"]*)"$/ do |type|
  @headers = {:accept => type, :content_type => type}
end

# Sets only the Accept header; @accept_type also drives response parsing in
# the assertion steps below (XML vs JSON).
Given /^I accept "([^\"]*)"$/ do |type|
  @accept_type = type
  @headers = {:accept => type.to_s.downcase}
end

# Fetches the quickstart list unauthenticated and remembers the first
# quickstart's id for later <uuid> substitution.
Given /^a quickstart UUID$/ do
  path = sub_random('/quickstarts')
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :get, :url => url, :headers => @headers)
  begin
    @response = @request.execute()
  rescue => e
    @response = e.response
  end
  # Get a normalized list of quickstarts
  quickstarts = unpacked_data(@response.body)
  @uuid = quickstarts[0]['quickstart']['id']
end

# Caps the number of domains the current user may create.
When /^the user has MAX_DOMAINS set to (\d*)$/ do |max_domains|
  set_max_domains(@username,max_domains)
end
# Authenticated GET. Non-2xx responses are captured into @response for later
# assertion steps; timeouts are re-raised with request context.
When /^I send a GET request to "([^\"]*)"$/ do |path|
  path = sub_random(path)
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :get, :url => url,
    :user => @username, :password => @password, :headers => @headers)
  begin
    @response = @request.execute()
  rescue Timeout::Error, RestClient::RequestTimeout => e
    raise Exception.new("#{e.message}: #{@request.method} #{@request.url} timed out")
  rescue RestClient::ExceptionWithResponse => e
    @response = e.response
  end
end

# Same as above but without credentials; also substitutes <uuid> in the path.
When /^I send an unauthenticated GET request to "([^\"]*)"$/ do |path|
  path = sub_random(sub_uuid(path))
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :get, :url => url, :headers => @headers)
  begin
    @response = @request.execute()
  rescue Timeout::Error, RestClient::RequestTimeout => e
    raise Exception.new("#{e.message}: #{@request.method} #{@request.url} timed out")
  rescue RestClient::ExceptionWithResponse => e
    @response = e.response
  end
end
# POST with a form-encoded body parsed from "k=v&k2=v2". Repeated keys
# accumulate into an array (needed for multi-valued parameters).
When /^I send a POST request to "([^\"]*)" with the following:"([^\"]*)"$/ do |path, body|
  path = sub_random(path)
  body = sub_random(body)
  #puts "path #{path}"
  #puts "body #{body}"
  payload = {}
  params = body.split("&")
  params.each do |param|
    # split with limit 2 so values may themselves contain "=".
    key, value = param.split("=", 2)
    if payload[key].nil?
      payload[key] = value
    else
      values = [payload[key], value]
      payload[key] = values.flatten
    end
  end
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :post, :url => url,
    :user => @username, :password => @password, :headers => @headers,
    :payload => payload, :timeout => 180)
  begin
    @response = @request.execute()
  rescue Timeout::Error, RestClient::RequestTimeout => e
    @request.inspect
    raise Exception.new("#{e.message}: #{@request.method} #{@request.url} with payload #{@request.payload} timed out")
  rescue RestClient::ExceptionWithResponse => e
    @response = e.response
  end
end

# PUT variant. Unlike the POST step, repeated keys overwrite (last one wins).
When /^I send a PUT request to "([^\"]*)" with the following:"([^\"]*)"$/ do |path, body|
  path = sub_random(path)
  body = sub_random(body)
  #puts "path #{path}"
  #puts "body #{body}"
  payload = {}
  params = body.split("&")
  params.each do |param|
    key, value = param.split("=", 2)
    payload[key] = value
  end
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :put, :url => url,
    :user => @username, :password => @password, :headers => @headers,
    :payload => payload, :timeout => 180)
  begin
    @response = @request.execute()
  rescue Timeout::Error, RestClient::RequestTimeout => e
    @request.inspect
    raise Exception.new("#{e.message}: #{@request.method} #{@request.url} with payload #{@request.payload} timed out")
  rescue RestClient::ExceptionWithResponse => e
    @response = e.response
  end
end
# Authenticated DELETE; error responses are captured for later assertions.
When /^I send a DELETE request to "([^\"]*)"$/ do |path|
  path = sub_random(path)
  #puts "path #{path}"
  url = @base_url + path.to_s
  @request = RestClient::Request.new(:method => :delete, :url => url,
    :user => @username, :password => @password, :headers => @headers)
  begin
    @response = @request.execute()
  rescue Timeout::Error, RestClient::RequestTimeout => e
    raise Exception.new("#{e.message}: #{@request.method} #{@request.url} timed out")
  rescue RestClient::ExceptionWithResponse => e
    @response = e.response
  end
end

# Asserts an exact HTTP status, dumping the body on mismatch for debugging.
Then /^the response should be "([^\"]*)"$/ do |status|
  puts "#{@response.body}" if @response.code != status.to_i
  @response.code.should == status.to_i
end
# Asserts that the response exposes every named hypermedia link, each with a
# parseable href plus method, rel, and required_params fields.
#
# Cleanup: removed the unused `response_acceptable` local and the stray
# trailing `true` expression, which had no effect on the step's outcome.
Then /^the response should have the link(?:s)? "([^\"]*)"$/ do |link|
  link_names = link.split(",")
  # Collect names that are missing entirely or lack a required field.
  missing_names = link_names.select do |name|
    if link = links[name.strip]
      # Raises URI::InvalidURIError when the href is malformed.
      URI.parse(link['href'])
      !link['method'] || !link['rel'] || !link['required_params']
    else
      true
    end
  end
  raise "Response did not contain link(s) #{missing_names.join(", ")}" unless missing_names.empty?
end
# Passes when the status matches any entry of a comma-separated status list.
Then /^the response should be one of "([^\"]*)"$/ do |acceptable_statuses|
  response_acceptable = false
  statuses = acceptable_statuses.split(",")
  statuses.each do | status|
    if @response.code == status.to_i
      response_acceptable = true
      break
    end
  end
  puts "#{@response.body}" unless response_acceptable
  response_acceptable.should == true
end

# Asserts attribute values on a single element, addressed by a "/"-separated
# tag path, parsing the body as XML (XPath) or JSON per @accept_type.
Then /^the response should be a "([^\"]*)" with attributes "([^\"]*)"$/ do |tag, attributes_str|
  attributes_str = sub_random(attributes_str)
  attributes_array = attributes_str.split("&")
  if @accept_type.upcase == "XML"
    #puts @response.body
    result = Nokogiri::XML(@response.body)
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      #puts "#{result.xpath("//#{tag}/#{key}").text} #{value}"
      result.xpath("//#{tag}/#{key}").text.should == value
    end
  elsif @accept_type.upcase == "JSON"
    result = JSON.parse(@response.body)
    obj = result["data"]
    # Walk the tag path; arrays are traversed via their first element.
    tag = tag.split("/").each do |t|
      case obj.class.to_s
      when 'Hash'
        obj = obj[t] unless obj[t].nil?
      when 'Array'
        obj = obj.first
      end
    end
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      obj[key].should == value
    end
  else
    false
  end
end

# List variant of the attribute assertion; for JSON it checks keys directly
# on the "data" payload without walking a tag path.
Then /^the response should be a list of "([^\"]*)" with attributes "([^\"]*)"$/ do |tag, attributes_str|
  attributes_str = sub_random(attributes_str)
  attributes_array = attributes_str.split("&")
  if @accept_type.upcase == "XML"
    #puts @response.body
    result = Nokogiri::XML(@response.body)
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      #puts "#{result.xpath("//#{tag}/#{key}").text} #{value}"
      result.xpath("//#{tag}/#{key}").text.should == value
    end
  elsif @accept_type.upcase == "JSON"
    result = JSON.parse(@response.body)
    obj = result["data"]
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      obj[key].should == value
    end
  else
    false
  end
end
# Asserts key/value pairs on every <message> element (XML) or entry of the
# "messages" array (JSON) of an error response.
Then /^the error message should have "([^\"]*)"$/ do |attributes_str|
  attributes_str = sub_random(attributes_str)
  attributes_array = attributes_str.split("&")
  if @accept_type.upcase == "XML"
    #puts @response.body
    result = Nokogiri::XML(@response.body)
    messages = result.xpath("//message")
    #puts messages
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      # XML payloads use dashes where the step text uses underscores.
      key = key.sub("_", "-")
      messages.each do |message|
        #puts message
        #puts message.xpath("#{key}").text
        message.xpath("#{key}").text.should == value
      end
    end
  elsif @accept_type.upcase == "JSON"
    result = JSON.parse(@response.body)
    messages = result["messages"]
    attributes_array.each do |attributes|
      key, value = attributes.split("=", 2)
      messages.each do |message|
        message[key].to_s.should == value
      end
    end
  else
    false
  end
end

# Verifies the cartridge descriptor (a YAML payload) lists the given deps.
Then /^the response descriptor should have "([^\"]*)" as dependencies$/ do |deps|
  #puts @response.body
  if @accept_type.upcase == "XML"
    page = Nokogiri::XML(@response.body)
    desc_yaml = page.xpath("//response/data/datum")
    desc = YAML.load(desc_yaml.text.to_s)
  elsif @accept_type.upcase == "JSON"
    page = JSON.parse(@response.body)
    desc_yaml = page["data"]
    desc = YAML.load(desc_yaml)
  end
  #desc = YAML.load(desc_yaml.text.to_s)
  deps.split(",").each do |dep|
    # NOTE(review): `.should` is called with no matcher here, so this line
    # does not actually assert anything — confirm whether that is intended.
    desc["Requires"].include?(dep).should
  end
end
# Asserts the body is a non-empty list of cartridges or quickstarts, running
# the per-item structural check on each entry.
Then /^the response should be a list of "([^\"]*)"$/ do |list_type|
  items = unpacked_data(@response.body)
  if items.length < 1
    raise("I got an empty list of #{list_type}")
  end
  if list_type == 'cartridges'
    items.each do |cartridge|
      check_cartridge(cartridge)
    end
  elsif list_type == 'quickstarts'
    items.each do |item|
      check_quickstart(item)
    end
  else
    raise("I don't recognize list type #{list_type}")
  end
end

# Single-item variant: structurally checks the first unpacked element.
Then /^the response should be a "([^\"]*)"$/ do |item_type|
  item = unpacked_data(@response.body)[0]
  if item_type == 'cartridge'
    check_cartridge(item)
  elsif item_type == 'quickstart'
    check_quickstart(item)
  else
    raise("I don't recognize item type #{item_type}")
  end
end
# Structural check: a cartridge entry must carry a non-blank "name" key.
# Raises a RuntimeError when the name is absent or all-whitespace.
def check_cartridge(cartridge)
  name_present = cartridge.has_key?("name") && cartridge['name'].match(/\S+/)
  raise("I found a cartridge without a name") unless name_present
end
# Structural check: a quickstart entry must nest a non-blank id under the
# "quickstart" key. Raises a RuntimeError otherwise.
def check_quickstart(quickstart)
  id_present = quickstart.has_key?("quickstart") &&
               quickstart["quickstart"].has_key?("id") &&
               quickstart["quickstart"]["id"].match(/\S+/)
  raise("I found a quickstart without an ID") unless id_present
end
# Normalizes a response body into an Array of data items, parsing per the
# negotiated content type (@accept_type: 'JSON' or 'XML').
#
# Bug fix: the +response_body+ parameter was previously ignored in favor of
# re-reading @response.body; it is now honored. All existing callers pass
# @response.body, so their behavior is unchanged.
def unpacked_data(response_body)
  if @accept_type.upcase == 'JSON'
    data = JSON.parse(response_body)['data']
  elsif @accept_type.upcase == 'XML'
    # Hash.from_xml is provided by ActiveSupport.
    data = Hash.from_xml(response_body)['response']['data']['template']
  end
  # Single items come back as a Hash; always hand callers an Array.
  return data.is_a?(Array) ? data : [data]
end
# Replaces every "<random>" placeholder with the scenario's @random suffix.
# Passes nil and placeholder-free values through untouched.
def sub_random(value)
  return value unless value && value.include?("<random>")
  value.gsub("<random>", @random.to_s)
end
# Replaces the FIRST "<uuid>" placeholder with the remembered @uuid
# (String#sub, unlike sub_random's gsub). Nil passes through untouched.
def sub_uuid(value)
  return value unless value && value.include?("<uuid>")
  value.sub("<uuid>", @uuid)
end
# Memoized accessor for the "data" payload of the last response, which
# carries the hypermedia links. Only implemented for the JSON accept type;
# returns nil (and stays unmemoized) otherwise.
def links
  return @links if @links
  @links = JSON.parse(@response.body)['data'] if @accept_type.upcase == "JSON"
end
| 33.614737 | 135 | 0.660299 |
79fef90295bde30135b7c62775a901d5a56d7a72 | 2,827 | # frozen_string_literal: true
require 'spec_helper'
module LicenseFinder
  # Spec: ConanInfoParser must turn the textual output of `conan info`
  # (fixture conan.txt) into the structured array below — one hash per
  # package section, with multi-valued fields ("Requires", "Required by")
  # parsed into arrays.
  describe ConanInfoParser do
    subject { ConanInfoParser.new }
    # Expected parse result: the consumer project (conanfile.txt) followed
    # by each dependency with its remote, URL, license and relationships.
    let(:parsed_config) do
      [
        {
          'name' => 'conanfile.txt',
          'ID' => '4c3dfe99a9c2d5003148e0054b9bacf58ac69f66',
          'BuildID' => 'None',
          'Requires' => ['Poco/1.7.9@pocoproject/stable', 'OpenSSL/1.0.2l@conan/stable', 'range-v3/0.3.0@ericniebler/stable']
        },
        {
          'name' => 'OpenSSL/1.0.2l@conan/stable',
          'ID' => '0197c20e330042c026560da838f5b4c4bf094b8a',
          'BuildID' => 'None',
          'Remote' => 'conan-center=https://center.conan.io',
          'URL' => 'http://github.com/lasote/conan-openssl',
          'License' => 'The current OpenSSL licence is an \'Apache style\' license: https://www.openssl.org/source/license.html',
          'Updates' => 'Version not checked',
          'Creation date' => '2017-08-21 10:28:57',
          'Required by' => ['Poco/1.7.9@pocoproject/stable', 'conanfile.txt'],
          'Requires' => ['zlib/1.2.11@conan/stable']
        },
        {
          'name' => 'Poco/1.7.9@pocoproject/stable',
          'ID' => '33fe7ea34efc04fb6d81fabd9e34f51da57f9e09',
          'BuildID' => 'None',
          'Remote' => 'conan-center=https://center.conan.io',
          'URL' => 'http://github.com/lasote/conan-poco',
          'License' => 'The Boost Software License 1.0',
          'Updates' => 'Version not checked',
          'Creation date' => '2017-09-20 16:51:10',
          'Required by' => ['conanfile.txt'],
          'Requires' => ['OpenSSL/1.0.2l@conan/stable']
        },
        {
          'name' => 'range-v3/0.3.0@ericniebler/stable',
          'ID' => '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9',
          'BuildID' => 'None',
          'Remote' => 'conan-center=https://center.conan.io',
          'URL' => 'https://github.com/ericniebler/range-v3',
          'License' => 'Boost Software License - Version 1.0 - August 17th, 2003',
          'Updates' => 'Version not checked',
          'Creation date' => '2017-06-30 13:20:56',
          'Required by' => ['conanfile.txt']
        },
        {
          'name' => 'zlib/1.2.11@conan/stable',
          'ID' => '09512ff863f37e98ed748eadd9c6df3e4ea424a8',
          'BuildID' => 'None',
          'Remote' => 'conan-center=https://center.conan.io',
          'URL' => 'http://github.com/lasote/conan-zlib',
          'License' => 'http://www.zlib.net/zlib_license.html',
          'Updates' => 'Version not checked',
          'Creation date' => '2017-09-25 14:42:53',
          'Required by' => ['OpenSSL/1.0.2l@conan/stable']
        }
      ]
    end
    it 'should parse valid conan info output' do
      # fixture_from is a spec helper that loads spec fixture files.
      expect(subject.parse(fixture_from('conan.txt'))).to eq(parsed_config)
    end
  end
end
| 40.971014 | 129 | 0.551468 |
bb00daad9ab205ceb4fce36893272aabb32f4a71 | 3,200 | # frozen_string_literal: true
module Results
  module Csv
    # Responsible for converting DB result rows into calls to buffer.write with appropriate
    # header names. Expects the following columns in the passed rows:
    # - question_code
    # - value
    # - time_value
    # - date_value
    # - datetime_value
    # - latitude
    # - longitude
    # - altitude
    # - accuracy
    # - answer_option_name
    # - choice_option_name
    # - option_level_name
    class AnswerProcessor
      attr_accessor :buffer, :row

      LOCATION_COLS = %w[latitude longitude altitude accuracy].freeze
      VALUE_COLS = %w[value time_value date_value datetime_value].freeze

      # buffer must respond to write(header, value, append: bool).
      def initialize(buffer)
        self.buffer = buffer
      end

      # Routes one DB row to the right write strategy: select answers,
      # location components, or a plain value — in that priority order.
      def process(row)
        self.row = row
        write_select_cells if select_cols?
        write_location_cells if location_cols?
        write_value unless select_cols? || location_cols?
      end

      private

      # Question code used as the CSV header (or header prefix).
      def code
        row["question_code"]
      end

      # True when the row represents a select-one or select-multiple answer.
      def select_cols?
        row["answer_option_name"].present? || row["choice_option_name"].present?
      end

      # True when any of the four geolocation components is present.
      def location_cols?
        LOCATION_COLS.any? { |c| row[c].present? }
      end

      # Writes all four location values with appropriate header.
      # Headers are "<code>:<localized suffix>" via I18n.
      def write_location_cells
        LOCATION_COLS.each do |c|
          suffix = I18n.t("response.csv_headers.#{c}")
          buffer.write("#{code}:#{suffix}", row[c]) if row[c].present?
        end
      end

      # Select-one answers get a per-level header; select-multiple choices are
      # appended to the same cell. Coded values take precedence over names.
      def write_select_cells
        if row["answer_option_name"].present?
          suffix = (level = row["option_level_name"]) ? ":#{level}" : ""
          value = row["answer_option_value"] || row["answer_option_name"]
          buffer.write("#{code}#{suffix}", value)
        else # select multiple
          value = row["choice_option_value"] || row["choice_option_name"]
          buffer.write(code, value, append: true)
        end
      end

      # Writes the first non-blank column in VALUE_COLS.
      # Free-text values get their line endings normalized to CRLF in place.
      def write_value
        VALUE_COLS.each do |c|
          next if row[c].blank?
          convert_line_endings(row[c]) if c == "value"
          buffer.write(code, row[c])
          break
        end
      end

      # Mutates str in place so every line ending becomes \r\n.
      def convert_line_endings(str)
        # We do this with loops instead of regexps b/c regexps are slow.
        convert_unix_line_endings_to_windows(str)
        convert_mac_line_endings_to_windows(str)
      end

      def convert_unix_line_endings_to_windows(str)
        # Insert \r before any \ns without \rs before
        offset = 0
        loop do
          idx = str.index("\n", offset)
          break if idx.nil?
          offset = idx + 1
          if idx.zero? || str[idx - 1] != "\r"
            str.insert(idx, "\r")
            # Skip past the newly inserted character.
            offset += 1
          end
        end
      end

      def convert_mac_line_endings_to_windows(str)
        # Insert \n after any \rs without \ns after
        offset = 0
        loop do
          idx = str.index("\r", offset)
          break if idx.nil?
          offset = idx + 1
          if str[idx + 1] != "\n"
            str.insert(idx + 1, "\n")
            # Skip past the newly inserted character.
            offset += 1
          end
        end
      end
    end
  end
end
| 27.826087 | 91 | 0.583438 |
e8042fff7d23bfedd25a691292d00f42ff647286 | 9,825 | class Event < ApplicationRecord
# --- Associations ---
has_many :event_instances
belongs_to :project
serialize :exclusions
# touch: true refreshes the creator's cache key whenever the event changes.
belongs_to :creator, class_name: 'User', touch: true

# Human-readable URLs derived from the event name.
extend FriendlyId
friendly_id :name, use: :slugged

include IceCube

# --- Validations ---
validates :name, :time_zone, :repeats, :category, :start_datetime, :duration, presence: true
validates :url, uri: true, :allow_blank => true
validates :repeats_every_n_weeks, :presence => true, :if => lambda { |e| e.repeats == 'weekly' or e.repeats == 'biweekly' }
validates :repeat_ends_on, :presence => true, :allow_blank => false, :if => lambda{ |e| (e.repeats == 'weekly' or e.repeats == 'biweekly') and e.repeat_ends_string == 'on' }
validate :must_have_at_least_one_repeats_weekly_each_days_of_the_week, :if => lambda { |e| e.repeats == 'weekly' or e.repeats == 'biweekly' }

# Transient (non-persisted) attributes.
attr_accessor :next_occurrence_time_attr
attr_accessor :repeat_ends_string

# Windows used when collecting occurrences.
COLLECTION_TIME_FUTURE = 10.days
COLLECTION_TIME_PAST = 300.minutes
NEXT_SCRUM_COLLECTION_TIME_PAST = 15.minutes
# Allowed values for the repeat-related form selects.
REPEATS_OPTIONS = %w[never weekly biweekly]
REPEAT_ENDS_OPTIONS = %w[on never]
DAYS_OF_THE_WEEK = %w[monday tuesday wednesday thursday friday saturday sunday]
# hoped the below would help address the issue about how rails 4 -> 5 is going to treat setting non-booleans
# to booleans - adding the setter fixes the specs, but breaks the acceptance tests
#
# def repeat_ends=(repeat_ends)
# super repeat_ends == 'on'
# end
#
# def repeat_ends
# super ? 'on' : 'never'
# end
# Rebuild the cached upcoming-events list on every save (force: true busts
# the cache).
after_save do
  Event.upcoming_events(nil, true)
end

# Mirrors the boolean repeat_ends column into the string form ('on'/'never')
# consumed by the UI and by the repeat_ends_on validation.
def set_repeat_ends_string
  @repeat_ends_string = repeat_ends ? 'on' : 'never'
end
# Future events, optionally scoped to a project (project may be nil).
def self.base_future_events(project)
  project.nil? ? Event.future_events : Event.future_events.where(project_id: project)
end

# Events that are one-off, repeat forever, or whose repetition has not yet
# ended.
def self.future_events
  Event.where('repeats = \'never\' OR repeat_ends = false OR repeat_ends IS NULL OR repeat_ends_on > ?', Time.now)
end

# True when the IceCube schedule carries a weekly recurrence rule.
def repeats?
  schedule.recurrence_rules.map { |rule| rule.class.name }.include?('IceCube::WeeklyRule')
end

# Cached list of upcoming occurrence hashes ({ event:, time: }).
# NOTE(review): when a project is given the cache is bypassed entirely, so
# the project portion of the cache key below is always blank — confirm.
def self.upcoming_events(project=nil, force=false)
  return self.upcoming_events_raw(project) if project
  Rails.cache.fetch("upcoming_events:#{project.nil? ? '' : project.title}", force: force, expires_in: 1.hour) do
    self.upcoming_events_raw
  end
end

# Builds the flat, time-sorted list of upcoming occurrences across events,
# then drops those already finished.
def self.upcoming_events_raw(project=nil)
  events = Event.base_future_events(project).inject([]) do |memo, event|
    memo << event.next_occurrences
  end.flatten.sort_by { |e| e[:time] }
  Event.remove_past_events(events)
end

# Removes occurrences whose instance has ended, unless the most recent
# event instance is still live.
def self.remove_past_events(events)
  events.delete_if {|event| (event[:time] + event[:event].duration.minutes) < Time.current &&
    !event[:event].event_instances.last.try(:live?)}
end

# PairProgramming events ("hookups").
def self.hookups
  Event.where(category: "PairProgramming")
end

# Hookups that have neither started nor expired without ever starting.
def self.pending_hookups
  pending = []
  hookups.each do |h|
    started = h.last_hangout && h.last_hangout.started?
    expired_without_starting = !h.last_hangout && Time.now.utc > h.instance_end_time
    pending << h if !started && !expired_without_starting
  end
  pending
end
def event_date
start_datetime
end
def start_time
start_datetime
end
def series_end_time
repeat_ends && repeat_ends_on.present? ? repeat_ends_on.to_time : nil
end
def instance_end_time
(start_datetime + duration*60).utc
end
def end_date
if (series_end_time < start_time)
(event_date.to_datetime + 1.day).strftime('%Y-%m-%d')
else
event_date
end
end
def live?
last_hangout.present? && last_hangout.live?
end
def final_datetime_for_collection(options = {})
if repeating_and_ends? && options[:end_time].present?
final_datetime = [options[:end_time], repeat_ends_on.to_datetime].min
elsif repeating_and_ends?
final_datetime = repeat_ends_on.to_datetime
else
final_datetime = options[:end_time]
end
final_datetime ? final_datetime.to_datetime.utc : COLLECTION_TIME_FUTURE.from_now
end
def start_datetime_for_collection(options = {})
first_datetime = options.fetch(:start_time, COLLECTION_TIME_PAST.ago)
first_datetime = [start_datetime, first_datetime.to_datetime].max
first_datetime.to_datetime.utc
end
def next_occurrence_time_method(start = Time.now)
next_occurrence = next_event_occurrence_with_time(start)
next_occurrence.present? ? next_occurrence[:time] : nil
end
def self.next_occurrence(event_type, begin_time = NEXT_SCRUM_COLLECTION_TIME_PAST.ago)
events_with_times = []
events_with_times = Event.where(category: event_type).map { |event|
event.next_event_occurrence_with_time(begin_time)
}.compact
return nil if events_with_times.empty?
events_with_times = events_with_times.sort_by { |e| e[:time] }
events_with_times[0][:event].next_occurrence_time_attr = events_with_times[0][:time]
return events_with_times[0][:event]
end
# The IceCube Schedule's occurrences_between method requires a time range as input to find the next time
# Most of the time, the next instance will be within the next weeek.do
# But some event instances may have been excluded, so there's not guarantee that the next time for an event will be within the next week, or even the next month
# To cover these cases, the while loop looks farther and farther into the future for the next event occurrence, just in case there are many exclusions.
def next_event_occurrence_with_time(start = Time.now, final= 2.months.from_now)
begin_datetime = start_datetime_for_collection(start_time: start)
final_datetime = repeating_and_ends? ? repeat_ends_on : final
n_days = 8
end_datetime = n_days.days.from_now
event = nil
return next_event_occurrence_with_time_inner(start, final_datetime) if self.repeats == 'never'
while event.nil? && end_datetime < final_datetime
event = next_event_occurrence_with_time_inner(start, final_datetime)
n_days *= 2
end_datetime = n_days.days.from_now
end
event
end
def next_event_occurrence_with_time_inner(start_time, end_time)
occurrences = occurrences_between(start_time, end_time)
{ event: self, time: occurrences.first.start_time } if occurrences.present?
end
def next_occurrences(options = {})
begin_datetime = start_datetime_for_collection(options)
final_datetime = final_datetime_for_collection(options)
limit = options.fetch(:limit, 100)
[].tap do |occurences|
occurrences_between(begin_datetime, final_datetime).each do |time|
occurences << { event: self, time: time }
return occurences if occurences.count >= limit
end
end
end
def occurrences_between(start_time, end_time)
schedule.occurrences_between(start_time.to_time, end_time.to_time)
end
def repeats_weekly_each_days_of_the_week=(repeats_weekly_each_days_of_the_week)
self.repeats_weekly_each_days_of_the_week_mask = (repeats_weekly_each_days_of_the_week & DAYS_OF_THE_WEEK).map { |r| 2**DAYS_OF_THE_WEEK.index(r) }.inject(0, :+)
end
def repeats_weekly_each_days_of_the_week
DAYS_OF_THE_WEEK.reject do |r|
((repeats_weekly_each_days_of_the_week_mask || 0) & 2**DAYS_OF_THE_WEEK.index(r)).zero?
end
end
def remove_from_schedule(timedate)
# best if schedule is serialized into the events record... and an attribute.
if timedate >= Time.now && timedate == next_occurrence_time_method
_next_occurrences = next_occurrences(limit: 2)
self.start_datetime = (_next_occurrences.size > 1) ? _next_occurrences[1][:time] : timedate + 1.day
elsif timedate >= Time.now
self.exclusions ||= []
self.exclusions << timedate
end
save!
end
def schedule()
sched = series_end_time.nil? || !repeat_ends ? IceCube::Schedule.new(start_datetime) : IceCube::Schedule.new(start_datetime, :end_time => series_end_time)
case repeats
when 'never'
sched.add_recurrence_time(start_datetime)
when 'weekly', 'biweekly'
days = repeats_weekly_each_days_of_the_week.map { |d| d.to_sym }
sched.add_recurrence_rule IceCube::Rule.weekly(repeats_every_n_weeks).day(*days)
end
self.exclusions ||= []
self.exclusions.each do |ex|
sched.add_exception_time(ex)
end
sched
end
def start_time_with_timezone
DateTime.parse(start_time.strftime('%k:%M ')).in_time_zone(time_zone)
end
def last_hangout
event_instances.order(:created_at).last
end
def recent_hangouts
event_instances
.where('created_at BETWEEN ? AND ?', 1.days.ago + duration.minutes, DateTime.now.end_of_day)
.order(created_at: :desc)
end
def less_than_ten_till_start?
return true if within_current_event_duration?
Time.now > next_event_occurrence_with_time[:time] - 10.minutes
rescue
false
end
def within_current_event_duration?
after_current_start_time? and before_current_end_time?
end
def current_start_time
schedule.previous_occurrence(Time.now)
end
def current_end_time
schedule.previous_occurrence(Time.now) + duration*60
end
def before_current_end_time?
Time.now < current_end_time
rescue
false
end
def after_current_start_time?
Time.now > current_start_time
rescue
false
end
def jitsi_room_link
"https://meet.jit.si/AV_#{name.tr(' ', '_').gsub(/[^0-9a-zA-Z_]/i, '')}"
end
def modifier
User.find modifier_id
end
private
def must_have_at_least_one_repeats_weekly_each_days_of_the_week
if repeats_weekly_each_days_of_the_week.empty?
errors.add(:base, 'You must have at least one repeats weekly each days of the week')
end
end
def repeating_and_ends?
repeats != 'never' && repeat_ends && !repeat_ends_on.blank?
end
end
| 33.192568 | 175 | 0.727532 |
# Gem packaging manifest for cqm-reports.
Gem::Specification.new do |spec|
  spec.name        = 'cqm-reports'
  spec.summary     = 'A library for import and export of reports for use with electronic Clinical Quality Measures (eCQMs).'
  spec.description = 'A library for import and export of reports for use with electronic Clinical Quality Measures (eCQMs).'
  spec.email       = '[email protected]'
  spec.homepage    = 'https://github.com/projecttacoma/cqm-reports'
  spec.authors     = ['The MITRE Corporation']
  spec.license     = 'Apache-2.0'
  spec.version     = '3.1.3'

  # Runtime dependencies (alphabetical).
  spec.add_dependency 'cqm-models', '~> 3.0.3'
  spec.add_dependency 'cqm-validators', '~> 3.0.0'
  spec.add_dependency 'erubis', '~> 2.7'
  spec.add_dependency 'log4r', '~> 1.1'
  spec.add_dependency 'memoist', '~> 0.9'
  spec.add_dependency 'mongoid-tree', '> 2.0'
  spec.add_dependency 'mustache'
  spec.add_dependency 'nokogiri', '~> 1.10'
  spec.add_dependency 'uuid', '~> 2.3'
  spec.add_dependency 'zip-zip', '~> 0.3'

  # Everything under lib/ that the gem ships, plus top-level project files.
  spec.files = Dir.glob('lib/**/*.rb') + Dir.glob('lib/**/*.json') + Dir.glob('lib/**/*.mustache') + Dir.glob('lib/**/*.rake') + ['Gemfile', 'README.md', 'Rakefile']
end
| 35.266667 | 162 | 0.652174 |
f7d5192f997331ce6904dc1979dd84d31b4dc974 | 259 | require 'wrap_response_decorator'
module HaikuBot
  # Grape API endpoint whose JSON responses are post-processed by the
  # WrapResponseDecorator Rack middleware (inserted via +use+).
  class WrapResponse < Grape::API
    use WrapResponseDecorator
    format :json
    # Routes below are mounted under /decorated.
    namespace :decorated do
      desc 'Returns pong.'
      # GET /decorated/ping -> {"ping":"pong"} (before decoration).
      get :ping do
        { ping: 'pong' }
      end
    end
  end
end
| 17.266667 | 33 | 0.644788 |
7acfef7e975303b7b9d3f0564bc43764788d1ed3 | 3,606 | require 'spec_helper'
require 'matchers/paper'
describe Arx do
  context '.search' do
    # Titles of the two known arXiv papers referenced by ID throughout.
    let :papers do
      [
        'Parallel Coordinate Descent for L1-Regularized Loss Minimization',
        'Optical absorption of non-interacting tight-binding electrons in a Peierls-distorted chain at half band-filling'
      ]
    end
    # Signature coverage: .search accepts 0..n ID arguments plus the
    # :query, :sort_by and :sort_order keywords in any combination.
    it { is_expected.to respond_to(:search).with(0..1).arguments }
    it { is_expected.to respond_to(:search).with_unlimited_arguments }
    it { is_expected.to respond_to(:search).with_keywords(:query) }
    it { is_expected.to respond_to(:search).with_keywords(:sort_by) }
    it { is_expected.to respond_to(:search).with_keywords(:sort_order) }
    it { is_expected.to respond_to(:search).with_keywords(:query, :sort_by) }
    it { is_expected.to respond_to(:search).with_keywords(:query, :sort_order) }
    it { is_expected.to respond_to(:search).with_keywords(:sort_by, :sort_order) }
    it { is_expected.to respond_to(:search).with_keywords(:query, :sort_by, :sort_order) }
    context 'with a block' do
      it { expect {|b| Arx.search &b}.to yield_control.once }
      it { expect {|b| Arx.search &b}.to yield_with_args Query }
    end
    context 'with one ID' do
      context '(valid)' do
        subject { Arx.search '1105.5379' }
        it { is_expected.to be_a Paper }
        it { is_expected.to get_paper papers.first }
      end
      # A restricting block that matches nothing raises for single-ID lookups.
      context '(valid restricted)' do
        it { expect { Arx.search('1809.09415') {|q| q.title 'bob'} }.to raise_error Error::MissingPaper }
      end
      context '(valid URL)' do
        subject { Arx.search 'https://arxiv.org/abs/1105.5379' }
        it { is_expected.to be_a Paper }
        it { is_expected.to get_paper papers.first }
      end
      context '(invalid)' do
        it { expect { Arx.search '1234.1234' }.to raise_error Error::MissingPaper }
        it { expect { Arx.search '1809.0000' }.to raise_error Error::MissingPaper }
        it { expect { Arx.search '1809.00000' }.to raise_error Error::MissingPaper }
      end
      context '(invalid format)' do
        it { expect { Arx.search 'abc' }.to raise_error ArgumentError }
      end
    end
    context 'with multiple IDs' do
      context '(valid)' do
        subject { Arx.search '1105.5379', 'cond-mat/9609089' }
        it { is_expected.to be_an Array }
        it { is_expected.to all be_a Paper }
        it { is_expected.to get_papers papers }
      end
      # Multi-ID searches return an empty array (not an error) when restricted.
      context '(valid restricted)' do
        it { expect(Arx.search('1809.09415', 'cond-mat/9609089') {|q| q.title 'bob'}).to eq [] }
      end
      context '(invalid)' do
        it { expect { Arx.search '1234.1234', 'invalid-category/1234567' }.to raise_error ArgumentError }
      end
      context '(invalid format)' do
        it { expect { Arx.search '1105.5379', 'cond-mat/9609089', 'a' }.to raise_error ArgumentError }
      end
    end
    context 'with a predefined query' do
      context '(valid)' do
        subject { Query.new('1105.5379') }
        context 'with a block' do
          it { expect {|b| Arx.search(query: subject, &b)}.to yield_with_args subject }
          it { expect(Arx.search(query: subject) {}).to get_papers papers.first }
        end
        context 'without a block' do
          it { expect(Arx.search(query: subject)).to get_papers papers.first }
        end
      end
      context '(invalid)' do
        it { expect { Arx.search(query: String.new) }.to raise_error TypeError }
      end
    end
  end
  context ".get" do
    it "should alias .search" do
      expect(subject.method(:get).original_name).to eq :search
    end
  end
end
# Adds a public_token string column, with a unique index, to message_threads.
class AddMessageThreadsPublicTokenColumn < ActiveRecord::Migration
  def up
    add_column :message_threads, :public_token, :string
    add_index :message_threads, :public_token, unique: true
  end

  # Dropping the column also drops its index on supported adapters.
  def down
    remove_column :message_threads, :public_token
  end
end
| 24.545455 | 66 | 0.777778 |
b9ace5e3422b4b5268b35afa84be36b2516b6789 | 13,222 | module ArelExtensions
module Visitors
class Arel::Visitors::SQLite
DATE_MAPPING = {
'd' => '%d', 'm' => '%m', 'w' => '%W', 'y' => '%Y', 'wd' => '%w', 'M' => '%M',
'h' => '%H', 'mn' => '%M', 's' => '%S'
}.freeze
DATE_FORMAT_DIRECTIVES = { # ISO C / POSIX
'%Y' => '%Y', '%C' => '', '%y' => '%y', '%m' => '%m', '%B' => '%M', '%b' => '%b', '%^b' => '%b', # year, month
'%d' => '%d', '%e' => '%e', '%j' => '%j', '%w' => '%w', '%A' => '%W', # day, weekday
'%H' => '%H', '%k' => '%k', '%I' => '%I', '%l' => '%l', '%P' => '%p', '%p' => '%p', # hours
'%M' => '%M', '%S' => '%S', '%L' => '', '%N' => '%f', '%z' => '' # seconds, subseconds
}.freeze
NUMBER_COMMA_MAPPING = {
'fr_FR' => {',' => ' ', '.' =>','}
}.freeze
# String functions
def visit_ArelExtensions_Nodes_IMatches o, collector # insensitive on ASCII
collector = visit o.left.ci_collate, collector
collector << ' LIKE '
collector = visit o.right.ci_collate, collector
if o.escape
collector << ' ESCAPE '
visit o.escape, collector
else
collector
end
end
def visit_ArelExtensions_Nodes_AiMatches o, collector
collector = visit o.left.ai_collate, collector
collector << ' LIKE '
collector = visit o.right.ai_collate, collector
if o.escape
collector << ' ESCAPE '
visit o.escape, collector
else
collector
end
end
def visit_ArelExtensions_Nodes_AiIMatches o, collector
collector = visit o.left.collate(true,true), collector
collector << ' LIKE '
collector = visit o.right.collate(true,true), collector
if o.escape
collector << ' ESCAPE '
visit o.escape, collector
else
collector
end
end
def visit_ArelExtensions_Nodes_SMatches o, collector
collector = visit o.left.collate, collector
collector << ' LIKE '
collector = visit o.right.collate, collector
if o.escape
collector << ' ESCAPE '
visit o.escape, collector
else
collector
end
end
def visit_ArelExtensions_Nodes_Collate o, collector
if o.ai
collector = visit o.expressions.first, collector
collector << ' COLLATE NOACCENTS'
elsif o.ci
collector = visit o.expressions.first, collector
collector << ' COLLATE NOCASE'
else
collector = visit o.expressions.first, collector
collector << ' COLLATE BINARY'
end
collector
end
def visit_ArelExtensions_Nodes_IDoesNotMatch o, collector
collector = visit o.left.lower, collector
collector << ' NOT LIKE '
collector = visit o.right.lower(o.right), collector
if o.escape
collector << ' ESCAPE '
visit o.escape, collector
else
collector
end
end
# Date operations
def visit_ArelExtensions_Nodes_DateAdd o, collector
collector << "date("
collector = visit o.expressions.first, collector
collector << COMMA
collector = visit o.sqlite_value, collector
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_DateDiff o, collector
case o.left_node_type
when :ruby_time, :datetime, :time
collector << "strftime('%s', "
collector = visit o.left, collector
collector << ") - strftime('%s', "
collector = visit o.right, collector
else
collector << "julianday("
collector = visit o.left, collector
collector << ") - julianday("
collector = visit o.right, collector
end
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_Duration o, collector
collector << "strftime('#{DATE_MAPPING[o.left]}'#{COMMA}"
collector = visit o.right, collector
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_Locate o, collector
collector << "instr("
collector = visit o.expr, collector
collector << COMMA
collector = visit o.right, collector
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_Concat o, collector
collector << '('
o.expressions.each_with_index { |arg, i|
collector = visit arg, collector
collector << ' || ' unless i == o.expressions.length - 1
}
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_Substring o, collector
collector << "SUBSTR("
o.expressions.each_with_index { |arg, i|
collector << COMMA if i != 0
collector = visit arg, collector
}
collector << ")"
collector
end
def visit_ArelExtensions_Nodes_IsNull o, collector
collector = visit o.expr, collector
collector << ' IS NULL'
collector
end
def visit_ArelExtensions_Nodes_IsNotNull o, collector
collector = visit o.expr, collector
collector << ' IS NOT NULL'
collector
end
def visit_ArelExtensions_Nodes_Rand o, collector
collector << "RANDOM("
if o.left != nil && o.right != nil
collector = visit o.left, collector
collector << COMMA
collector = visit o.right, collector
end
collector << ")"
collector
end
def visit_Arel_Nodes_Regexp o, collector
collector = visit o.left, collector
collector << " REGEXP"
collector = visit o.right, collector
collector
end
def visit_Arel_Nodes_NotRegexp o, collector
collector = visit o.left, collector
collector << " NOT REGEXP "
collector = visit o.right, collector
collector
end
def visit_ArelExtensions_Nodes_Wday o, collector
collector << "STRFTIME('%w',"
collector = visit o.date, collector
collector << ")"
collector
end
# CAST(
# CASE
# WHEN 3.42 >= 0 THEN CAST(3.42 AS INT)
# WHEN CAST(3.42 AS INT) = 3.42 THEN CAST(3.42 AS INT)
# ELSE CAST((3.42 - 1.0) AS INT)
# END
# AS FLOAT
# )
def visit_ArelExtensions_Nodes_Floor o, collector
collector << "CAST(CASE WHEN "
collector = visit o.left, collector
collector << " >= 0 THEN CAST("
collector = visit o.left, collector
collector << " AS INT) WHEN CAST("
collector = visit o.left, collector
collector << " AS INT) = "
collector = visit o.left, collector
collector << " THEN CAST("
collector = visit o.left, collector
collector << " AS INT) ELSE CAST(("
collector = visit o.left, collector
collector << " - 1.0) AS INT) END AS FLOAT)"
collector
end
def visit_ArelExtensions_Nodes_Ceil o, collector
collector << "CASE WHEN ROUND("
collector = visit o.left, collector
collector << ", 1) > ROUND("
collector = visit o.left, collector
collector << ") THEN ROUND("
collector = visit o.left, collector
collector << ") + 1 ELSE ROUND("
collector = visit o.left, collector
collector << ") END"
collector
end
if Arel::VERSION.to_i < 7
def visit_ArelExtensions_InsertManager_BulkValues o, collector
o.left.each_with_index do |row, idx|
collector << 'SELECT '
len = row.length - 1
row.zip(o.cols).each_with_index { |(value, attr), i|
case value
when Arel::Nodes::SqlLiteral, Arel::Nodes::BindParam
collector = visit value.as(attr.name), collector
else
collector << quote(value, attr && column_for(attr)).to_s
if idx == 0
collector << " AS "
collector << quote(attr.name)
end
end
collector << COMMA unless i == len
}
collector << ' UNION ALL ' unless idx == o.left.length - 1
end
collector
end
else
def visit_ArelExtensions_InsertManager_BulkValues o, collector
o.left.each_with_index do |row, idx|
collector << 'SELECT '
len = row.length - 1
row.zip(o.cols).each_with_index { |(value, attr), i|
case value
when Arel::Nodes::SqlLiteral, Arel::Nodes::BindParam
collector = visit value.as(attr.name), collector
when Integer
collector << value.to_s
if idx == 0
collector << " AS "
collector << quote(attr.name)
end
else
collector << (attr && attr.able_to_type_cast? ? quote(attr.type_cast_for_database(value)) : quote(value).to_s)
if idx == 0
collector << " AS "
collector << quote(attr.name)
end
end
collector << COMMA unless i == len
}
collector << ' UNION ALL ' unless idx == o.left.length - 1
end
collector
end
end
def visit_ArelExtensions_Nodes_Union o, collector
collector =
if o.left.is_a?(Arel::SelectManager)
visit o.left.ast, collector
else
visit o.left, collector
end
collector << " UNION "
collector =
if o.right.is_a?(Arel::SelectManager)
visit o.right.ast, collector
else
visit o.right, collector
end
collector
end
def visit_ArelExtensions_Nodes_UnionAll o, collector
collector =
if o.left.is_a?(Arel::SelectManager)
visit o.left.ast, collector
else
visit o.left, collector
end
collector << " UNION ALL "
collector =
if o.right.is_a?(Arel::SelectManager)
visit o.right.ast, collector
else
visit o.right, collector
end
collector
end
def get_time_converted element
if element.is_a?(Time)
return Arel::Nodes::NamedFunction.new('STRFTIME',[element, '%H:%M:%S'])
elsif element.is_a?(Arel::Attributes::Attribute)
col = Arel::Table.engine.connection.schema_cache.columns_hash(element.relation.table_name)[element.name.to_s]
if col && (col.type == :time)
return Arel::Nodes::NamedFunction.new('STRFTIME',[element, '%H:%M:%S'])
else
return element
end
else
return element
end
end
remove_method(:visit_Arel_Nodes_GreaterThanOrEqual) rescue nil
def visit_Arel_Nodes_GreaterThanOrEqual o, collector
collector = visit get_time_converted(o.left), collector
collector << " >= "
collector = visit get_time_converted(o.right), collector
collector
end
remove_method(:visit_Arel_Nodes_GreaterThan) rescue nil
def visit_Arel_Nodes_GreaterThan o, collector
collector = visit get_time_converted(o.left), collector
collector << " > "
collector = visit get_time_converted(o.right), collector
collector
end
remove_method(:visit_Arel_Nodes_LessThanOrEqual) rescue nil
def visit_Arel_Nodes_LessThanOrEqual o, collector
collector = visit get_time_converted(o.left), collector
collector << " <= "
collector = visit get_time_converted(o.right), collector
collector
end
remove_method(:visit_Arel_Nodes_LessThan) rescue nil
def visit_Arel_Nodes_LessThan o, collector
collector = visit get_time_converted(o.left), collector
collector << " < "
collector = visit get_time_converted(o.right), collector
collector
end
alias_method(:old_visit_Arel_Nodes_As, :visit_Arel_Nodes_As) rescue nil
def visit_Arel_Nodes_As o, collector
if o.left.is_a?(Arel::Nodes::Binary)
collector << '('
collector = visit o.left, collector
collector << ')'
else
collector = visit o.left, collector
end
collector << " AS \""
collector = visit o.right, collector
collector << "\""
collector
end
def visit_ArelExtensions_Nodes_FormattedNumber o, collector
format = Arel::Nodes::NamedFunction.new('printf',[Arel::Nodes.build_quoted(o.original_string),o.left])
locale_map = NUMBER_COMMA_MAPPING[o.locale]
if locale_map
format = format.replace(',',locale_map[',']).replace('.',locale_map['.'])
end
visit format, collector
collector
end
end
end
end
| 33.055 | 128 | 0.550371 |
ff57f976614de6f898bd6015c5d22b427a6569ff | 2,897 | # encoding: utf-8
module Ebooks
# This generator uses data identical to a markov model, but
# instead of making a chain by looking up bigrams it uses the
# positions to randomly replace suffixes in one sentence with
# matching suffixes in another
  class SuffixGenerator
    # Build a generator from a corpus of tikified sentences
    # @param sentences [Array<Array<Integer>>]
    # @return [SuffixGenerator]
    def self.build(sentences)
      SuffixGenerator.new(sentences)
    end
    # Indexes the corpus for suffix lookup. Sentences shorter than 2 tikis are
    # dropped. @unigrams maps a tiki to every [sentence, position] that follows
    # it; @bigrams maps a tiki pair to the positions following that pair.
    # INTERIM (defined elsewhere) marks sentence starts/ends.
    # @param sentences [Array<Array<Integer>>]
    def initialize(sentences)
      @sentences = sentences.reject { |s| s.length < 2 }
      @unigrams = {}
      @bigrams = {}
      @sentences.each_with_index do |tikis, i|
        last_tiki = INTERIM
        tikis.each_with_index do |tiki, j|
          @unigrams[last_tiki] ||= []
          @unigrams[last_tiki] << [i, j]
          @bigrams[last_tiki] ||= {}
          @bigrams[last_tiki][tiki] ||= []
          if j == tikis.length-1 # Mark sentence endings
            @unigrams[tiki] ||= []
            @unigrams[tiki] << [i, INTERIM]
            @bigrams[last_tiki][tiki] << [i, INTERIM]
          else
            @bigrams[last_tiki][tiki] << [i, j+1]
          end
          last_tiki = tiki
        end
      end
      self
    end
    # Generate a recombined sequence of tikis
    # @param passes [Integer] number of times to recombine
    # @param n [Symbol] :unigrams or :bigrams (affects how conservative the model is)
    # @return [Array<Integer>]
    def generate(passes=5, n=:unigrams)
      index = rand(@sentences.length)
      tikis = @sentences[index]
      used = [index] # Sentences we've already used
      verbatim = [tikis] # Verbatim sentences to avoid reproducing
      0.upto(passes-1) do
        varsites = {} # Map bigram start site => next tiki alternatives
        tikis.each_with_index do |tiki, i|
          next_tiki = tikis[i+1]
          break if next_tiki.nil?
          alternatives = (n == :unigrams) ? @unigrams[next_tiki] : @bigrams[tiki][next_tiki]
          # Filter out suffixes from previous sentences
          alternatives.reject! { |a| a[1] == INTERIM || used.include?(a[0]) }
          varsites[i] = alternatives unless alternatives.empty?
        end
        # Try candidate splice sites in random order until one yields a variant
        # that isn't a verbatim subsequence of any sentence already seen.
        variant = nil
        varsites.to_a.shuffle.each do |site|
          start = site[0]
          site[1].shuffle.each do |alt|
            verbatim << @sentences[alt[0]]
            suffix = @sentences[alt[0]][alt[1]..-1]
            potential = tikis[0..start+1] + suffix
            # Ensure we're not just rebuilding some segment of another sentence
            unless verbatim.find { |v| NLP.subseq?(v, potential) || NLP.subseq?(potential, v) }
              used << alt[0]
              variant = potential
              break
            end
          end
          break if variant
        end
        tikis = variant if variant
      end
      tikis
    end
  end
| 30.177083 | 95 | 0.581981 |
261aef9518e2a2516f715f3dabe91fd533ec2643 | 2,344 | # frozen_string_literal: true
# Test helpers for building S/MIME certificate chains (root CA, intermediate
# CA, and short-lived leaf certificates) with OpenSSL.
module SmimeHelper
  INFINITE_EXPIRY = 1000.years
  SHORT_EXPIRY = 30.minutes

  # Self-signed root CA key/cert pair.
  def generate_root
    issue(cn: 'RootCA', signed_by: nil, expires_in: INFINITE_EXPIRY, certificate_authority: true)
  end

  # Intermediate CA signed by +signer_ca+.
  def generate_intermediate(signer_ca:)
    issue(cn: 'IntermediateCA', signed_by: signer_ca, expires_in: INFINITE_EXPIRY, certificate_authority: true)
  end

  # Short-lived leaf (email) certificate signed by +signer_ca+.
  def generate_cert(signer_ca:, expires_in: SHORT_EXPIRY)
    issue(signed_by: signer_ca, expires_in: expires_in, certificate_authority: false)
  end

  # Generates a fresh 4096-bit RSA key and an X.509 certificate.
  # +signed_by+ is a { key:, cert: } hash of the issuing CA, or nil to self-sign.
  # CA certs get CA extensions; leaf certs get an email subjectAltName plus
  # S/MIME key-usage extensions.
  # @return [Hash] { key:, cert: }
  def issue(email_address: '[email protected]', cn: nil, signed_by:, expires_in:, certificate_authority:)
    key = OpenSSL::PKey::RSA.new(4096)

    cert = OpenSSL::X509::Certificate.new
    cert.subject = OpenSSL::X509::Name.parse(certificate_authority ? "/CN=#{cn}" : "/CN=#{email_address}")
    # Self-signed unless an issuing CA cert was provided.
    cert.issuer = signed_by&.fetch(:cert, nil)&.subject || cert.subject
    cert.not_before = Time.now
    cert.not_after = expires_in.from_now
    cert.public_key = key.public_key
    cert.serial = 0x0
    cert.version = 2

    factory = OpenSSL::X509::ExtensionFactory.new
    if certificate_authority
      factory.subject_certificate = cert
      factory.issuer_certificate = cert
      cert.add_extension(factory.create_extension('subjectKeyIdentifier', 'hash'))
      cert.add_extension(factory.create_extension('basicConstraints', 'CA:TRUE', true))
      cert.add_extension(factory.create_extension('keyUsage', 'cRLSign,keyCertSign', true))
    else
      cert.add_extension(factory.create_extension('subjectAltName', "email:#{email_address}", false))
      cert.add_extension(factory.create_extension('basicConstraints', 'CA:FALSE', true))
      cert.add_extension(factory.create_extension('keyUsage', 'digitalSignature,keyEncipherment', true))
      cert.add_extension(factory.create_extension('extendedKeyUsage', 'clientAuth,emailProtection', false))
    end

    cert.sign(signed_by&.fetch(:key, nil) || key, OpenSSL::Digest::SHA256.new)

    { key: key, cert: cert }
  end
end
| 39.066667 | 117 | 0.726536 |
38a7126c7a7f300a2a6b1cc7382c8c7fb45c8272 | 6,493 | # frozen_string_literal: true
class Bridgetown::Site
module Configurable
# Set the site's configuration. This handles side-effects caused by
# changing values in the configuration.
#
# @param config [Configuration]
# An instance of {Configuration},
# containing the new configuration.
#
# @return [Configuration]
# The processed instance of {Configuration}
def config=(config)
@config = config.clone
# Source and destination may not be changed after the site has been created.
@root_dir = File.expand_path(config["root_dir"]).freeze
@source = File.expand_path(config["source"]).freeze
@dest = File.expand_path(config["destination"]).freeze
configure_cache
configure_component_paths
configure_file_read_opts
self.permalink_style = (config["permalink"] || "pretty").to_sym
@config
end
def uses_resource?
config[:content_engine] == "resource"
end
# Returns a base path from which the site is served (aka `/cool-site`) or
# `/` if served from root.
#
# @param strip_slash_only [Boolean] set to true if you wish "/" to be returned as ""
# @return [String]
def base_path(strip_slash_only: false)
(config[:base_path] || config[:baseurl]).then do |path|
strip_slash_only ? path.to_s.sub(%r{^/$}, "") : path
end
end
def baseurl
Bridgetown::Deprecator.deprecation_message "Site#baseurl is now Site#base_path"
base_path(strip_slash_only: true).presence
end
def defaults_reader
@defaults_reader ||= Bridgetown::DefaultsReader.new(self)
end
# Returns the current instance of {FrontmatterDefaults} or
# creates a new instance {FrontmatterDefaults} if it doesn't already exist.
#
# @return [FrontmatterDefaults]
# Returns an instance of {FrontmatterDefaults}
def frontmatter_defaults
@frontmatter_defaults ||= Bridgetown::FrontmatterDefaults.new(self)
end
# Prefix a path or paths with the {#root_dir} directory.
#
# @see Bridgetown.sanitized_path
# @param paths [Array<String>]
# An array of paths to prefix with the root_dir directory using the
# {Bridgetown.sanitized_path} method.
#
# @return [Array<String>] Return an array of updated paths if multiple paths given.
def in_root_dir(*paths)
paths.reduce(root_dir) do |base, path|
Bridgetown.sanitized_path(base, path.to_s)
end
end
# Prefix a path or paths with the {#source} directory.
#
# @see Bridgetown.sanitized_path
# @param paths [Array<String>]
# An array of paths to prefix with the source directory using the
# {Bridgetown.sanitized_path} method.
# @return [Array<String>] Return an array of updated paths if multiple paths given.
def in_source_dir(*paths)
# TODO: this operation is expensive across thousands of iterations. Look for ways
# to workaround use of this wherever possible...
paths.reduce(source) do |base, path|
Bridgetown.sanitized_path(base, path.to_s)
end
end
# Prefix a path or paths with the {#dest} directory.
#
# @see Bridgetown.sanitized_path
# @param paths [Array<String>]
# An array of paths to prefix with the destination directory using the
# {Bridgetown.sanitized_path} method.
#
# @return [Array<String>] Return an array of updated paths if multiple paths given.
def in_dest_dir(*paths)
paths.reduce(dest) do |base, path|
Bridgetown.sanitized_path(base, path)
end
end
# Prefix a path or paths with the {#cache_dir} directory.
#
# @see Bridgetown.sanitized_path
# @param paths [Array<String>]
# An array of paths to prefix with the {#cache_dir} directory using the
# {Bridgetown.sanitized_path} method.
#
# @return [Array<String>] Return an array of updated paths if multiple paths given.
def in_cache_dir(*paths)
paths.reduce(cache_dir) do |base, path|
Bridgetown.sanitized_path(base, path)
end
end
# The full path to the directory that houses all the registered collections
# for the current site.
#
# If `@collections_path` is specified use its value.
#
# If `@collections` is not specified and `config["collections_dir"]` is
# specified, prepend it with {#source} and assign it to
# {#collections_path}.
#
# If `@collections` is not specified and `config["collections_dir"]` is not
# specified, assign {#source} to `@collections_path`
#
# @return [String] Returns the full path to the collections directory
# @see #config
# @see #source
# @see #collections_path
# @see #in_source_dir
def collections_path
dir_str = config["collections_dir"]
@collections_path ||= dir_str.empty? ? source : in_source_dir(dir_str)
end
def frontend_bundling_path
in_root_dir(".bridgetown-cache", "frontend-bundling")
end
private
# Disable Marshaling cache to disk in Safe Mode
def configure_cache
@cache_dir = in_root_dir(config["cache_dir"]).freeze
Bridgetown::Cache.cache_dir = File.join(cache_dir, "Bridgetown/Cache")
Bridgetown::Cache.disable_disk_cache! if config["disable_disk_cache"]
end
def configure_component_paths # rubocop:todo Metrics/AbcSize
# Loop through plugins paths first
plugin_components_load_paths = Bridgetown::PluginManager.source_manifests
.filter_map(&:components)
local_components_load_paths = config["components_dir"].then do |dir|
dir.is_a?(Array) ? dir : [dir]
end
local_components_load_paths.map! do |dir|
if !!(dir =~ %r!^\.\.?/!)
# allow ./dir or ../../dir type options
File.expand_path(dir.to_s, root_dir)
else
in_source_dir(dir.to_s)
end
end
config.components_load_paths = plugin_components_load_paths + local_components_load_paths
# Because "first constant wins" in Zeitwerk, we need to load the local
# source components _before_ we load any from plugins
config.autoload_paths += config.components_load_paths.reverse
end
def configure_file_read_opts
self.file_read_opts = {}
file_read_opts[:encoding] = config["encoding"] if config["encoding"]
self.file_read_opts = Bridgetown::Utils.merged_file_read_opts(self, {})
end
end
end
| 34.908602 | 95 | 0.67026 |
bf2290e2cc396982504ad814d9f41007f9f4bbe5 | 7,480 | # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
require 'date'
require_relative 'database_details'
# rubocop:disable Lint/UnneededCopDisableDirective, Metrics/LineLength
module OCI
# The details of the Oracle Database Cloud Service to be registered as a target database in Data Safe.
class DataSafe::Models::DatabaseCloudServiceDetails < DataSafe::Models::DatabaseDetails
# The OCID of the VM cluster in which the database is running.
# @return [String]
attr_accessor :vm_cluster_id
# The OCID of the cloud database system registered as a target database in Data Safe.
# @return [String]
attr_accessor :db_system_id
# The database service name.
# @return [String]
attr_accessor :service_name
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
# rubocop:disable Style/SymbolLiteral
'database_type': :'databaseType',
'infrastructure_type': :'infrastructureType',
'vm_cluster_id': :'vmClusterId',
'db_system_id': :'dbSystemId',
'service_name': :'serviceName'
# rubocop:enable Style/SymbolLiteral
}
end
# Attribute type mapping.
def self.swagger_types
{
# rubocop:disable Style/SymbolLiteral
'database_type': :'String',
'infrastructure_type': :'String',
'vm_cluster_id': :'String',
'db_system_id': :'String',
'service_name': :'String'
# rubocop:enable Style/SymbolLiteral
}
end
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:disable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
# @option attributes [String] :infrastructure_type The value to assign to the {OCI::DataSafe::Models::DatabaseDetails#infrastructure_type #infrastructure_type} proprety
# @option attributes [String] :vm_cluster_id The value to assign to the {#vm_cluster_id} property
# @option attributes [String] :db_system_id The value to assign to the {#db_system_id} property
# @option attributes [String] :service_name The value to assign to the {#service_name} property
def initialize(attributes = {})
return unless attributes.is_a?(Hash)
attributes['databaseType'] = 'DATABASE_CLOUD_SERVICE'
super(attributes)
# convert string to symbol for hash key
attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }
self.vm_cluster_id = attributes[:'vmClusterId'] if attributes[:'vmClusterId']
raise 'You cannot provide both :vmClusterId and :vm_cluster_id' if attributes.key?(:'vmClusterId') && attributes.key?(:'vm_cluster_id')
self.vm_cluster_id = attributes[:'vm_cluster_id'] if attributes[:'vm_cluster_id']
self.db_system_id = attributes[:'dbSystemId'] if attributes[:'dbSystemId']
raise 'You cannot provide both :dbSystemId and :db_system_id' if attributes.key?(:'dbSystemId') && attributes.key?(:'db_system_id')
self.db_system_id = attributes[:'db_system_id'] if attributes[:'db_system_id']
self.service_name = attributes[:'serviceName'] if attributes[:'serviceName']
raise 'You cannot provide both :serviceName and :service_name' if attributes.key?(:'serviceName') && attributes.key?(:'service_name')
self.service_name = attributes[:'service_name'] if attributes[:'service_name']
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
# rubocop:enable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines
# Checks equality by comparing each attribute.
# @param [Object] other the other object to be compared
def ==(other)
return true if equal?(other)
self.class == other.class &&
database_type == other.database_type &&
infrastructure_type == other.infrastructure_type &&
vm_cluster_id == other.vm_cluster_id &&
db_system_id == other.db_system_id &&
service_name == other.service_name
end
# rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines
# @see the `==` method
# @param [Object] other the other object to be compared
def eql?(other)
self == other
end
# rubocop:disable Metrics/AbcSize, Layout/EmptyLines
# Calculates hash code according to all attributes.
# @return [Fixnum] Hash code
def hash
[database_type, infrastructure_type, vm_cluster_id, db_system_id, service_name].hash
end
# rubocop:enable Metrics/AbcSize, Layout/EmptyLines
# rubocop:disable Metrics/AbcSize, Layout/EmptyLines
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /^Array<(.*)>/i
# check to ensure the input is an array given that the the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
public_method("#{key}=").call(
attributes[self.class.attribute_map[key]]
.map { |v| OCI::Internal::Util.convert_to_type(Regexp.last_match(1), v) }
)
end
elsif !attributes[self.class.attribute_map[key]].nil?
public_method("#{key}=").call(
OCI::Internal::Util.convert_to_type(type, attributes[self.class.attribute_map[key]])
)
end
# or else data not found in attributes(hash), not an issue as the data can be optional
end
self
end
# rubocop:enable Metrics/AbcSize, Layout/EmptyLines
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = public_method(attr).call
next if value.nil? && !instance_variable_defined?("@#{attr}")
hash[param] = _to_hash(value)
end
hash
end
private
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
# rubocop:enable Lint/UnneededCopDisableDirective, Metrics/LineLength
| 38.358974 | 245 | 0.684893 |
91574b31de7988251e2dc380a9209aeecf4c82e9 | 1,039 | require 'set'
describe "SortedSet#classify" do
before(:each) do
@set = SortedSet["one", "two", "three", "four"]
end
it "yields each Object in self in sorted order" do
res = []
@set.classify { |x| res << x }
res.should == ["one", "two", "three", "four"].sort
end
ruby_version_is "" ... "1.8.8" do
it "raises a LocalJumpError when passed no block" do
lambda { @set.classify }.should raise_error(LocalJumpError)
end
end
ruby_version_is "1.8.8" do
it "returns an Enumerator when passed no block" do
enum = @set.classify
enum.should be_an_instance_of(enumerator_class)
classified = enum.each { |x| x.length }
classified.should == { 3 => SortedSet["one", "two"], 4 => SortedSet["four"], 5 => SortedSet["three"] }
end
end
it "classifies the Objects in self based on the block's return value" do
classified = @set.classify { |x| x.length }
classified.should == { 3 => SortedSet["one", "two"], 4 => SortedSet["four"], 5 => SortedSet["three"] }
end
end
| 29.685714 | 108 | 0.623677 |
0100444a9e542d5e17c983fe7cb9764a927d268e | 372 | require_relative '../helper'
describe "show-input" do
before do
@t = pry_tester
end
it 'should correctly show the current lines in the input buffer' do
@t.push(*unindent(<<-STR).split("\n"))
def hello
puts :bing
STR
@t.process_command 'show-input'
expect(@t.last_output).to match(/\A\d+: def hello\n\d+: puts :bing/)
end
end
| 20.666667 | 74 | 0.629032 |
e26f2ae87c03ffaadeab3d9f5ac77fe65ebc9987 | 1,607 | module Fog
module AWS
class AutoScaling
class Real
require 'fog/aws/parsers/auto_scaling/describe_adjustment_types'
# Returns policy adjustment types for use in the put_scaling_policy
# action.
#
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'ResponseMetadata'<~Hash>:
# * 'RequestId'<~String> - Id of request
# * 'DescribeAdjustmentTypesResponse'<~Hash>:
# * 'AdjustmentTypes'<~Array>:
# * 'AdjustmentType'<~String> - A policy adjustment type.
#
# ==== See Also
# http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeAdjustmentTypes.html
#
def describe_adjustment_types()
request({
'Action' => 'DescribeAdjustmentTypes',
:idempotent => true,
:parser => Fog::Parsers::AWS::AutoScaling::DescribeAdjustmentTypes.new
})
end
end
class Mock
def describe_adjustment_types()
results = { 'AdjustmentTypes' => [] }
self.data[:adjustment_types].each do |adjustment_type|
results['AdjustmentTypes'] << { 'AdjustmentType' => adjustment_type }
end
response = Excon::Response.new
response.status = 200
response.body = {
'DescribeAdjustmentTypesResult' => results,
'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }
}
response
end
end
end
end
end
| 29.759259 | 108 | 0.554449 |
3861be64b0a1484c9594e985d6d9b5952a578d88 | 17,232 | #-- copyright
# OpenProject is a project management system.
# Copyright (C) 2012-2018 the OpenProject Foundation (OPF)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2017 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See docs/COPYRIGHT.rdoc for more details.
#++
require 'spec_helper'
describe ::API::V3::Queries::Schemas::QuerySchemaRepresenter do
include ::API::V3::Utilities::PathHelper
let(:query) do
query = Query.new project: project
# Stub some methods to avoid a test failure in unrelated tests
allow(query)
.to receive(:groupable_columns)
.and_return([])
allow(query)
.to receive(:available_columns)
.and_return([])
allow(query)
.to receive(:sortable_columns)
.and_return([])
query
end
let(:instance) { described_class.new(query, self_link, current_user: user, form_embedded: form_embedded) }
let(:user) do
FactoryBot.build_stubbed(:user).tap do |user|
allow(user)
.to receive(:allowed_to?)
.and_return(false)
end
end
let(:form_embedded) { false }
let(:self_link) { 'bogus_self_path' }
let(:project) { nil }
subject(:generated) { instance.to_json }
shared_examples_for 'has a collection of allowed values' do
before do
allow(query).to receive(available_values_method).and_return(available_values)
end
context 'when no values are allowed' do
let(:available_values) do
[]
end
it_behaves_like 'links to and embeds allowed values directly' do
let(:hrefs) { [] }
end
end
context 'when values are allowed' do
it_behaves_like 'links to and embeds allowed values directly' do
let(:hrefs) { expected_hrefs }
end
end
end
context 'generation' do
context '_links' do
it_behaves_like 'has an untitled link' do
let(:link) { 'self' }
let(:href) { self_link }
end
end
context 'attributes' do
describe '_type' do
it 'is Schema' do
expect(subject)
.to be_json_eql('Schema'.to_json)
.at_path('_type')
end
end
describe 'id' do
let(:path) { 'id' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Integer' }
let(:name) { Query.human_attribute_name('id') }
let(:required) { true }
let(:writable) { false }
end
end
describe 'name' do
let(:path) { 'name' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'String' }
let(:name) { Query.human_attribute_name('name') }
let(:required) { true }
let(:writable) { true }
end
it_behaves_like 'indicates length requirements' do
let(:min_length) { 1 }
let(:max_length) { 255 }
end
end
describe 'createdAt' do
let(:path) { 'createdAt' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'DateTime' }
let(:name) { Query.human_attribute_name('created_at') }
let(:required) { true }
let(:writable) { false }
end
end
describe 'updatedAt' do
let(:path) { 'updatedAt' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'DateTime' }
let(:name) { Query.human_attribute_name('updated_at') }
let(:required) { true }
let(:writable) { false }
end
end
describe 'user' do
let(:path) { 'user' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'User' }
let(:name) { Query.human_attribute_name('user') }
let(:required) { true }
let(:writable) { false }
let(:has_default) { true }
end
end
describe 'project' do
let(:path) { 'project' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Project' }
let(:name) { Query.human_attribute_name('project') }
let(:required) { false }
let(:writable) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when embedding' do
let(:form_embedded) { true }
it_behaves_like 'links to allowed values via collection link' do
let(:href) { api_v3_paths.query_available_projects }
end
end
end
describe 'public' do
let(:path) { 'public' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('public') }
let(:required) { false }
let(:writable) { false }
let(:has_default) { true }
end
context 'when having the :manage_public_queries permission' do
before do
allow(user)
.to receive(:allowed_to?)
.with(:manage_public_queries, project, global: project.nil?)
.and_return(true)
end
it 'marks public as writable' do
expect(subject)
.to be_json_eql(true)
.at_path('public/writable')
end
end
end
describe 'hidden' do
let(:path) { 'hidden' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('hidden') }
let(:required) { true }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'sums' do
let(:path) { 'sums' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('sums') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'timelineVisible' do
let(:path) { 'timelineVisible' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('timeline_visible') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'timelineZoomLevel' do
let(:path) { 'timelineZoomLevel' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'String' }
let(:name) { Query.human_attribute_name('timeline_zoom_level') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'timelineLabels' do
let(:path) { 'timelineLabels' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'QueryTimelineLabels' }
let(:name) { Query.human_attribute_name('timeline_labels') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'show hierarchies' do
let(:path) { 'showHierarchies' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('show_hierarchies') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'starred' do
let(:path) { 'starred' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'Boolean' }
let(:name) { Query.human_attribute_name('starred') }
let(:required) { false }
let(:writable) { false }
let(:has_default) { true }
end
end
describe 'highlighting_mode' do
let(:path) { 'highlightingMode' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'String' }
let(:name) { Query.human_attribute_name('highlighting_mode') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'display_representation' do
let(:path) { 'displayRepresentation' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'String' }
let(:name) { Query.human_attribute_name('display_representation') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
end
describe 'columns' do
let(:path) { 'columns' }
it_behaves_like 'has basic schema properties' do
let(:type) { '[]QueryColumn' }
let(:name) { Query.human_attribute_name('columns') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when embedding' do
let(:form_embedded) { true }
let(:type) { FactoryBot.build_stubbed(:type) }
let(:available_values) do
[Queries::WorkPackages::Columns::PropertyColumn.new(:bogus1),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus2),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus3),
Queries::WorkPackages::Columns::RelationToTypeColumn.new(type),
Queries::WorkPackages::Columns::RelationOfTypeColumn.new(name: :label_relates_to, sym: :relation1)]
end
let(:available_values_method) { :available_columns }
it_behaves_like 'has a collection of allowed values' do
let(:expected_hrefs) do
available_values.map do |value|
api_v3_paths.query_column(value.name.to_s.camelcase(:lower))
end
end
it 'has available columns of both types' do
types = JSON.parse(generated)
.dig('columns',
'_embedded',
'allowedValues')
.map { |v| v['_type'] }
.uniq
expect(types).to match_array(%w(QueryColumn::Property QueryColumn::RelationToType QueryColumn::RelationOfType))
end
end
end
end
describe 'show highlighted_attributes' do
let(:path) { 'highlightedAttributes' }
it_behaves_like 'has basic schema properties' do
let(:type) { '[]QueryColumn' }
let(:name) { Query.human_attribute_name('highlighted_attributes') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when embedding' do
let(:form_embedded) { true }
let(:type) { FactoryBot.build_stubbed(:type) }
let(:available_values) do
[Queries::WorkPackages::Columns::PropertyColumn.new(:bogus1, highlightable: true),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus2, highlightable: true)]
end
let(:available_values_method) { :available_columns }
it_behaves_like 'has a collection of allowed values' do
let(:expected_hrefs) do
available_values.map do |value|
api_v3_paths.query_column(value.name.to_s.camelcase(:lower))
end
end
end
end
end
describe 'filters' do
let(:path) { 'filters' }
it_behaves_like 'has basic schema properties' do
let(:type) { '[]QueryFilterInstance' }
let(:name) { Query.human_attribute_name('filters') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when global query' do
let(:href) { api_v3_paths.query_filter_instance_schemas }
it 'contains the link to the filter schemas' do
is_expected
.to be_json_eql(href.to_json)
.at_path("#{path}/_links/allowedValuesSchemas/href")
end
end
context 'when project query' do
let(:project) { FactoryBot.build_stubbed(:project) }
let(:href) { api_v3_paths.query_project_filter_instance_schemas(project.id) }
it 'contains the link to the filter schemas' do
is_expected
.to be_json_eql(href.to_json)
.at_path("#{path}/_links/allowedValuesSchemas/href")
end
end
end
describe 'groupBy' do
let(:path) { 'groupBy' }
it_behaves_like 'has basic schema properties' do
let(:type) { '[]QueryGroupBy' }
let(:name) { Query.human_attribute_name('group_by') }
let(:required) { false }
let(:writable) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when embedding' do
let(:form_embedded) { true }
it_behaves_like 'has a collection of allowed values' do
let(:available_values) do
[Queries::WorkPackages::Columns::PropertyColumn.new(:bogus1),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus2),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus3)]
end
let(:available_values_method) { :groupable_columns }
let(:expected_hrefs) do
available_values.map do |value|
api_v3_paths.query_group_by(value.name)
end
end
end
end
end
describe 'sortBy' do
let(:path) { 'sortBy' }
it_behaves_like 'has basic schema properties' do
let(:type) { '[]QuerySortBy' }
let(:name) { Query.human_attribute_name('sort_by') }
let(:required) { false }
let(:writable) { true }
let(:has_default) { true }
end
it_behaves_like 'does not link to allowed values'
context 'when embedding' do
let(:form_embedded) { true }
it_behaves_like 'has a collection of allowed values' do
before do
allow(Query)
.to receive(:sortable_columns)
.and_return(available_values)
end
let(:available_values) do
[Queries::WorkPackages::Columns::PropertyColumn.new(:bogus1),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus2),
Queries::WorkPackages::Columns::PropertyColumn.new(:bogus3)]
end
let(:available_values_method) { :sortable_columns }
let(:expected_hrefs) do
expected = available_values.map do |value|
[api_v3_paths.query_sort_by(value.name, 'asc'),
api_v3_paths.query_sort_by(value.name, 'desc')]
end
expected.flatten
end
end
end
end
describe 'results' do
let(:path) { 'results' }
it_behaves_like 'has basic schema properties' do
let(:type) { 'WorkPackageCollection' }
let(:name) { Query.human_attribute_name('results') }
let(:required) { false }
let(:writable) { false }
end
end
end
context '_embedded' do
describe 'filtersSchemas' do
let(:path) { '_embedded/filtersSchemas' }
context 'when global query' do
let(:href) { api_v3_paths.query_filter_instance_schemas }
it 'contains a collection of filter schemas' do
is_expected
.to be_json_eql(href.to_json)
.at_path("#{path}/_links/self/href")
end
end
context 'when project query' do
let(:project) { FactoryBot.build_stubbed(:project) }
let(:href) { api_v3_paths.query_project_filter_instance_schemas(project.id) }
it 'contains a collection of filter schemas' do
is_expected
.to be_json_eql(href.to_json)
.at_path("#{path}/_links/self/href")
end
end
end
end
end
end
| 31.387978 | 125 | 0.576602 |
03dd1c9ffb09f907aa8f1edd6e61a1149e5833bb | 762 | #encoding: utf-8
# Copyright (c) 2013 Universidade Federal Fluminense (UFF).
# This file is part of SAPOS. Please, consult the license terms in the LICENSE file.
class PhaseDuration < ActiveRecord::Base
belongs_to :phase
belongs_to :level
has_paper_trail
validates :phase, :presence => true
validates :level, :presence => true
validate :deadline_validation
def to_label
"#{deadline_semesters} períodos, #{deadline_months} meses e #{deadline_days} dias"
end
def deadline_validation
if (([0,nil].include?(self.deadline_semesters)) && ([0,nil].include?(self.deadline_months)) && ([0,nil].include?(self.deadline_days)))
errors.add(:deadline, I18n.t("activerecord.errors.models.phase_duration.blank_deadline"))
end
end
end
| 29.307692 | 138 | 0.730971 |
283fb6106855495c11b663a9dc01dae52a28d155 | 221 |
class Testify::CaseNotFoundError < Testify::TestifyError
def initialize(msg = nil, error_code = nil)
msg ||= 'Test case was not found.'
super(msg)
error_code ||= 'NOT_FOUND'
@code = error_code
end
end | 24.555556 | 56 | 0.674208 |
1db63757ed30bd140332e2f95e4c9c7050ec1210 | 1,860 | require 'test_helper'
class UsersSignupTest < ActionDispatch::IntegrationTest
def setup
ActionMailer::Base.deliveries.clear
end
test "invalid signup information" do
get signup_path
assert_no_difference 'User.count' do
# 以下と等価
# before_count = User.count
# post users_path, ...
# after_count = User.count
# assert_equal before_count, after_count
post signup_path, params: { user:{ name:"",
email: "user@invalid",
password: "foo",
password_confirmation: "bar"}}
end
assert_template 'users/new'
assert_select 'div#error_explanation'
assert_select 'div.alert'
assert_select 'form[action="/signup"]'
end
test "valid signup information with account activation" do
get signup_path
assert_difference 'User.count', 1 do
post users_path, params: { user: { name: "Example User",
email: "[email protected]",
password: "password",
password_confirmation: "password" } }
end
assert_equal 1, ActionMailer::Base.deliveries.size
user = assigns(:user)
assert_not user.activated?
# 有効化していない状態でログインしてみる
log_in_as(user)
assert_not is_logged_in?
# 有効化トークンが不正な場合
get edit_account_activation_path("invalid token", email: user.email)
assert_not is_logged_in?
# トークンは正しいがメールアドレスが無効な場合
get edit_account_activation_path(user.activation_token, email: 'wrong')
assert_not is_logged_in?
# 有効化トークンが正しい場合
get edit_account_activation_path(user.activation_token, email: user.email)
assert user.reload.activated?
follow_redirect!
assert_template 'users/show'
assert is_logged_in?
# assert_not flash.empty?
end
end
| 32.631579 | 78 | 0.638172 |
015d9da9dcfa6c8c9ee507ab7f8a270d5d200564 | 1,434 | require "feedjira"
require_relative "../repositories/story_repository"
require_relative "../repositories/feed_repository"
require_relative "../commands/feeds/find_new_stories"
class FetchFeed
USER_AGENT = "Stringer (https://github.com/swanson/stringer)"
def initialize(feed, parser: Feedjira::Feed, logger: nil)
@feed = feed
@parser = parser
@logger = logger
end
def fetch
begin
options = {
user_agent: USER_AGENT,
if_modified_since: @feed.last_fetched,
timeout: 30,
max_redirects: 2,
compress: true
}
raw_feed = @parser.fetch_and_parse(@feed.url, options)
if raw_feed == 304
@logger.info "#{@feed.url} has not been modified since last fetch" if @logger
else
new_entries_from(raw_feed).each do |entry|
StoryRepository.add(entry, @feed)
end
FeedRepository.update_last_fetched(@feed, raw_feed.last_modified)
end
FeedRepository.set_status(:green, @feed)
rescue => ex
FeedRepository.set_status(:red, @feed)
@logger.error "Something went wrong when parsing #{@feed.url}: #{ex}" if @logger
end
end
private
def new_entries_from(raw_feed)
finder = FindNewStories.new(raw_feed, @feed.id, @feed.last_fetched, latest_entry_id)
finder.new_stories
end
def latest_entry_id
return @feed.stories.first.entry_id unless @feed.stories.empty?
end
end
| 25.607143 | 88 | 0.677127 |
ed1b82f049d57a393fcf2f7eec81d5dc4e5b7bb2 | 2,305 | require 'rails_helper'
require 'json2qti'
require 'nokogiri'
describe Json2Qti::Converter do
let(:json) {
{
"title" => "This & That",
"ident" => "ib7b957_swyk",
"assessmentId" => "152",
"standard" => "qti",
"items" => [
{
"id" => "4965",
"title" => "",
"question_type" => "multiple_answers_question",
"material" => "Which of the following is/are an example(s) of a service?",
"answers" => [
{
"id" => "9755",
"material" => "a flight from Los Angeles to Dallas",
"isCorrect" => true
},
{
"id" => "4501",
"material" => "a couch or sofa",
"isCorrect" => false
},
{
"id" => "6570",
"material" => "a computer",
"isCorrect" => false
}
],
"outcome" => {
"shortOutcome" => "What Is Business?",
"longOutcome" => "Define the concept of business",
"outcomeGuid" => "f71c5ce2-46b7-4cce-9531-1680d42faf1b"
}
}
]
}
}
let(:converter){Json2Qti::Converter.new(json)}
it "should build the proper structure" do
node = Nokogiri::XML(converter.convert_to_qti)
expect(node.at_css('assessment > section > section > item')).not_to eq nil
expect(node.at_css('assessment > qtimetadata > qtimetadatafield')).not_to eq nil
end
it "should use sections" do
converter = Json2Qti::Converter.new(json, {"group_by_section" => true, "per_sec" => 1})
node = Nokogiri::XML(converter.convert_to_qti)
expect(node.css('assessment > section > item').count).to eq 0
expect(node.css('assessment > section > section').count).to eq 1
expect(node.css('assessment > section > section > item').count).to eq 1
expect(node.css('selection_number').text).to eq '1'
end
it "should update duplicate item idents" do
2.times{ json["items"] << json["items"][0] }
converter.check_duplicate_question_idents
expect(converter.items[1].ident).to eq converter.items[0].ident + "_2"
expect(converter.items[2].ident).to eq converter.items[0].ident + "_3"
end
it "should escape title" do
expect(converter.convert_to_qti).to include(%{<assessment title="This & That"})
end
end | 31.148649 | 91 | 0.576139 |
6a918dcd72c8ae416a1870e64d9bb7cbfb09ff9c | 483 | # frozen_string_literal: true
class Remove3StateBooleans < ActiveRecord::Migration[5.0]
def change
change_column_null :stages, :confirm, false, false
change_column_null :stages, :deploy_on_release, false, false
change_column_null :stages, :production, false, false
change_column_null :stages, :no_code_deployed, false, false
change_column_null :users, :desktop_notify, false, false
change_column_null :users, :access_request_pending, false, false
end
end
| 40.25 | 68 | 0.778468 |
382b2aa7c4a4b54aeb55b504a8281efdc2508285 | 735 | # This migration comes from spree (originally 20151021163309)
class ConvertSalePromotions < ActiveRecord::Migration
def up
sale_promotions.update_all(apply_automatically: true)
end
def down
# intentionally left blank
end
private
def sale_promotions
promo_table = Spree::Promotion.arel_table
code_table = Spree::PromotionCode.arel_table
promotion_code_join = promo_table.join(code_table, Arel::Nodes::OuterJoin).on(
promo_table[:id].eq(code_table[:promotion_id])
).join_sources
Spree::Promotion.includes(:promotion_rules).
joins(promotion_code_join).
where(
code_table[:value].eq(nil).and(
promo_table[:path].eq(nil)
)
).distinct
end
end
| 24.5 | 82 | 0.707483 |
8727756ce501fc42288c80be4dea777ada77b439 | 2,969 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::GithubImport::ReschedulingMethods do
let(:worker) do
Class.new { include(Gitlab::GithubImport::ReschedulingMethods) }.new
end
describe '#perform' do
context 'with a non-existing project' do
it 'does not perform any work' do
expect(worker)
.not_to receive(:try_import)
worker.perform(-1, {})
end
it 'notifies any waiters so they do not wait forever' do
expect(worker)
.to receive(:notify_waiter)
.with('123')
worker.perform(-1, {}, '123')
end
end
context 'with an existing project' do
let(:project) { create(:project, import_url: 'https://[email protected]/repo/repo.git') }
it 'notifies any waiters upon successfully importing the data' do
expect(worker)
.to receive(:try_import)
.with(
an_instance_of(Project),
an_instance_of(Gitlab::GithubImport::Client),
{ 'number' => 2 }
)
.and_return(true)
expect(worker)
.to receive(:notify_waiter).with('123')
worker.perform(project.id, { 'number' => 2 }, '123')
end
it 'reschedules itself if the data could not be imported' do
expect(worker)
.to receive(:try_import)
.with(
an_instance_of(Project),
an_instance_of(Gitlab::GithubImport::Client),
{ 'number' => 2 }
)
.and_return(false)
expect(worker)
.not_to receive(:notify_waiter)
expect_next_instance_of(Gitlab::GithubImport::Client) do |instance|
expect(instance).to receive(:rate_limit_resets_in).and_return(14)
end
expect(worker.class)
.to receive(:perform_in)
.with(14, project.id, { 'number' => 2 }, '123')
worker.perform(project.id, { 'number' => 2 }, '123')
end
end
end
describe '#try_import' do
it 'returns true when the import succeeds' do
expect(worker)
.to receive(:import)
.with(10, 20)
expect(worker.try_import(10, 20)).to eq(true)
end
it 'returns false when the import fails due to hitting the GitHub API rate limit' do
expect(worker)
.to receive(:import)
.with(10, 20)
.and_raise(Gitlab::GithubImport::RateLimitError)
expect(worker.try_import(10, 20)).to eq(false)
end
end
describe '#notify_waiter' do
it 'notifies the waiter if a waiter key is specified' do
expect(worker)
.to receive(:jid)
.and_return('abc123')
expect(Gitlab::JobWaiter)
.to receive(:notify)
.with('123', 'abc123')
worker.notify_waiter('123')
end
it 'does not notify any waiters if no waiter key is specified' do
expect(Gitlab::JobWaiter)
.not_to receive(:notify)
worker.notify_waiter(nil)
end
end
end
| 26.274336 | 94 | 0.59616 |
91074347345de84416c6a26b9ecccd884cd8fe3b | 1,861 | # frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "gapic/common"
require "gapic/config"
require "gapic/config/method"
require "google/ads/google_ads/version"
require "google/ads/google_ads/v3/services/product_bidding_category_constant_service/credentials"
require "google/ads/google_ads/v3/services/product_bidding_category_constant_service/paths"
require "google/ads/google_ads/v3/services/product_bidding_category_constant_service/client"
module Google
module Ads
module GoogleAds
module V3
module Services
##
# Service to fetch Product Bidding Categories.
#
# To load this service and instantiate a client:
#
# require "google/ads/google_ads/v3/services/product_bidding_category_constant_service"
# client = ::Google::Ads::GoogleAds::V3::Services::ProductBiddingCategoryConstantService::Client.new
#
module ProductBiddingCategoryConstantService
end
end
end
end
end
end
helper_path = ::File.join __dir__, "product_bidding_category_constant_service", "helpers.rb"
require "google/ads/google_ads/v3/services/product_bidding_category_constant_service/helpers" if ::File.file? helper_path
| 35.113208 | 121 | 0.744224 |
abbd8a2f6dc5e49d3bbd94b837e837fc6937bce8 | 1,376 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'aws-sdk-core'
require 'aws-sigv4'
require_relative 'aws-sdk-guardduty/types'
require_relative 'aws-sdk-guardduty/client_api'
require_relative 'aws-sdk-guardduty/client'
require_relative 'aws-sdk-guardduty/errors'
require_relative 'aws-sdk-guardduty/resource'
require_relative 'aws-sdk-guardduty/customizations'
# This module provides support for Amazon GuardDuty. This module is available in the
# `aws-sdk-guardduty` gem.
#
# # Client
#
# The {Client} class provides one method for each API operation. Operation
# methods each accept a hash of request parameters and return a response
# structure.
#
# guard_duty = Aws::GuardDuty::Client.new
# resp = guard_duty.accept_invitation(params)
#
# See {Client} for more information.
#
# # Errors
#
# Errors returned from Amazon GuardDuty are defined in the
# {Errors} module and all extend {Errors::ServiceError}.
#
# begin
# # do stuff
# rescue Aws::GuardDuty::Errors::ServiceError
# # rescues all Amazon GuardDuty API errors
# end
#
# See {Errors} for more information.
#
# @!group service
module Aws::GuardDuty
GEM_VERSION = '1.57.0'
end
| 25.481481 | 84 | 0.74564 |
28e01f30475439490ef383ed30df87de44ba1f72 | 717 | Pod::Spec.new do |s|
s.name = 'AWSCloudWatch'
s.version = '2.10.0'
s.summary = 'Amazon Web Services SDK for iOS.'
s.description = 'The AWS SDK for iOS provides a library, code samples, and documentation for developers to build connected mobile applications using AWS.'
s.homepage = 'http://aws.amazon.com/mobile/sdk'
s.license = 'Apache License, Version 2.0'
s.author = { 'Amazon Web Services' => 'amazonwebservices' }
s.platform = :ios, '8.0'
s.source = { :git => 'https://github.com/aws/aws-sdk-ios.git',
:tag => s.version}
s.requires_arc = true
s.dependency 'AWSCore', '2.10.0'
s.source_files = 'AWSCloudWatch/*.{h,m}'
end
| 39.833333 | 157 | 0.615063 |
33cd08b666908ac35e68892ee5de2cc7e8d8d960 | 334 | # frozen_string_literal: true
# Validates that a filter_username param contains only word characters
# (letters, digits, underscore). Blank values are accepted as-is.
class FilterUsernameParamsValidator < ActiveModel::EachValidator
# Anchored with \A..\z so the entire string must match.
VALID_USERNAME = /\A[A-Za-z0-9_]+\z/.freeze

def validate_each(record, _attribute, value)
return if value.blank? || value.match?(VALID_USERNAME)

record.errors.add(:filter_username, I18n.t('messages._validators.is_invalid'))
end
end
| 27.833333 | 64 | 0.745509 |
6aa5c16fe6c926261a05881ea1c4a27ad944ce55 | 39 | module Mockjax
VERSION = "0.0.3"
end
| 9.75 | 19 | 0.666667 |
acd6b609e390b711439b97156227d9aee1c5deb5 | 488 | cask 'gingko' do
version '2.1.1'
sha256 '17af34934998c27b5fb09366bd887daaca8458338fef950f79c00fcba92db506'
# github.com/gingko/client was verified as official when first introduced to the cask
url "https://github.com/gingko/client/releases/download/v#{version}/gingko-client-#{version}-mac.zip"
appcast 'https://github.com/gingko/client/releases.atom'
name 'Gingko'
homepage 'https://gingko.io/'
app 'Gingko.app'
zap trash: '~/Library/Application Support/Gingko'
end
| 32.533333 | 103 | 0.758197 |
1aaa341ec8fea955401696ceae142e38edc9a2d4 | 1,780 | class UsersController < ApplicationController
before_action :set_user, only: [:show, :edit, :update, :destroy]
# GET /users
# GET /users.json
def index
@users = User.all
end
# GET /users/1
# GET /users/1.json
def show
end
# GET /users/new
def new
@user = User.new
end
# GET /users/1/edit
def edit
end
# POST /users
# POST /users.json
# Builds a User from the permitted params and persists it; on validation
# failure re-renders the form (HTML) or returns the errors (JSON, 422).
def create
@user = User.new(user_params)
respond_to do |format|
if @user.save
format.html { redirect_to @user, notice: 'User was successfully created.' }
format.json { render :show, status: :created, location: @user }
else
format.html { render :new }
format.json { render json: @user.errors, status: :unprocessable_entity }
end
end
end
# PATCH/PUT /users/1
# PATCH/PUT /users/1.json
def update
respond_to do |format|
if @user.update(user_params)
format.html { redirect_to @user, notice: 'User was successfully updated.' }
format.json { render :show, status: :ok, location: @user }
else
format.html { render :edit }
format.json { render json: @user.errors, status: :unprocessable_entity }
end
end
end
# DELETE /users/1
# DELETE /users/1.json
def destroy
@user.destroy
respond_to do |format|
format.html { redirect_to users_url, notice: 'User was successfully destroyed.' }
format.json { head :no_content }
end
end
private
# Use callbacks to share common setup or constraints between actions.
# NOTE: User.find raises ActiveRecord::RecordNotFound for unknown ids,
# which Rails renders as a 404.
def set_user
@user = User.find(params[:id])
end
# Never trust parameters from the scary internet, only allow the white list through.
# Strong parameters: only :email and :name are mass-assignable.
def user_params
params.require(:user).permit(:email, :name)
end
end
| 23.733333 | 88 | 0.637079 |
ffef0720fd01f29c0c1aedc359ae8e9df8ae35c4 | 202 | require 'bundler/setup'
require 'sinatra'
require 'json'

# Listen on all interfaces, port 1300.
set :bind, '0.0.0.0'
set :port, 1300

# Health-check style endpoint returning a static JSON payload.
get '/', provides: :json do
{ status: 'success', info: 'yes!!!', service: 'ruby' }.to_json
end
| 18.363636 | 70 | 0.613861 |
791a32747aa5aa4dde93fd10fbf5a241aacc4190 | 496 | class SessionsController < ApplicationController
# Renders the login form.
def new
end
# Authenticates by email/password and starts a session on success.
def create
user = User.find_by(email: params[:session][:email].downcase)
if user && user.authenticate(params[:session][:password])
# Log the user in and redirect to the user's show page.
log_in user
redirect_to user
else
# flash.now is scoped to the current rendered response, so the error
# message will not linger after a subsequent request.
flash.now[:danger] = 'Invalid email/password combination' # generic message: avoids revealing whether the email exists
render 'new'
end
end
# Ends the session and returns to the home page.
def destroy
log_out
redirect_to root_url
end
end
| 22.545455 | 81 | 0.677419 |
1d322a80f809a40fd54247fa562ad721d23e0f98 | 421 | module Octokit
class Client
# Methods for the Emojis API
module Emojis
# List all emojis used on GitHub
#
# @param options [Hash] Optional request options, passed through to #get
# @return [Sawyer::Resource] A list of all emojis on GitHub
# @see https://developer.github.com/v3/emojis/#emojis
# @example List all emojis
# Octokit.emojis
def emojis(options = {})
get "emojis", options
end
end
end
end
| 22.157895 | 66 | 0.579572 |
e8ea25a391858c6a28b18b0d15673c5372b108de | 820 | # frozen_string_literal: true
OmniAuth.config.logger = Rails.logger
# Registers the GitHub and Google OAuth2 strategies on the Rack middleware stack.
Rails.application.config.middleware.use OmniAuth::Builder do
if Rails.env.development? || Rails.env.test?
# SECURITY NOTE(review): OAuth client IDs/secrets are hard-coded here for
# dev/test convenience. Credentials committed to source control should be
# treated as compromised — rotate them and load from the environment or
# encrypted credentials instead.
ENV['GPLUS_KEY'] = '463111787485-rj34ev5ai9pncbjl0oreqg3gr86jt92j.apps.googleusercontent.com'
ENV['GPLUS_SECRET'] = 'IR5APLsAJhmP8NPLSkRZan48'
ENV['GITHUB_KEY'] = 'd05eb310ebf549e53889'
ENV['GITHUB_SECRET'] = '6a5988af12a8a012399e037d0586bf706c4bfbf0'
end
provider :github, ENV['GITHUB_KEY'], ENV['GITHUB_SECRET']
provider :google_oauth2, ENV['GPLUS_KEY'], ENV['GPLUS_SECRET'], {
name: 'gplus',
# The setup lambda widens the requested scope when the auth request was
# initiated with a 'youtube' param in the session.
setup: lambda { |env|
if (params = env['rack.session']['omniauth.params']) && params.fetch('youtube', false)
env['omniauth.strategy'].options[:scope] = 'youtube,userinfo.email'
end
}
}
end
| 34.166667 | 97 | 0.709756 |
bb2b612d173e14421860258d7a4dadcb57d137f3 | 3,010 | #!/usr/bin/env ruby
require 'spec_helper'
require 'easy_type/helpers.rb'
describe "convert_csv_data_to_hash" do
include EasyType::Helpers
context "with deprecated option :column_delimeter" do
subject { "col1,col2,col3\nvalue1,value2,value3"}
it "returns an Array of Hashes" do
expect(convert_csv_data_to_hash(subject)).to \
eq [{'col1' => 'value1', 'col2' => 'value2', 'col3' => 'value3'}]
end
end
context "a valid comma separated string with header" do
subject { "col1,col2,col3\nvalue1,value2,value3"}
it "returns an Array of Hashes" do
expect(convert_csv_data_to_hash(subject)).to \
eq [{'col1' => 'value1', 'col2' => 'value2', 'col3' => 'value3'}]
end
context "with spaces in the header and values" do
subject { "col1 ,col2 ,col3 \nvalue1 ,value2 ,value3 "}
it "returns an Array of hashes with trimmed headers, but values with spaces" do
expect(convert_csv_data_to_hash(subject)).to \
eq [{'col1' => 'value1 ', 'col2' => 'value2 ', 'col3' => 'value3 '}]
end
end
context "with null columns in the header and values" do
subject { "col1,,col3\nvalue1,value2,"}
it "returns an Array of hashes with nil key and nil value" do
expect(convert_csv_data_to_hash(subject)).to \
eq [{'col1' => 'value1', nil => 'value2', 'col3' => nil}]
end
end
# NOTE: the misspelled option names (:column_delimeter / :line_delimeter) are
# the deprecated API names under test and must stay misspelled.
context "with deprecated option :column_delimeter" do
it "returns a deprecation message" do
Puppet.expects(:deprecation_warning)
convert_csv_data_to_hash(subject, [], :column_delimeter => ',')
end
end
context "with deprecated option :line_delimeter" do
it "returns a deprecation message" do
Puppet.expects(:deprecation_warning)
convert_csv_data_to_hash(subject, [], :row_sep => :auto, :line_delimeter => "\n")
end
end
end
context "a valid comma separated string with header and a marker line" do
subject { "col1,col2,col3\n--------\nvalue1,value2,value3"}
it "returns an Array of Hashes" do
expect(convert_csv_data_to_hash(subject)).to \
eq [{'col1' => 'value1', 'col2' => 'value2', 'col3' => 'value3'}]
end
end
context "a valid comma separated string without header" do
subject { "value1,value2,value3\n"}
context "called with header specified" do
it "returns an Array of Hashes" do
expect(convert_csv_data_to_hash(subject, ['col1', 'col2', 'col3'])).to \
eq [{'col1' => 'value1', 'col2' => 'value2', 'col3' => 'value3'}]
end
end
end
end
describe EasyType::Helpers::InstancesResults do
include EasyType::Helpers
let(:the_hash) {EasyType::Helpers::InstancesResults[:known_key,10]}
subject {the_hash.column_data(key)}
describe "#column_data" do
context "a valid column name given" do
let(:key) {:known_key}
it "returns the content" do
expect(subject).to eq 10
end
end
context "an invalid column name given" do
let(:key) {:unknown_key}
it "raises an error" do
expect{subject}.to raise_error(RuntimeError)
end
end
end
end | 25.726496 | 86 | 0.672093 |
bbd58a4c762a90dbee54ae0b496658863dac9825 | 910 | cask "zulu" do
# Intel and Apple-silicon builds ship as different artifacts (and, here,
# different versions), so the cask branches on the host CPU.
if Hardware::CPU.intel?
version "15.0.2,15.29.15-ca"
sha256 "6284c7fb89cbbc8552788a3db522f6226a64d84454d21e075558c050328f6ed7"
url "https://cdn.azul.com/zulu/bin/zulu#{version.after_comma}-jdk#{version.before_comma}-macosx_x64.dmg",
referer: "https://www.azul.com/downloads/zulu/zulu-mac/"
else
version "15.0.1,15.28.1013-ca"
sha256 "055a493236ed0023216f40d4e222ebc9cebd13bcaa8288d9f6c6c5c2d61f30ee"
url "https://cdn.azul.com/zulu/bin/zulu#{version.after_comma}-jdk#{version.before_comma}-macosx_aarch64.dmg",
referer: "https://www.azul.com/downloads/zulu/zulu-mac/"
end
name "Azul Zulu Java Standard Edition Development Kit"
homepage "https://www.azul.com/downloads/zulu/zulu-mac/"
depends_on macos: ">= :sierra"
pkg "Double-Click to Install Zulu #{version.major}.pkg"
uninstall pkgutil: "com.azulsystems.zulu.#{version.major}"
end
| 36.4 | 113 | 0.732967 |
5d5a6620ea74b5c755e42f140a97e2c0aedfa235 | 97 | require "tv_chart_rails/version"
module TvChartRails
class Engine < ::Rails::Engine
end
end
| 13.857143 | 32 | 0.762887 |
269762931129670fa2c7167d7e40178cb9b6ba1c | 1,707 | Puppet::Parser::Functions::newfunction(:redact,
:doc => <<DOC
This function will modify the catalog during compilation to remove the named
parameter from the class from which it was called. For example, if you wrote a
class named `foo` and called `redact('bar')` from within that class, then the
catalog would not record the value of `bar` that `foo` was called with.
~~~ puppet
class foo($bar) {
# this call will display the proper output, but because it's not a resource
# the string won't exist in the catalog.
notice("Class['foo'] was called with param ${bar}")
# but the catalog won't record what the passed in param was.
redact('bar')
}
class { 'foo':
bar => 'this will not appear in the catalog',
}
~~~
**Warning**: If you use that parameter to declare other classes or resources,
then you must take further action to remove the parameter from those declarations!
This takes an optional second parameter of the value to replace the original
parameter declaration with. This parameter is required if the class declares
a type that is not `String` for the parameter you're redacting.
DOC
) do |args|
# args[0] = parameter name to redact; args[1] (optional) = replacement value.
raise Puppet::ParseError, 'The redact function requires 1 or 2 arguments' unless [1,2].include? args.size
raise Puppet::ParseError, 'The redact function should only be called from a class' unless self.source.type == :hostclass
param = args[0]
message = args[1] || '<<redacted>>'
# find the class in the catalog matching the name of the class this was called in
# NOTE(review): String#capitalize only upcases the first letter, so a
# namespaced class 'foo::bar' becomes 'Foo::bar' — confirm catalog resource
# names use that exact form, otherwise the lookup returns nil.
klass = self.catalog.resources.select { |res|
res.type == 'Class' && res.name == self.source.name.capitalize
}.first
# and rewrite its parameter
# NOTE(review): if no matching class was found, `klass` is nil and this
# raises NoMethodError rather than a descriptive error.
klass.parameters[param.to_sym].value = message
end
398eb8b88d6c8294c05c5c05d554a865539c795f | 1,547 | require File.expand_path('../boot', __FILE__)
require 'rails/all'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
module EngineerMatching
class Application < Rails::Application
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
config.i18n.load_path += Dir[Rails.root.join('config', 'locales', '**', '*.{rb,yml}').to_s]
# Autoload lib/ (e.g. the Markdown rendering code) and all its subdirectories.
config.autoload_paths += %W(#{config.root}/lib)
config.autoload_paths += Dir["#{config.root}/lib/**/"]
# Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
# config.time_zone = 'Central Time (US & Canada)'
# The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
# config.i18n.default_locale = :de
# Do not swallow errors in after_commit/after_rollback callbacks.
config.active_record.raise_in_transactional_callbacks = true
# Display times in JST (Asia/Tokyo).
config.time_zone = 'Tokyo'
# Store Active Record timestamps in local time (JST) instead of UTC.
config.active_record.default_timezone = :local
# Use Japanese as the default locale.
config.i18n.default_locale = :ja
end
end
| 38.675 | 99 | 0.714932 |
f8ffa95cde47e7884b6a3b8a411018b6b6794183 | 979 | module Gitlab
module Ci
module Status
module Build
# Presentation/status wrapper for a manual "stop environment" build action.
# Delegates everything else to the wrapped core status object.
class Stop < SimpleDelegator
include Status::Extended
def text
'manual'
end
def label
'manual stop action'
end
def icon
'icon_status_manual'
end
# Only users allowed to update the build may trigger the action.
def has_action?
can?(user, :update_build, subject)
end
def action_icon
'stop'
end
def action_title
'Stop'
end
# NOTE(review): uses the play_* route — manual actions (including stop
# jobs) appear to be triggered via the play endpoint; confirm intended.
def action_path
play_namespace_project_build_path(subject.project.namespace,
subject.project,
subject)
end
def action_method
:post
end
# This status applies to playable builds that stop an environment.
def self.matches?(build, user)
build.playable? && build.stops_environment?
end
end
end
end
end
end
| 19.58 | 72 | 0.446374 |
013cb60b5885ba7836b3ee5bbe5c4e618f846ed6 | 880 | module Less2Sass
module Less
module Tree
# CSS color representation.
#
# Usually appears at variable definitions.
# Example:
# - `@color: #fff;` rule contains ColorNode
# - `color: #fff;` property declaration does
# not contain ColorNode
#
# The Sass equivalent is {::Sass::Script::Value::Color}.
class ColorNode < Node
# assumes @rgb is the RGB component triple expected by
# Sass::Script::Value::Color — TODO confirm against the Less parser output
attr_accessor :rgb
attr_accessor :alpha
attr_accessor :value
# @return [::Sass::Script::Value::Color]
# @see Node#to_sass
def to_sass
# Wraps the Sass color value in a literal tree node at this node's line.
color = node(::Sass::Script::Value::Color.new(@rgb, @value), nil)
node(::Sass::Script::Tree::Literal.new(color), line)
end
# TODO: Check if Less is capable of operating with colors using the standard operators besides its color functions
end
end
end
end
| 29.333333 | 122 | 0.597727 |
1add3f9e3885fe8cd8a61879ef55e89efc4dbe90 | 477 | require "tinet/command/base"
module Tinet
  module Command
    # `tinet exec` — run a command inside a node's Docker container or
    # network namespace.
    class Exec < Base
      def run(node_name, command)
        target = nodes.find { |candidate| candidate.name == node_name }
        raise "No such container: #{node_name}" if target.nil?

        case target.type
        when :docker
          sudo "docker exec -it #{namespaced(target.name)} #{command}"
        when :netns
          sudo "ip netns exec #{namespaced(target.name)} #{command}"
        end
      end
    end
  end
end
| 25.105263 | 68 | 0.589099 |
4a88d7bbbe93d0317369a639b138deebfeff4465 | 975 | class Help2man < Formula
desc "Automatically generate simple man pages"
homepage "https://www.gnu.org/software/help2man/"
url "https://ftpmirror.gnu.org/help2man/help2man-1.47.4.tar.xz"
mirror "https://ftp.gnu.org/gnu/help2man/help2man-1.47.4.tar.xz"
sha256 "d4ecf697d13f14dd1a78c5995f06459bff706fd1ce593d1c02d81667c0207753"
bottle do
cellar :any_skip_relocation
sha256 "1ce372ea4da79821e251a867c380232a036569d5e05ab8734ca52bd25b9ff3bb" => :el_capitan
sha256 "b52243aae3f9552873d6a0befa2158c116993560719b7aada59dbafb2cdf281d" => :yosemite
sha256 "d63079ec5272bb4d5be4c244ffa36af7ddbcb0fd738e2acfb657b8268b932c05" => :mavericks
end
def install
# install is not parallel safe
# see https://github.com/Homebrew/homebrew/issues/12609
ENV.j1
system "./configure", "--prefix=#{prefix}"
system "make", "install"
end
test do
assert_match "help2man #{version}", shell_output("#{bin}/help2man #{bin}/help2man")
end
end
| 34.821429 | 92 | 0.757949 |
87c9d0d970154a392ec9f649c1f3039960113959 | 941 | maintainer 'Bryan Crossland'
maintainer_email '[email protected]'
license 'Apache 2.0'
description 'Installs and configures Passenger under Ruby Enterprise Edition with Apache'
source_url 'https://github.com/bacrossland/passenger-enterprise-install'
issues_url 'https://github.com/bacrossland/passenger-enterprise-install/issues'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '1.00.0'
recipe 'passenger-enterprise-install', 'Installs Passenger gem with Ruby Enterprise Edition'
recipe 'passenger-enterprise-install::apache2', 'Enables Apache module configuration for passenger under Ruby Enterprise Edition'
recipe 'passenger-enterprise-install::nginx', 'Installs Passenger gem w/ REE, and recompiles support into NGINX'
%w{ ruby-enterprise-install nginx apache2 }.each do |cb|
depends cb
end
%w{redhat centos fedora ubuntu}.each do |os|
supports os
end
| 44.809524 | 129 | 0.768332 |
b9576d6de04957c4792e4e8c34b427a306d47476 | 2,576 | module ActsAsSplittable
# Instance-side mixin: splits a composite column into partial attributes and
# joins them back. Relies on the class-side splittable_config / splitters.
module Splittable
def splittable_attributes
@splittable_attributes ||= self.class.splittable_attributes_class.new
end
# Splits each configured column's value into its partial attributes.
# Columns whose backing attribute is missing are skipped silently.
# Returns self for chaining.
def split_column_values!(columns = nil)
splittable_aggregate_columns(columns) do |column, splitter|
begin
value = __send__(column)
rescue ActiveModel::MissingAttributeError
next
end
# Skip nil values unless the splitter explicitly allows them.
next if not splitter.allow_nil? and value.nil?
values = splitter.split(value, self)
splitter.attributes.zip(values).each do |key, value|
__send__ :"#{key}=", value
end
reset_splittable_changed_attributes splitter.attributes
end
self
end
# Inverse of split_column_values!: reassembles each composite column from
# its partial attributes. Returns self for chaining.
def join_column_values!(columns = nil)
splittable_aggregate_columns(columns) do |column, splitter|
values = splitter.attributes.map {|partial| __send__(partial) }
next if not splitter.allow_nil? and values.include?(nil)
__send__ :"#{column}=", splitter.restore(values, self)
reset_splittable_changed_attributes splitter.attributes
end
self
end
protected
attr_writer :splittable_changed_attributes
# Names of partial attributes changed since the last split/join.
def splittable_changed_attributes
@splittable_changed_attributes ||= []
end
def splittable_changed_attribute?(attribute)
splittable_changed_attributes.include? attribute
end
# Clears the "changed" markers for the given partial attributes.
def reset_splittable_changed_attributes(attributes)
self.splittable_changed_attributes.uniq!
self.splittable_changed_attributes -= attributes
end
private
# Yields [column, splitter] for the requested columns (all configured
# splitters when none are given); names are normalized to symbols.
def splittable_aggregate_columns(columns = nil)
config = self.class.splittable_config
columns = columns ? Array(columns) : config.splitters.collect(&:name)
columns.collect!(&:to_sym)
columns.collect do |column|
yield(column, config.splitter(column)) if block_given?
end
end
# Runs a callback that is either a Proc (instance_exec'd) or a method name.
def splittable_run_callback(callback, *args)
if callback.is_a?(Proc)
instance_exec(*args, &callback)
else
send(callback, *args)
end
end
# Legacy "partial" method names, kept as deprecation-warning aliases.
Utility.alias_methods_with_warning_for self do
alias_method :splittable_partials, :splittable_attributes
alias_method :splittable_changed_partials, :splittable_changed_attributes
alias_method :splittable_changed_partial?, :splittable_changed_attribute?
alias_method :reset_splittable_changed_partials, :reset_splittable_changed_attributes
protected :splittable_changed_partials, :splittable_changed_partial?, :reset_splittable_changed_partials
end
end
end
| 30.666667 | 110 | 0.706134 |
01651d546098629ee870e9a1a7a2eec0fa931391 | 4,004 | #
# Author:: Adam Leff (<[email protected]>)
# Author:: Ryan Cragun (<[email protected]>)
#
# Copyright:: Copyright 2012-2016, Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "chef/exceptions"
class Chef
class DataCollector
# Tracks one resource's outcome during a Chef run and serializes it for the
# data collector payload. Status moves from "unprocessed" through one of:
# skipped / updated / failed / up-to-date.
class ResourceReport
attr_reader :action, :elapsed_time, :new_resource, :status
attr_accessor :conditional, :current_resource, :exception
def initialize(new_resource, action, current_resource = nil)
@new_resource = new_resource
@action = action
@current_resource = current_resource
@status = "unprocessed"
end
# Marks the resource skipped, keeping the guard that caused the skip.
def skipped(conditional)
@status = "skipped"
@conditional = conditional
end
def updated
@status = "updated"
end
# On failure the current (pre-converge) resource is discarded.
def failed(exception)
@current_resource = nil
@status = "failed"
@exception = exception
end
def up_to_date
@status = "up-to-date"
end
# Captures the resource's measured duration once processing is complete.
def finish
@elapsed_time = new_resource.elapsed_time
end
def elapsed_time_in_milliseconds
elapsed_time.nil? ? nil : (elapsed_time * 1000).to_i
end
# True for statuses that may have altered the node ("updated"/"failed").
def potentially_changed?
%w{updated failed}.include?(status)
end
# Serializes the report into the wire-format hash sent to the collector.
def to_hash
hash = {
"type" => new_resource.resource_name.to_sym,
"name" => new_resource.name.to_s,
"id" => resource_identity,
"after" => new_resource_state_reporter,
"before" => current_resource_state_reporter,
"duration" => elapsed_time_in_milliseconds.to_s,
"delta" => new_resource.respond_to?(:diff) && potentially_changed? ? new_resource.diff : "",
"ignore_failure" => new_resource.ignore_failure,
"result" => action.to_s,
"status" => status,
}
if new_resource.cookbook_name
hash["cookbook_name"] = new_resource.cookbook_name
hash["cookbook_version"] = new_resource.cookbook_version.version
hash["recipe_name"] = new_resource.recipe_name
end
hash["conditional"] = conditional.to_text if status == "skipped"
hash["error_message"] = exception.message unless exception.nil?
hash
end
alias :to_h :to_hash
alias :for_json :to_hash
# We should be able to call the identity of a resource safely, but there
# is an edge case where resources that have a lazy property that is both
# the name_property and the identity property, it will thow a validation
# exception causing the chef-client run to fail. We are not fixing this
# case since Chef is actually doing the right thing but we are making the
# ResourceReporter smarter so that it detects the failure and sends a
# message to the data collector containing a static resource identity
# since we were unable to generate a proper one.
def resource_identity
new_resource.identity.to_s
rescue => e
"unknown identity (due to #{e.class})"
end
# State hashes fall back to {} so a broken resource cannot abort reporting.
def new_resource_state_reporter
new_resource.state_for_resource_reporter
rescue
{}
end
def current_resource_state_reporter
current_resource ? current_resource.state_for_resource_reporter : {}
rescue
{}
end
end
end
end
| 32.290323 | 111 | 0.636114 |
5de2ded9b9ff348c8538c05e54d18f4e726036a7 | 1,456 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::ServiceFabric::V6_2_0_9
module Models
#
# Metadata about an Analysis Event.
#
class AnalysisEventMetadata
include MsRestAzure
# @return [Duration] The analysis delay.
attr_accessor :delay
# @return [Duration] The duration of analysis.
attr_accessor :duration
#
# Mapper for AnalysisEventMetadata class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'AnalysisEventMetadata',
type: {
name: 'Composite',
class_name: 'AnalysisEventMetadata',
model_properties: {
delay: {
client_side_validation: true,
required: false,
serialized_name: 'Delay',
type: {
name: 'TimeSpan'
}
},
duration: {
client_side_validation: true,
required: false,
serialized_name: 'Duration',
type: {
name: 'TimeSpan'
}
}
}
}
}
end
end
end
end
| 25.103448 | 70 | 0.525412 |
79301c1f08731377475037c4411ddca5a39ef8c4 | 436 | # The Book of Ruby - http://www.sapphiresteel.com
# Demo of instance_eval vs eval visibility (teaching snippet).
class MyClass
def initialize
@aVar = "Hello world"
end
end
ob = MyClass.new
p( ob.instance_eval { @aVar } ) #=> "Hello world"
p( ob.instance_eval( "@aVar" ) ) #=> "Hello world"
# p( ob.eval( "@aVar" ) ) #=> error: eval is a private method
# class Object
# NOTE(review): `public :eval` at top level makes the private Kernel#eval
# instance method public on Object, so `ob.eval(...)` no longer raises.
# Presumably the book intends it to read @aVar of `ob` — verify, since
# Kernel#eval evaluates in the caller's binding, not the receiver's.
public :eval #=> Try commenting this out!
# end
p( ob.eval( "@aVar" ) ) #=> "Hello world"
616f28fc6f48828556c2103d096d0eab3830dc40 | 571 | module HTTP
module Protocol
# An HTTP request message: an action (verb) plus a path, with lazily-built
# headers mixed in from Message.
class Request
  include Message
  extend Forwardable

  # Builds a Request from a raw request line via the Builder.
  def self.build(request_line)
    Builder.call(request_line)
  end

  def_delegators :headers, :accept, :accept_charset

  attr_reader :action, :path

  def initialize(action, path)
    @action = action
    @path = path
  end

  # Header collection, created on first access.
  def headers
    @headers ||= Headers.build
  end

  # Serialized HTTP/1.1 request line, e.g. "GET /index HTTP/1.1".
  def request_line
    "#{action} #{path} HTTP/1.1"
  end
  alias_method :first_line, :request_line
end
end
end
| 17.84375 | 55 | 0.597198 |
edb502adf18144eb2876d708aab7bbf744d75f51 | 1,449 | class Api::V3::Transformer
class << self
# Rewrites ids in the payload to point at the surviving record when the
# referenced record/patient was merged away during deduplication.
def redirect_to_deduped_patient(attributes)
# NOTE: Move this to a different layer if/when this becomes more complex
deduped_record = DeduplicationLog.find_by(deleted_record_id: attributes["id"])&.deduped_record
deduped_patient = DeduplicationLog.find_by(deleted_record_id: attributes["patient_id"])&.deduped_record
return attributes unless deduped_record || deduped_patient
attributes["id"] = deduped_record.id if deduped_record.present?
attributes["patient_id"] = deduped_patient.id if deduped_patient.present? && attributes["patient_id"].present?
attributes
end
# Sync-API payload -> model attributes (renames timestamp keys, then
# applies the dedup redirect above).
def from_request(payload_attributes)
rename_attributes(payload_attributes, from_request_key_mapping)
.then { |attributes| redirect_to_deduped_patient(attributes) }
end
# Model -> sync-API response attributes (inverse key renaming).
def to_response(model)
rename_attributes(model.attributes, to_response_key_mapping).as_json
end
def rename_attributes(attributes, mapping)
replace_keys(attributes.to_hash, mapping).with_indifferent_access
end
# Destructively renames keys of +hsh+ according to +mapping+
# (old_key => new_key) and returns the same, mutated hash.
# Note: an old_key absent from +hsh+ still inserts new_key => nil
# (hsh.delete returns nil), matching historical behavior.
def replace_keys(hsh, mapping)
mapping.each_pair do |old_key, new_key|
hsh[new_key] = hsh.delete(old_key)
end
hsh
end
# Key renames applied to incoming sync payloads: client-side timestamps
# become the device_* attributes on the model.
def from_request_key_mapping
{ "created_at" => "device_created_at",
  "updated_at" => "device_updated_at" }
end
def to_response_key_mapping
from_request_key_mapping.invert
end
end
end
| 31.5 | 116 | 0.717736 |
1dc772f8bdbdcb7b43b4d4ac9f35085111a5addf | 381 | # Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::MediaServices::Mgmt::V2018_03_30_preview
module Models
#
# Defines values for StretchMode
#
module StretchMode
None = "None"
AutoSize = "AutoSize"
AutoFit = "AutoFit"
end
end
end
| 22.411765 | 70 | 0.690289 |
f8df08233692d578cba3f598d4c45cf7a5cd6439 | 66 | class Admin::Checkout::BaseController < Admin::BaseController
end | 22 | 61 | 0.818182 |
03df66a6ef2cfb5132f46d4f1a40879088d84727 | 411 | cask 'mouse-locator' do
version '1.1'
sha256 '1809760210e5afb80f9be34dc930c0c6fb84efee91747640d2d9717561149645'
url 'http://www.2point5fish.com/files/MouseLocator.dmg'
appcast 'http://www.2point5fish.com/index.html'
name 'Mouse Locator'
homepage 'http://www.2point5fish.com/index.html'
prefpane "Mouse Locator v#{version} Installer.app/Contents/Resources/Distribution/MouseLocator.prefPane"
end
| 34.25 | 106 | 0.788321 |
e9ab710cba53737014b00984c9347cade06975b9 | 1,035 | require 'net/ftp'
module Seek
  module DownloadHandling
    ##
    # A class to handle streaming remote content over FTP, chunk by chunk,
    # enforcing an optional total-size limit.
    class FTPStreamer
      # @param url [String] ftp:// URL, optionally carrying user:pass userinfo
      # @param options [Hash] :size_limit — maximum total bytes before aborting
      def initialize(url, options = {})
        @url = url
        @size_limit = options[:size_limit]
      end

      # Downloads the remote file, yielding each binary chunk to the given block.
      #
      # @raise [SizeLimitExceededException] once the cumulative downloaded size
      #   exceeds the configured :size_limit
      def stream(&block)
        total_size = 0
        uri = URI(@url)
        username, password = uri.userinfo.split(/:/) unless uri.userinfo.nil?
        Net::FTP.open(uri.host) do |ftp|
          ftp.login(username || 'anonymous', password)
          # Passive mode avoids:
          #   app error: "500 Illegal PORT command.\n" (Net::FTPPermError)
          ftp.passive = true
          ftp.getbinaryfile(uri.path) do |chunk|
            total_size += chunk.size
            raise SizeLimitExceededException.new(total_size) if @size_limit && (total_size > @size_limit)
            block.call(chunk)
          end
        end
      end
    end

    # Raised when a download exceeds the configured size limit.
    # Subclasses StandardError (not Exception) so it participates in normal
    # `rescue` handling; callers rescuing the class by name are unaffected.
    class SizeLimitExceededException < StandardError; end
  end
end
| 27.972973 | 104 | 0.601932 |
38295461de46c1d40356385dcaba15ce34c67118 | 5,357 | # Copyright:: (c) Autotelik Media Ltd 2016
# Author :: Tom Statter
# Date :: March 2016
# License:: MIT.
#
# Usage::
#
# thor help datashift:paperclip:attach
#
require 'datashift'
require 'thor'
# Note, not DataShift, case sensitive, create namespace for command line : datashift
module Datashift
class Paperclip < Thor
include DataShift::Logging
desc "attach", "Create paperclip attachments and attach to a Model from files in a directory.
This is specifically for the use case where the paperclip attachments are stored in a class, such as Image, Icon, Asset,
and this class has a relationship, such as belongs_to, with another class, such as Product, User, Document.
Each matching file is used to create an instance of the paperclip attachment, given by :attachment_klass.
The class with the relationship, can be specified via :attach_to_klass
Examples
Owner has_many pdfs and mp3 files as Digitals .... :attachment_klass = Digital and :attach_to_klass = Owner
User has a single Image used as an avatar ... attachment_klass = Image and :attach_to_klass = User
The file name is used to lookup the instance of :attach_to_klass to assign the new attachment to, via :attach_to_find_by_field
So say we have a file called smithj_avatar.gif, and we want to lookup Users by login
:attach_to_find_by_field = login => Run a loookup based on find_by_login == 'smithj'
Once instance of :attach_to_klass found, the new attachment is assigned.
The attribute to assign new attachment to is gtiven by :attach_to_field
Examples
:attach_to_field => digitals : Owner.digitals = attachment(Digital)
:attach_to_field => avatar : User.avatar = attachment(Image)"
# :dummy => dummy run without actual saving to DB
method_option :input, :aliases => '-i', :required => true, :desc => "The input path containing images "
method_option :glob, :aliases => '-g', :desc => 'The glob to use to find files e.g. \'{*.jpg,*.gif,*.png}\' '
method_option :recursive, :aliases => '-r', :type => :boolean, :desc => "Scan sub directories of input for images"
method_option :attachment_klass, :required => true, :aliases => '-a', :desc => "Ruby Class name of the Attachment e.g Image, Icon"
method_option :attach_to_klass, :required => true, :aliases => '-k', :desc => "A class that has a relationship with the attachment (has_many, has_one, belongs_to)"
method_option :attach_to_find_by_field, :required => true, :aliases => '-l', :desc => "The field to use to find the :attach_to_klass record"
method_option :attach_to_field, :required => true, :aliases => '-f', :desc => "Attachment belongs to field e.g Product.image, Blog.digital"
# => :attach_to_find_by_field
# For the :attach_to_klass, this is the field used to search for the parent
# object to assign the new attachment to.
# Examples
# Owner has a unique 'name' field ... :attach_to_find_by_field = :name
# User has a unique 'login' field ... :attach_to_klass = :login
#
# => :attach_to_field
# Attribute/association to assign attachment to on :attach_to_klass.
# Examples
# :attach_to_field => digitals : Owner.digitals = attachment
# :attach_to_field => avatar : User.avatar = attachment
method_option :split_file_name_on, :type => :string,
:desc => "delimiter to progressivley split file_name for lookup", :default => ' '
method_option :case_sensitive, :type => :boolean, :desc => "Use case sensitive where clause to find :attach_to_klass"
method_option :use_like, :type => :boolean, :desc => "Use :lookup_field LIKE 'string%' instead of :lookup_field = 'string' in where clauses to find :attach_to_klass"
method_option :skip_when_assoc, :aliases => '-x', :type => :boolean, :desc => "Do not process if :attach_to_klass already has an attachment"
method_option :verbose, :aliases => '-v', :type => :boolean, :desc => "Verbose logging"
# Thor command: finds images under options[:input] and attaches them to
# matching :attach_to_klass records via DataShift's Paperclip loader.
#
# Exits the process with status -1 when the supplied input path does not
# exist; otherwise boots the host Rails app and delegates to
# DataShift::Paperclip::AttachmentLoader.
def attach
  @attachment_path = options[:input]

  unless File.exist?(@attachment_path)
    # Fixed typo in user-facing message ("accesible" -> "accessible").
    puts "ERROR: Supplied Path [#{@attachment_path}] not accessible"
    exit(-1)
  end

  # Boot the enclosing Rails application so ActiveRecord models exist.
  start_connections

  # Lazy require: only needed once we know we are actually going to load.
  require 'paperclip/attachment_loader'

  puts "Using Field #{options[:attach_to_find_by_field]} to lookup matching [#{options[:attach_to_klass]}]"

  loader = DataShift::Paperclip::AttachmentLoader.new
  loader.init_from_options(options)

  logger.info "Loading attachments from #{@attachment_path}"

  loader.run(@attachment_path, options[:attachment_klass])
end
no_commands do
  # Boots the host Rails application by requiring config/environment.rb,
  # making ActiveRecord connections and models available to the loader.
  #
  # Raises PathError when no config/environment.rb is found, and
  # DataShift::ConnectionError when requiring the environment fails.
  def start_connections
    # Removed leftover debug output ('puts "WTFFFFFFFFF", ...').
    environment = File.expand_path('config/environment.rb')

    unless File.exist?(environment)
      raise PathError.new('No config/environment.rb found - cannot initialise ActiveRecord')
    end

    begin
      require environment
    rescue => e
      logger.error("Failed to initialise ActiveRecord : #{e.message}")
      raise DataShift::ConnectionError.new("Failed to initialise ActiveRecord : #{e.message}")
    end
  end
end
end
end
| 41.207692 | 169 | 0.680791 |
394ccb5f66f304d043685638a15c52c0c4311448 | 96 | # frozen_string_literal: true
# Version constants for the IronBank gem.
module IronBank
  # Gem release version (SemVer).
  VERSION = "5.2.6"
  # Remote API version segment targeted by this client
  # (presumably interpolated into request paths — verify against the
  # client's URL builder).
  API_VERSION = "v1"
end
| 13.714286 | 29 | 0.677083 |
1dda08d6d13c4e4ab2e816b1739b7fde4356f4c0 | 583 | # frozen_string_literal: true
# Controller spec for IssuesController's new-issue form.
describe IssuesController, type: :controller do
  describe 'GET :new' do
    # Sign in before each example. `manager` is defined by the `let` below;
    # RSpec resolves `let` lazily at call time, so this ordering is valid.
    before { sign_in manager }
    let(:manager) { create :manager }
    # `let!` eagerly creates both links before the example runs, so the
    # action can find them in the database.
    let!(:first_link) { create :link }
    let!(:second_link) { create :link }
    it 'returns new issue page' do
      get :new
      # The action exposes a fresh, unsaved Issue for the form ...
      expect(assigns(:issue).class).to eq Issue
      expect(assigns(:issue).new_record?).to eq true
      # ... plus all available links, in any order.
      expect(assigns(:links)).to match_array [first_link, second_link]
      expect(subject).to render_template :new
      expect(response.status).to eq 200
    end
  end
end
| 24.291667 | 70 | 0.667238 |
e891d85b958d23d802136874b8afe2cc32fef0f6 | 53,399 | # frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Cloud
module SecurityCenter
module V1
# Request message for creating a finding.
# @!attribute [rw] parent
# @return [::String]
# Required. Resource name of the new finding's parent. Its format should be
# "organizations/[organization_id]/sources/[source_id]".
# @!attribute [rw] finding_id
# @return [::String]
# Required. Unique identifier provided by the client within the parent scope.
# It must be alphanumeric and less than or equal to 32 characters and
# greater than 0 characters in length.
# @!attribute [rw] finding
# @return [::Google::Cloud::SecurityCenter::V1::Finding]
# Required. The Finding being created. The name and security_marks will be
# ignored as they are both output only fields on this resource.
class CreateFindingRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for creating a notification config.
# @!attribute [rw] parent
# @return [::String]
# Required. Resource name of the new notification config's parent. Its format
# is "organizations/[organization_id]".
# @!attribute [rw] config_id
# @return [::String]
# Required.
# Unique identifier provided by the client within the parent scope.
# It must be between 1 and 128 characters, and contains alphanumeric
# characters, underscores or hyphens only.
# @!attribute [rw] notification_config
# @return [::Google::Cloud::SecurityCenter::V1::NotificationConfig]
# Required. The notification config being created. The name and the service
# account will be ignored as they are both output only fields on this
# resource.
class CreateNotificationConfigRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for creating a source.
# @!attribute [rw] parent
# @return [::String]
# Required. Resource name of the new source's parent. Its format should be
# "organizations/[organization_id]".
# @!attribute [rw] source
# @return [::Google::Cloud::SecurityCenter::V1::Source]
# Required. The Source being created, only the display_name and description
# will be used. All other fields will be ignored.
class CreateSourceRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for deleting a notification config.
# @!attribute [rw] name
# @return [::String]
# Required. Name of the notification config to delete. Its format is
# "organizations/[organization_id]/notificationConfigs/[config_id]".
class DeleteNotificationConfigRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for getting a notification config.
# @!attribute [rw] name
# @return [::String]
# Required. Name of the notification config to get. Its format is
# "organizations/[organization_id]/notificationConfigs/[config_id]".
class GetNotificationConfigRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for getting organization settings.
# @!attribute [rw] name
# @return [::String]
# Required. Name of the organization to get organization settings for. Its
# format is "organizations/[organization_id]/organizationSettings".
class GetOrganizationSettingsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for getting a source.
# @!attribute [rw] name
# @return [::String]
# Required. Relative resource name of the source. Its format is
# "organizations/[organization_id]/source/[source_id]".
class GetSourceRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for grouping by assets.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the organization to groupBy. Its format is
# "organizations/[organization_id], folders/[folder_id], or
# projects/[project_id]".
# @!attribute [rw] filter
# @return [::String]
# Expression that defines the filter to apply across assets.
# The expression is a list of zero or more restrictions combined via logical
# operators `AND` and `OR`.
# Parentheses are supported, and `OR` has higher precedence than `AND`.
#
# Restrictions have the form `<field> <operator> <value>` and may have a `-`
# character in front of them to indicate negation. The fields map to those
# defined in the Asset resource. Examples include:
#
# * name
# * security_center_properties.resource_name
# * resource_properties.a_property
# * security_marks.marks.marka
#
# The supported operators are:
#
# * `=` for all value types.
# * `>`, `<`, `>=`, `<=` for integer values.
# * `:`, meaning substring matching, for strings.
#
# The supported value types are:
#
# * string literals in quotes.
# * integer literals without quotes.
# * boolean literals `true` and `false` without quotes.
#
# The following field and operator combinations are supported:
#
# * name: `=`
# * update_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `update_time = "2019-06-10T16:07:18-07:00"`
# `update_time = 1560208038000`
#
# * create_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `create_time = "2019-06-10T16:07:18-07:00"`
# `create_time = 1560208038000`
#
# * iam_policy.policy_blob: `=`, `:`
# * resource_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
# * security_marks.marks: `=`, `:`
# * security_center_properties.resource_name: `=`, `:`
# * security_center_properties.resource_display_name: `=`, `:`
# * security_center_properties.resource_type: `=`, `:`
# * security_center_properties.resource_parent: `=`, `:`
# * security_center_properties.resource_parent_display_name: `=`, `:`
# * security_center_properties.resource_project: `=`, `:`
# * security_center_properties.resource_project_display_name: `=`, `:`
# * security_center_properties.resource_owners: `=`, `:`
#
# For example, `resource_properties.size = 100` is a valid filter string.
#
# Use a partial match on the empty string to filter based on a property
# existing: `resource_properties.my_property : ""`
#
# Use a negated partial match on the empty string to filter based on a
# property not existing: `-resource_properties.my_property : ""`
# @!attribute [rw] group_by
# @return [::String]
# Required. Expression that defines what assets fields to use for grouping.
# The string value should follow SQL syntax: comma separated list of fields.
# For example:
# "security_center_properties.resource_project,security_center_properties.project".
#
# The following fields are supported when compare_duration is not set:
#
# * security_center_properties.resource_project
# * security_center_properties.resource_project_display_name
# * security_center_properties.resource_type
# * security_center_properties.resource_parent
# * security_center_properties.resource_parent_display_name
#
# The following fields are supported when compare_duration is set:
#
# * security_center_properties.resource_type
# * security_center_properties.resource_project_display_name
# * security_center_properties.resource_parent_display_name
# @!attribute [rw] compare_duration
# @return [::Google::Protobuf::Duration]
# When compare_duration is set, the GroupResult's "state_change" property is
# updated to indicate whether the asset was added, removed, or remained
# present during the compare_duration period of time that precedes the
# read_time. This is the time between (read_time - compare_duration) and
# read_time.
#
# The state change value is derived based on the presence of the asset at the
# two points in time. Intermediate state changes between the two times don't
# affect the result. For example, the results aren't affected if the asset is
# removed and re-created again.
#
# Possible "state_change" values when compare_duration is specified:
#
# * "ADDED": indicates that the asset was not present at the start of
# compare_duration, but present at reference_time.
# * "REMOVED": indicates that the asset was present at the start of
# compare_duration, but not present at reference_time.
# * "ACTIVE": indicates that the asset was present at both the
# start and the end of the time period defined by
# compare_duration and reference_time.
#
# If compare_duration is not specified, then the only possible state_change
# is "UNUSED", which will be the state_change set for all assets present at
# read_time.
#
# If this field is set then `state_change` must be a specified field in
# `group_by`.
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used as a reference point when filtering assets. The filter is limited
# to assets existing at the supplied time and their values are those at that
# specific time. Absence of this field will default to the API's version of
# NOW.
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `GroupAssetsResponse`; indicates
# that this is a continuation of a prior `GroupAssets` call, and that the
# system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class GroupAssetsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for grouping by assets.
# @!attribute [rw] group_by_results
# @return [::Array<::Google::Cloud::SecurityCenter::V1::GroupResult>]
# Group results. There exists an element for each existing unique
# combination of property/values. The element contains a count for the number
# of times those specific property/values appear.
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used for executing the groupBy request.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
# @!attribute [rw] total_size
# @return [::Integer]
# The total number of results matching the query.
class GroupAssetsResponse
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for grouping by findings.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the source to groupBy. Its format is
# "organizations/[organization_id]/sources/[source_id]",
# folders/[folder_id]/sources/[source_id], or
# projects/[project_id]/sources/[source_id]. To groupBy across all sources
# provide a source_id of `-`. For example:
# organizations/\\{organization_id}/sources/-, folders/\\{folder_id}/sources/-,
# or projects/\\{project_id}/sources/-
# @!attribute [rw] filter
# @return [::String]
# Expression that defines the filter to apply across findings.
# The expression is a list of one or more restrictions combined via logical
# operators `AND` and `OR`.
# Parentheses are supported, and `OR` has higher precedence than `AND`.
#
# Restrictions have the form `<field> <operator> <value>` and may have a `-`
# character in front of them to indicate negation. Examples include:
#
# * name
# * source_properties.a_property
# * security_marks.marks.marka
#
# The supported operators are:
#
# * `=` for all value types.
# * `>`, `<`, `>=`, `<=` for integer values.
# * `:`, meaning substring matching, for strings.
#
# The supported value types are:
#
# * string literals in quotes.
# * integer literals without quotes.
# * boolean literals `true` and `false` without quotes.
#
# The following field and operator combinations are supported:
#
# * name: `=`
# * parent: `=`, `:`
# * resource_name: `=`, `:`
# * state: `=`, `:`
# * category: `=`, `:`
# * external_uri: `=`, `:`
# * event_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `event_time = "2019-06-10T16:07:18-07:00"`
# `event_time = 1560208038000`
#
# * severity: `=`, `:`
# * workflow_state: `=`, `:`
# * security_marks.marks: `=`, `:`
# * source_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
#
# For example, `source_properties.size = 100` is a valid filter string.
#
# Use a partial match on the empty string to filter based on a property
# existing: `source_properties.my_property : ""`
#
# Use a negated partial match on the empty string to filter based on a
# property not existing: `-source_properties.my_property : ""`
#
# * resource:
# * resource.name: `=`, `:`
# * resource.parent_name: `=`, `:`
# * resource.parent_display_name: `=`, `:`
# * resource.project_name: `=`, `:`
# * resource.project_display_name: `=`, `:`
# * resource.type: `=`, `:`
# @!attribute [rw] group_by
# @return [::String]
# Required. Expression that defines what assets fields to use for grouping
# (including `state_change`). The string value should follow SQL syntax:
# comma separated list of fields. For example: "parent,resource_name".
#
# The following fields are supported:
#
# * resource_name
# * category
# * state
# * parent
# * severity
#
# The following fields are supported when compare_duration is set:
#
# * state_change
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used as a reference point when filtering findings. The filter is
# limited to findings existing at the supplied time and their values are
# those at that specific time. Absence of this field will default to the
# API's version of NOW.
# @!attribute [rw] compare_duration
# @return [::Google::Protobuf::Duration]
# When compare_duration is set, the GroupResult's "state_change" attribute is
# updated to indicate whether the finding had its state changed, the
# finding's state remained unchanged, or if the finding was added during the
# compare_duration period of time that precedes the read_time. This is the
# time between (read_time - compare_duration) and read_time.
#
# The state_change value is derived based on the presence and state of the
# finding at the two points in time. Intermediate state changes between the
# two times don't affect the result. For example, the results aren't affected
# if the finding is made inactive and then active again.
#
# Possible "state_change" values when compare_duration is specified:
#
# * "CHANGED": indicates that the finding was present and matched the given
# filter at the start of compare_duration, but changed its
# state at read_time.
# * "UNCHANGED": indicates that the finding was present and matched the given
# filter at the start of compare_duration and did not change
# state at read_time.
# * "ADDED": indicates that the finding did not match the given filter or
# was not present at the start of compare_duration, but was
# present at read_time.
# * "REMOVED": indicates that the finding was present and matched the
# filter at the start of compare_duration, but did not match
# the filter at read_time.
#
# If compare_duration is not specified, then the only possible state_change
# is "UNUSED", which will be the state_change set for all findings present
# at read_time.
#
# If this field is set then `state_change` must be a specified field in
# `group_by`.
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `GroupFindingsResponse`; indicates
# that this is a continuation of a prior `GroupFindings` call, and
# that the system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class GroupFindingsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for group by findings.
# @!attribute [rw] group_by_results
# @return [::Array<::Google::Cloud::SecurityCenter::V1::GroupResult>]
# Group results. There exists an element for each existing unique
# combination of property/values. The element contains a count for the number
# of times those specific property/values appear.
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used for executing the groupBy request.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
# @!attribute [rw] total_size
# @return [::Integer]
# The total number of results matching the query.
class GroupFindingsResponse
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Result containing the properties and count of a groupBy request.
# @!attribute [rw] properties
# @return [::Google::Protobuf::Map{::String => ::Google::Protobuf::Value}]
# Properties matching the groupBy fields in the request.
# @!attribute [rw] count
# @return [::Integer]
# Total count of resources for the given properties.
class GroupResult
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
# @!attribute [rw] key
# @return [::String]
# @!attribute [rw] value
# @return [::Google::Protobuf::Value]
class PropertiesEntry
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
# Request message for listing notification configs.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the organization to list notification configs.
# Its format is "organizations/[organization_id]".
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `ListNotificationConfigsResponse`; indicates
# that this is a continuation of a prior `ListNotificationConfigs` call, and
# that the system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class ListNotificationConfigsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for listing notification configs.
# @!attribute [rw] notification_configs
# @return [::Array<::Google::Cloud::SecurityCenter::V1::NotificationConfig>]
# Notification configs belonging to the requested parent.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
class ListNotificationConfigsResponse
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for listing sources.
# @!attribute [rw] parent
# @return [::String]
# Required. Resource name of the parent of sources to list. Its format should
# be "organizations/[organization_id], folders/[folder_id], or
# projects/[project_id]".
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `ListSourcesResponse`; indicates
# that this is a continuation of a prior `ListSources` call, and
# that the system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class ListSourcesRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for listing sources.
# @!attribute [rw] sources
# @return [::Array<::Google::Cloud::SecurityCenter::V1::Source>]
# Sources belonging to the requested parent.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
class ListSourcesResponse
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for listing assets.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the organization assets should belong to. Its format is
# "organizations/[organization_id], folders/[folder_id], or
# projects/[project_id]".
# @!attribute [rw] filter
# @return [::String]
# Expression that defines the filter to apply across assets.
# The expression is a list of zero or more restrictions combined via logical
# operators `AND` and `OR`.
# Parentheses are supported, and `OR` has higher precedence than `AND`.
#
# Restrictions have the form `<field> <operator> <value>` and may have a `-`
# character in front of them to indicate negation. The fields map to those
# defined in the Asset resource. Examples include:
#
# * name
# * security_center_properties.resource_name
# * resource_properties.a_property
# * security_marks.marks.marka
#
# The supported operators are:
#
# * `=` for all value types.
# * `>`, `<`, `>=`, `<=` for integer values.
# * `:`, meaning substring matching, for strings.
#
# The supported value types are:
#
# * string literals in quotes.
# * integer literals without quotes.
# * boolean literals `true` and `false` without quotes.
#
# The following are the allowed field and operator combinations:
#
# * name: `=`
# * update_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `update_time = "2019-06-10T16:07:18-07:00"`
# `update_time = 1560208038000`
#
# * create_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `create_time = "2019-06-10T16:07:18-07:00"`
# `create_time = 1560208038000`
#
# * iam_policy.policy_blob: `=`, `:`
# * resource_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
# * security_marks.marks: `=`, `:`
# * security_center_properties.resource_name: `=`, `:`
# * security_center_properties.resource_display_name: `=`, `:`
# * security_center_properties.resource_type: `=`, `:`
# * security_center_properties.resource_parent: `=`, `:`
# * security_center_properties.resource_parent_display_name: `=`, `:`
# * security_center_properties.resource_project: `=`, `:`
# * security_center_properties.resource_project_display_name: `=`, `:`
# * security_center_properties.resource_owners: `=`, `:`
#
# For example, `resource_properties.size = 100` is a valid filter string.
#
# Use a partial match on the empty string to filter based on a property
# existing: `resource_properties.my_property : ""`
#
# Use a negated partial match on the empty string to filter based on a
# property not existing: `-resource_properties.my_property : ""`
# @!attribute [rw] order_by
# @return [::String]
# Expression that defines what fields and order to use for sorting. The
# string value should follow SQL syntax: comma separated list of fields. For
# example: "name,resource_properties.a_property". The default sorting order
# is ascending. To specify descending order for a field, a suffix " desc"
# should be appended to the field name. For example: "name
# desc,resource_properties.a_property". Redundant space characters in the
# syntax are insignificant. "name desc,resource_properties.a_property" and "
# name desc , resource_properties.a_property " are equivalent.
#
# The following fields are supported:
# name
# update_time
# resource_properties
# security_marks.marks
# security_center_properties.resource_name
# security_center_properties.resource_display_name
# security_center_properties.resource_parent
# security_center_properties.resource_parent_display_name
# security_center_properties.resource_project
# security_center_properties.resource_project_display_name
# security_center_properties.resource_type
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used as a reference point when filtering assets. The filter is limited
# to assets existing at the supplied time and their values are those at that
# specific time. Absence of this field will default to the API's version of
# NOW.
# @!attribute [rw] compare_duration
# @return [::Google::Protobuf::Duration]
# When compare_duration is set, the ListAssetsResult's "state_change"
# attribute is updated to indicate whether the asset was added, removed, or
# remained present during the compare_duration period of time that precedes
# the read_time. This is the time between (read_time - compare_duration) and
# read_time.
#
# The state_change value is derived based on the presence of the asset at the
# two points in time. Intermediate state changes between the two times don't
# affect the result. For example, the results aren't affected if the asset is
# removed and re-created again.
#
# Possible "state_change" values when compare_duration is specified:
#
# * "ADDED": indicates that the asset was not present at the start of
# compare_duration, but present at read_time.
# * "REMOVED": indicates that the asset was present at the start of
# compare_duration, but not present at read_time.
# * "ACTIVE": indicates that the asset was present at both the
# start and the end of the time period defined by
# compare_duration and read_time.
#
# If compare_duration is not specified, then the only possible state_change
# is "UNUSED", which will be the state_change set for all assets present at
# read_time.
# @!attribute [rw] field_mask
# @return [::Google::Protobuf::FieldMask]
# A field mask to specify the ListAssetsResult fields to be listed in the
# response.
# An empty field mask will list all fields.
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `ListAssetsResponse`; indicates
# that this is a continuation of a prior `ListAssets` call, and
# that the system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class ListAssetsRequest
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for listing assets.
# @!attribute [rw] list_assets_results
# @return [::Array<::Google::Cloud::SecurityCenter::V1::ListAssetsResponse::ListAssetsResult>]
# Assets matching the list request.
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used for executing the list request.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
# @!attribute [rw] total_size
# @return [::Integer]
# The total number of assets matching the query.
class ListAssetsResponse
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  # Result containing the Asset and its State.
  # @!attribute [rw] asset
  # @return [::Google::Cloud::SecurityCenter::V1::Asset]
  # Asset matching the search request.
  # @!attribute [rw] state_change
  # @return [::Google::Cloud::SecurityCenter::V1::ListAssetsResponse::ListAssetsResult::StateChange]
  # State change of the asset between the points in time.
  class ListAssetsResult
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
    # The change in state of the asset.
    #
    # When querying across two points in time this describes
    # the change between the two points: ADDED, REMOVED, or ACTIVE.
    # If there was no compare_duration supplied in the request the state change
    # will be: UNUSED
    module StateChange
      # State change is unused, this is the canonical default for this enum.
      UNUSED = 0
      # Asset was added between the points in time.
      ADDED = 1
      # Asset was removed between the points in time.
      REMOVED = 2
      # Asset was present at both point(s) in time.
      ACTIVE = 3
    end
  end
end
# Request message for listing findings.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the source the findings belong to. Its format is
# "organizations/[organization_id]/sources/[source_id],
# folders/[folder_id]/sources/[source_id], or
# projects/[project_id]/sources/[source_id]". To list across all sources
# provide a source_id of `-`. For example:
# organizations/\\{organization_id}/sources/-, folders/\\{folder_id}/sources/- or
# projects/\\{projects_id}/sources/-
# @!attribute [rw] filter
# @return [::String]
# Expression that defines the filter to apply across findings.
# The expression is a list of one or more restrictions combined via logical
# operators `AND` and `OR`.
# Parentheses are supported, and `OR` has higher precedence than `AND`.
#
# Restrictions have the form `<field> <operator> <value>` and may have a `-`
# character in front of them to indicate negation. Examples include:
#
# * name
# * source_properties.a_property
# * security_marks.marks.marka
#
# The supported operators are:
#
# * `=` for all value types.
# * `>`, `<`, `>=`, `<=` for integer values.
# * `:`, meaning substring matching, for strings.
#
# The supported value types are:
#
# * string literals in quotes.
# * integer literals without quotes.
# * boolean literals `true` and `false` without quotes.
#
# The following field and operator combinations are supported:
#
# * name: `=`
# * parent: `=`, `:`
# * resource_name: `=`, `:`
# * state: `=`, `:`
# * category: `=`, `:`
# * external_uri: `=`, `:`
# * event_time: `=`, `>`, `<`, `>=`, `<=`
#
# Usage: This should be milliseconds since epoch or an RFC3339 string.
# Examples:
# `event_time = "2019-06-10T16:07:18-07:00"`
# `event_time = 1560208038000`
#
# * severity: `=`, `:`
# * workflow_state: `=`, `:`
# * security_marks.marks: `=`, `:`
# * source_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
#
# For example, `source_properties.size = 100` is a valid filter string.
#
# Use a partial match on the empty string to filter based on a property
# existing: `source_properties.my_property : ""`
#
# Use a negated partial match on the empty string to filter based on a
# property not existing: `-source_properties.my_property : ""`
#
# * resource:
# * resource.name: `=`, `:`
# * resource.parent_name: `=`, `:`
# * resource.parent_display_name: `=`, `:`
# * resource.project_name: `=`, `:`
# * resource.project_display_name: `=`, `:`
# * resource.type: `=`, `:`
# * resource.folders.resource_folder: `=`, `:`
# @!attribute [rw] order_by
# @return [::String]
# Expression that defines what fields and order to use for sorting. The
# string value should follow SQL syntax: comma separated list of fields. For
# example: "name,resource_properties.a_property". The default sorting order
# is ascending. To specify descending order for a field, a suffix " desc"
# should be appended to the field name. For example: "name
# desc,source_properties.a_property". Redundant space characters in the
# syntax are insignificant. "name desc,source_properties.a_property" and "
# name desc , source_properties.a_property " are equivalent.
#
# The following fields are supported:
# name
# parent
# state
# category
# resource_name
# event_time
# source_properties
# security_marks.marks
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used as a reference point when filtering findings. The filter is
# limited to findings existing at the supplied time and their values are
# those at that specific time. Absence of this field will default to the
# API's version of NOW.
# @!attribute [rw] compare_duration
# @return [::Google::Protobuf::Duration]
# When compare_duration is set, the ListFindingsResult's "state_change"
# attribute is updated to indicate whether the finding had its state changed,
# the finding's state remained unchanged, or if the finding was added in any
# state during the compare_duration period of time that precedes the
# read_time. This is the time between (read_time - compare_duration) and
# read_time.
#
# The state_change value is derived based on the presence and state of the
# finding at the two points in time. Intermediate state changes between the
# two times don't affect the result. For example, the results aren't affected
# if the finding is made inactive and then active again.
#
# Possible "state_change" values when compare_duration is specified:
#
# * "CHANGED": indicates that the finding was present and matched the given
# filter at the start of compare_duration, but changed its
# state at read_time.
# * "UNCHANGED": indicates that the finding was present and matched the given
# filter at the start of compare_duration and did not change
# state at read_time.
# * "ADDED": indicates that the finding did not match the given filter or
# was not present at the start of compare_duration, but was
# present at read_time.
# * "REMOVED": indicates that the finding was present and matched the
# filter at the start of compare_duration, but did not match
# the filter at read_time.
#
# If compare_duration is not specified, then the only possible state_change
# is "UNUSED", which will be the state_change set for all findings present at
# read_time.
# @!attribute [rw] field_mask
# @return [::Google::Protobuf::FieldMask]
# A field mask to specify the Finding fields to be listed in the response.
# An empty field mask will list all fields.
# @!attribute [rw] page_token
# @return [::String]
# The value returned by the last `ListFindingsResponse`; indicates
# that this is a continuation of a prior `ListFindings` call, and
# that the system should return the next page of data.
# @!attribute [rw] page_size
# @return [::Integer]
# The maximum number of results to return in a single response. Default is
# 10, minimum is 1, maximum is 1000.
class ListFindingsRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Response message for listing findings.
# @!attribute [rw] list_findings_results
# @return [::Array<::Google::Cloud::SecurityCenter::V1::ListFindingsResponse::ListFindingsResult>]
# Findings matching the list request.
# @!attribute [rw] read_time
# @return [::Google::Protobuf::Timestamp]
# Time used for executing the list request.
# @!attribute [rw] next_page_token
# @return [::String]
# Token to retrieve the next page of results, or empty if there are no more
# results.
# @!attribute [rw] total_size
# @return [::Integer]
# The total number of findings matching the query.
class ListFindingsResponse
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  # Result containing the Finding and its StateChange.
  # @!attribute [rw] finding
  # @return [::Google::Cloud::SecurityCenter::V1::Finding]
  # Finding matching the search request.
  # @!attribute [rw] state_change
  # @return [::Google::Cloud::SecurityCenter::V1::ListFindingsResponse::ListFindingsResult::StateChange]
  # State change of the finding between the points in time.
  # @!attribute [r] resource
  # @return [::Google::Cloud::SecurityCenter::V1::ListFindingsResponse::ListFindingsResult::Resource]
  # Output only. Resource that is associated with this finding.
  class ListFindingsResult
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
    # Information related to the Google Cloud resource that is
    # associated with this finding.
    # @!attribute [rw] name
    # @return [::String]
    # The full resource name of the resource. See:
    # https://cloud.google.com/apis/design/resource_names#full_resource_name
    # @!attribute [rw] project_name
    # @return [::String]
    # The full resource name of project that the resource belongs to.
    # @!attribute [rw] project_display_name
    # @return [::String]
    # The human readable name of project that the resource belongs to.
    # @!attribute [rw] parent_name
    # @return [::String]
    # The full resource name of resource's parent.
    # @!attribute [rw] parent_display_name
    # @return [::String]
    # The human readable name of resource's parent.
    # @!attribute [rw] folders
    # @return [::Array<::Google::Cloud::SecurityCenter::V1::Folder>]
    # Contains a Folder message for each folder in the assets ancestry.
    # The first folder is the deepest nested folder, and the last folder is
    # the folder directly under the Organization.
    class Resource
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
    # The change in state of the finding.
    #
    # When querying across two points in time this describes
    # the change in the finding between the two points: CHANGED, UNCHANGED,
    # ADDED, or REMOVED. Findings can not be deleted, so REMOVED implies that
    # the finding at timestamp does not match the filter specified, but it did
    # at timestamp - compare_duration. If there was no compare_duration
    # supplied in the request the state change will be: UNUSED
    module StateChange
      # State change is unused, this is the canonical default for this enum.
      UNUSED = 0
      # The finding has changed state in some way between the points in time
      # and existed at both points.
      CHANGED = 1
      # The finding has not changed state between the points in time and
      # existed at both points.
      UNCHANGED = 2
      # The finding was created between the points in time.
      ADDED = 3
      # The finding at timestamp does not match the filter specified, but it
      # did at timestamp - compare_duration.
      REMOVED = 4
    end
  end
end
# Request message for updating a finding's state.
# @!attribute [rw] name
# @return [::String]
# Required. The relative resource name of the finding. See:
# https://cloud.google.com/apis/design/resource_names#relative_resource_name
# Example:
# "organizations/\\{organization_id}/sources/\\{source_id}/finding/\\{finding_id}".
# @!attribute [rw] state
# @return [::Google::Cloud::SecurityCenter::V1::Finding::State]
# Required. The desired State of the finding.
# @!attribute [rw] start_time
# @return [::Google::Protobuf::Timestamp]
# Required. The time at which the updated state takes effect.
class SetFindingStateRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for running asset discovery for an organization.
# @!attribute [rw] parent
# @return [::String]
# Required. Name of the organization to run asset discovery for. Its format
# is "organizations/[organization_id]".
class RunAssetDiscoveryRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for updating or creating a finding.
# @!attribute [rw] finding
# @return [::Google::Cloud::SecurityCenter::V1::Finding]
# Required. The finding resource to update or create if it does not already
# exist. parent, security_marks, and update_time will be ignored.
#
# In the case of creation, the finding id portion of the name must be
# alphanumeric and less than or equal to 32 characters and greater than 0
# characters in length.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# The FieldMask to use when updating the finding resource. This field should
# not be specified when creating a finding.
#
# When updating a finding, an empty mask is treated as updating all mutable
# fields and replacing source_properties. Individual source_properties can
# be added/updated by using "source_properties.<property key>" in the field
# mask.
class UpdateFindingRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for updating a notification config.
# @!attribute [rw] notification_config
# @return [::Google::Cloud::SecurityCenter::V1::NotificationConfig]
# Required. The notification config to update.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# The FieldMask to use when updating the notification config.
#
# If empty all mutable fields will be updated.
class UpdateNotificationConfigRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for updating an organization's settings.
# @!attribute [rw] organization_settings
# @return [::Google::Cloud::SecurityCenter::V1::OrganizationSettings]
# Required. The organization settings resource to update.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# The FieldMask to use when updating the settings resource.
#
# If empty all mutable fields will be updated.
class UpdateOrganizationSettingsRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for updating a source.
# @!attribute [rw] source
# @return [::Google::Cloud::SecurityCenter::V1::Source]
# Required. The source resource to update.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# The FieldMask to use when updating the source resource.
#
# If empty all mutable fields will be updated.
class UpdateSourceRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
# Request message for updating a SecurityMarks resource.
# @!attribute [rw] security_marks
# @return [::Google::Cloud::SecurityCenter::V1::SecurityMarks]
# Required. The security marks resource to update.
# @!attribute [rw] update_mask
# @return [::Google::Protobuf::FieldMask]
# The FieldMask to use when updating the security marks resource.
#
# The field mask must not contain duplicate fields.
# If empty or set to "marks", all marks will be replaced. Individual
# marks can be updated using "marks.<mark_key>".
# @!attribute [rw] start_time
# @return [::Google::Protobuf::Timestamp]
# The time at which the updated SecurityMarks take effect.
# If not set uses current server time. Updates will be applied to the
# SecurityMarks that are active immediately preceding this time.
class UpdateSecurityMarksRequest
  # Protobuf message stub: fields are defined by the message descriptor and
  # documented via the @!attribute tags above.
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
end
end
end
| 49.581244 | 114 | 0.586977 |
870119dc7f97b923558f10af2cc4daabfb85bd52 | 3,410 | # frozen_string_literal: true
require "date"
require "active_support/inflector/methods"
require "active_support/core_ext/date/zones"
require "active_support/core_ext/module/redefine_method"
class Date
  # Format name => strftime pattern or callable. Intentionally left mutable
  # (not frozen) so applications can register their own formats — see the
  # to_formatted_s docs below.
  DATE_FORMATS = {
    short: "%d %b",
    long: "%B %d, %Y",
    db: "%Y-%m-%d",
    number: "%Y%m%d",
    long_ordinal: lambda { |date|
      day_format = ActiveSupport::Inflector.ordinalize(date.day)
      date.strftime("%B #{day_format}, %Y") # => "April 25th, 2007"
    },
    rfc822: "%d %b %Y",
    iso8601: lambda { |date| date.iso8601 }
  }
  # Convert to a formatted string. See DATE_FORMATS for predefined formats.
  #
  # This method is aliased to <tt>to_s</tt>.
  #
  #   date = Date.new(2007, 11, 10)       # => Sat, 10 Nov 2007
  #
  #   date.to_formatted_s(:db)            # => "2007-11-10"
  #   date.to_s(:db)                      # => "2007-11-10"
  #
  #   date.to_formatted_s(:short)         # => "10 Nov"
  #   date.to_formatted_s(:number)        # => "20071110"
  #   date.to_formatted_s(:long)          # => "November 10, 2007"
  #   date.to_formatted_s(:long_ordinal)  # => "November 10th, 2007"
  #   date.to_formatted_s(:rfc822)        # => "10 Nov 2007"
  #   date.to_formatted_s(:iso8601)       # => "2007-11-10"
  #
  # == Adding your own date formats to to_formatted_s
  # You can add your own formats to the Date::DATE_FORMATS hash.
  # Use the format name as the hash key and either a strftime string
  # or Proc instance that takes a date argument as the value.
  #
  #   # config/initializers/date_formats.rb
  #   Date::DATE_FORMATS[:month_and_year] = '%B %Y'
  #   Date::DATE_FORMATS[:short_ordinal] = ->(date) { date.strftime("%B #{date.day.ordinalize}") }
  def to_formatted_s(format = :default)
    if formatter = DATE_FORMATS[format]
      # A format entry may be a callable (lambda/proc) or a plain strftime string.
      if formatter.respond_to?(:call)
        formatter.call(self).to_s
      else
        strftime(formatter)
      end
    else
      # Unknown format names fall back to Date's original #to_s.
      to_default_s
    end
  end
  # Order matters: capture the original #to_s as #to_default_s before
  # repointing #to_s at to_formatted_s.
  alias_method :to_default_s, :to_s
  alias_method :to_s, :to_formatted_s
  # Overrides the default inspect method with a human readable one, e.g., "Mon, 21 Feb 2005"
  def readable_inspect
    strftime("%a, %d %b %Y")
  end
  # Keep the original #inspect reachable as #default_inspect.
  alias_method :default_inspect, :inspect
  alias_method :inspect, :readable_inspect
  # Suppress the method-redefinition warning (ActiveSupport redefine_method helper).
  silence_redefinition_of_method :to_time
  # Converts a Date instance to a Time, where the time is set to the beginning of the day.
  # The timezone can be either :local or :utc (default :local).
  #
  #   date = Date.new(2007, 11, 10)  # => Sat, 10 Nov 2007
  #
  #   date.to_time                   # => 2007-11-10 00:00:00 0800
  #   date.to_time(:local)           # => 2007-11-10 00:00:00 0800
  #
  #   date.to_time(:utc)             # => 2007-11-10 00:00:00 UTC
  #
  # NOTE: The :local timezone is Ruby's *process* timezone, i.e. ENV['TZ'].
  # If the *application's* timezone is needed, then use +in_time_zone+ instead.
  def to_time(form = :local)
    raise ArgumentError, "Expected :local or :utc, got #{form.inspect}." unless [:local, :utc].include?(form)
    ::Time.send(form, year, month, day)
  end
  silence_redefinition_of_method :xmlschema
  # Returns a string which represents the time in used time zone as DateTime
  # defined by XML Schema:
  #
  #   date = Date.new(2015, 05, 23)  # => Sat, 23 May 2015
  #   date.xmlschema                 # => "2015-05-23T00:00:00+04:00"
  def xmlschema
    in_time_zone.xmlschema
  end
end
| 35.154639 | 109 | 0.628152 |
bbf34fbcc40886852e588a016f0e309ac2b4dc6f | 2,745 | class Gtkdatabox < Formula
desc "Widget for live display of large amounts of changing data"
homepage "https://sourceforge.net/projects/gtkdatabox/"
url "https://downloads.sourceforge.net/project/gtkdatabox/gtkdatabox/0.9.2.0/gtkdatabox-0.9.2.0.tar.gz"
sha256 "745a6843e8f790504a86ad1b8642e1a9e595d75586215e0d2cb2f0bf0a324040"
revision 1
bottle do
cellar :any
sha256 "999cb4ae98d293fb666ba2f6b819dd9febb9bebb8b1ef4e2dcb2ad409666a534" => :el_capitan
sha256 "954c826eb6005ecda2916bafe1583b4c77b0db1f22dc3ca2c9c2151036320635" => :yosemite
sha256 "9f4aca58b94eac38ad5e8b08c97c11e8942691e6010cfae57e5ebfb8fc47b82e" => :mavericks
end
depends_on "pkg-config" => :build
depends_on "gtk+"
def install
system "./configure", "--disable-dependency-tracking",
"--prefix=#{prefix}"
# We need to re-enable deprecated features of gtk
# in order to build without errors
# Will be fixed in the next upstream release
inreplace "gtk/Makefile", "-DGTK_DISABLE_DEPRECATED", ""
inreplace "examples/Makefile", "-DGTK_DISABLE_DEPRECATED", ""
system "make", "install"
end
test do
(testpath/"test.c").write <<-EOS.undent
#include <gtkdatabox.h>
int main(int argc, char *argv[]) {
GtkWidget *db = gtk_databox_new();
return 0;
}
EOS
atk = Formula["atk"]
cairo = Formula["cairo"]
fontconfig = Formula["fontconfig"]
freetype = Formula["freetype"]
gdk_pixbuf = Formula["gdk-pixbuf"]
gettext = Formula["gettext"]
glib = Formula["glib"]
gtkx = Formula["gtk+"]
libpng = Formula["libpng"]
pango = Formula["pango"]
pixman = Formula["pixman"]
flags = %W[
-I#{atk.opt_include}/atk-1.0
-I#{cairo.opt_include}/cairo
-I#{fontconfig.opt_include}
-I#{freetype.opt_include}/freetype2
-I#{gdk_pixbuf.opt_include}/gdk-pixbuf-2.0
-I#{gettext.opt_include}
-I#{glib.opt_include}/glib-2.0
-I#{glib.opt_lib}/glib-2.0/include
-I#{gtkx.opt_include}/gtk-2.0
-I#{gtkx.opt_lib}/gtk-2.0/include
-I#{include}
-I#{libpng.opt_include}/libpng16
-I#{pango.opt_include}/pango-1.0
-I#{pixman.opt_include}/pixman-1
-D_REENTRANT
-L#{atk.opt_lib}
-L#{cairo.opt_lib}
-L#{gdk_pixbuf.opt_lib}
-L#{gettext.opt_lib}
-L#{glib.opt_lib}
-L#{gtkx.opt_lib}
-L#{lib}
-L#{pango.opt_lib}
-latk-1.0
-lcairo
-lgdk-quartz-2.0
-lgdk_pixbuf-2.0
-lgio-2.0
-lglib-2.0
-lgobject-2.0
-lgtk-quartz-2.0
-lgtkdatabox
-lintl
-lpango-1.0
-lpangocairo-1.0
]
system ENV.cc, "test.c", "-o", "test", *flags
system "./test"
end
end
| 30.164835 | 105 | 0.638616 |
ed448606251938372c87840e5c517f5ae63b5485 | 190 | require "spec_helper"
describe Aaet do
it "has a version number" do
expect(Aaet::VERSION).not_to be nil
end
it "does something useful" do
expect(false).to eq(true)
end
end
| 15.833333 | 39 | 0.694737 |
26117574cf60639e9222d62fb25b6456f7286bba | 428 | class Public::PagesController < ApplicationController
  # Static, unauthenticated marketing pages rendered with the "public" layout.
  layout 'public'
  # These pages are visible to anonymous visitors.
  # NOTE(review): skip_before_filter was removed in Rails 5 in favour of
  # skip_before_action — confirm the app's Rails version before upgrading.
  skip_before_filter :authenticate_user!
  skip_before_filter :authenticate_client!
  # Landing page; rendered without any layout (self-contained markup).
  def home
    render :layout => false
  end
  # The three actions below render their implicit views with the public layout.
  def about
  end
  def services
  end
  def contact
  end
  # Streams the blank manifest template (macro-enabled Excel workbook).
  def download_blank_manifest
    send_file 'spreadsheets/blank_manifest.xlsm', :type => 'application/vnd.ms-excel.sheet.macroEnabled.12'
  end
end
| 17.12 | 107 | 0.726636 |
bfd3471c99a79f233e7f199519fa6a47e6913c0c | 3,646 | require "rails_helper"
RSpec.describe HandleSfnNotificationsTimeout, type: :job do
  subject { HandleSfnNotificationsTimeout.perform }
  describe "#perform" do
    let(:project) { create(:project) }
    let(:sample) { create(:sample, project: project) }
    let(:run1) { create(:workflow_run, sample: sample, status: WorkflowRun::STATUS[:running], executed_at: 5.hours.ago) }
    let(:run2) { create(:workflow_run, sample: sample, status: WorkflowRun::STATUS[:succeeded], executed_at: 25.hours.ago) }
    let(:run3) { create(:workflow_run, sample: sample, status: WorkflowRun::STATUS[:running], executed_at: 25.hours.ago) }
    let(:run4) { create(:workflow_run, sample: sample, status: WorkflowRun::STATUS[:running], executed_at: 2.days.ago) }
    # job_status for run5 and run7 will be set using run.format_job_status_text in the test
    let(:run5) { create(:pipeline_run, sample: sample, executed_at: 5.hours.ago) }
    let(:run6) { create(:pipeline_run, sample: sample, job_status: PipelineRun::STATUS_CHECKED, executed_at: 25.hours.ago, finalized: 1) }
    let(:run7) { create(:pipeline_run, sample: sample, executed_at: 25.hours.ago) }
    context "when there are no overdue runs" do
      it "does nothing" do
        _ = [run1, run2, run5, run6]
        # Setting the run5's job_status using run.format_job_status_text; in practice, this is how job_status gets set by update_job_status/async_update_job_status.
        run5_job_status = run5.send(:format_job_status_text, run5.active_stage.step_number, run5.active_stage.name, PipelineRun::STATUS_RUNNING, run5.report_ready?)
        run5.update(job_status: run5_job_status)
        expect(subject).to eq(0)
        expect(run1.reload.status).to eq(WorkflowRun::STATUS[:running])
        expect(run2.reload.status).to eq(WorkflowRun::STATUS[:succeeded])
        expect(run5.reload.job_status).to eq(run5_job_status)
        expect(run6.reload.job_status).to eq(PipelineRun::STATUS_CHECKED)
      end
    end
    context "when there are overdue runs" do
      it "marks overdue workflow runs as failed" do
        _ = [run1, run2, run3, run4]
        expect(CloudWatchUtil).to receive(:put_metric_data)
        expect(subject).to eq(2)
        expect(run1.reload.status).to eq(WorkflowRun::STATUS[:running])
        expect(run2.reload.status).to eq(WorkflowRun::STATUS[:succeeded])
        expect(run3.reload.status).to eq(WorkflowRun::STATUS[:failed])
        expect(run4.reload.status).to eq(WorkflowRun::STATUS[:failed])
      end
      it "marks overdue pipeline runs as failed" do
        AppConfigHelper.set_app_config(AppConfig::ENABLE_SFN_NOTIFICATIONS, "1")
        _ = [run5, run6, run7]
        # Setting the job_status using run.format_job_status_text; in practice, this is how job_status gets set by update_job_status/async_update_job_status.
        run5_job_status = run5.send(:format_job_status_text, run5.active_stage.step_number, run5.active_stage.name, PipelineRun::STATUS_RUNNING, run5.report_ready?)
        run5.update(job_status: run5_job_status)
        # Derive run7's status from run7 itself (was erroneously copy-pasted from run5).
        run7_job_status = run7.send(:format_job_status_text, run7.active_stage.step_number, run7.active_stage.name, PipelineRun::STATUS_RUNNING, run7.report_ready?)
        run7.update(job_status: run7_job_status)
        expect(CloudWatchUtil).to receive(:put_metric_data)
        expect(subject).to eq(1)
        expect(run5.reload.job_status).to eq(run5_job_status)
        expect(run6.reload.job_status).to eq(PipelineRun::STATUS_CHECKED)
        expect(run7.reload.job_status).to eq(PipelineRun::STATUS_FAILED)
        expect(run7.reload.results_finalized?).to eq(true)
      end
    end
  end
end
| 51.352113 | 164 | 0.717773 |
d5724e407b77934dcc7560575cab9067a43f89b0 | 1,869 | module DL
# Adds Windows type aliases to the including class for use with
# DL::Importer.
#
# The aliases added are:
# * ATOM
# * BOOL
# * BYTE
# * DWORD
# * DWORD32
# * DWORD64
# * HANDLE
# * HDC
# * HINSTANCE
# * HWND
# * LPCSTR
# * LPSTR
# * PBYTE
# * PDWORD
# * PHANDLE
# * PVOID
# * PWORD
# * UCHAR
# * UINT
# * ULONG
# * WORD
module Win32Types
def included(m) # :nodoc:
m.module_eval{
typealias "DWORD", "unsigned long"
typealias "PDWORD", "unsigned long *"
typealias "DWORD32", "unsigned long"
typealias "DWORD64", "unsigned long long"
typealias "WORD", "unsigned short"
typealias "PWORD", "unsigned short *"
typealias "BOOL", "int"
typealias "ATOM", "int"
typealias "BYTE", "unsigned char"
typealias "PBYTE", "unsigned char *"
typealias "UINT", "unsigned int"
typealias "ULONG", "unsigned long"
typealias "UCHAR", "unsigned char"
typealias "HANDLE", "uintptr_t"
typealias "PHANDLE", "void*"
typealias "PVOID", "void*"
typealias "LPCSTR", "char*"
typealias "LPSTR", "char*"
typealias "HINSTANCE", "unsigned int"
typealias "HDC", "unsigned int"
typealias "HWND", "unsigned int"
}
end
module_function :included
end
# Adds basic type aliases to the including class for use with DL::Importer.
#
# The aliases added are +uint+ and +u_int+ (<tt>unsigned int</tt>) and
# +ulong+ and +u_long+ (<tt>unsigned long</tt>)
module BasicTypes
def included(m) # :nodoc:
m.module_eval{
typealias "uint", "unsigned int"
typealias "u_int", "unsigned int"
typealias "ulong", "unsigned long"
typealias "u_long", "unsigned long"
}
end
module_function :included
end
end
| 25.958333 | 77 | 0.581594 |
e2d33ce3f73cf5aafa22426b115a8f01d751654b | 4,177 | class BlockchainService
  # Raised when concurrent height tracking detects an inconsistent state.
  Error = Class.new(StandardError)
  # Raised when an address balance cannot be fetched from the node.
  BalanceLoadError = Class.new(StandardError)
  attr_reader :blockchain, :currencies, :adapter
  # Binds the service to a persisted blockchain record and configures the
  # registered Peatio adapter with the chain's enabled currencies.
  # (Parameter renamed from the misspelled "blockchian"; positional, so
  # callers are unaffected.)
  def initialize(blockchain)
    @blockchain = blockchain
    @currencies = blockchain.currencies.enabled
    @adapter = Peatio::Blockchain.registry[blockchain.client.to_sym]
    @adapter.configure(server: @blockchain.server,
                       currencies: @currencies.map(&:to_blockchain_api_settings))
  end
  # Latest block number reported by the node, cached for 5 seconds to avoid
  # hammering the upstream RPC endpoint.
  def latest_block_number
    Rails.cache.fetch("latest_#{@blockchain.client}_block_number", expires_in: 5.seconds) do
      @adapter.latest_block_number
    end
  end
  # Fetches the on-chain balance of +address+ for +currency_id+.
  # Raises BalanceLoadError when the adapter fails to query the node.
  def load_balance!(address, currency_id)
    @adapter.load_balance_of_address!(address, currency_id)
  rescue Peatio::Blockchain::Error => e
    report_exception(e)
    raise BalanceLoadError
  end
  # Whether addresses on this chain are case sensitive (adapter feature flag).
  def case_sensitive?
    @adapter.features[:case_sensitive]
  end
  # Whether the chain supports the CashAddr address format (adapter feature flag).
  def supports_cash_addr_format?
    @adapter.features[:cash_addr_format]
  end
  # Processes a single block: persists matching deposits, updates confirming
  # withdrawals and advances the stored height inside one DB transaction.
  # Accepted deposits are collected only after the transaction commits.
  # Returns the fetched block.
  def process_block(block_number)
    block = @adapter.fetch_block!(block_number)
    deposits = filter_deposits(block)
    withdrawals = filter_withdrawals(block)
    accepted_deposits = []
    ActiveRecord::Base.transaction do
      accepted_deposits = deposits.map(&method(:update_or_create_deposit)).compact
      withdrawals.each(&method(:update_withdrawal))
      update_height(block_number, adapter.latest_block_number)
    end
    accepted_deposits.each(&:collect!)
    block
  end
  private
  # Transactions in +block+ paying to one of our payment addresses.
  def filter_deposits(block)
    # TODO: Process addresses in batch in case of huge number of PA.
    addresses = PaymentAddress.where(currency: @currencies).pluck(:address).compact
    block.select { |transaction| transaction.to_address.in?(addresses) }
  end
  # Transactions in +block+ that match a withdrawal awaiting confirmation.
  def filter_withdrawals(block)
    # TODO: Process addresses in batch in case of huge number of confirming withdrawals.
    withdraw_txids = Withdraws::Coin.confirming.where(currency: @currencies).pluck(:txid)
    block.select { |transaction| transaction.hash.in?(withdraw_txids) }
  end
  # Idempotently records a deposit (keyed by currency/txid/txout) and accepts
  # it once it has enough confirmations. Returns the deposit when it was
  # accepted, otherwise nil.
  def update_or_create_deposit(transaction)
    # NOTE(review): deposits exactly equal to min_deposit_amount are also
    # skipped (<=) — confirm that boundary is intended.
    if transaction.amount <= Currency.find(transaction.currency_id).min_deposit_amount
      # Currently we just skip tiny deposits.
      # Fixed: the amount placeholder previously interpolated transaction.hash.
      Rails.logger.info do
        "Skipped deposit with txid: #{transaction.hash} with amount: #{transaction.amount}"\
        " to #{transaction.to_address} in block number #{transaction.block_number}"
      end
      return
    end
    # TODO: Rewrite this guard clause
    return unless PaymentAddress.exists?(currency_id: transaction.currency_id, address: transaction.to_address)
    deposit =
      Deposits::Coin.find_or_create_by!(
        currency_id: transaction.currency_id,
        txid: transaction.hash,
        txout: transaction.txout
      ) do |d|
        d.address = transaction.to_address
        d.amount = transaction.amount
        d.member = PaymentAddress.find_by(currency_id: transaction.currency_id, address: transaction.to_address).account.member
        d.block_number = transaction.block_number
      end
    deposit.update_column(:block_number, transaction.block_number) if deposit.block_number != transaction.block_number
    deposit if deposit.confirmations >= @blockchain.min_confirmations && deposit.accept!
  end
  # Transitions a confirming withdrawal according to its transaction's
  # on-chain status; silently skips withdrawals we do not track.
  def update_withdrawal(transaction)
    withdrawal =
      Withdraws::Coin.confirming
        .find_by(currency_id: transaction.currency_id, txid: transaction.hash)
    # Skip non-existing in database withdrawals.
    if withdrawal.blank?
      Rails.logger.info { "Skipped withdrawal: #{transaction.hash}." }
      return
    end
    withdrawal.update_column(:block_number, transaction.block_number)
    if transaction.status.failed?
      withdrawal.fail!
    elsif transaction.status.success? && withdrawal.confirmations >= @blockchain.min_confirmations
      withdrawal.success!
    end
  end
  # Advances the persisted height once the node is far enough ahead.
  # Raises Error when another process reset the height concurrently.
  def update_height(block_number, latest_block)
    raise Error, "#{blockchain.name} height was reset." if blockchain.height != blockchain.reload.height
    blockchain.update(height: block_number) if latest_block - block_number >= blockchain.min_confirmations
  end
end
| 33.95935 | 127 | 0.731147 |
2639455e910a22139ab7b5e27ad493bdbc0deba8 | 23,029 | # frozen_string_literal: true
$: << File.expand_path(__dir__)
Encoding.default_external = 'UTF-8'
require_relative '../lib/gitlab'
require_relative '../lib/gitlab/utils'
require_relative '../config/initializers/0_inject_enterprise_edition_module'
require 'chemlab'
module QA
##
# Helper classes to represent frequently used sequences of actions
# (e.g., login)
#
module Flow
autoload :Login, 'qa/flow/login'
autoload :Project, 'qa/flow/project'
autoload :Saml, 'qa/flow/saml'
autoload :User, 'qa/flow/user'
autoload :MergeRequest, 'qa/flow/merge_request'
autoload :Pipeline, 'qa/flow/pipeline'
autoload :SignUp, 'qa/flow/sign_up'
end
##
# GitLab QA runtime classes, mostly singletons.
#
module Runtime
autoload :Release, 'qa/runtime/release'
autoload :User, 'qa/runtime/user'
autoload :Namespace, 'qa/runtime/namespace'
autoload :Scenario, 'qa/runtime/scenario'
autoload :Browser, 'qa/runtime/browser'
autoload :Env, 'qa/runtime/env'
autoload :Address, 'qa/runtime/address'
autoload :Path, 'qa/runtime/path'
autoload :Feature, 'qa/runtime/feature'
autoload :Fixtures, 'qa/runtime/fixtures'
autoload :Logger, 'qa/runtime/logger'
autoload :GPG, 'qa/runtime/gpg'
autoload :MailHog, 'qa/runtime/mail_hog'
autoload :IPAddress, 'qa/runtime/ip_address'
autoload :Search, 'qa/runtime/search'
autoload :ApplicationSettings, 'qa/runtime/application_settings'
autoload :AllureReport, 'qa/runtime/allure_report'
module API
autoload :Client, 'qa/runtime/api/client'
autoload :RepositoryStorageMoves, 'qa/runtime/api/repository_storage_moves'
autoload :Request, 'qa/runtime/api/request'
end
module Key
autoload :Base, 'qa/runtime/key/base'
autoload :RSA, 'qa/runtime/key/rsa'
autoload :ECDSA, 'qa/runtime/key/ecdsa'
autoload :ED25519, 'qa/runtime/key/ed25519'
end
end
##
# GitLab QA fabrication mechanisms
#
module Resource
autoload :ApiFabricator, 'qa/resource/api_fabricator'
autoload :Base, 'qa/resource/base'
autoload :GroupBase, 'qa/resource/group_base'
autoload :Sandbox, 'qa/resource/sandbox'
autoload :Group, 'qa/resource/group'
autoload :Issue, 'qa/resource/issue'
autoload :ProjectIssueNote, 'qa/resource/project_issue_note'
autoload :Project, 'qa/resource/project'
autoload :LabelBase, 'qa/resource/label_base'
autoload :ProjectLabel, 'qa/resource/project_label'
autoload :GroupLabel, 'qa/resource/group_label'
autoload :MergeRequest, 'qa/resource/merge_request'
autoload :ProjectImportedFromGithub, 'qa/resource/project_imported_from_github'
autoload :MergeRequestFromFork, 'qa/resource/merge_request_from_fork'
autoload :DeployKey, 'qa/resource/deploy_key'
autoload :DeployToken, 'qa/resource/deploy_token'
autoload :ProtectedBranch, 'qa/resource/protected_branch'
autoload :Pipeline, 'qa/resource/pipeline'
autoload :CiVariable, 'qa/resource/ci_variable'
autoload :Runner, 'qa/resource/runner'
autoload :PersonalAccessToken, 'qa/resource/personal_access_token'
autoload :ProjectAccessToken, 'qa/resource/project_access_token'
autoload :User, 'qa/resource/user'
autoload :ProjectMilestone, 'qa/resource/project_milestone'
autoload :GroupMilestone, 'qa/resource/group_milestone'
autoload :Members, 'qa/resource/members'
autoload :File, 'qa/resource/file'
autoload :Fork, 'qa/resource/fork'
autoload :SSHKey, 'qa/resource/ssh_key'
autoload :Snippet, 'qa/resource/snippet'
autoload :Tag, 'qa/resource/tag'
autoload :ProjectMember, 'qa/resource/project_member'
autoload :ProjectSnippet, 'qa/resource/project_snippet'
autoload :UserGPG, 'qa/resource/user_gpg'
autoload :Visibility, 'qa/resource/visibility'
autoload :ProjectSnippet, 'qa/resource/project_snippet'
autoload :Design, 'qa/resource/design'
autoload :RegistryRepository, 'qa/resource/registry_repository'
autoload :Package, 'qa/resource/package'
autoload :PipelineSchedules, 'qa/resource/pipeline_schedules'
module KubernetesCluster
autoload :Base, 'qa/resource/kubernetes_cluster/base'
autoload :ProjectCluster, 'qa/resource/kubernetes_cluster/project_cluster'
end
module Clusters
autoload :Agent, 'qa/resource/clusters/agent.rb'
autoload :AgentToken, 'qa/resource/clusters/agent_token.rb'
end
module Events
autoload :Base, 'qa/resource/events/base'
autoload :Project, 'qa/resource/events/project'
end
module Repository
autoload :Commit, 'qa/resource/repository/commit'
autoload :Push, 'qa/resource/repository/push'
autoload :ProjectPush, 'qa/resource/repository/project_push'
autoload :WikiPush, 'qa/resource/repository/wiki_push'
end
module Wiki
autoload :ProjectPage, 'qa/resource/wiki/project_page'
autoload :GroupPage, 'qa/resource/wiki/group_page'
end
end
##
# GitLab QA Scenarios
#
module Scenario
##
# Support files
#
autoload :Bootable, 'qa/scenario/bootable'
autoload :Actable, 'qa/scenario/actable'
autoload :Template, 'qa/scenario/template'
autoload :SharedAttributes, 'qa/scenario/shared_attributes'
##
# Test scenario entrypoints.
#
module Test
autoload :Instance, 'qa/scenario/test/instance'
module Instance
autoload :All, 'qa/scenario/test/instance/all'
autoload :Smoke, 'qa/scenario/test/instance/smoke'
autoload :Airgapped, 'qa/scenario/test/instance/airgapped'
end
module Integration
autoload :Github, 'qa/scenario/test/integration/github'
autoload :LDAPNoTLS, 'qa/scenario/test/integration/ldap_no_tls'
autoload :LDAPNoServer, 'qa/scenario/test/integration/ldap_no_server'
autoload :LDAPTLS, 'qa/scenario/test/integration/ldap_tls'
autoload :InstanceSAML, 'qa/scenario/test/integration/instance_saml'
autoload :Kubernetes, 'qa/scenario/test/integration/kubernetes'
autoload :Mattermost, 'qa/scenario/test/integration/mattermost'
autoload :ObjectStorage, 'qa/scenario/test/integration/object_storage'
autoload :SMTP, 'qa/scenario/test/integration/smtp'
autoload :SSHTunnel, 'qa/scenario/test/integration/ssh_tunnel'
end
module Sanity
autoload :Framework, 'qa/scenario/test/sanity/framework'
autoload :Selectors, 'qa/scenario/test/sanity/selectors'
end
end
end
##
# Classes describing structure of GitLab, pages, menus etc.
#
# Needed to execute click-driven-only black-box tests.
#
module Page
autoload :Base, 'qa/page/base'
autoload :View, 'qa/page/view'
autoload :Element, 'qa/page/element'
autoload :PageConcern, 'qa/page/page_concern'
autoload :Validator, 'qa/page/validator'
autoload :Validatable, 'qa/page/validatable'
module SubMenus
autoload :Common, 'qa/page/sub_menus/common'
end
module Main
autoload :Login, 'qa/page/main/login'
autoload :Menu, 'qa/page/main/menu'
autoload :OAuth, 'qa/page/main/oauth'
autoload :TwoFactorAuth, 'qa/page/main/two_factor_auth'
autoload :Terms, 'qa/page/main/terms'
end
module Registration
autoload :SignUp, 'qa/page/registration/sign_up'
autoload :Welcome, 'qa/page/registration/welcome'
end
module Settings
autoload :Common, 'qa/page/settings/common'
end
module Dashboard
autoload :Projects, 'qa/page/dashboard/projects'
autoload :Groups, 'qa/page/dashboard/groups'
autoload :Welcome, 'qa/page/dashboard/welcome'
autoload :Todos, 'qa/page/dashboard/todos'
module Snippet
autoload :New, 'qa/page/dashboard/snippet/new'
autoload :Index, 'qa/page/dashboard/snippet/index'
autoload :Show, 'qa/page/dashboard/snippet/show'
autoload :Edit, 'qa/page/dashboard/snippet/edit'
end
end
module Group
autoload :New, 'qa/page/group/new'
autoload :Show, 'qa/page/group/show'
autoload :Menu, 'qa/page/group/menu'
autoload :Members, 'qa/page/group/members'
autoload :BulkImport, 'qa/page/group/bulk_import'
module Milestone
autoload :Index, 'qa/page/group/milestone/index'
autoload :New, 'qa/page/group/milestone/new'
end
module SubMenus
autoload :Common, 'qa/page/group/sub_menus/common'
end
module Settings
autoload :General, 'qa/page/group/settings/general'
autoload :PackageRegistries, 'qa/page/group/settings/package_registries'
end
end
module Milestone
autoload :Index, 'qa/page/milestone/index'
autoload :New, 'qa/page/milestone/new'
autoload :Show, 'qa/page/milestone/show'
end
module File
autoload :Form, 'qa/page/file/form'
autoload :Show, 'qa/page/file/show'
autoload :Edit, 'qa/page/file/edit'
module Shared
autoload :CommitMessage, 'qa/page/file/shared/commit_message'
autoload :CommitButton, 'qa/page/file/shared/commit_button'
autoload :Editor, 'qa/page/file/shared/editor'
end
end
module Project
autoload :New, 'qa/page/project/new'
autoload :Show, 'qa/page/project/show'
autoload :Activity, 'qa/page/project/activity'
autoload :Menu, 'qa/page/project/menu'
autoload :Members, 'qa/page/project/members'
module Artifact
autoload :Show, 'qa/page/project/artifact/show'
end
module Branches
autoload :Show, 'qa/page/project/branches/show'
end
module Commit
autoload :Show, 'qa/page/project/commit/show'
end
module Import
autoload :Github, 'qa/page/project/import/github'
end
module Pipeline
autoload :Index, 'qa/page/project/pipeline/index'
autoload :Show, 'qa/page/project/pipeline/show'
autoload :New, 'qa/page/project/pipeline/new'
end
module Tag
autoload :Index, 'qa/page/project/tag/index'
autoload :New, 'qa/page/project/tag/new'
autoload :Show, 'qa/page/project/tag/show'
end
module Job
autoload :Show, 'qa/page/project/job/show'
end
module Packages
autoload :Index, 'qa/page/project/packages/index'
autoload :Show, 'qa/page/project/packages/show'
end
module Registry
autoload :Show, 'qa/page/project/registry/show'
end
module Settings
autoload :Advanced, 'qa/page/project/settings/advanced'
autoload :Main, 'qa/page/project/settings/main'
autoload :Repository, 'qa/page/project/settings/repository'
autoload :CICD, 'qa/page/project/settings/ci_cd'
autoload :Integrations, 'qa/page/project/settings/integrations'
autoload :GeneralPipelines, 'qa/page/project/settings/general_pipelines'
autoload :AutoDevops, 'qa/page/project/settings/auto_devops'
autoload :DeployKeys, 'qa/page/project/settings/deploy_keys'
autoload :DeployTokens, 'qa/page/project/settings/deploy_tokens'
autoload :ProtectedBranches, 'qa/page/project/settings/protected_branches'
autoload :CiVariables, 'qa/page/project/settings/ci_variables'
autoload :Runners, 'qa/page/project/settings/runners'
autoload :MergeRequest, 'qa/page/project/settings/merge_request'
autoload :MirroringRepositories, 'qa/page/project/settings/mirroring_repositories'
autoload :ProtectedTags, 'qa/page/project/settings/protected_tags'
autoload :VisibilityFeaturesPermissions, 'qa/page/project/settings/visibility_features_permissions'
autoload :AccessTokens, 'qa/page/project/settings/access_tokens'
module Services
autoload :Jira, 'qa/page/project/settings/services/jira'
autoload :Jenkins, 'qa/page/project/settings/services/jenkins'
autoload :Prometheus, 'qa/page/project/settings/services/prometheus'
end
autoload :Operations, 'qa/page/project/settings/operations'
autoload :Alerts, 'qa/page/project/settings/alerts'
autoload :Integrations, 'qa/page/project/settings/integrations'
end
module SubMenus
autoload :CiCd, 'qa/page/project/sub_menus/ci_cd'
autoload :Common, 'qa/page/project/sub_menus/common'
autoload :Issues, 'qa/page/project/sub_menus/issues'
autoload :Operations, 'qa/page/project/sub_menus/operations'
autoload :Repository, 'qa/page/project/sub_menus/repository'
autoload :Settings, 'qa/page/project/sub_menus/settings'
autoload :Project, 'qa/page/project/sub_menus/project'
autoload :Packages, 'qa/page/project/sub_menus/packages'
end
module Issue
autoload :New, 'qa/page/project/issue/new'
autoload :Show, 'qa/page/project/issue/show'
autoload :Index, 'qa/page/project/issue/index'
autoload :JiraImport, 'qa/page/project/issue/jira_import'
end
module Fork
autoload :New, 'qa/page/project/fork/new'
end
module Milestone
autoload :New, 'qa/page/project/milestone/new'
autoload :Index, 'qa/page/project/milestone/index'
end
module Operations
module Environments
autoload :Index, 'qa/page/project/operations/environments/index'
autoload :Show, 'qa/page/project/operations/environments/show'
end
module Kubernetes
autoload :Index, 'qa/page/project/operations/kubernetes/index'
autoload :Add, 'qa/page/project/operations/kubernetes/add'
autoload :AddExisting, 'qa/page/project/operations/kubernetes/add_existing'
autoload :Show, 'qa/page/project/operations/kubernetes/show'
end
module Metrics
autoload :Show, 'qa/page/project/operations/metrics/show'
end
module Incidents
autoload :Index, 'qa/page/project/operations/incidents/index'
end
end
module Wiki
autoload :Edit, 'qa/page/project/wiki/edit'
autoload :Show, 'qa/page/project/wiki/show'
autoload :GitAccess, 'qa/page/project/wiki/git_access'
autoload :List, 'qa/page/project/wiki/list'
end
module WebIDE
autoload :Edit, 'qa/page/project/web_ide/edit'
end
module Snippet
autoload :New, 'qa/page/project/snippet/new'
autoload :Show, 'qa/page/project/snippet/show'
autoload :Index, 'qa/page/project/snippet/index'
end
end
module Profile
autoload :Menu, 'qa/page/profile/menu'
autoload :PersonalAccessTokens, 'qa/page/profile/personal_access_tokens'
autoload :SSHKeys, 'qa/page/profile/ssh_keys'
autoload :Emails, 'qa/page/profile/emails'
autoload :Password, 'qa/page/profile/password'
autoload :TwoFactorAuth, 'qa/page/profile/two_factor_auth'
module Accounts
autoload :Show, 'qa/page/profile/accounts/show'
end
end
module User
autoload :Show, 'qa/page/user/show'
end
module Issuable
autoload :New, 'qa/page/issuable/new'
end
module Alert
autoload :AutoDevopsAlert, 'qa/page/alert/auto_devops_alert'
autoload :FreeTrial, 'qa/page/alert/free_trial'
end
module Layout
autoload :Banner, 'qa/page/layout/banner'
autoload :Flash, 'qa/page/layout/flash'
autoload :PerformanceBar, 'qa/page/layout/performance_bar'
end
module Label
autoload :New, 'qa/page/label/new'
autoload :Index, 'qa/page/label/index'
end
module MergeRequest
autoload :New, 'qa/page/merge_request/new'
autoload :Show, 'qa/page/merge_request/show'
end
module Admin
autoload :Menu, 'qa/page/admin/menu'
autoload :NewSession, 'qa/page/admin/new_session'
module Settings
autoload :General, 'qa/page/admin/settings/general'
autoload :MetricsAndProfiling, 'qa/page/admin/settings/metrics_and_profiling'
autoload :Network, 'qa/page/admin/settings/network'
module Component
autoload :IpLimits, 'qa/page/admin/settings/component/ip_limits'
autoload :OutboundRequests, 'qa/page/admin/settings/component/outbound_requests'
autoload :AccountAndLimit, 'qa/page/admin/settings/component/account_and_limit'
autoload :PerformanceBar, 'qa/page/admin/settings/component/performance_bar'
autoload :SignUpRestrictions, 'qa/page/admin/settings/component/sign_up_restrictions'
end
end
module Overview
module Users
autoload :Index, 'qa/page/admin/overview/users/index'
autoload :Show, 'qa/page/admin/overview/users/show'
end
module Groups
autoload :Index, 'qa/page/admin/overview/groups/index'
autoload :Show, 'qa/page/admin/overview/groups/show'
autoload :Edit, 'qa/page/admin/overview/groups/edit'
end
end
end
module Mattermost
autoload :Main, 'qa/page/mattermost/main'
autoload :Login, 'qa/page/mattermost/login'
end
module Search
autoload :Results, 'qa/page/search/results'
end
##
# Classes describing components that are used by several pages.
#
module Component
autoload :Breadcrumbs, 'qa/page/component/breadcrumbs'
autoload :CiBadgeLink, 'qa/page/component/ci_badge_link'
autoload :ClonePanel, 'qa/page/component/clone_panel'
autoload :DesignManagement, 'qa/page/component/design_management'
autoload :LazyLoader, 'qa/page/component/lazy_loader'
autoload :LegacyClonePanel, 'qa/page/component/legacy_clone_panel'
autoload :Dropzone, 'qa/page/component/dropzone'
autoload :GroupsFilter, 'qa/page/component/groups_filter'
autoload :Select2, 'qa/page/component/select2'
autoload :DropdownFilter, 'qa/page/component/dropdown_filter'
autoload :UsersSelect, 'qa/page/component/users_select'
autoload :Note, 'qa/page/component/note'
autoload :ConfirmModal, 'qa/page/component/confirm_modal'
autoload :CustomMetric, 'qa/page/component/custom_metric'
autoload :DesignManagement, 'qa/page/component/design_management'
autoload :ProjectSelector, 'qa/page/component/project_selector'
autoload :Snippet, 'qa/page/component/snippet'
autoload :NewSnippet, 'qa/page/component/new_snippet'
autoload :InviteMembersModal, 'qa/page/component/invite_members_modal'
autoload :Wiki, 'qa/page/component/wiki'
autoload :WikiSidebar, 'qa/page/component/wiki_sidebar'
autoload :WikiPageForm, 'qa/page/component/wiki_page_form'
autoload :AccessTokens, 'qa/page/component/access_tokens'
autoload :CommitModal, 'qa/page/component/commit_modal'
autoload :VisibilitySetting, 'qa/page/component/visibility_setting'
module Issuable
autoload :Common, 'qa/page/component/issuable/common'
autoload :Sidebar, 'qa/page/component/issuable/sidebar'
end
module IssueBoard
autoload :Show, 'qa/page/component/issue_board/show'
end
module WebIDE
autoload :Alert, 'qa/page/component/web_ide/alert'
module Modal
autoload :CreateNewFile, 'qa/page/component/web_ide/modal/create_new_file'
end
end
module Project
autoload :Templates, 'qa/page/component/project/templates'
end
end
module Trials
autoload :New, 'qa/page/trials/new'
autoload :Select, 'qa/page/trials/select'
end
module Modal
autoload :DeleteWiki, 'qa/page/modal/delete_wiki'
end
end
##
# Classes describing operations on Git repositories.
#
module Git
autoload :Repository, 'qa/git/repository'
autoload :Location, 'qa/git/location'
end
##
# Classes describing services being part of GitLab and how we can interact
# with these services, like through the shell.
#
module Service
autoload :Shellout, 'qa/service/shellout'
autoload :KubernetesCluster, 'qa/service/kubernetes_cluster'
autoload :Omnibus, 'qa/service/omnibus'
autoload :PraefectManager, 'qa/service/praefect_manager'
module ClusterProvider
autoload :Base, 'qa/service/cluster_provider/base'
autoload :Gcloud, 'qa/service/cluster_provider/gcloud'
autoload :Minikube, 'qa/service/cluster_provider/minikube'
autoload :K3d, 'qa/service/cluster_provider/k3d'
autoload :K3s, 'qa/service/cluster_provider/k3s'
end
module DockerRun
autoload :Base, 'qa/service/docker_run/base'
autoload :Jenkins, 'qa/service/docker_run/jenkins'
autoload :LDAP, 'qa/service/docker_run/ldap'
autoload :Maven, 'qa/service/docker_run/maven'
autoload :NodeJs, 'qa/service/docker_run/node_js'
autoload :GitlabRunner, 'qa/service/docker_run/gitlab_runner'
autoload :MailHog, 'qa/service/docker_run/mail_hog'
autoload :SamlIdp, 'qa/service/docker_run/saml_idp'
autoload :K3s, 'qa/service/docker_run/k3s'
end
end
##
# Classes that make it possible to execute features tests.
#
module Specs
autoload :Config, 'qa/specs/config'
autoload :Runner, 'qa/specs/runner'
autoload :ParallelRunner, 'qa/specs/parallel_runner'
autoload :LoopRunner, 'qa/specs/loop_runner'
module Helpers
autoload :ContextSelector, 'qa/specs/helpers/context_selector'
autoload :Quarantine, 'qa/specs/helpers/quarantine'
autoload :RSpec, 'qa/specs/helpers/rspec'
end
end
##
# Classes that describe the structure of vendor/third party application pages
#
module Vendor
module SAMLIdp
module Page
autoload :Base, 'qa/vendor/saml_idp/page/base'
autoload :Login, 'qa/vendor/saml_idp/page/login'
end
end
module Jenkins
module Page
autoload :Base, 'qa/vendor/jenkins/page/base'
autoload :Login, 'qa/vendor/jenkins/page/login'
autoload :Configure, 'qa/vendor/jenkins/page/configure'
autoload :NewCredentials, 'qa/vendor/jenkins/page/new_credentials'
autoload :NewJob, 'qa/vendor/jenkins/page/new_job'
autoload :LastJobConsole, 'qa/vendor/jenkins/page/last_job_console'
autoload :ConfigureJob, 'qa/vendor/jenkins/page/configure_job'
end
end
module Jira
autoload :JiraAPI, 'qa/vendor/jira/jira_api'
end
end
  ##
  # Classes that provide support to other parts of the framework.
  #
module Support
module Page
autoload :Logging, 'qa/support/page/logging'
end
autoload :Api, 'qa/support/api'
autoload :Dates, 'qa/support/dates'
autoload :Repeater, 'qa/support/repeater'
autoload :Run, 'qa/support/run'
autoload :Retrier, 'qa/support/retrier'
autoload :Waiter, 'qa/support/waiter'
autoload :WaitForRequests, 'qa/support/wait_for_requests'
autoload :OTP, 'qa/support/otp'
autoload :SSH, 'qa/support/ssh'
end
end
QA::Runtime::Release.extend_autoloads!
| 34.892424 | 107 | 0.688523 |
require 'rails/generators'
module Railwaymen
  # Rails generator that copies the gem's initializer template into the host
  # application. Invoke with `rails generate railwaymen:install`.
  class InstallGenerator < ::Rails::Generators::Base
    namespace 'railwaymen:install'
    # Templates live in the sibling `templates/` directory; `__dir__` is the
    # modern equivalent of File.expand_path('../templates', __FILE__).
    source_root File.expand_path('templates', __dir__)

    desc 'Generates railwaymen gem initializer.'

    # Copies templates/initializer.rb to config/initializers/railwaymen.rb.
    def install
      template 'initializer.rb', 'config/initializers/railwaymen.rb'
    end
  end
end
| 24.714286 | 68 | 0.734104 |
require "./lib/initializer"
require "clockwork"
# Clockwork scheduler: registers the recurring maintenance jobs.
module Clockwork
  # Every minute, top the pgbench worker pool back up to full strength.
  every(1.minute, "top-off-workers") do
    PGPerf::PGBenchToolsWorker.top_off_workers
  end
  # Every 4 hours, flag workers for restart so they recycle cleanly.
  every(4.hours, "mark-restart") do
    PGPerf::PGBenchToolsWorker.mark_restart
  end
end
| 19.230769 | 46 | 0.748 |
# Homebrew cask for the Sunlogin (向日葵) remote-control client for macOS.
cask "sunlogincontrol" do
  version "3.0"
  sha256 "87ff12a9192ed809c2c8668e63cf44b1101544e52f43e8cc912ed2b20acf077f"
  url "https://download.oray.com/sunlogin/mac/SunloginControl#{version}.dmg"
  # Casks may declare several names; English product name plus Chinese brand.
  name "SunloginControl"
  name "向日葵控制端"
  homepage "https://sunlogin.oray.com/"
  pkg "SunloginControl.pkg"
  # Quit the running agent and forget the installer receipt on uninstall.
  uninstall quit:    "com.oray.remote",
            pkgutil: "com.oray.sunlogin.control"
end
#
# webhook_service.rb
# ShipHero
#
# Copyright (c) 2017 Kyle Schutt. All rights reserved.
module ShipHero
  module Services
    # GraphQL-backed operations for listing, registering and unregistering
    # ShipHero webhooks.
    class WebhookService < BaseService
      # Fetches all registered webhooks.
      #
      # Fix: the original called `edges.each` directly, which raised
      # NoMethodError whenever the safe-navigation chain yielded nil
      # (e.g. an empty or error response); default to an empty list instead.
      #
      # @return [ShipHero::Responses::GetWebhooks] wrapper with zero or more
      #   ShipHero::Webhook objects
      def get_webhooks
        response = client.query ShipHero::Queries::GetWebhooksQuery
        edges = response&.data&.webhooks&.data&.edges || []
        webhooks = edges.map do |edge|
          ShipHero::Webhook.new(name: edge.node.name, url: edge.node.url)
        end
        ShipHero::Responses::GetWebhooks.new(webhooks: webhooks)
      end

      # Registers a new webhook.
      # @param request [ShipHero::Webhook]
      # @raise [Exceptions::ServiceException] when given any other type
      def register_webhook(request)
        raise Exceptions::ServiceException, "Must be a ShipHero::Webhook" unless request.is_a?(ShipHero::Webhook)
        client.query ShipHero::Queries::RegisterWebhookQuery, { newWebhook: request }
      end

      # Removes an existing webhook.
      # @param request [ShipHero::Webhook]
      # @raise [Exceptions::ServiceException] when given any other type
      def unregister_webhook(request)
        raise Exceptions::ServiceException, "Must be a ShipHero::Webhook" unless request.is_a?(ShipHero::Webhook)
        client.query ShipHero::Queries::UnregisterWebhookQuery, { webhook: request }
      end
    end
  end
end
| 32.4375 | 113 | 0.684008 |
require 'spec_helper'
require 'pp'
# Specs for Databox::Client. The HTTP layer (`raw_push`) is stubbed out, so
# these only exercise client-side validation and payload handling.
describe Databox::Client do
  before do
    Databox.configure do |c|
      c.push_token = 'adxg1kq5a4g04k0wk0s4wkssow8osw84'
    end
    # Stub every network call to return a canned success response.
    allow_any_instance_of(Databox::Client).to receive(:raw_push)\
      .and_return({'id' => '147251'})
  end
  let!(:client) { Databox::Client.new }
  # `push` must reject nil payloads and payloads missing a key or value.
  context 'push' do
    it { expect { client.push(nil) }.to raise_exception }
    it { expect { client.push(key: 'sales.total', value: nil) }.to raise_exception }
    it { expect { client.push(key: nil, value: 3000) }.to raise_exception }
    it { expect(client.push(key: 'sales.total', value: 2000)).to eq true }
  end
  # context 'push w/ attributes' do
  #   it {
  #     payload = {
  #       key: 'test',
  #       value: 200,
  #       attributes: {
  #         'me': 'Oto'
  #       }
  #     }
  #
  #     expect(client).to receive(:raw_push)
  #     .with('/', [
  #       {"$test" => 200, :me => "Oto"}
  #     ])
  #     .once.and_call_original
  #     expect(client.push(payload)).to eq true
  #   }
  # end
  # `insert_all` fails fast if any element is invalid, and pushes in bulk
  # otherwise (dates are optional per element).
  context 'insert_all' do
    it { expect { client.insert_all([
      {key: 'temp.lj'},
      {key: 'temp.ljx', value: 60.3},
    ]) }.to raise_exception }
    it { expect(client.insert_all([
      {key: 'temp.ljx', value: 4.3},
      {key: 'temp.ljx', value: 1.3, date: '2015-01-01 09:00:00'},
    ])).to eq true }
  end
end
# frozen_string_literal: true
# Test-fixture model exercising a required has_one association.
class Required < ActiveRecord::Base
  # NOTE(review): `null_object:` is not a standard Rails association option —
  # presumably consumed by the gem under test, with NullAccount defined
  # elsewhere as the null-object fallback; confirm.
  has_one :account, required: true, null_object: NullAccount
end
require 'quickeebooks/online/service/service_base'
require 'quickeebooks/online/model/payment'
require 'quickeebooks/online/model/payment_header'
require 'quickeebooks/online/model/payment_line_item'
require 'quickeebooks/online/model/payment_detail'
require 'quickeebooks/online/model/credit_card'
require 'quickeebooks/online/model/credit_charge_info'
require 'quickeebooks/online/model/credit_charge_response'
require 'nokogiri'
module Quickeebooks
  module Online
    module Service
      # CRUD and listing operations for QuickBooks Online Payment resources
      # via the Intuit Data Services REST API.
      #
      # Fixes: the 200-check-and-parse logic was duplicated in three methods
      # and only `fetch_by_id` guarded against a nil response; it is now a
      # single nil-safe private helper used everywhere.
      class Payment < ServiceBase
        # Creates a payment.
        # @raise [InvalidModelException] when the model fails validation
        # @return [Quickeebooks::Online::Model::Payment, nil] the created
        #   payment, or nil when the API call did not succeed
        def create(payment)
          raise InvalidModelException unless payment.valid?

          xml = payment.to_xml_ns
          url = url_for_resource(Quickeebooks::Online::Model::Payment::REST_RESOURCE)
          response = do_http_post(url, valid_xml_document(xml))
          payment_from(response)
        end

        # Fetches a single payment by id; nil when not found.
        def fetch_by_id(id)
          url = "#{url_for_resource("payment")}/#{id}"
          payment_from(do_http_get(url))
        end

        # Updates an existing payment (requires id and sync token).
        # @raise [InvalidModelException] when required fields are missing
        def update(payment)
          raise InvalidModelException.new("Missing required parameters for update") unless payment.valid_for_update?

          url = "#{url_for_resource(Quickeebooks::Online::Model::Payment::REST_RESOURCE)}/#{payment.id}"
          xml = payment.to_xml_ns
          payment_from(do_http_post(url, valid_xml_document(xml)))
        end

        # Returns a paginated collection of payments matching the filters.
        def list(filters = [], page = 1, per_page = 20, sort = nil, options = {})
          fetch_collection(Quickeebooks::Online::Model::Payment, filters, page, per_page, sort, options)
        end

        # Deletes a payment. Only Id and SyncToken are sent in the body.
        # @return [Boolean] true when the API acknowledged the deletion
        def delete(payment)
          raise InvalidModelException.new("Missing required parameters for delete") unless payment.valid_for_deletion?

          xml = valid_xml_document(payment.to_xml_ns(:fields => ['Id', 'SyncToken']))
          url = "#{url_for_resource(Quickeebooks::Online::Model::Payment::REST_RESOURCE)}/#{payment.id}"
          # NOTE(review): presumably the legacy IDS v2 `methodx=delete`
          # parameter — confirm against ServiceBase#do_http_post.
          response = do_http_post(url, xml, {:methodx => "delete"})
          !!(response && response.code.to_i == 200)
        end

        private

        # Parses a Payment out of a successful (HTTP 200) response; returns
        # nil for a nil response or any other status code.
        def payment_from(response)
          return nil unless response && response.code.to_i == 200

          Quickeebooks::Online::Model::Payment.from_xml(response.body)
        end
      end
    end
  end
end
# Adds a free-text staff-only comments field to assessment questions.
class AddStaffOnlyCommentsToQuestions < ActiveRecord::Migration[4.2]
  def change
    add_column :course_assessment_questions, :staff_only_comments, :text
  end
end
| 27.5 | 72 | 0.812121 |
286ade573083f847b36de63279f475f7ba422d26 | 1,430 | #======================================================================
# VXA_DefaultScripts.rb
#======================================================================
# Stub definitions of default RPGMaker script objects used by the
# OmoSystem module.
#======================================================================
require 'rpg/base_item'
require 'rpg/state'
# Stub state database. Index 0 is nil because RPG Maker state IDs are
# 1-based. States 1-4 are the escalating bladder-urge tiers and 5-8 the
# bowel-urge tiers; OmoSystem finds them by parsing the <OmoSystem:...>
# note tags.
$data_states = [
  nil,
  begin; s = RPG::State.new; s.id = 1; s.note = "<OmoSystem:urge:bladder:need>"; s; end,
  begin; s = RPG::State.new; s.id = 2; s.note = "<OmoSystem:urge:bladder:desperate>"; s; end,
  begin; s = RPG::State.new; s.id = 3; s.note = "<OmoSystem:urge:bladder:bursting>"; s; end,
  begin; s = RPG::State.new; s.id = 4; s.note = "<OmoSystem:urge:bladder:leaking>"; s; end,
  begin; s = RPG::State.new; s.id = 5; s.note = "<OmoSystem:urge:bowel:need>"; s; end,
  begin; s = RPG::State.new; s.id = 6; s.note = "<OmoSystem:urge:bowel:desperate>"; s; end,
  begin; s = RPG::State.new; s.id = 7; s.note = "<OmoSystem:urge:bowel:bursting>"; s; end,
  begin; s = RPG::State.new; s.id = 8; s.note = "<OmoSystem:urge:bowel:leaking>"; s; end
]
# Minimal stand-in for the engine's Game_Battler base class.
class Game_Battler
  attr_accessor :id
end
# Minimal stand-in for RPG Maker VX Ace's Game_Actor. All methods are
# intentionally empty stubs: OmoSystem only needs them to exist so its
# aliases/overrides have something to hook onto.
class Game_Actor < Game_Battler
  attr_accessor :actor
  def setup(actor_id)
  end
  # NOTE(review): the engine's real state? takes a state id argument; this
  # stub takes none — confirm OmoSystem callers never pass one.
  def state?
  end
  def add_state(state)
  end
  def remove_state(state)
  end
  # Hook invoked by the engine each time the player takes a step.
  def on_player_walk
  end
end
# Stub of the engine's DataManager; database loading is a no-op here
# because $data_states is populated directly above.
module DataManager
  def self.load_normal_database
  end
end
6ae0fed0510941d15edeb24aad32be017721ba55 | 735 |
# CocoaPods spec for the TiPixelReader Titanium iOS module.
Pod::Spec.new do |s|
  s.name         = "TiPixelReader"
  s.version      = "1.0.0"
  s.summary      = "The TiPixelReader Titanium module."
  s.description  = <<-DESC
                     The TiPixelReader Titanium module.
                   DESC
  s.homepage     = "https://example.com"
  s.license      = { :type => "Apache 2", :file => "LICENSE" }
  s.author       = 'Author'
  s.platform     = :ios
  s.ios.deployment_target = '8.0'
  # NOTE(review): placeholder source URL — fill in before publishing.
  s.source       = { :git => "https://github.com/<organization>/<repository>.git" }
  s.ios.weak_frameworks = 'UIKit', 'Foundation'
  s.ios.dependency 'TitaniumKit'
  s.public_header_files = 'Classes/*.h'
  s.source_files = 'Classes/*.{h,m}'
end
module Teamstuff
  # Gem version (Semantic Versioning). Frozen so the constant cannot be
  # mutated in place (Style/MutableConstant).
  VERSION = "0.1.0".freeze
end
| 10.25 | 19 | 0.682927 |
# frozen_string_literal: true
module Consent
  module Rspec
    # @private
    # RSpec matcher backing view expectations: checks that a permission
    # subject defines the given view, optionally with specific conditions.
    #
    # Fixes over the previous version:
    # * `failure_message_base` interpolated a garbled format string (stray
    #   backticks/backslash inside the literal) — restored to one clean
    #   format string.
    # * The "available views" branch referenced the undefined local
    #   `subject_key` (NameError) instead of `@subject_key`.
    # * `description` returned nil when no conditions were set; it now
    #   always returns a human-readable description.
    class ConsentView
      def initialize(view_key, conditions)
        @conditions = comparable_conditions(conditions) if conditions
        @view_key = view_key
      end

      # Stores the context used when building conditions. Returns self for
      # chaining.
      def to(*context)
        @context = context
        self
      end

      # Human-readable matcher description used in spec output.
      def description
        message = "consents view #{@view_key}"
        @conditions ? "#{message} with conditions #{@conditions}" : message
      end

      # Overrides the expected conditions. Returns self for chaining.
      def with_conditions(conditions)
        @conditions = comparable_conditions(conditions)
        self
      end

      # True when any subject registered under +subject_key+ defines the
      # view with the expected conditions.
      def matches?(subject_key)
        @subject_key = subject_key
        @target = Consent.find_subjects(subject_key)
                         .map { |subject| subject.views[@view_key]&.conditions(*@context) }
                         .compact
                         .map(&method(:comparable_conditions))
        @target.include?(@conditions)
      end

      def failure_message
        failure_message_base 'to'
      end

      def failure_message_when_negated
        failure_message_base 'to not'
      end

      private

      # Normalizes conditions so ActiveRecord relations compare by SQL.
      def comparable_conditions(conditions)
        return conditions.to_sql if conditions.respond_to?(:to_sql)

        conditions
      end

      # Builds the failure message; +failure+ is 'to' or 'to not'.
      def failure_message_base(failure)
        message = format(
          'expected %<skey>s (%<sclass>s) %<fail>s provide view %<view>s with %<conditions>p, but',
          skey: @subject_key.to_s, sclass: @subject_key.class,
          view: @view_key, conditions: @conditions, fail: failure
        )
        if @target.any?
          format('%<message>s conditions are %<conditions>p', message: message, conditions: @target)
        else
          actual_views = Consent.find_subjects(@subject_key)
                                .map(&:views)
                                .map(&:keys).flatten
          format('%<message>s available views are %<views>p', message: message, views: actual_views)
        end
      end
    end
  end
end
require 'bundler/setup'
require File.expand_path("../../test/dummy/config/environment", __FILE__)
Bundler.setup
require 'rspec/rails'
require 'factory_girl'
Dir[Rails.root.join("spec/factories/*.rb")].each { |f| require f }
# Shared RSpec settings for the dummy-app test suite.
RSpec.configure do |config|
  config.fixture_path = "#{::Rails.root}/spec/fixtures"
  # Allows bare `create`/`build` calls without the FactoryGirl prefix.
  config.include FactoryGirl::Syntax::Methods
end
| 25.785714 | 73 | 0.742382 |
1882439d63710bbed4634aa3d873b76087933c7d | 8,182 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: google/cloud/retail/v2alpha/product_service.proto for package 'Google.Cloud.Retail.V2alpha'
# Original file comments:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'grpc'
require 'google/cloud/retail/v2alpha/product_service_pb'
module Google
  module Cloud
    module Retail
      module V2alpha
        module ProductService
          # Service for ingesting [Product][google.cloud.retail.v2alpha.Product]
          # information of the customer's website.
          class Service
            # Generated gRPC service definition: wires protobuf
            # (de)serialization into the generic gRPC machinery.
            include ::GRPC::GenericService
            self.marshal_class_method = :encode
            self.unmarshal_class_method = :decode
            self.service_name = 'google.cloud.retail.v2alpha.ProductService'
            # Creates a [Product][google.cloud.retail.v2alpha.Product].
            rpc :CreateProduct, ::Google::Cloud::Retail::V2alpha::CreateProductRequest, ::Google::Cloud::Retail::V2alpha::Product
            # Gets a [Product][google.cloud.retail.v2alpha.Product].
            rpc :GetProduct, ::Google::Cloud::Retail::V2alpha::GetProductRequest, ::Google::Cloud::Retail::V2alpha::Product
            # Gets a list of [Product][google.cloud.retail.v2alpha.Product]s.
            rpc :ListProducts, ::Google::Cloud::Retail::V2alpha::ListProductsRequest, ::Google::Cloud::Retail::V2alpha::ListProductsResponse
            # Updates a [Product][google.cloud.retail.v2alpha.Product].
            rpc :UpdateProduct, ::Google::Cloud::Retail::V2alpha::UpdateProductRequest, ::Google::Cloud::Retail::V2alpha::Product
            # Deletes a [Product][google.cloud.retail.v2alpha.Product].
            rpc :DeleteProduct, ::Google::Cloud::Retail::V2alpha::DeleteProductRequest, ::Google::Protobuf::Empty
            # Bulk import of multiple [Product][google.cloud.retail.v2alpha.Product]s.
            #
            # Request processing may be synchronous. No partial updating is supported.
            # Non-existing items are created.
            #
            # Note that it is possible for a subset of the
            # [Product][google.cloud.retail.v2alpha.Product]s to be successfully updated.
            rpc :ImportProducts, ::Google::Cloud::Retail::V2alpha::ImportProductsRequest, ::Google::Longrunning::Operation
            # Updates inventory information for a
            # [Product][google.cloud.retail.v2alpha.Product] while respecting the last
            # update timestamps of each inventory field.
            #
            # This process is asynchronous and does not require the
            # [Product][google.cloud.retail.v2alpha.Product] to exist before updating
            # fulfillment information. If the request is valid, the update will be
            # enqueued and processed downstream. As a consequence, when a response is
            # returned, updates are not immediately manifested in the
            # [Product][google.cloud.retail.v2alpha.Product] queried by
            # [GetProduct][google.cloud.retail.v2alpha.ProductService.GetProduct] or
            # [ListProducts][google.cloud.retail.v2alpha.ProductService.ListProducts].
            #
            # When inventory is updated with
            # [CreateProduct][google.cloud.retail.v2alpha.ProductService.CreateProduct]
            # and
            # [UpdateProduct][google.cloud.retail.v2alpha.ProductService.UpdateProduct],
            # the specified inventory field value(s) will overwrite any existing value(s)
            # while ignoring the last update time for this field. Furthermore, the last
            # update time for the specified inventory fields will be overwritten to the
            # time of the
            # [CreateProduct][google.cloud.retail.v2alpha.ProductService.CreateProduct]
            # or
            # [UpdateProduct][google.cloud.retail.v2alpha.ProductService.UpdateProduct]
            # request.
            #
            # If no inventory fields are set in
            # [CreateProductRequest.product][google.cloud.retail.v2alpha.CreateProductRequest.product],
            # then any pre-existing inventory information for this product will be used.
            #
            # If no inventory fields are set in [UpdateProductRequest.set_mask][],
            # then any existing inventory information will be preserved.
            #
            # Pre-existing inventory information can only be updated with
            # [SetInventory][google.cloud.retail.v2alpha.ProductService.SetInventory],
            # [AddFulfillmentPlaces][google.cloud.retail.v2alpha.ProductService.AddFulfillmentPlaces],
            # and
            # [RemoveFulfillmentPlaces][google.cloud.retail.v2alpha.ProductService.RemoveFulfillmentPlaces].
            #
            # This feature is only available for users who have Retail Search enabled.
            # Please submit a form [here](https://cloud.google.com/contact) to contact
            # cloud sales if you are interested in using Retail Search.
            rpc :SetInventory, ::Google::Cloud::Retail::V2alpha::SetInventoryRequest, ::Google::Longrunning::Operation
            # Incrementally adds place IDs to
            # [Product.fulfillment_info.place_ids][google.cloud.retail.v2alpha.FulfillmentInfo.place_ids].
            #
            # This process is asynchronous and does not require the
            # [Product][google.cloud.retail.v2alpha.Product] to exist before updating
            # fulfillment information. If the request is valid, the update will be
            # enqueued and processed downstream. As a consequence, when a response is
            # returned, the added place IDs are not immediately manifested in the
            # [Product][google.cloud.retail.v2alpha.Product] queried by
            # [GetProduct][google.cloud.retail.v2alpha.ProductService.GetProduct] or
            # [ListProducts][google.cloud.retail.v2alpha.ProductService.ListProducts].
            #
            # This feature is only available for users who have Retail Search enabled.
            # Please submit a form [here](https://cloud.google.com/contact) to contact
            # cloud sales if you are interested in using Retail Search.
            rpc :AddFulfillmentPlaces, ::Google::Cloud::Retail::V2alpha::AddFulfillmentPlacesRequest, ::Google::Longrunning::Operation
            # Incrementally removes place IDs from a
            # [Product.fulfillment_info.place_ids][google.cloud.retail.v2alpha.FulfillmentInfo.place_ids].
            #
            # This process is asynchronous and does not require the
            # [Product][google.cloud.retail.v2alpha.Product] to exist before updating
            # fulfillment information. If the request is valid, the update will be
            # enqueued and processed downstream. As a consequence, when a response is
            # returned, the removed place IDs are not immediately manifested in the
            # [Product][google.cloud.retail.v2alpha.Product] queried by
            # [GetProduct][google.cloud.retail.v2alpha.ProductService.GetProduct] or
            # [ListProducts][google.cloud.retail.v2alpha.ProductService.ListProducts].
            #
            # This feature is only available for users who have Retail Search enabled.
            # Please submit a form [here](https://cloud.google.com/contact) to contact
            # cloud sales if you are interested in using Retail Search.
            rpc :RemoveFulfillmentPlaces, ::Google::Cloud::Retail::V2alpha::RemoveFulfillmentPlacesRequest, ::Google::Longrunning::Operation
          end
          # Concrete client stub class for this service; connect with
          # Stub.new(host, credentials).
          Stub = Service.rpc_stub_class
        end
      end
    end
  end
end
| 59.289855 | 140 | 0.671963 |
1c36e81f6a271207e612697a2180424045295e69 | 2,404 | require 'spec_helper'
# Specs for the project's #to_num conversion monkey patch: converts strings
# (dot- or comma-separated), numerics, booleans and joinable arrays into a
# number, returning nil for anything without a numeric representation.
context 'Numeric' do
  describe '#to_num' do
    context 'String' do
      context 'containing fixnum' do
        it 'converts to Fixnum' do
          expect('12'.to_num).to eq 12
          expect('-1'.to_num).to eq(-1)
        end
      end # on containing fixnum
      context 'containing dot separated float' do
        it 'converts string to float' do
          expect('12.5'.to_num).to eq 12.5
          expect('-5.75'.to_num).to eq(-5.75)
        end
      end # containing float
      context 'containing comma separated float' do
        # European-style decimal comma is accepted as well.
        it 'converts string to float' do
          expect('12,5'.to_num).to eq 12.5
          expect('-5,75'.to_num).to eq(-5.75)
        end
      end # containing comma separated float
      context 'containing no valid value' do
        it 'returns nil' do
          expect('4a'.to_num).to be nil
          expect('4.5.3'.to_num).to be nil
          expect('nan'.to_num).to be nil
        end
      end # containing no valid value
    end # String
    context 'Fixnum' do
      it 'returns same Fixnum' do
        expect(12.to_num).to eq 12
        expect(-2.to_num).to eq(-2)
      end
    end # Fixnum
    context 'Float' do
      it 'returns same float' do
        expect(12.3.to_num).to eq 12.3
        expect(-2.5.to_num).to eq(-2.5)
      end
    end # Float
    context 'TrueClass' do
      it 'returns 1' do
        expect(true.to_num).to eq 1
      end
    end # TrueClass
    context 'FalseClass' do
      it 'returns -1' do
        expect(false.to_num).to eq(-1)
      end
    end # FalseClass
    context 'Array' do
      # Elements are joined into one string first, then parsed as a number.
      context 'Array can be joined to valid number' do
        it 'returns the joined number' do
          expect([1, 2, 3, 4].to_num).to eq 1234
          expect([1, '.', 5].to_num).to eq 1.5
          expect([1, 1.5, 6, 7].to_num).to eq 11.567
        end
      end # Array can be joined to valid number
      context 'Array can not be joined to valid number' do
        it 'returns nil' do
          expect(['1', 'as', 2].to_num).to be nil
          expect([1, 1.5, 2.5, 3].to_num).to be nil
        end
      end # Array can not be joined to valid number
    end # Array
    context 'Object without numerical representation' do
      it 'returns nil' do
        expect(Object.new.to_num).to be nil
      end
    end # Object without numerical representation
  end # #to_num
end # Numbers
| 25.041667 | 58 | 0.571963 |
1d7b5b0f6e82510af7bc49d96aa287b6444a8515 | 10,179 | require 'fog/openstack' #see https://github.com/fog/fog/blob/master/lib/fog/openstack/docs/storage.md
require 'dragonfly'
require 'cgi'
require 'uri'
require 'securerandom'
# Make this datastore available to Dragonfly apps as `datastore :openstack_swift`.
Dragonfly::App.register_datastore(:openstack_swift){ Dragonfly::OpenStackDataStore }
module Dragonfly
  # Dragonfly datastore backed by OpenStack Swift object storage (via fog).
  # Content is written as Swift objects (meta serialized into object
  # metadata headers) and served back through signed temp URLs.
  class OpenStackDataStore
    # Exceptions
    #class NotConfigured < RuntimeError; end
    attr_accessor :container_name,
                  :fog_storage_options, :openstack_options, :storage_headers,
                  :access_control_allow_origin, :default_expires_in,
                  :url_scheme, :url_host, :url_port, :root_path
    attr_writer :set_meta_temp_url_key_on_startup
    # opts - configuration Hash. Required: :openstack (Hash of fog/OpenStack
    # credentials; keys are prefixed with "openstack_" for fog). Optional:
    # :container, :environment, :root_path, :url_scheme/:url_host/:url_port,
    # :storage_headers, :fog_storage_options, :access_control_allow_origin,
    # :set_meta_temp_url_key_on_startup.
    def initialize(opts={})
      # case opts
      # when Hash then opts
      # when String
      #   file = File.expand_path(opts, __FILE__)
      #   fail "#{opts} is not a file" unless File.exists?(file) && !File.directory?(file)
      #   require 'yaml'
      #   opts = YAML.load(opts)
      # end
      fail "opts must be an Hash" unless opts.is_a?(Hash)
      ### Symbolizing keys
      opts = opts.inject({}) do |hash, (key, value)|
        hash[(key.to_sym rescue key)] = value
        hash
      end
      fail "opts must contain :openstack & must be an Hash" unless opts.key?(:openstack) && opts[:openstack].is_a?(Hash)
      @environment = opts.delete(:environment) || 'development'
      # Default container name: derived from the Rails app name when inside
      # Rails, otherwise a generic fallback; always suffixed by environment.
      @container_name = if opts[:container]
                          opts[:container]
                        elsif defined?(::Rails)
                          "#{Rails.application.class.name.split('::').first.underscore}-#{@environment}"
                        else
                          "dragonfly-system-#{@environment}"
                        end
      @fog_storage_options = opts[:fog_storage_options] || {}
      # Prefix each credential key with "openstack_" as Fog::Storage expects.
      @openstack_options = opts[:openstack].inject({}) do |memo, item|
        key, value = item
        memo[:"openstack_#{key}"] = value
        memo
      end
      # Temp URL lifetime in seconds; defaults to one hour.
      @default_expires_in = @openstack_options.delete(:openstack_temp_url_expires_in).to_i.nonzero? || 3600
      @access_control_allow_origin = opts[:access_control_allow_origin] || '*'
      @storage_headers = opts[:storage_headers] || {}
      @url_scheme = opts[:url_scheme] || 'http'
      @url_host = opts[:url_host]
      @url_port = opts[:url_port]
      @root_path = opts[:root_path]
      @set_meta_temp_url_key_on_startup = opts.fetch(:set_meta_temp_url_key_on_startup, false)
    end
    # Whether the account temp-url key should be POSTed when the fog
    # connection is first built (see #storage).
    def set_meta_temp_url_key_on_startup?
      @set_meta_temp_url_key_on_startup
    end
    def environment
      @environment ||= ENV['RACK_ENV'] || ENV['RAILS_ENV'] || 'development'
    end
    def environment=(environment)
      @environment = environment ? environment.to_s.downcase : nil
    end
    # Uploads content to Swift under opts[:path] or a generated uid.
    # Returns the uid (relative to root_path) under which it was stored.
    def write(content, opts={})
      #TODO: Upload large files. See https://github.com/fog/fog/blob/master/lib/fog/openstack/docs/storage.md#upload_large_files
      uid = opts[:path] || generate_uid(content.name || 'file')
      headers = {'x-original-name' => content.name}
      headers.merge!(opts[:headers]) if opts[:headers]
      # thread = Thread.new do
      #   sleep 30
      Dragonfly.info "Uploading #{content.name} (#{content.mime_type}) file on openstack swift"
      rescuing_socket_errors do
        #content.data....
        content.file do |f|
          container.files.create({
            :key => full_path(uid),
            :body => f,
            :content_type => content.mime_type,
            :metadata => full_storage_headers(headers, content.meta),
            :access_control_allow_origin => access_control_allow_origin
          })
        end
      end
      # end
      uid
    end
    # Fetches the object for uid. Returns [body, meta] or nil when missing.
    def read(uid)
      file = rescuing_socket_errors{ container.files.get(full_path(uid)) }
      raise Excon::Errors::NotFound.new("#{uid} not found") unless file
      [
        file.body, # can be a String, File, Pathname, Tempfile
        headers_to_meta(file.metadata) # the same meta Hash that was stored with write
      ]
    rescue Excon::Errors::NotFound => e
      Dragonfly.warn("#{self.class.name} read error: #{e}")
      nil # return nil if not found
    end
    # Deletes the object for uid on a detached background thread.
    # NOTE(review): the thread is never joined, so the call returns before
    # the deletion happens; errors other than NotFound/Conflict surface only
    # via Thread.report_on_exception. The rescuing_socket_errors wrapper here
    # only covers Thread.new itself, not work done inside the thread.
    def destroy(uid)
      rescuing_socket_errors do
        Thread.new do
          begin
            file = container.files.get(full_path(uid))
            raise Excon::Errors::NotFound.new("#{full_path(uid)} doesn't exist") unless file
            file.destroy
          rescue Excon::Errors::NotFound, Excon::Errors::Conflict => e
            Dragonfly.warn("#{self.class.name} destroy error: #{e}")
          end
        end
        # Thread.new do
        #   begin
        #     object_key = full_path(uid)
        #     Dragonfly.info("Deleting object #{object_key} inside #{container.key} with Thread (pid: #{Process.pid}")
        #     storage.delete_object(container.key, object_key)
        #   rescue => e
        #     Dragonfly.warn("#{object_key} doesn't exist, can't delete object: #{e.inspect}")
        #     raise Excon::Errors::NotFound.new("#{object_key} doesn't exist")
        #   end
        # end.join
      end
    end
    # Builds a signed temp URL for uid. opts: :expires_in (seconds),
    # :scheme/:host/:port overrides, :query (Hash/Array/String appended to
    # the query string), :inline (appends "&inline").
    def url_for(uid, opts={})
      #ensure_meta_temp_url_key! if set_meta_temp_url_key_on_startup
      file_key = full_path(uid)
      expires_in = (opts[:expires_in].to_i.nonzero?) || @default_expires_in
      expires_at = Time.now.to_i + expires_in
      #file = container.files.get(file_key)
      #return nil unless file
      #file.url(expires_at)
      opts = {
        scheme: @url_scheme,
        host: @url_host,
        port: @url_port,
      }.merge(opts)
      method = opts[:scheme] == 'https' ? :get_object_https_url : :get_object_http_url
      url = storage.send(method, container_name, file_key, expires_at, opts)
      if opts[:query]
        opts[:query] = case opts[:query]
                       when Hash, Array then URI.encode_www_form(opts[:query])
                       else opts[:query].to_s
                       end
        url = "#{url}&#{opts[:query]}"
      end
      if opts[:inline]
        url = "#{url}&inline"
      end
      url
    end
    # Memoized fog storage connection. Construction is retried up to 10
    # times; on first successful build the account temp-url key is POSTed
    # when configured to do so.
    def storage
      retry_times = 0
      @storage ||= begin
        begin
          fog_storage = ::Fog::Storage.new(full_storage_options)
        rescue => e
          should_retry = retry_times < 10
          Dragonfly.warn("#{e.class}: #{e.message} (#{should_retry ? " RETRYING #{retry_times}" : ''})")
          retry_times += 1
          # puts "retrying #{retry_times}"
          retry if should_retry
        end
        if @openstack_options[:openstack_temp_url_key] && set_meta_temp_url_key_on_startup?
          set_meta_temp_url_key!(storage_instance: fog_storage)
        end
        fog_storage
      end
      @storage
    end
    # POSTs the account-level temp-url key (required for signed temp URLs).
    # Idempotent unless force is true; remembers success in
    # @_meta_temp_url_key_sent and returns that flag.
    def set_meta_temp_url_key!(key = nil, force = false, storage_instance: nil)
      return true if @_meta_temp_url_key_sent && !force
      key ||= @openstack_options[:openstack_temp_url_key]
      if key
        begin
          storage_instance ||= storage
          storage_instance.post_set_meta_temp_url_key(@openstack_options[:openstack_temp_url_key])
          # request(
          #   :expects => [201, 202, 204],
          #   :method => 'POST',
          #   :headers => {
          #     'X-Account-Meta-Temp-Url-Key' => @openstack_options[:openstack_temp_url_key],
          #     'X-Container-Meta-Access-Control-Allow-Origin' => '*'
          #   }
          # )
          @_meta_temp_url_key_sent = true
        rescue => e
          Dragonfly.warn("#{e.class}: #{e.message}")
          @_meta_temp_url_key_sent = false
        end
      end
      @_meta_temp_url_key_sent
    end
    alias ensure_meta_temp_url_key! set_meta_temp_url_key!
    def meta_temp_url_key_sent?
      @_meta_temp_url_key_sent
    end
    # The fog directory object for container_name (created on first access).
    def container
      ensure_container_initialized
      @container ||= begin
        rescuing_socket_errors{ storage.directories.get(container_name) }
      end
    end
    def container_exists?
      !rescuing_socket_errors{ storage.directories.get(container_name) }.nil?
    rescue Excon::Errors::NotFound => e
      false
    end
    private
    # Creates the container on first use if it does not exist yet.
    def ensure_container_initialized
      unless @container_initialized
        rescuing_socket_errors{ storage.directories.create(:key => container_name) } unless container_exists?
        @container_initialized = true
      end
    end
    # Timestamped, UUID-namespaced object key ending with the original name.
    def generate_uid(name)
      "#{Time.now.strftime '%Y/%m/%d/%H/%M/%S'}/#{SecureRandom.uuid}/#{name}"
    end
    # Object key for a uid, prefixed with root_path when configured.
    def full_path(uid)
      File.join *[root_path, uid].compact
    end
    # Credentials plus fog options with the OpenStack provider forced on.
    def full_storage_options
      openstack_options.merge(fog_storage_options.merge({:provider => 'OpenStack'}).
                                reject { |_name, value| value.nil? })
    end
    # Static headers + serialized meta + per-write headers, in that priority.
    def full_storage_headers(headers, meta)
      storage_headers.merge(meta_to_headers(meta)).merge(headers)
    end
    # Rebuilds the meta Hash stored by #write from object metadata headers.
    # Falls back to the legacy marshal-encoded header when present.
    def headers_to_meta(headers)
      json = headers['x-openstack-meta-json']
      if json && !json.empty?
        unescape_meta_values(Serializer.json_decode(json))
      elsif marshal_data = headers['x-openstack-meta-extra']
        Utils.stringify_keys(Serializer.marshal_b64_decode(marshal_data))
      end
    end
    def meta_to_headers(meta)
      meta = escape_meta_values(meta)
      {'x-openstack-meta-json' => Serializer.json_encode(meta)}
    end
    # Runs the block, resetting the fog connection and cached container and
    # retrying once if the socket dropped.
    def rescuing_socket_errors(&block)
      yield
    rescue Excon::Errors::SocketError => e
      storage.reload
      @container = nil
      yield
    end
    # CGI-escape string values so they survive the HTTP header round trip.
    def escape_meta_values(meta)
      meta.inject({}) {|hash, (key, value)|
        hash[key] = value.is_a?(String) ? CGI.escape(value) : value
        hash
      }
    end
    def unescape_meta_values(meta)
      meta.inject({}) {|hash, (key, value)|
        hash[key] = value.is_a?(String) ? CGI.unescape(value) : value
        hash
      }
    end
  end
end
| 33.93 | 128 | 0.589842 |
f7d139efe8566511f540c435ab25fc7185bc1e76 | 2,724 | class Plplot < Formula
desc "Cross-platform software package for creating scientific plots"
homepage "https://plplot.sourceforge.io"
url "https://downloads.sourceforge.net/project/plplot/plplot/5.15.0%20Source/plplot-5.15.0.tar.gz"
sha256 "b92de4d8f626a9b20c84fc94f4f6a9976edd76e33fb1eae44f6804bdcc628c7b"
revision 3
bottle do
sha256 arm64_big_sur: "8d088f536d53da047d2031923ef00ceaa57745c80fa59913b1bebe89b2f3b1b7"
sha256 big_sur: "68234fbf98737c25674da67d4a207a52bb19119e3e71eaf6d4e5948167502fc5"
sha256 catalina: "3f1ac3dcde8f3eec89b1abc4237e0eb386a2e8c72f08515c52f185e083fd3c73"
sha256 mojave: "6adc5277a905f281ec0498843ecfcf16a6300fa656f749173ac7c8af28dab157"
end
depends_on "cmake" => :build
depends_on "pkg-config" => :build
depends_on "cairo"
depends_on "freetype"
depends_on "gcc" # for gfortran
depends_on "pango"
def install
args = std_cmake_args + %w[
-DPL_HAVE_QHULL=OFF
-DENABLE_ada=OFF
-DENABLE_d=OFF
-DENABLE_octave=OFF
-DENABLE_qt=OFF
-DENABLE_lua=OFF
-DENABLE_tk=OFF
-DENABLE_python=OFF
-DENABLE_tcl=OFF
-DPLD_xcairo=OFF
-DPLD_wxwidgets=OFF
-DENABLE_wxwidgets=OFF
-DENABLE_DYNDRIVERS=OFF
-DENABLE_java=OFF
-DPLD_xwin=OFF
]
# std_cmake_args tries to set CMAKE_INSTALL_LIBDIR to a prefix-relative
# directory, but plplot's cmake scripts don't like that
args.map! { |x| x.start_with?("-DCMAKE_INSTALL_LIBDIR=") ? "-DCMAKE_INSTALL_LIBDIR=#{lib}" : x }
# Also make sure it already exists:
lib.mkdir
mkdir "plplot-build" do
system "cmake", "..", *args
system "make"
# These example files end up with references to the Homebrew build
# shims unless we tweak them:
inreplace "examples/c/Makefile.examples", %r{^CC = .*/}, "CC = "
inreplace "examples/c++/Makefile.examples", %r{^CXX = .*/}, "CXX = "
system "make", "install"
end
# fix rpaths
cd (lib.to_s) do
Dir["*.dylib"].select { |f| File.ftype(f) == "file" }.each do |f|
MachO::Tools.dylibs(f).select { |d| d.start_with?("@rpath") }.each do |d|
d_new = d.sub("@rpath", opt_lib.to_s)
MachO::Tools.change_install_name(f, d, d_new)
end
end
end
end
test do
(testpath/"test.c").write <<~EOS
#include <plplot.h>
int main(int argc, char *argv[]) {
plparseopts(&argc, argv, PL_PARSE_FULL);
plsdev("extcairo");
plinit();
return 0;
}
EOS
system ENV.cc, "test.c", "-o", "test", "-I#{include}/plplot", "-L#{lib}",
"-lcsirocsa", "-lm", "-lplplot", "-lqsastime"
system "./test"
end
end
| 32.428571 | 100 | 0.64978 |
acc1a53fea1e19e897d6313b0ae34ee721491d65 | 4,445 | # frozen_string_literal: true
require "spec_helper"
# Contract specs: ProductContract must reject payloads with missing/invalid
# keys (reporting per-key errors) and accept a fully valid product payload.
RSpec.describe AcaEntities::BenefitMarkets::Products::ProductContract do
  let(:benefit_market_kind) { :benefit_market_kind }
  let(:effective_date) { Date.today.next_month }
  let(:effective_period) { effective_date..(effective_date.next_year) }
  let(:application_period) { effective_date..effective_date.next_year.prev_day }
  let(:hbx_id) { 'Hbx id'}
  let(:title) { 'Title' }
  let(:description) { 'Description' }
  let(:product_package_kinds) { [:product_package_kinds] }
  let(:kind) { :health }
  let(:provider_directory_url) { 'provider_directory_url' }
  let(:is_reference_plan_eligible) { true }
  let(:deductible) { 'deductible' }
  let(:family_deductible) { 'family_deductible' }
  let(:network_information) { 'network_information'}
  let(:nationwide) { true }
  let(:dc_in_network) { false }
  let(:hsa_eligibility) { true }
  let(:sbc_document) do
    {
      title: 'title', creator: 'creator', publisher: 'publisher', format: 'file_format',
      language: 'language', type: 'type', source: 'source'
    }
  end
  let(:issuer_profile_reference) do
    {
      hbx_id: '1234', fein: '123333333', hbx_carrier_id: '333333',
      name: 'Delta Dental', abbrev: 'DDPA'
    }
  end
  let(:service_area) do
    {
      active_year: 2020, issuer_hios_id: 'hios_id', issuer_provided_title: 'Issuer Provided Title',
      issuer_provided_code: 'Issuer Provided code', county_zip_ids: [{}], covered_states: ['DC'],
      issuer_profile_reference: issuer_profile_reference
    }
  end
  let(:product_reference) do
    {
      hios_id: '92479DC0020002', name: 'Access PPO', active_year: 2020, is_dental_only: false,
      metal_level: 'gold', product_kind: 'health', benefit_market_kind: 'aca_shop', ehb_percent: '0.0',
      issuer_profile_reference: issuer_profile_reference
    }
  end
  let(:premium_tables) { [{ effective_period: effective_period }] }
  # Valid payload except :kind and :premium_tables are absent.
  let(:missing_params) do
    {
      benefit_market_kind: benefit_market_kind, application_period: application_period, product_reference: product_reference,
      hbx_id: hbx_id, title: title, description: description, product_package_kinds: product_package_kinds,
      issuer_profile_reference: issuer_profile_reference, premium_ages: 19..60, provider_directory_url: provider_directory_url,
      is_reference_plan_eligible: is_reference_plan_eligible, deductible: deductible, family_deductible: family_deductible,
      issuer_assigned_id: 'issuer_assigned_id', service_area: service_area, network_information: network_information,
      nationwide: nationwide, dc_in_network: dc_in_network, sbc_document: sbc_document
    }
  end
  # :premium_tables given as a String instead of an Array to trigger a type error.
  let(:invalid_params) { missing_params.merge({ kind: kind, premium_tables: 'premium_tables' }) }
  let(:error_message1) { { :premium_tables => ["is missing"], :kind => ["is missing"] } }
  let(:error_message2) { { :premium_tables => ["must be an array"] } }
  context "Given invalid required parameters" do
    context "sending with missing parameters should fail validation with errors" do
      it { expect(subject.call(missing_params).failure?).to be_truthy }
      it { expect(subject.call(missing_params).errors.to_h).to eq error_message1 }
    end
    context "sending with invalid parameters should fail validation with errors" do
      it { expect(subject.call(invalid_params).failure?).to be_truthy }
      it { expect(subject.call(invalid_params).errors.to_h).to eq error_message2 }
    end
  end
  context "Given valid required parameters" do
    context "with all/required params" do
      let(:premium_tuples) { { age: 12, cost: 227.07 } }
      let(:rating_area_params) {{ county_zip_ids: [{}], exchange_provided_code: 'code', covered_states: [{}], active_year: 2020 }}
      let(:premium_tables) { [{ effective_period: effective_period, premium_tuples: [premium_tuples], rating_area: rating_area_params }] }
      let(:all_params) { missing_params.merge({ kind: kind, premium_tables: premium_tables })}
      it "should pass validation" do
        expect(subject.call(all_params).success?).to be_truthy
        expect(subject.call(all_params).to_h).to eq all_params
      end
    end
  end
end | 45.824742 | 140 | 0.67649 |
389c7710a4d1dddc772a034511d7539b47fcdeb9 | 9,752 | require 'chronic'
require 'sexp_processor'
require 'ruby_parser'
require 'json'
require 'hirb'
require 'fileutils'
require_relative 'scm/source_control'
require_relative 'scm/git_analyzer'
require_relative 'scm/svn_analyzer'
require_relative 'scm/hg_analyzer'
require_relative 'scm/bzr_analyzer'
require_relative 'location_mapping'
require_relative 'history'
require_relative 'options'
module Churn
  # The work horse of the churn library.
  # This class takes user input, determines the SCM the user is using.
  # It then determines changes made during this revision.
  # Finally it reads all the changes from previous revisions and displays human
  # readable output on the command line.
  # It can also output a yaml format readable by other tools such as metric_fu
  # and Caliper.
  class ChurnCalculator
    # initialize the churn calculator object
    def initialize(options={})
      @churn_options = ChurnOptions.new.set_options(options)
      @minimum_churn_count = @churn_options.minimum_churn_count
      @ignores = @churn_options.ignores
      @source_control = SourceControl.set_source_control(@churn_options.start_date)
      @changes = {}
      @revision_changes = {}
      @class_changes = {}
      @method_changes = {}
    end
    # prepares the data for the given project to be reported.
    # reads git/svn logs analyzes the output, generates a report and either
    # formats as a nice string or returns hash.
    # @param [Boolean] print to return the data, true for string or false for hash
    # @return [Object] returns either a pretty string or a hash representing the
    # churn of the project
    def report(print = true)
      if @churn_options.history
        generate_history
      else
        emit
        analyze
        print ? self.to_s : self.to_h
      end
    end
    # this method generates the past history of a churn project from first
    # commit to current running the report for oldest commits first so they
    # are built up correctly
    def generate_history
      history_starting_point = Chronic.parse(@churn_options.history)
      @source_control.generate_history(history_starting_point)
      "churn history complete, this has manipulated your source control system so please make sure you are back on HEAD where you expect to be"
    end
    # Emits various data from source control to be analyzed later...
    # Currently this is broken up like this as a throwback to metric_fu
    def emit
      @changes = reject_ignored_files(reject_low_churn_files(parse_log_for_changes))
      @revisions = parse_log_for_revision_changes
    end
    # Analyze the source control data, filter, sort, and find more information
    # on the edited files
    def analyze
      @changes = sort_changes(@changes)
      @changes = filter_changes(@changes)
      @changes = @changes.map {|file_path, times_changed| {:file_path => file_path, :times_changed => times_changed }}
      calculate_revision_changes
      @method_changes = sort_changes(@method_changes)
      @method_changes = @method_changes.map {|method, times_changed| {'method' => method, 'times_changed' => times_changed }}
      @class_changes = sort_changes(@class_changes)
      @class_changes = @class_changes.map {|klass, times_changed| {'klass' => klass, 'times_changed' => times_changed }}
    end
    # collect all the data into a single hash data structure.
    def to_h
      hash = {:churn => {:changes => @changes}}
      hash[:churn][:class_churn] = @class_changes
      hash[:churn][:method_churn] = @method_changes
      #detail the most recent changes made this revision
      first_revision = @revisions.first
      first_revision_changes = @revision_changes[first_revision]
      if first_revision_changes
        changes = first_revision_changes
        hash[:churn][:changed_files] = changes[:files]
        hash[:churn][:changed_classes] = changes[:classes]
        hash[:churn][:changed_methods] = changes[:methods]
      end
      # TODO crappy place to do this but save hash to revision file but
      # while entirely under metric_fu only choice
      ChurnHistory.store_revision_history(first_revision, hash, @churn_options.data_directory)
      hash
    end
    def to_s
      ChurnCalculator.to_s(to_h[:churn])
    end
    # Pretty print the data as a string for the user
    def self.to_s(hash)
      result = separator
      result +="* Revision Changes \n"
      result += separator
      result += display_array("Files", hash[:changed_files], :fields=>[:to_str], :headers=>{:to_str=>'file'})
      result += "\n"
      result += display_array("Classes", hash[:changed_classes])
      result += "\n"
      result += display_array("Methods", hash[:changed_methods]) + "\n"
      result += separator
      result +="* Project Churn \n"
      result += separator
      result += "\n"
      result += display_array("Files", hash[:changes])
      result += "\n"
      class_churn = collect_items(hash[:class_churn], 'klass')
      result += display_array("Classes", class_churn)
      result += "\n"
      method_churn = collect_items(hash[:method_churn], 'method')
      result += display_array("Methods", method_churn)
    end
    private
    # Flatten [{match => x, ...}, ...] rows so the matched value's own keys
    # are merged into the row for tabular display.
    def self.collect_items(collection, match)
      return [] unless collection
      collection.map {|item| (item.delete(match) || {}).merge(item) }
    end
    # Order [name, count] pairs by count, most churned first.
    def sort_changes(changes)
      changes.to_a.sort! {|first,second| second[1] <=> first[1]}
    end
    # Keep only files matching the configured extension and/or path prefix.
    def filter_changes(changes)
      if @churn_options.file_extension && !@churn_options.file_extension.empty?
        changes = changes.select { |file_path, _revision_count| file_path =~ /\.#{@churn_options.file_extension}\z/ }
      end
      if @churn_options.file_prefix && !@churn_options.file_prefix.empty?
        changes = changes.select { |file_path, _revision_count| file_path =~ /\A#{@churn_options.file_prefix}/ }
      end
      changes
    end
    # Only Ruby files are parsed for class/method level churn.
    def filters
      /.*\.rb/
    end
    # Render a titled table for the given rows; empty string when no rows.
    def self.display_array(title, array, options={})
      response = ''
      if array && array.length > 0
        response = "#{title}\n"
        response << Hirb::Helpers::AutoTable.render(array, options.merge(:description=>false)) + "\n"
      end
      response
    end
    def self.separator
      "*"*70+"\n"
    end
    # Walk all revisions, tallying class- and method-level churn; the newest
    # revision is parsed fresh, older ones are loaded from stored history.
    def calculate_revision_changes
      @revisions.each do |revision|
        if revision == @revisions.first
          #can't iterate through all the changes and tally them up
          #it only has the current files not the files at the time of the revision
          #parsing requires the files
          changed_files, changed_classes, changed_methods = calculate_revision_data(revision)
        else
          changed_files, changed_classes, changed_methods = ChurnHistory.load_revision_data(revision, @churn_options.data_directory)
        end
        calculate_changes!(changed_methods, @method_changes) if changed_methods
        calculate_changes!(changed_classes, @class_changes) if changed_classes
        @revision_changes[revision] = { :files => changed_files, :classes => changed_classes, :methods => changed_methods }
      end
    end
    # Parse the SCM diff for one revision into changed files/classes/methods.
    def calculate_revision_data(revision)
      changed_files = parse_logs_for_updated_files(revision, @revisions)
      changed_classes = []
      changed_methods = []
      changed_files.each do |file_changes|
        if file_changes.first =~ filters
          classes, methods = get_changes(file_changes)
          changed_classes += classes
          changed_methods += methods
        end
      end
      changed_files = changed_files.map { |file, lines| file }
      [changed_files, changed_classes, changed_methods]
    end
    # Increment the tally in total_changes for each changed object.
    def calculate_changes!(changed_objs, total_changes)
      if changed_objs
        changed_objs.each do |change|
          total_changes.include?(change) ? total_changes[change] = total_changes[change]+1 : total_changes[change] = 1
        end
      end
      total_changes
    end
    # Map one file's changed line ranges onto the classes and methods whose
    # source spans those lines. Parse failures yield empty results.
    def get_changes(change)
      file = change.first
      breakdown = LocationMapping.new
      breakdown.get_info(file)
      changes = change.last
      classes = changes_for_type(changes, breakdown.klasses_collection)
      methods = changes_for_type(changes, breakdown.methods_collection)
      classes = classes.map{ |klass| {'file' => file, 'klass' => klass} }
      methods = methods.map{ |method| {'file' => file, 'klass' => get_klass_for(method), 'method' => method} }
      [classes, methods]
    rescue
      [[],[]]
    end
    # "Foo#bar" / "Foo.bar" => "Foo"
    def get_klass_for(method)
      method.gsub(/(#|\.).*/,'')
    end
    # Return items (classes or methods) whose line spans intersect any of
    # the changed line ranges.
    def changes_for_type(changes, item_collection)
      changed_items = []
      item_collection.each_pair do |item, item_lines|
        item_lines = item_lines[0].to_a
        changes.each do |change_range|
          item_lines.each do |line|
            changed_items << item if change_range.include?(line) && !changed_items.include?(item)
          end
        end
      end
      changed_items
    end
    # Tally how many log lines (file touches) each file has.
    def parse_log_for_changes
      changes = Hash.new(0)
      logs = @source_control.get_logs
      logs.each do |line|
        changes[line] += 1
      end
      changes
    end
    def parse_log_for_revision_changes
      @source_control.get_revisions
    end
    # Files (with change ranges) updated in a revision, minus ignored ones.
    def parse_logs_for_updated_files(revision, revisions)
      files = @source_control.get_updated_files_change_info(revision, revisions)
      reject_ignored_files(files)
    end
    def reject_low_churn_files(files)
      files.reject{ |_, change_count| change_count < @minimum_churn_count }
    end
    def reject_ignored_files(files)
      files.reject{ |file, _| @ignores.any?{ |ignore| /#{ignore}/ =~ file } }
    end
  end
end
| 34.828571 | 143 | 0.672067 |
ac2f82eec29357c29bc0ce98799cf937b4a157c2 | 1,025 | require 'active_support/core_ext/hash/deep_merge'
module AxlsxStyler
  # Mixin for Axlsx::Cell: accumulates raw style hashes on the cell and
  # registers the cell with its workbook so the styles can be applied later.
  module Cell
    attr_accessor :raw_style

    # Deep-merges +style+ into this cell's accumulated raw style and marks
    # the cell as styled on the owning workbook.
    def add_style(style)
      self.raw_style ||= {}
      add_to_raw_style(style)
      workbook.add_styled_cell(self)
    end

    private

    # The workbook owning this cell, reached via its row and worksheet.
    def workbook
      row.worksheet.workbook
    end

    # Folds +style+ into raw_style. Borders get special treatment: when both
    # the accumulated style and the incoming one define a border, their edge
    # lists are unioned (a missing :edges list stands for all four edges).
    def add_to_raw_style(style)
      merged = raw_style.deep_merge(style)
      if with_border?(raw_style) && with_border?(style)
        edges = (raw_style[:border][:edges] || all_edges) |
                (style[:border][:edges] || all_edges)
        merged[:border][:edges] = edges.sort
      elsif with_border?(style)
        merged[:border] = style[:border]
      end
      self.raw_style = merged
    end

    def with_border?(style)
      !style[:border].nil?
    end

    def all_edges
      %i[top right bottom left]
    end
  end
end
Axlsx::Cell.send(:include, AxlsxStyler::Cell)
| 22.777778 | 101 | 0.657561 |
1cd8ef403cf9983d7db80aa98c1dc767afe3ce30 | 829 | cask "lens" do
# Empty suffix selects the Intel build; "-arm64" the Apple Silicon one.
arch = Hardware::CPU.intel? ? "" : "-arm64"

version "5.2.7-latest.20211110.1"

# Checksums are architecture-specific and must match the binary selected
# by the `arch` suffix in the url below.
if Hardware::CPU.intel?
  sha256 "51312def38e7c00030a285204ac47f6822ad1b31b93b0d8369947e4ab05cb07f"
else
  sha256 "8f220e3541c6e5a6d1524532e3a9cafe7e9c33fa9ae0a27c7646500df6af63fe"
end

url "https://api.k8slens.dev/binaries/Lens-#{version}#{arch}.dmg"
name "Lens"
desc "Kubernetes IDE"
homepage "https://k8slens.dev/"

# Version discovery reads the electron-builder metadata published for
# the macOS binaries.
livecheck do
  url "https://lens-binaries.s3.amazonaws.com/ide/latest-mac.yml"
  strategy :electron_builder
end

# The app manages its own updates.
auto_updates true

app "Lens.app"

# Files removed by `brew uninstall --zap`.
zap trash: [
  "~/Library/Application Support/Lens",
  "~/Library/Caches/Lens",
  "~/Library/Preferences/com.electron.kontena-lens.plist",
  "~/Library/Saved Application State/com.electron.kontena-lens.savedState",
]
end
| 25.121212 | 77 | 0.717732 |
38152f0fe21d01ef9fe48d835cb47e2e779d6575 | 2,606 | require 'spec_helper'
require 'data_magic'
# Covers DataMagic initialization when DATA_PATH contains no
# data.yml/data.yaml file, both with and without the ALLOW_MISSING_YML
# escape hatch.
describe "DataMagic #import_without_data_yaml" do
  describe "without ALLOW_MISSING_YML" do
    it "not found locally raises error" do
      ENV['DATA_PATH'] = './spec/fixtures/cities_without_yml'
      expect {
        DataMagic.init(load_now: true)
      }.to raise_error(IOError, "No data.y?ml found at ./spec/fixtures/cities_without_yml. Did you mean to define ALLOW_MISSING_YML environment variable?")
    end

    it "not found on s3 raises error" do
      ENV['DATA_PATH'] = 's3://mybucket'
      # Stub S3 so any GET reports a missing key instead of hitting AWS.
      fake_s3 = Aws::S3::Client.new(stub_responses: true)
      fake_s3.stub_responses(:get_object, Aws::S3::Errors::NoSuchKey.new(Seahorse::Client::RequestContext, 'Fake Error'))
      expect {
        # NOTE(review): the assigned local is unused; Config.new itself
        # is expected to raise.
        config = DataMagic::Config.new(s3: fake_s3)
      }.to raise_error(IOError, "No data.y?ml found at s3://mybucket. Did you mean to define ALLOW_MISSING_YML environment variable?")
    end
  end

  describe "with ALLOW_MISSING_YML" do
    # Common response envelope; examples overwrite "results" as needed.
    let (:expected) do
      {
        "metadata" => {
          "total" => 1,
          "page" => 0,
          "per_page" => DataMagic::DEFAULT_PAGE_SIZE
        },
        "results" => []
      }
    end

    before(:all) do
      DataMagic.destroy
      ENV['ALLOW_MISSING_YML'] = 'allow'
      ENV['DATA_PATH'] = './spec/fixtures/cities_without_yml'
      DataMagic.init(load_now: true)
    end

    after(:all) do
      DataMagic.destroy
      ENV['ALLOW_MISSING_YML'] = ''
    end

    it "can get list of imported csv files" do
      file_list = [
        "./spec/fixtures/cities_without_yml/cities50.csv",
        "./spec/fixtures/cities_without_yml/cities51-100.csv",
        "./spec/fixtures/cities_without_yml/more.csv",
      ]
      expect(DataMagic.config.files.sort).to eq(file_list)
    end

    it "can get index name from api endpoint" do
      expect(DataMagic.config.find_index_for('cities-without-yml')).to eq('cities-without-yml')
    end

    it "indexes files with yaml mapping" do
      result = DataMagic.search({NAME: "Chicago"}, api: 'cities-without-yml')
      expected["results"] = [
        {
          "USPS"=>"IL",
          "GEOID"=>"1714000",
          "ANSICODE"=>"00428803",
          "NAME"=>"Chicago",
          "LSAD"=>"25",
          "FUNCSTAT"=>"A",
          "POP10"=>"2695598",
          "HU10"=>"1194337",
          "ALAND"=>"589571105",
          "AWATER"=>"16781658",
          "ALAND_SQMI"=>"227.635",
          "AWATER_SQMI"=>"6.479",
          "INTPTLAT"=>"41.837551",
          "INTPTLONG"=>"-87.681844",
        }
      ]
      expect(result).to eq(expected)
    end
  end
end
| 31.780488 | 155 | 0.599386 |
03b1d52c3fe722a976ce1920fd3f41c6d2b86451 | 1,749 | module Librarian
module Puppet
  module Source
    # Shared behaviour for sources that install puppet modules from a
    # local path.
    module Local
      # Installs the module described by +manifest+: locates its source
      # path, normalizes the module name, clears any previous install,
      # and copies the module into the environment's install path.
      def install!(manifest)
        manifest.source == self or raise ArgumentError

        debug { "Installing #{manifest}" }

        name, version = manifest.name, manifest.version
        found_path = found_path(name)
        raise Error, "Path for #{name} doesn't contain a puppet module" if found_path.nil?

        # "author/module" style names are reduced to the module part.
        if name.include? '/'
          new_name = name.split('/').last
          debug { "Invalid module name '#{name}', guessing you meant '#{new_name}'" }
          name = new_name
        end

        install_path = environment.install_path.join(name)

        if install_path.exist?
          debug { "Deleting #{relative_path_to(install_path)}" }
          install_path.rmtree
        end

        install_perform_step_copy!(found_path, install_path)
      end

      # Local sources report a fixed version after caching.
      # NOTE(review): found_path is computed but unused here — presumably
      # to force resolution/raising before the version is reported; confirm.
      def fetch_version(name, extra)
        cache!
        found_path = found_path(name)
        '0.0.1'
      end

      # Local modules declare no resolvable dependencies.
      def fetch_dependencies(name, version, extra)
        {}
      end

      private

      # Recursively copies the module tree, preserving file attributes.
      def install_perform_step_copy!(found_path, install_path)
        debug { "Copying #{relative_path_to(found_path)} to #{relative_path_to(install_path)}" }

        FileUtils.cp_r(found_path, install_path, :preserve => true)
      end

      # Heuristic: a directory is a puppet module if it contains
      # manifests/, lib/puppet/ or lib/facter/.
      def manifest?(name, path)
        return true if path.join('manifests').exist?
        return true if path.join('lib').join('puppet').exist?
        return true if path.join('lib').join('facter').exist?
        debug { "Could not find manifests, lib/puppet or lib/facter under #{path}, assuming is not a puppet module" }
        false
      end
    end
  end
end
end
| 30.155172 | 119 | 0.58948 |
b959d87e3f1bcde05dd177d8dd77cc7c9e401c48 | 1,956 | ##
# This module requires Metasploit: http://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
# Local Windows privilege-elevation module: launches a payload via
# ShellExecute's undocumented "runas" verb to bypass low UAC settings
# (user-assisted: the user may have to click through a prompt).
class Metasploit3 < Msf::Exploit::Local
  Rank = ExcellentRanking

  include Post::Windows::Priv
  include Post::Windows::Runas

  def initialize(info = {})
    super(update_info(info,
      'Name' => 'Windows Escalate UAC Execute RunAs',
      'Description' => %q(
        This module will attempt to elevate execution level using
        the ShellExecute undocumented RunAs flag to bypass low
        UAC settings.
      ),
      'License' => MSF_LICENSE,
      'Author' => [
        'mubix', # Original technique
        'b00stfr3ak' # Added powershell option
      ],
      'Platform' => ['win'],
      'SessionTypes' => ['meterpreter'],
      'Targets' => [['Windows', {}]],
      'DefaultTarget' => 0,
      'References' => [
        ['URL', 'http://www.room362.com/blog/2012/1/3/uac-user-assisted-compromise.html']
      ],
      'DisclosureDate' => 'Jan 3 2012'
    ))
    register_options([
      OptString.new('FILENAME', [false, 'File name on disk']),
      OptString.new('PATH', [false, 'Location on disk, %TEMP% used if not set']),
      OptEnum.new('TECHNIQUE', [true, 'Technique to use', 'EXE', %w(PSH EXE)]),
    ])
  end

  # Reports the UAC prompt level the user will see, then delivers the
  # payload via the selected TECHNIQUE (dropped EXE or powershell).
  def exploit
    if is_uac_enabled?
      print_status 'UAC is Enabled, checking level...'
      case get_uac_level
      when UAC_NO_PROMPT
        # NOTE(review): UAC is enabled here (outer branch) but set to
        # never prompt; the message wording looks misleading — confirm.
        print_good 'UAC is not enabled, no prompt for the user'
      else
        print_status "The user will be prompted, wait for them to click 'Ok'"
      end
    else
      print_good 'UAC is not enabled, no prompt for the user'
    end

    # Deliver the payload with the chosen technique.
    case datastore['TECHNIQUE']
    when 'EXE'
      shell_execute_exe(datastore['FILENAME'], datastore['PATH'])
    when 'PSH'
      shell_execute_psh
    end
  end
end
| 30.092308 | 90 | 0.587935 |
1d6a9c7f7d4b8888edc7a91d4d933bbeb96cb895 | 332 | # frozen_string_literal: true
# A single weight measurement for an animal, stored as a numeric weight
# plus one of the supported units.
class WeightRecord < ApplicationRecord
  include ActsAsRecordable

  # Supported measurement units; the string values are what the `unit`
  # column accepts (see the inclusion validation below).
  UNITS = [
    KILOGRAMS = "kg",
    GRAMS = "g",
    POUNDS = "lb"
  ].freeze

  # Associations
  belongs_to :animal

  # Validations
  validates :weight, presence: true
  validates :unit, presence: true, inclusion: {in: UNITS}
end
| 17.473684 | 57 | 0.686747 |
6a8fd521f7525ee98280157dc69744d42252ec82 | 182,092 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/protocols/rest_xml.rb'
require 'aws-sdk-s3control/plugins/arn.rb'
require 'aws-sdk-s3control/plugins/dualstack.rb'
require 'aws-sdk-s3control/plugins/s3_control_signer.rb'
require 'aws-sdk-s3control/plugins/s3_host_id.rb'
Aws::Plugins::GlobalConfiguration.add_identifier(:s3control)
module Aws::S3Control
# An API client for S3Control. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::S3Control::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs

# Service identifier registered with the SDK's global configuration.
@identifier = :s3control

set_api(ClientApi::API)

# Core Seahorse/AWS SDK plugins shared by generated service clients.
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::Protocols::RestXml)

# S3 Control-specific plugins (ARN, Dualstack, S3ControlSigner, S3HostId).
add_plugin(Aws::S3Control::Plugins::ARN)
add_plugin(Aws::S3Control::Plugins::Dualstack)
add_plugin(Aws::S3Control::Plugins::S3ControlSigner)
add_plugin(Aws::S3Control::Plugins::S3HostId)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::SharedCredentials` - Used for loading static credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
# assume a role after providing credentials via the web.
#
# * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
# access token generated from `aws login`.
#
# * `Aws::ProcessCredentials` - Used for loading credentials from a
# process that outputs to stdout.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::ECSCredentials` - Used for loading credentials from
# instances running in ECS.
#
# * `Aws::CognitoIdentityCredentials` - Used for loading credentials
# from the Cognito Identity service.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2/ECS IMDS instance profile - When used by default, the timeouts
# are very aggressive. Construct and pass an instance of
# `Aws::InstanceProfileCredentails` or `Aws::ECSCredentials` to
# enable retries and extended timeouts.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficent client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [Boolean] :s3_use_arn_region (true)
# For S3 and S3 Outposts ARNs passed into the `:bucket` or `:name`
# parameter, this option will use the region in the ARN, allowing
# for cross-region requests to be made. Set to `false` to use the
# client's region instead.
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :use_dualstack_endpoint (false)
# When set to `true`, IPv6-compatible bucket endpoints will be used
# for all operations.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
# `:ssl_ca_bundle` or `:ssl_ca_directory` the the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
# not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the
# system default will be used if available.
#
# All configuration handling is delegated to the plugin chain installed
# above; see the class documentation for the supported options.
def initialize(*args)
  super
end
# @!group API Operations
# Creates an access point and associates it with the specified bucket.
# For more information, see [Managing Data Access with Amazon S3 Access
# Points][1] in the *Amazon Simple Storage Service User Guide*.
#
#
#
# <note markdown="1"> S3 on Outposts only supports VPC-style Access Points.
#
# For more information, see [ Accessing Amazon S3 on Outposts using
# virtual private cloud (VPC) only Access Points][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# </note>
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][3] section.
#
#
#
# The following actions are related to `CreateAccessPoint`\:
#
# * [GetAccessPoint][4]
#
# * [DeleteAccessPoint][5]
#
# * [ListAccessPoints][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html#API_control_CreateAccessPoint_Examples
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html
#
# @option params [required, String] :account_id
# The AWS account ID for the owner of the bucket for which you want to
# create an access point.
#
# @option params [required, String] :name
# The name you want to assign to this access point.
#
# @option params [required, String] :bucket
# The name of the bucket that you want to associate this access point
# with.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @option params [Types::VpcConfiguration] :vpc_configuration
# If you include this field, Amazon S3 restricts access to this access
# point to requests from the specified virtual private cloud (VPC).
#
# <note markdown="1"> This is required for creating an access point for Amazon S3 on
# Outposts buckets.
#
# </note>
#
# @option params [Types::PublicAccessBlockConfiguration] :public_access_block_configuration
# The `PublicAccessBlock` configuration that you want to apply to this
# Amazon S3 account. You can enable the configuration options in any
# combination. For more information about when Amazon S3 considers a
# bucket or object public, see [The Meaning of "Public"][1] in the
# *Amazon Simple Storage Service Developer Guide*.
#
# This is not supported for Amazon S3 on Outposts.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
#
# @return [Types::CreateAccessPointResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateAccessPointResult#access_point_arn #access_point_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_access_point({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# bucket: "BucketName", # required
# vpc_configuration: {
# vpc_id: "VpcId", # required
# },
# public_access_block_configuration: {
# block_public_acls: false,
# ignore_public_acls: false,
# block_public_policy: false,
# restrict_public_buckets: false,
# },
# })
#
# @example Response structure
#
# resp.access_point_arn #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateAccessPoint AWS API Documentation
#
# @overload create_access_point(params = {})
# @param [Hash] params ({})
# Builds the CreateAccessPoint request from +params+ and dispatches it
# with the given request +options+.
def create_access_point(params = {}, options = {})
  build_request(:create_access_point, params).send_request(options)
end
# Creates an Object Lambda Access Point. For more information, see
# [Transforming objects with Object Lambda Access Points][1] in the
# *Amazon Simple Storage Service User Guide*.
#
# The following actions are related to
# `CreateAccessPointForObjectLambda`\:
#
# * [DeleteAccessPointForObjectLambda][2]
#
# * [GetAccessPointForObjectLambda][3]
#
# * [ListAccessPointsForObjectLambda][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html
#
# @option params [required, String] :account_id
# The AWS account ID for owner of the specified Object Lambda Access
# Point.
#
# @option params [required, String] :name
# The name you want to assign to this Object Lambda Access Point.
#
# @option params [required, Types::ObjectLambdaConfiguration] :configuration
# Object Lambda Access Point configuration as a JSON document.
#
# @return [Types::CreateAccessPointForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateAccessPointForObjectLambdaResult#object_lambda_access_point_arn #object_lambda_access_point_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_access_point_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# configuration: { # required
# supporting_access_point: "ObjectLambdaSupportingAccessPointArn", # required
# cloud_watch_metrics_enabled: false,
# allowed_features: ["GetObject-Range"], # accepts GetObject-Range, GetObject-PartNumber
# transformation_configurations: [ # required
# {
# actions: ["GetObject"], # required, accepts GetObject
# content_transformation: { # required
# aws_lambda: {
# function_arn: "FunctionArnString", # required
# function_payload: "AwsLambdaTransformationPayload",
# },
# },
# },
# ],
# },
# })
#
# @example Response structure
#
# resp.object_lambda_access_point_arn #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateAccessPointForObjectLambda AWS API Documentation
#
# @overload create_access_point_for_object_lambda(params = {})
# @param [Hash] params ({})
# Builds the CreateAccessPointForObjectLambda request from +params+ and
# dispatches it with the given request +options+.
def create_access_point_for_object_lambda(params = {}, options = {})
  build_request(:create_access_point_for_object_lambda, params).send_request(options)
end
# <note markdown="1"> This action creates an Amazon S3 on Outposts bucket. To create an S3
# bucket, see [Create Bucket][1] in the *Amazon Simple Storage Service
# API*.
#
# </note>
#
# Creates a new Outposts bucket. By creating the bucket, you become the
# bucket owner. To create an Outposts bucket, you must have S3 on
# Outposts. For more information, see [Using Amazon S3 on Outposts][2]
# in *Amazon Simple Storage Service User Guide*.
#
# Not every string is an acceptable bucket name. For information on
# bucket naming restrictions, see [Working with Amazon S3 Buckets][3].
#
# S3 on Outposts buckets support:
#
# * Tags
#
# * LifecycleConfigurations for deleting expired objects
#
# For a complete list of restrictions and Amazon S3 feature limitations
# on S3 on Outposts, see [ Amazon S3 on Outposts Restrictions and
# Limitations][4].
#
# For an example of the request syntax for Amazon S3 on Outposts that
# uses the S3 on Outposts endpoint hostname prefix and
# `x-amz-outpost-id` in your API request, see the [Examples][5] section.
#
# The following actions are related to `CreateBucket` for Amazon S3 on
# Outposts:
#
# * [PutObject][6]
#
# * [GetBucket][7]
#
# * [DeleteBucket][8]
#
# * [CreateAccessPoint][9]
#
# * [PutAccessPointPolicy][10]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/BucketRestrictions.html#bucketnamingrules
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3OnOutpostsRestrictionsLimitations.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html#API_control_CreateBucket_Examples
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html
# [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html
# [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html
# [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html
#
# @option params [String] :acl
# The canned ACL to apply to the bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [required, String] :bucket
# The name of the bucket.
#
# @option params [Types::CreateBucketConfiguration] :create_bucket_configuration
# The configuration information for the bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :grant_full_control
# Allows grantee the read, write, read ACP, and write ACP permissions on
# the bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :grant_read
# Allows grantee to list the objects in the bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :grant_read_acp
# Allows grantee to read the bucket ACL.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :grant_write
# Allows grantee to create, overwrite, and delete any object in the
# bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :grant_write_acp
# Allows grantee to write the ACL for the applicable bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [Boolean] :object_lock_enabled_for_bucket
# Specifies whether you want S3 Object Lock to be enabled for the new
# bucket.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [String] :outpost_id
# The ID of the Outposts where the bucket is being created.
#
# <note markdown="1"> This is required by Amazon S3 on Outposts buckets.
#
# </note>
#
# @return [Types::CreateBucketResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateBucketResult#location #location} => String
# * {Types::CreateBucketResult#bucket_arn #bucket_arn} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_bucket({
# acl: "private", # accepts private, public-read, public-read-write, authenticated-read
# bucket: "BucketName", # required
# create_bucket_configuration: {
# location_constraint: "EU", # accepts EU, eu-west-1, us-west-1, us-west-2, ap-south-1, ap-southeast-1, ap-southeast-2, ap-northeast-1, sa-east-1, cn-north-1, eu-central-1
# },
# grant_full_control: "GrantFullControl",
# grant_read: "GrantRead",
# grant_read_acp: "GrantReadACP",
# grant_write: "GrantWrite",
# grant_write_acp: "GrantWriteACP",
# object_lock_enabled_for_bucket: false,
# outpost_id: "NonEmptyMaxLength64String",
# })
#
# @example Response structure
#
# resp.location #=> String
# resp.bucket_arn #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateBucket AWS API Documentation
#
# @overload create_bucket(params = {})
# @param [Hash] params ({})
def create_bucket(params = {}, options = {})
  # Serialize +params+ against the CreateBucket shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:create_bucket, params).send_request(options)
end
# You can use S3 Batch Operations to perform large-scale batch actions
# on Amazon S3 objects. Batch Operations can run a single action on
# lists of Amazon S3 objects that you specify. For more information, see
# [S3 Batch Operations][1] in the *Amazon Simple Storage Service User
# Guide*.
#
# This action creates an S3 Batch Operations job.
#
#
#
# Related actions include:
#
# * [DescribeJob][2]
#
# * [ListJobs][3]
#
# * [UpdateJobPriority][4]
#
# * [UpdateJobStatus][5]
#
# * [JobOperation][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_JobOperation.html
#
# @option params [required, String] :account_id
# The AWS account ID that creates the job.
#
# @option params [Boolean] :confirmation_required
# Indicates whether confirmation is required before Amazon S3 runs the
# job. Confirmation is only required for jobs created through the Amazon
# S3 console.
#
# @option params [required, Types::JobOperation] :operation
# The action that you want this job to perform on every object listed in
# the manifest. For more information about the available actions, see
# [Operations][1] in the *Amazon Simple Storage Service User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-actions.html
#
# @option params [required, Types::JobReport] :report
# Configuration parameters for the optional job-completion report.
#
# @option params [required, String] :client_request_token
# An idempotency token to ensure that you don't accidentally submit the
# same request twice. You can use any string up to the maximum length.
#
#   **A suitable default value is auto-generated.** You should normally
#   not need to pass this option.
#
# @option params [required, Types::JobManifest] :manifest
# Configuration parameters for the manifest.
#
# @option params [String] :description
# A description for this job. You can use any string within the
# permitted length. Descriptions don't need to be unique and can be
# used for multiple jobs.
#
# @option params [required, Integer] :priority
# The numerical priority for this job. Higher numbers indicate higher
# priority.
#
# @option params [required, String] :role_arn
# The Amazon Resource Name (ARN) for the AWS Identity and Access
# Management (IAM) role that Batch Operations will use to run this
# job's action on every object in the manifest.
#
# @option params [Array<Types::S3Tag>] :tags
# A set of tags to associate with the S3 Batch Operations job. This is
# an optional parameter.
#
# @return [Types::CreateJobResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateJobResult#job_id #job_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_job({
# account_id: "AccountId", # required
# confirmation_required: false,
# operation: { # required
# lambda_invoke: {
# function_arn: "FunctionArnString",
# },
# s3_put_object_copy: {
# target_resource: "S3BucketArnString",
# canned_access_control_list: "private", # accepts private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, bucket-owner-full-control
# access_control_grants: [
# {
# grantee: {
# type_identifier: "id", # accepts id, emailAddress, uri
# identifier: "NonEmptyMaxLength1024String",
# display_name: "NonEmptyMaxLength1024String",
# },
# permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE, READ_ACP, WRITE_ACP
# },
# ],
# metadata_directive: "COPY", # accepts COPY, REPLACE
# modified_since_constraint: Time.now,
# new_object_metadata: {
# cache_control: "NonEmptyMaxLength1024String",
# content_disposition: "NonEmptyMaxLength1024String",
# content_encoding: "NonEmptyMaxLength1024String",
# content_language: "NonEmptyMaxLength1024String",
# user_metadata: {
# "NonEmptyMaxLength1024String" => "MaxLength1024String",
# },
# content_length: 1,
# content_md5: "NonEmptyMaxLength1024String",
# content_type: "NonEmptyMaxLength1024String",
# http_expires_date: Time.now,
# requester_charged: false,
# sse_algorithm: "AES256", # accepts AES256, KMS
# },
# new_object_tagging: [
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# redirect_location: "NonEmptyMaxLength2048String",
# requester_pays: false,
# storage_class: "STANDARD", # accepts STANDARD, STANDARD_IA, ONEZONE_IA, GLACIER, INTELLIGENT_TIERING, DEEP_ARCHIVE
# un_modified_since_constraint: Time.now,
# sse_aws_kms_key_id: "KmsKeyArnString",
# target_key_prefix: "NonEmptyMaxLength1024String",
# object_lock_legal_hold_status: "OFF", # accepts OFF, ON
# object_lock_mode: "COMPLIANCE", # accepts COMPLIANCE, GOVERNANCE
# object_lock_retain_until_date: Time.now,
# },
# s3_put_object_acl: {
# access_control_policy: {
# access_control_list: {
# owner: { # required
# id: "NonEmptyMaxLength1024String",
# display_name: "NonEmptyMaxLength1024String",
# },
# grants: [
# {
# grantee: {
# type_identifier: "id", # accepts id, emailAddress, uri
# identifier: "NonEmptyMaxLength1024String",
# display_name: "NonEmptyMaxLength1024String",
# },
# permission: "FULL_CONTROL", # accepts FULL_CONTROL, READ, WRITE, READ_ACP, WRITE_ACP
# },
# ],
# },
# canned_access_control_list: "private", # accepts private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, bucket-owner-full-control
# },
# },
# s3_put_object_tagging: {
# tag_set: [
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# },
# s3_delete_object_tagging: {
# },
# s3_initiate_restore_object: {
# expiration_in_days: 1,
# glacier_job_tier: "BULK", # accepts BULK, STANDARD
# },
# s3_put_object_legal_hold: {
# legal_hold: { # required
# status: "OFF", # required, accepts OFF, ON
# },
# },
# s3_put_object_retention: {
# bypass_governance_retention: false,
# retention: { # required
# retain_until_date: Time.now,
# mode: "COMPLIANCE", # accepts COMPLIANCE, GOVERNANCE
# },
# },
# },
# report: { # required
# bucket: "S3BucketArnString",
# format: "Report_CSV_20180820", # accepts Report_CSV_20180820
# enabled: false, # required
# prefix: "ReportPrefixString",
# report_scope: "AllTasks", # accepts AllTasks, FailedTasksOnly
# },
# client_request_token: "NonEmptyMaxLength64String", # required
# manifest: { # required
# spec: { # required
# format: "S3BatchOperations_CSV_20180820", # required, accepts S3BatchOperations_CSV_20180820, S3InventoryReport_CSV_20161130
# fields: ["Ignore"], # accepts Ignore, Bucket, Key, VersionId
# },
# location: { # required
# object_arn: "S3KeyArnString", # required
# object_version_id: "S3ObjectVersionId",
# etag: "NonEmptyMaxLength1024String", # required
# },
# },
# description: "NonEmptyMaxLength256String",
# priority: 1, # required
# role_arn: "IAMRoleArn", # required
# tags: [
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# })
#
# @example Response structure
#
# resp.job_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateJob AWS API Documentation
#
# @overload create_job(params = {})
# @param [Hash] params ({})
def create_job(params = {}, options = {})
  # Serialize +params+ against the CreateJob shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:create_job, params).send_request(options)
end
# Deletes the specified access point.
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][1] section.
#
# The following actions are related to `DeleteAccessPoint`\:
#
# * [CreateAccessPoint][2]
#
# * [GetAccessPoint][3]
#
# * [ListAccessPoints][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html#API_control_DeleteAccessPoint_Examples
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified access point.
#
# @option params [required, String] :name
# The name of the access point you want to delete.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the access point accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>`.
# For example, to access the access point `reports-ap` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_access_point({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteAccessPoint AWS API Documentation
#
# @overload delete_access_point(params = {})
# @param [Hash] params ({})
def delete_access_point(params = {}, options = {})
  # Serialize +params+ against the DeleteAccessPoint shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:delete_access_point, params).send_request(options)
end
# Deletes the specified Object Lambda Access Point.
#
# The following actions are related to
# `DeleteAccessPointForObjectLambda`\:
#
# * [CreateAccessPointForObjectLambda][1]
#
# * [GetAccessPointForObjectLambda][2]
#
# * [ListAccessPointsForObjectLambda][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the access point you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_access_point_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteAccessPointForObjectLambda AWS API Documentation
#
# @overload delete_access_point_for_object_lambda(params = {})
# @param [Hash] params ({})
def delete_access_point_for_object_lambda(params = {}, options = {})
  # Serialize +params+ against the DeleteAccessPointForObjectLambda shape
  # and dispatch through the shared Seahorse request pipeline.
  build_request(:delete_access_point_for_object_lambda, params).send_request(options)
end
# Deletes the access point policy for the specified access point.
#
#
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][1] section.
#
# The following actions are related to `DeleteAccessPointPolicy`\:
#
# * [PutAccessPointPolicy][2]
#
# * [GetAccessPointPolicy][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html#API_control_DeleteAccessPointPolicy_Examples
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified access point.
#
# @option params [required, String] :name
# The name of the access point whose policy you want to delete.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the access point accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>`.
# For example, to access the access point `reports-ap` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_access_point_policy({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteAccessPointPolicy AWS API Documentation
#
# @overload delete_access_point_policy(params = {})
# @param [Hash] params ({})
def delete_access_point_policy(params = {}, options = {})
  # Serialize +params+ against the DeleteAccessPointPolicy shape and
  # dispatch through the shared Seahorse request pipeline.
  build_request(:delete_access_point_policy, params).send_request(options)
end
# Removes the resource policy for an Object Lambda Access Point.
#
# The following actions are related to
# `DeleteAccessPointPolicyForObjectLambda`\:
#
# * [GetAccessPointPolicyForObjectLambda][1]
#
# * [PutAccessPointPolicyForObjectLambda][2]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicyForObjectLambda.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicyForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point you want to delete the
# policy for.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_access_point_policy_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteAccessPointPolicyForObjectLambda AWS API Documentation
#
# @overload delete_access_point_policy_for_object_lambda(params = {})
# @param [Hash] params ({})
def delete_access_point_policy_for_object_lambda(params = {}, options = {})
  # Serialize +params+ against the DeleteAccessPointPolicyForObjectLambda
  # shape and dispatch through the shared Seahorse request pipeline.
  build_request(:delete_access_point_policy_for_object_lambda, params).send_request(options)
end
# <note markdown="1"> This action deletes an Amazon S3 on Outposts bucket. To delete an S3
# bucket, see [DeleteBucket][1] in the *Amazon Simple Storage Service
# API*.
#
# </note>
#
# Deletes the Amazon S3 on Outposts bucket. All objects (including all
# object versions and delete markers) in the bucket must be deleted
# before the bucket itself can be deleted. For more information, see
# [Using Amazon S3 on Outposts][2] in *Amazon Simple Storage Service
# User Guide*.
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][3] section.
#
# **Related Resources**
#
# * [CreateBucket][4]
#
# * [GetBucket][5]
#
# * [DeleteObject][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html#API_control_DeleteBucket_Examples
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
#
# @option params [required, String] :account_id
# The account ID that owns the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket being deleted.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_bucket({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucket AWS API Documentation
#
# @overload delete_bucket(params = {})
# @param [Hash] params ({})
def delete_bucket(params = {}, options = {})
  # Serialize +params+ against the DeleteBucket shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:delete_bucket, params).send_request(options)
end
# <note markdown="1"> This action deletes an Amazon S3 on Outposts bucket's lifecycle
# configuration. To delete an S3 bucket's lifecycle configuration, see
# [DeleteBucketLifecycle][1] in the *Amazon Simple Storage Service API*.
#
# </note>
#
# Deletes the lifecycle configuration from the specified Outposts
# bucket. Amazon S3 on Outposts removes all the lifecycle configuration
# rules in the lifecycle subresource associated with the bucket. Your
# objects never expire, and Amazon S3 on Outposts no longer
# automatically deletes any objects on the basis of rules contained in
# the deleted lifecycle configuration. For more information, see [Using
# Amazon S3 on Outposts][2] in *Amazon Simple Storage Service User
# Guide*.
#
# To use this action, you must have permission to perform the
# `s3-outposts:DeleteLifecycleConfiguration` action. By default, the
# bucket owner has this permission and the Outposts bucket owner can
# grant this permission to others.
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][3] section.
#
# For more information about object expiration, see [Elements to
# Describe Lifecycle Actions][4].
#
# Related actions include:
#
# * [PutBucketLifecycleConfiguration][5]
#
# * [GetBucketLifecycleConfiguration][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html#API_control_DeleteBucketLifecycleConfiguration_Examples
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html
#
# @option params [required, String] :account_id
# The account ID of the lifecycle configuration to delete.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_bucket_lifecycle_configuration({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketLifecycleConfiguration AWS API Documentation
#
# @overload delete_bucket_lifecycle_configuration(params = {})
# @param [Hash] params ({})
def delete_bucket_lifecycle_configuration(params = {}, options = {})
  # Serialize +params+ against the DeleteBucketLifecycleConfiguration
  # shape and dispatch through the shared Seahorse request pipeline.
  build_request(:delete_bucket_lifecycle_configuration, params).send_request(options)
end
# <note markdown="1"> This action deletes an Amazon S3 on Outposts bucket policy. To delete
# an S3 bucket policy, see [DeleteBucketPolicy][1] in the *Amazon Simple
# Storage Service API*.
#
# </note>
#
# This implementation of the DELETE action uses the policy subresource
# to delete the policy of a specified Amazon S3 on Outposts bucket. If
# you are using an identity other than the root user of the AWS account
# that owns the bucket, the calling identity must have the
# `s3-outposts:DeleteBucketPolicy` permissions on the specified Outposts
# bucket and belong to the bucket owner's account to use this action.
# For more information, see [Using Amazon S3 on Outposts][2] in *Amazon
# Simple Storage Service User Guide*.
#
# If you don't have `DeleteBucketPolicy` permissions, Amazon S3 returns
# a `403 Access Denied` error. If you have the correct permissions, but
# you're not using an identity that belongs to the bucket owner's
# account, Amazon S3 returns a `405 Method Not Allowed` error.
#
# As a security precaution, the root user of the AWS account that owns a
# bucket can always use this action, even if the policy explicitly
# denies the root user the ability to perform this action.
#
# For more information about bucket policies, see [Using Bucket Policies
# and User Policies][3].
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][4] section.
#
# The following actions are related to `DeleteBucketPolicy`\:
#
# * [GetBucketPolicy][5]
#
# * [PutBucketPolicy][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html#API_control_DeleteBucketPolicy_Examples
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html
#
# @option params [required, String] :account_id
# The account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_bucket_policy({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketPolicy AWS API Documentation
#
# @overload delete_bucket_policy(params = {})
# @param [Hash] params ({})
def delete_bucket_policy(params = {}, options = {})
  # Serialize +params+ against the DeleteBucketPolicy shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:delete_bucket_policy, params).send_request(options)
end
# <note markdown="1"> This action deletes an Amazon S3 on Outposts bucket's tags. To delete
# an S3 bucket's tags, see [DeleteBucketTagging][1] in the *Amazon Simple
# Storage Service API*.
#
# </note>
#
# Deletes the tags from the Outposts bucket. For more information, see
# [Using Amazon S3 on Outposts][2] in *Amazon Simple Storage Service
# User Guide*.
#
# To use this action, you must have permission to perform the
# `PutBucketTagging` action. By default, the bucket owner has this
# permission and can grant this permission to others.
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][3] section.
#
# The following actions are related to `DeleteBucketTagging`\:
#
# * [GetBucketTagging][4]
#
# * [PutBucketTagging][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html#API_control_DeleteBucketTagging_Examples
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket tag set to be removed.
#
# @option params [required, String] :bucket
# The bucket ARN that has the tag set to be removed.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_bucket_tagging({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketTagging AWS API Documentation
#
# @overload delete_bucket_tagging(params = {})
# @param [Hash] params ({})
def delete_bucket_tagging(params = {}, options = {})
  # Serialize +params+ against the DeleteBucketTagging shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:delete_bucket_tagging, params).send_request(options)
end
# Removes the entire tag set from the specified S3 Batch Operations job.
# To use this operation, you must have permission to perform the
# `s3:DeleteJobTagging` action. For more information, see [Controlling
# access and labeling jobs using tags][1] in the *Amazon Simple Storage
# Service User Guide*.
#
#
#
# Related actions include:
#
# * [CreateJob][2]
#
# * [GetJobTagging][3]
#
# * [PutJobTagging][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID for the S3 Batch Operations job whose tags you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_job_tagging({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteJobTagging AWS API Documentation
#
# @overload delete_job_tagging(params = {})
# @param [Hash] params ({})
def delete_job_tagging(params = {}, options = {})
  # Serialize +params+ against the DeleteJobTagging shape and dispatch
  # through the shared Seahorse request pipeline.
  build_request(:delete_job_tagging, params).send_request(options)
end
# Removes the `PublicAccessBlock` configuration for an AWS account. For
# more information, see [ Using Amazon S3 block public access][1].
#
# Related actions include:
#
# * [GetPublicAccessBlock][2]
#
# * [PutPublicAccessBlock][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html
#
# @option params [required, String] :account_id
# The account ID for the AWS account whose `PublicAccessBlock`
# configuration you want to remove.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_public_access_block({
# account_id: "AccountId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeletePublicAccessBlock AWS API Documentation
#
# @overload delete_public_access_block(params = {})
# @param [Hash] params ({})
def delete_public_access_block(params = {}, options = {})
req = build_request(:delete_public_access_block, params)
req.send_request(options)
end
# Deletes the Amazon S3 Storage Lens configuration. For more information
# about S3 Storage Lens, see [Assessing your storage activity and usage
# with Amazon S3 Storage Lens ][1] in the *Amazon Simple Storage Service
# User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:DeleteStorageLensConfiguration` action. For more information, see
# [Setting permissions to use Amazon S3 Storage Lens][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_storage_lens_configuration({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteStorageLensConfiguration AWS API Documentation
#
# @overload delete_storage_lens_configuration(params = {})
# @param [Hash] params ({})
def delete_storage_lens_configuration(params = {}, options = {})
req = build_request(:delete_storage_lens_configuration, params)
req.send_request(options)
end
# Deletes the Amazon S3 Storage Lens configuration tags. For more
# information about S3 Storage Lens, see [Assessing your storage
# activity and usage with Amazon S3 Storage Lens ][1] in the *Amazon
# Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:DeleteStorageLensConfigurationTagging` action. For more
# information, see [Setting permissions to use Amazon S3 Storage
# Lens][2] in the *Amazon Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_storage_lens_configuration_tagging({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteStorageLensConfigurationTagging AWS API Documentation
#
# @overload delete_storage_lens_configuration_tagging(params = {})
# @param [Hash] params ({})
def delete_storage_lens_configuration_tagging(params = {}, options = {})
req = build_request(:delete_storage_lens_configuration_tagging, params)
req.send_request(options)
end
# Retrieves the configuration parameters and status for a Batch
# Operations job. For more information, see [S3 Batch Operations][1] in
# the *Amazon Simple Storage Service User Guide*.
#
#
#
# Related actions include:
#
# * [CreateJob][2]
#
# * [ListJobs][3]
#
# * [UpdateJobPriority][4]
#
# * [UpdateJobStatus][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID for the job whose information you want to retrieve.
#
# @return [Types::DescribeJobResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeJobResult#job #job} => Types::JobDescriptor
#
# @example Request syntax with placeholder values
#
# resp = client.describe_job({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# })
#
# @example Response structure
#
# resp.job.job_id #=> String
# resp.job.confirmation_required #=> Boolean
# resp.job.description #=> String
# resp.job.job_arn #=> String
# resp.job.status #=> String, one of "Active", "Cancelled", "Cancelling", "Complete", "Completing", "Failed", "Failing", "New", "Paused", "Pausing", "Preparing", "Ready", "Suspended"
# resp.job.manifest.spec.format #=> String, one of "S3BatchOperations_CSV_20180820", "S3InventoryReport_CSV_20161130"
# resp.job.manifest.spec.fields #=> Array
# resp.job.manifest.spec.fields[0] #=> String, one of "Ignore", "Bucket", "Key", "VersionId"
# resp.job.manifest.location.object_arn #=> String
# resp.job.manifest.location.object_version_id #=> String
# resp.job.manifest.location.etag #=> String
# resp.job.operation.lambda_invoke.function_arn #=> String
# resp.job.operation.s3_put_object_copy.target_resource #=> String
# resp.job.operation.s3_put_object_copy.canned_access_control_list #=> String, one of "private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
# resp.job.operation.s3_put_object_copy.access_control_grants #=> Array
# resp.job.operation.s3_put_object_copy.access_control_grants[0].grantee.type_identifier #=> String, one of "id", "emailAddress", "uri"
# resp.job.operation.s3_put_object_copy.access_control_grants[0].grantee.identifier #=> String
# resp.job.operation.s3_put_object_copy.access_control_grants[0].grantee.display_name #=> String
# resp.job.operation.s3_put_object_copy.access_control_grants[0].permission #=> String, one of "FULL_CONTROL", "READ", "WRITE", "READ_ACP", "WRITE_ACP"
# resp.job.operation.s3_put_object_copy.metadata_directive #=> String, one of "COPY", "REPLACE"
# resp.job.operation.s3_put_object_copy.modified_since_constraint #=> Time
# resp.job.operation.s3_put_object_copy.new_object_metadata.cache_control #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_disposition #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_encoding #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_language #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.user_metadata #=> Hash
# resp.job.operation.s3_put_object_copy.new_object_metadata.user_metadata["NonEmptyMaxLength1024String"] #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_length #=> Integer
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_md5 #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.content_type #=> String
# resp.job.operation.s3_put_object_copy.new_object_metadata.http_expires_date #=> Time
# resp.job.operation.s3_put_object_copy.new_object_metadata.requester_charged #=> Boolean
# resp.job.operation.s3_put_object_copy.new_object_metadata.sse_algorithm #=> String, one of "AES256", "KMS"
# resp.job.operation.s3_put_object_copy.new_object_tagging #=> Array
# resp.job.operation.s3_put_object_copy.new_object_tagging[0].key #=> String
# resp.job.operation.s3_put_object_copy.new_object_tagging[0].value #=> String
# resp.job.operation.s3_put_object_copy.redirect_location #=> String
# resp.job.operation.s3_put_object_copy.requester_pays #=> Boolean
# resp.job.operation.s3_put_object_copy.storage_class #=> String, one of "STANDARD", "STANDARD_IA", "ONEZONE_IA", "GLACIER", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
# resp.job.operation.s3_put_object_copy.un_modified_since_constraint #=> Time
# resp.job.operation.s3_put_object_copy.sse_aws_kms_key_id #=> String
# resp.job.operation.s3_put_object_copy.target_key_prefix #=> String
# resp.job.operation.s3_put_object_copy.object_lock_legal_hold_status #=> String, one of "OFF", "ON"
# resp.job.operation.s3_put_object_copy.object_lock_mode #=> String, one of "COMPLIANCE", "GOVERNANCE"
# resp.job.operation.s3_put_object_copy.object_lock_retain_until_date #=> Time
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.owner.id #=> String
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.owner.display_name #=> String
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.grants #=> Array
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.grants[0].grantee.type_identifier #=> String, one of "id", "emailAddress", "uri"
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.grants[0].grantee.identifier #=> String
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.grants[0].grantee.display_name #=> String
# resp.job.operation.s3_put_object_acl.access_control_policy.access_control_list.grants[0].permission #=> String, one of "FULL_CONTROL", "READ", "WRITE", "READ_ACP", "WRITE_ACP"
# resp.job.operation.s3_put_object_acl.access_control_policy.canned_access_control_list #=> String, one of "private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
# resp.job.operation.s3_put_object_tagging.tag_set #=> Array
# resp.job.operation.s3_put_object_tagging.tag_set[0].key #=> String
# resp.job.operation.s3_put_object_tagging.tag_set[0].value #=> String
# resp.job.operation.s3_initiate_restore_object.expiration_in_days #=> Integer
# resp.job.operation.s3_initiate_restore_object.glacier_job_tier #=> String, one of "BULK", "STANDARD"
# resp.job.operation.s3_put_object_legal_hold.legal_hold.status #=> String, one of "OFF", "ON"
# resp.job.operation.s3_put_object_retention.bypass_governance_retention #=> Boolean
# resp.job.operation.s3_put_object_retention.retention.retain_until_date #=> Time
# resp.job.operation.s3_put_object_retention.retention.mode #=> String, one of "COMPLIANCE", "GOVERNANCE"
# resp.job.priority #=> Integer
# resp.job.progress_summary.total_number_of_tasks #=> Integer
# resp.job.progress_summary.number_of_tasks_succeeded #=> Integer
# resp.job.progress_summary.number_of_tasks_failed #=> Integer
# resp.job.status_update_reason #=> String
# resp.job.failure_reasons #=> Array
# resp.job.failure_reasons[0].failure_code #=> String
# resp.job.failure_reasons[0].failure_reason #=> String
# resp.job.report.bucket #=> String
# resp.job.report.format #=> String, one of "Report_CSV_20180820"
# resp.job.report.enabled #=> Boolean
# resp.job.report.prefix #=> String
# resp.job.report.report_scope #=> String, one of "AllTasks", "FailedTasksOnly"
# resp.job.creation_time #=> Time
# resp.job.termination_date #=> Time
# resp.job.role_arn #=> String
# resp.job.suspended_date #=> Time
# resp.job.suspended_cause #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DescribeJob AWS API Documentation
#
# @overload describe_job(params = {})
# @param [Hash] params ({})
def describe_job(params = {}, options = {})
req = build_request(:describe_job, params)
req.send_request(options)
end
# Returns configuration information about the specified access point.
#
#
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][1] section.
#
# The following actions are related to `GetAccessPoint`\:
#
# * [CreateAccessPoint][2]
#
# * [DeleteAccessPoint][3]
#
# * [ListAccessPoints][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html#API_control_GetAccessPoint_Examples
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified access point.
#
# @option params [required, String] :name
# The name of the access point whose configuration information you want
# to retrieve.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the access point accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>`.
# For example, to access the access point `reports-ap` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap`.
# The value must be URL encoded.
#
# @return [Types::GetAccessPointResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointResult#name #name} => String
# * {Types::GetAccessPointResult#bucket #bucket} => String
# * {Types::GetAccessPointResult#network_origin #network_origin} => String
# * {Types::GetAccessPointResult#vpc_configuration #vpc_configuration} => Types::VpcConfiguration
# * {Types::GetAccessPointResult#public_access_block_configuration #public_access_block_configuration} => Types::PublicAccessBlockConfiguration
# * {Types::GetAccessPointResult#creation_date #creation_date} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.bucket #=> String
# resp.network_origin #=> String, one of "Internet", "VPC"
# resp.vpc_configuration.vpc_id #=> String
# resp.public_access_block_configuration.block_public_acls #=> Boolean
# resp.public_access_block_configuration.ignore_public_acls #=> Boolean
# resp.public_access_block_configuration.block_public_policy #=> Boolean
# resp.public_access_block_configuration.restrict_public_buckets #=> Boolean
# resp.creation_date #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPoint AWS API Documentation
#
# @overload get_access_point(params = {})
# @param [Hash] params ({})
def get_access_point(params = {}, options = {})
req = build_request(:get_access_point, params)
req.send_request(options)
end
# Returns configuration for an Object Lambda Access Point.
#
# The following actions are related to
# `GetAccessPointConfigurationForObjectLambda`\:
#
# * [PutAccessPointConfigurationForObjectLambda][1]
#
# ^
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointConfigurationForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point you want to return the
# configuration for.
#
# @return [Types::GetAccessPointConfigurationForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointConfigurationForObjectLambdaResult#configuration #configuration} => Types::ObjectLambdaConfiguration
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_configuration_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @example Response structure
#
# resp.configuration.supporting_access_point #=> String
# resp.configuration.cloud_watch_metrics_enabled #=> Boolean
# resp.configuration.allowed_features #=> Array
# resp.configuration.allowed_features[0] #=> String, one of "GetObject-Range", "GetObject-PartNumber"
# resp.configuration.transformation_configurations #=> Array
# resp.configuration.transformation_configurations[0].actions #=> Array
# resp.configuration.transformation_configurations[0].actions[0] #=> String, one of "GetObject"
# resp.configuration.transformation_configurations[0].content_transformation.aws_lambda.function_arn #=> String
# resp.configuration.transformation_configurations[0].content_transformation.aws_lambda.function_payload #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointConfigurationForObjectLambda AWS API Documentation
#
# @overload get_access_point_configuration_for_object_lambda(params = {})
# @param [Hash] params ({})
def get_access_point_configuration_for_object_lambda(params = {}, options = {})
req = build_request(:get_access_point_configuration_for_object_lambda, params)
req.send_request(options)
end
    # Returns configuration information about the specified Object Lambda
    # Access Point.
#
# The following actions are related to `GetAccessPointForObjectLambda`\:
#
# * [CreateAccessPointForObjectLambda][1]
#
# * [DeleteAccessPointForObjectLambda][2]
#
# * [ListAccessPointsForObjectLambda][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPointsForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point.
#
# @return [Types::GetAccessPointForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointForObjectLambdaResult#name #name} => String
# * {Types::GetAccessPointForObjectLambdaResult#public_access_block_configuration #public_access_block_configuration} => Types::PublicAccessBlockConfiguration
# * {Types::GetAccessPointForObjectLambdaResult#creation_date #creation_date} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.public_access_block_configuration.block_public_acls #=> Boolean
# resp.public_access_block_configuration.ignore_public_acls #=> Boolean
# resp.public_access_block_configuration.block_public_policy #=> Boolean
# resp.public_access_block_configuration.restrict_public_buckets #=> Boolean
# resp.creation_date #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointForObjectLambda AWS API Documentation
#
# @overload get_access_point_for_object_lambda(params = {})
# @param [Hash] params ({})
def get_access_point_for_object_lambda(params = {}, options = {})
req = build_request(:get_access_point_for_object_lambda, params)
req.send_request(options)
end
# Returns the access point policy associated with the specified access
# point.
#
# The following actions are related to `GetAccessPointPolicy`\:
#
# * [PutAccessPointPolicy][1]
#
# * [DeleteAccessPointPolicy][2]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified access point.
#
# @option params [required, String] :name
# The name of the access point whose policy you want to retrieve.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the access point accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>`.
# For example, to access the access point `reports-ap` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap`.
# The value must be URL encoded.
#
# @return [Types::GetAccessPointPolicyResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointPolicyResult#policy #policy} => String
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_policy({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# })
#
# @example Response structure
#
# resp.policy #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicy AWS API Documentation
#
# @overload get_access_point_policy(params = {})
# @param [Hash] params ({})
def get_access_point_policy(params = {}, options = {})
req = build_request(:get_access_point_policy, params)
req.send_request(options)
end
# Returns the resource policy for an Object Lambda Access Point.
#
# The following actions are related to
# `GetAccessPointPolicyForObjectLambda`\:
#
# * [DeleteAccessPointPolicyForObjectLambda][1]
#
# * [PutAccessPointPolicyForObjectLambda][2]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicyForObjectLambda.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicyForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point.
#
# @return [Types::GetAccessPointPolicyForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointPolicyForObjectLambdaResult#policy #policy} => String
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_policy_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @example Response structure
#
# resp.policy #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyForObjectLambda AWS API Documentation
#
# @overload get_access_point_policy_for_object_lambda(params = {})
# @param [Hash] params ({})
def get_access_point_policy_for_object_lambda(params = {}, options = {})
req = build_request(:get_access_point_policy_for_object_lambda, params)
req.send_request(options)
end
# Indicates whether the specified access point currently has a policy
# that allows public access. For more information about public access
# through access points, see [Managing Data Access with Amazon S3 Access
# Points][1] in the *Amazon Simple Storage Service Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified access point.
#
# @option params [required, String] :name
# The name of the access point whose policy status you want to retrieve.
#
# @return [Types::GetAccessPointPolicyStatusResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointPolicyStatusResult#policy_status #policy_status} => Types::PolicyStatus
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_policy_status({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# })
#
# @example Response structure
#
# resp.policy_status.is_public #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatus AWS API Documentation
#
# @overload get_access_point_policy_status(params = {})
# @param [Hash] params ({})
def get_access_point_policy_status(params = {}, options = {})
req = build_request(:get_access_point_policy_status, params)
req.send_request(options)
end
# Returns the status of the resource policy associated with an Object
# Lambda Access Point.
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point.
#
# @return [Types::GetAccessPointPolicyStatusForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetAccessPointPolicyStatusForObjectLambdaResult#policy_status #policy_status} => Types::PolicyStatus
#
# @example Request syntax with placeholder values
#
# resp = client.get_access_point_policy_status_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# })
#
# @example Response structure
#
# resp.policy_status.is_public #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatusForObjectLambda AWS API Documentation
#
# @overload get_access_point_policy_status_for_object_lambda(params = {})
# @param [Hash] params ({})
def get_access_point_policy_status_for_object_lambda(params = {}, options = {})
req = build_request(:get_access_point_policy_status_for_object_lambda, params)
req.send_request(options)
end
# Gets an Amazon S3 on Outposts bucket. For more information, see [
# Using Amazon S3 on Outposts][1] in the *Amazon Simple Storage Service
# User Guide*.
#
# If you are using an identity other than the root user of the AWS
# account that owns the Outposts bucket, the calling identity must have
# the `s3-outposts:GetBucket` permissions on the specified Outposts
# bucket and belong to the Outposts bucket owner's account in order to
# use this action. Only users from Outposts bucket owner account with
# the right permissions can perform actions on an Outposts bucket.
#
# If you don't have `s3-outposts:GetBucket` permissions or you're not
# using an identity that belongs to the bucket owner's account, Amazon
# S3 returns a `403 Access Denied` error.
#
# The following actions are related to `GetBucket` for Amazon S3 on
# Outposts:
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][2] section.
#
# * [PutObject][3]
#
# * [CreateBucket][4]
#
# * [DeleteBucket][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucket.html#API_control_GetBucket_Examples
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucket.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Types::GetBucketResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetBucketResult#bucket #bucket} => String
# * {Types::GetBucketResult#public_access_block_enabled #public_access_block_enabled} => Boolean
# * {Types::GetBucketResult#creation_date #creation_date} => Time
#
# @example Request syntax with placeholder values
#
# resp = client.get_bucket({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @example Response structure
#
# resp.bucket #=> String
# resp.public_access_block_enabled #=> Boolean
# resp.creation_date #=> Time
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucket AWS API Documentation
#
# @overload get_bucket(params = {})
# @param [Hash] params ({})
def get_bucket(params = {}, options = {})
  build_request(:get_bucket, params).send_request(options)
end
# <note markdown="1"> This action gets an Amazon S3 on Outposts bucket's lifecycle
# configuration. To get an S3 bucket's lifecycle configuration, see
# [GetBucketLifecycleConfiguration][1] in the *Amazon Simple Storage
# Service API*.
#
# </note>
#
# Returns the lifecycle configuration information set on the Outposts
# bucket. For more information, see [Using Amazon S3 on Outposts][2] and
# for information about lifecycle configuration, see [ Object Lifecycle
# Management][3] in *Amazon Simple Storage Service User Guide*.
#
# To use this action, you must have permission to perform the
# `s3-outposts:GetLifecycleConfiguration` action. The Outposts bucket
# owner has this permission, by default. The bucket owner can grant this
# permission to others. For more information about permissions, see
# [Permissions Related to Bucket Subresource Operations][4] and
# [Managing Access Permissions to Your Amazon S3 Resources][5].
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][6] section.
#
# `GetBucketLifecycleConfiguration` has the following special error:
#
# * Error code: `NoSuchLifecycleConfiguration`
#
# * Description: The lifecycle configuration does not exist.
#
# * HTTP Status Code: 404 Not Found
#
# * SOAP Fault Code Prefix: Client
#
# The following actions are related to
# `GetBucketLifecycleConfiguration`\:
#
# * [PutBucketLifecycleConfiguration][7]
#
# * [DeleteBucketLifecycleConfiguration][8]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html#API_control_GetBucketLifecycleConfiguration_Examples
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html
# [8]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# The Amazon Resource Name (ARN) of the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Types::GetBucketLifecycleConfigurationResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetBucketLifecycleConfigurationResult#rules #rules} => Array<Types::LifecycleRule>
#
# @example Request syntax with placeholder values
#
# resp = client.get_bucket_lifecycle_configuration({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @example Response structure
#
# resp.rules #=> Array
# resp.rules[0].expiration.date #=> Time
# resp.rules[0].expiration.days #=> Integer
# resp.rules[0].expiration.expired_object_delete_marker #=> Boolean
# resp.rules[0].id #=> String
# resp.rules[0].filter.prefix #=> String
# resp.rules[0].filter.tag.key #=> String
# resp.rules[0].filter.tag.value #=> String
# resp.rules[0].filter.and.prefix #=> String
# resp.rules[0].filter.and.tags #=> Array
# resp.rules[0].filter.and.tags[0].key #=> String
# resp.rules[0].filter.and.tags[0].value #=> String
# resp.rules[0].status #=> String, one of "Enabled", "Disabled"
# resp.rules[0].transitions #=> Array
# resp.rules[0].transitions[0].date #=> Time
# resp.rules[0].transitions[0].days #=> Integer
# resp.rules[0].transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
# resp.rules[0].noncurrent_version_transitions #=> Array
# resp.rules[0].noncurrent_version_transitions[0].noncurrent_days #=> Integer
# resp.rules[0].noncurrent_version_transitions[0].storage_class #=> String, one of "GLACIER", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "DEEP_ARCHIVE"
# resp.rules[0].noncurrent_version_expiration.noncurrent_days #=> Integer
# resp.rules[0].abort_incomplete_multipart_upload.days_after_initiation #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketLifecycleConfiguration AWS API Documentation
#
# @overload get_bucket_lifecycle_configuration(params = {})
# @param [Hash] params ({})
def get_bucket_lifecycle_configuration(params = {}, options = {})
  build_request(:get_bucket_lifecycle_configuration, params).send_request(options)
end
# <note markdown="1"> This action gets a bucket policy for an Amazon S3 on Outposts bucket.
# To get a policy for an S3 bucket, see [GetBucketPolicy][1] in the
# *Amazon Simple Storage Service API*.
#
# </note>
#
# Returns the policy of a specified Outposts bucket. For more
# information, see [Using Amazon S3 on Outposts][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# If you are using an identity other than the root user of the AWS
# account that owns the bucket, the calling identity must have the
# `GetBucketPolicy` permissions on the specified bucket and belong to
# the bucket owner's account in order to use this action.
#
# Only users from the Outposts bucket owner account with the right
# permissions can perform actions on an Outposts bucket. If you don't
# have `s3-outposts:GetBucketPolicy` permissions or you're not using an
# identity that belongs to the bucket owner's account, Amazon S3
# returns a `403 Access Denied` error.
#
# As a security precaution, the root user of the AWS account that owns a
# bucket can always use this action, even if the policy explicitly
# denies the root user the ability to perform this action.
#
# For more information about bucket policies, see [Using Bucket Policies
# and User Policies][3].
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][4] section.
#
# The following actions are related to `GetBucketPolicy`\:
#
# * [GetObject][5]
#
# * [PutBucketPolicy][6]
#
# * [DeleteBucketPolicy][7]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html#API_control_GetBucketPolicy_Examples
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html
# [7]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Types::GetBucketPolicyResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetBucketPolicyResult#policy #policy} => String
#
# @example Request syntax with placeholder values
#
# resp = client.get_bucket_policy({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @example Response structure
#
# resp.policy #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketPolicy AWS API Documentation
#
# @overload get_bucket_policy(params = {})
# @param [Hash] params ({})
def get_bucket_policy(params = {}, options = {})
  build_request(:get_bucket_policy, params).send_request(options)
end
# <note markdown="1"> This action gets an Amazon S3 on Outposts bucket's tags. To get an S3
# bucket tags, see [GetBucketTagging][1] in the *Amazon Simple Storage
# Service API*.
#
# </note>
#
# Returns the tag set associated with the Outposts bucket. For more
# information, see [Using Amazon S3 on Outposts][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# To use this action, you must have permission to perform the
# `GetBucketTagging` action. By default, the bucket owner has this
# permission and can grant this permission to others.
#
# `GetBucketTagging` has the following special error:
#
# * Error code: `NoSuchTagSetError`
#
# * Description: There is no tag set associated with the bucket.
#
# ^
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][3] section.
#
# The following actions are related to `GetBucketTagging`\:
#
# * [PutBucketTagging][4]
#
# * [DeleteBucketTagging][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html#API_control_GetBucketTagging_Examples
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @return [Types::GetBucketTaggingResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetBucketTaggingResult#tag_set #tag_set} => Array<Types::S3Tag>
#
# @example Request syntax with placeholder values
#
# resp = client.get_bucket_tagging({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# })
#
# @example Response structure
#
# resp.tag_set #=> Array
# resp.tag_set[0].key #=> String
# resp.tag_set[0].value #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketTagging AWS API Documentation
#
# @overload get_bucket_tagging(params = {})
# @param [Hash] params ({})
def get_bucket_tagging(params = {}, options = {})
  build_request(:get_bucket_tagging, params).send_request(options)
end
# Returns the tags on an S3 Batch Operations job. To use this operation,
# you must have permission to perform the `s3:GetJobTagging` action. For
# more information, see [Controlling access and labeling jobs using
# tags][1] in the *Amazon Simple Storage Service User Guide*.
#
#
#
# Related actions include:
#
# * [CreateJob][2]
#
# * [PutJobTagging][3]
#
# * [DeleteJobTagging][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID for the S3 Batch Operations job whose tags you want to
# retrieve.
#
# @return [Types::GetJobTaggingResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetJobTaggingResult#tags #tags} => Array<Types::S3Tag>
#
# @example Request syntax with placeholder values
#
# resp = client.get_job_tagging({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# })
#
# @example Response structure
#
# resp.tags #=> Array
# resp.tags[0].key #=> String
# resp.tags[0].value #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetJobTagging AWS API Documentation
#
# @overload get_job_tagging(params = {})
# @param [Hash] params ({})
def get_job_tagging(params = {}, options = {})
  build_request(:get_job_tagging, params).send_request(options)
end
# Retrieves the `PublicAccessBlock` configuration for an AWS account.
# For more information, see [ Using Amazon S3 block public access][1].
#
# Related actions include:
#
# * [DeletePublicAccessBlock][2]
#
# * [PutPublicAccessBlock][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html
#
# @option params [required, String] :account_id
# The account ID for the AWS account whose `PublicAccessBlock`
# configuration you want to retrieve.
#
# @return [Types::GetPublicAccessBlockOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetPublicAccessBlockOutput#public_access_block_configuration #public_access_block_configuration} => Types::PublicAccessBlockConfiguration
#
# @example Request syntax with placeholder values
#
# resp = client.get_public_access_block({
# account_id: "AccountId", # required
# })
#
# @example Response structure
#
# resp.public_access_block_configuration.block_public_acls #=> Boolean
# resp.public_access_block_configuration.ignore_public_acls #=> Boolean
# resp.public_access_block_configuration.block_public_policy #=> Boolean
# resp.public_access_block_configuration.restrict_public_buckets #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetPublicAccessBlock AWS API Documentation
#
# @overload get_public_access_block(params = {})
# @param [Hash] params ({})
def get_public_access_block(params = {}, options = {})
  build_request(:get_public_access_block, params).send_request(options)
end
# Gets the Amazon S3 Storage Lens configuration. For more information,
# see [Assessing your storage activity and usage with Amazon S3 Storage
# Lens ][1] in the *Amazon Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:GetStorageLensConfiguration` action. For more information, see
# [Setting permissions to use Amazon S3 Storage Lens][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the Amazon S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @return [Types::GetStorageLensConfigurationResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetStorageLensConfigurationResult#storage_lens_configuration #storage_lens_configuration} => Types::StorageLensConfiguration
#
# @example Request syntax with placeholder values
#
# resp = client.get_storage_lens_configuration({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# })
#
# @example Response structure
#
# resp.storage_lens_configuration.id #=> String
# resp.storage_lens_configuration.account_level.activity_metrics.is_enabled #=> Boolean
# resp.storage_lens_configuration.account_level.bucket_level.activity_metrics.is_enabled #=> Boolean
# resp.storage_lens_configuration.account_level.bucket_level.prefix_level.storage_metrics.is_enabled #=> Boolean
# resp.storage_lens_configuration.account_level.bucket_level.prefix_level.storage_metrics.selection_criteria.delimiter #=> String
# resp.storage_lens_configuration.account_level.bucket_level.prefix_level.storage_metrics.selection_criteria.max_depth #=> Integer
# resp.storage_lens_configuration.account_level.bucket_level.prefix_level.storage_metrics.selection_criteria.min_storage_bytes_percentage #=> Float
# resp.storage_lens_configuration.include.buckets #=> Array
# resp.storage_lens_configuration.include.buckets[0] #=> String
# resp.storage_lens_configuration.include.regions #=> Array
# resp.storage_lens_configuration.include.regions[0] #=> String
# resp.storage_lens_configuration.exclude.buckets #=> Array
# resp.storage_lens_configuration.exclude.buckets[0] #=> String
# resp.storage_lens_configuration.exclude.regions #=> Array
# resp.storage_lens_configuration.exclude.regions[0] #=> String
# resp.storage_lens_configuration.data_export.s3_bucket_destination.format #=> String, one of "CSV", "Parquet"
# resp.storage_lens_configuration.data_export.s3_bucket_destination.output_schema_version #=> String, one of "V_1"
# resp.storage_lens_configuration.data_export.s3_bucket_destination.account_id #=> String
# resp.storage_lens_configuration.data_export.s3_bucket_destination.arn #=> String
# resp.storage_lens_configuration.data_export.s3_bucket_destination.prefix #=> String
# resp.storage_lens_configuration.data_export.s3_bucket_destination.encryption.ssekms.key_id #=> String
# resp.storage_lens_configuration.is_enabled #=> Boolean
# resp.storage_lens_configuration.aws_org.arn #=> String
# resp.storage_lens_configuration.storage_lens_arn #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetStorageLensConfiguration AWS API Documentation
#
# @overload get_storage_lens_configuration(params = {})
# @param [Hash] params ({})
def get_storage_lens_configuration(params = {}, options = {})
  build_request(:get_storage_lens_configuration, params).send_request(options)
end
# Gets the tags of Amazon S3 Storage Lens configuration. For more
# information about S3 Storage Lens, see [Assessing your storage
# activity and usage with Amazon S3 Storage Lens ][1] in the *Amazon
# Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:GetStorageLensConfigurationTagging` action. For more information,
# see [Setting permissions to use Amazon S3 Storage Lens][2] in the
# *Amazon Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the Amazon S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @return [Types::GetStorageLensConfigurationTaggingResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetStorageLensConfigurationTaggingResult#tags #tags} => Array<Types::StorageLensTag>
#
# @example Request syntax with placeholder values
#
# resp = client.get_storage_lens_configuration_tagging({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# })
#
# @example Response structure
#
# resp.tags #=> Array
# resp.tags[0].key #=> String
# resp.tags[0].value #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetStorageLensConfigurationTagging AWS API Documentation
#
# @overload get_storage_lens_configuration_tagging(params = {})
# @param [Hash] params ({})
def get_storage_lens_configuration_tagging(params = {}, options = {})
  build_request(:get_storage_lens_configuration_tagging, params).send_request(options)
end
# Returns a list of the access points currently associated with the
# specified bucket. You can retrieve up to 1000 access points per call.
# If the specified bucket has more than 1,000 access points (or the
# number specified in `maxResults`, whichever is less), the response
# will include a continuation token that you can use to list the
# additional access points.
#
#
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][1] section.
#
# The following actions are related to `ListAccessPoints`\:
#
# * [CreateAccessPoint][2]
#
# * [DeleteAccessPoint][3]
#
# * [GetAccessPoint][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html#API_control_GetAccessPoint_Examples
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html
#
# @option params [required, String] :account_id
#   The AWS account ID for the owner of the bucket whose access points
#   you want to list.
#
# @option params [String] :bucket
# The name of the bucket whose associated access points you want to
# list.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @option params [String] :next_token
# A continuation token. If a previous call to `ListAccessPoints`
# returned a continuation token in the `NextToken` field, then providing
# that value here causes Amazon S3 to retrieve the next page of results.
#
# @option params [Integer] :max_results
# The maximum number of access points that you want to include in the
# list. If the specified bucket has more than this number of access
# points, then the response will include a continuation token in the
# `NextToken` field that you can use to retrieve the next page of access
# points.
#
# @return [Types::ListAccessPointsResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListAccessPointsResult#access_point_list #access_point_list} => Array<Types::AccessPoint>
# * {Types::ListAccessPointsResult#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_access_points({
# account_id: "AccountId", # required
# bucket: "BucketName",
# next_token: "NonEmptyMaxLength1024String",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.access_point_list #=> Array
# resp.access_point_list[0].name #=> String
# resp.access_point_list[0].network_origin #=> String, one of "Internet", "VPC"
# resp.access_point_list[0].vpc_configuration.vpc_id #=> String
# resp.access_point_list[0].bucket #=> String
# resp.access_point_list[0].access_point_arn #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPoints AWS API Documentation
#
# @overload list_access_points(params = {})
# @param [Hash] params ({})
def list_access_points(params = {}, options = {})
  build_request(:list_access_points, params).send_request(options)
end
# Returns a list of the access points associated with the Object Lambda
# Access Point. You can retrieve up to 1000 access points per call. If
# there are more than 1,000 access points (or the number specified in
# `maxResults`, whichever is less), the response will include a
# continuation token that you can use to list the additional access
# points.
#
# The following actions are related to
# `ListAccessPointsForObjectLambda`\:
#
# * [CreateAccessPointForObjectLambda][1]
#
# * [DeleteAccessPointForObjectLambda][2]
#
# * [GetAccessPointForObjectLambda][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPointForObjectLambda.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointForObjectLambda.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [String] :next_token
# If the list has more access points than can be returned in one call to
# this API, this field contains a continuation token that you can
# provide in subsequent calls to this API to retrieve additional access
# points.
#
# @option params [Integer] :max_results
# The maximum number of access points that you want to include in the
# list. If there are more than this number of access points, then the
# response will include a continuation token in the `NextToken` field
# that you can use to retrieve the next page of access points.
#
# @return [Types::ListAccessPointsForObjectLambdaResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListAccessPointsForObjectLambdaResult#object_lambda_access_point_list #object_lambda_access_point_list} => Array<Types::ObjectLambdaAccessPoint>
# * {Types::ListAccessPointsForObjectLambdaResult#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_access_points_for_object_lambda({
# account_id: "AccountId", # required
# next_token: "NonEmptyMaxLength1024String",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.object_lambda_access_point_list #=> Array
# resp.object_lambda_access_point_list[0].name #=> String
# resp.object_lambda_access_point_list[0].object_lambda_access_point_arn #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPointsForObjectLambda AWS API Documentation
#
# @overload list_access_points_for_object_lambda(params = {})
# @param [Hash] params ({})
def list_access_points_for_object_lambda(params = {}, options = {})
  build_request(:list_access_points_for_object_lambda, params).send_request(options)
end
# Lists current S3 Batch Operations jobs and jobs that have ended within
# the last 30 days for the AWS account making the request. For more
# information, see [S3 Batch Operations][1] in the *Amazon Simple
# Storage Service User Guide*.
#
# Related actions include:
#
#
#
# * [CreateJob][2]
#
# * [DescribeJob][3]
#
# * [UpdateJobPriority][4]
#
# * [UpdateJobStatus][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [Array<String>] :job_statuses
# The `List Jobs` request returns jobs that match the statuses listed in
# this element.
#
# @option params [String] :next_token
# A pagination token to request the next page of results. Use the token
# that Amazon S3 returned in the `NextToken` element of the
# `ListJobsResult` from the previous `List Jobs` request.
#
# @option params [Integer] :max_results
# The maximum number of jobs that Amazon S3 will include in the `List
# Jobs` response. If there are more jobs than this number, the response
# will include a pagination token in the `NextToken` field to enable you
# to retrieve the next page of results.
#
# @return [Types::ListJobsResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListJobsResult#next_token #next_token} => String
# * {Types::ListJobsResult#jobs #jobs} => Array<Types::JobListDescriptor>
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_jobs({
# account_id: "AccountId", # required
# job_statuses: ["Active"], # accepts Active, Cancelled, Cancelling, Complete, Completing, Failed, Failing, New, Paused, Pausing, Preparing, Ready, Suspended
# next_token: "StringForNextToken",
# max_results: 1,
# })
#
# @example Response structure
#
# resp.next_token #=> String
# resp.jobs #=> Array
# resp.jobs[0].job_id #=> String
# resp.jobs[0].description #=> String
# resp.jobs[0].operation #=> String, one of "LambdaInvoke", "S3PutObjectCopy", "S3PutObjectAcl", "S3PutObjectTagging", "S3DeleteObjectTagging", "S3InitiateRestoreObject", "S3PutObjectLegalHold", "S3PutObjectRetention"
# resp.jobs[0].priority #=> Integer
# resp.jobs[0].status #=> String, one of "Active", "Cancelled", "Cancelling", "Complete", "Completing", "Failed", "Failing", "New", "Paused", "Pausing", "Preparing", "Ready", "Suspended"
# resp.jobs[0].creation_time #=> Time
# resp.jobs[0].termination_date #=> Time
# resp.jobs[0].progress_summary.total_number_of_tasks #=> Integer
# resp.jobs[0].progress_summary.number_of_tasks_succeeded #=> Integer
# resp.jobs[0].progress_summary.number_of_tasks_failed #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListJobs AWS API Documentation
#
# @overload list_jobs(params = {})
# @param [Hash] params ({})
def list_jobs(params = {}, options = {})
  # Assemble the :list_jobs request from params and dispatch it with
  # any per-request options.
  build_request(:list_jobs, params).send_request(options)
end
# Returns a list of all Outposts buckets in an Outpost that are owned by
# the authenticated sender of the request. For more information, see
# [Using Amazon S3 on Outposts][1] in the *Amazon Simple Storage Service
# User Guide*.
#
# For an example of the request syntax for Amazon S3 on Outposts that
# uses the S3 on Outposts endpoint hostname prefix and
# `x-amz-outpost-id` in your request, see the [Examples][2] section.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListRegionalBuckets.html#API_control_ListRegionalBuckets_Examples
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [String] :next_token
#
# @option params [Integer] :max_results
#
# @option params [String] :outpost_id
# The ID of the AWS Outposts.
#
# <note markdown="1"> This is required by Amazon S3 on Outposts buckets.
#
# </note>
#
# @return [Types::ListRegionalBucketsResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListRegionalBucketsResult#regional_bucket_list #regional_bucket_list} => Array<Types::RegionalBucket>
# * {Types::ListRegionalBucketsResult#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_regional_buckets({
# account_id: "AccountId", # required
# next_token: "NonEmptyMaxLength1024String",
# max_results: 1,
# outpost_id: "NonEmptyMaxLength64String",
# })
#
# @example Response structure
#
# resp.regional_bucket_list #=> Array
# resp.regional_bucket_list[0].bucket #=> String
# resp.regional_bucket_list[0].bucket_arn #=> String
# resp.regional_bucket_list[0].public_access_block_enabled #=> Boolean
# resp.regional_bucket_list[0].creation_date #=> Time
# resp.regional_bucket_list[0].outpost_id #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListRegionalBuckets AWS API Documentation
#
# @overload list_regional_buckets(params = {})
# @param [Hash] params ({})
def list_regional_buckets(params = {}, options = {})
  # Assemble the :list_regional_buckets request from params and dispatch
  # it with any per-request options.
  build_request(:list_regional_buckets, params).send_request(options)
end
# Gets a list of Amazon S3 Storage Lens configurations. For more
# information about S3 Storage Lens, see [Assessing your storage
# activity and usage with Amazon S3 Storage Lens ][1] in the *Amazon
# Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:ListStorageLensConfigurations` action. For more information, see
# [Setting permissions to use Amazon S3 Storage Lens][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @option params [String] :next_token
# A pagination token to request the next page of results.
#
# @return [Types::ListStorageLensConfigurationsResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListStorageLensConfigurationsResult#next_token #next_token} => String
# * {Types::ListStorageLensConfigurationsResult#storage_lens_configuration_list #storage_lens_configuration_list} => Array<Types::ListStorageLensConfigurationEntry>
#
# @example Request syntax with placeholder values
#
# resp = client.list_storage_lens_configurations({
# account_id: "AccountId", # required
# next_token: "ContinuationToken",
# })
#
# @example Response structure
#
# resp.next_token #=> String
# resp.storage_lens_configuration_list #=> Array
# resp.storage_lens_configuration_list[0].id #=> String
# resp.storage_lens_configuration_list[0].storage_lens_arn #=> String
# resp.storage_lens_configuration_list[0].home_region #=> String
# resp.storage_lens_configuration_list[0].is_enabled #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListStorageLensConfigurations AWS API Documentation
#
# @overload list_storage_lens_configurations(params = {})
# @param [Hash] params ({})
def list_storage_lens_configurations(params = {}, options = {})
  # Assemble the :list_storage_lens_configurations request from params
  # and dispatch it with any per-request options.
  build_request(:list_storage_lens_configurations, params).send_request(options)
end
# Replaces configuration for an Object Lambda Access Point.
#
# The following actions are related to
# `PutAccessPointConfigurationForObjectLambda`\:
#
# * [GetAccessPointConfigurationForObjectLambda][1]
#
# ^
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointConfigurationForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point.
#
# @option params [required, Types::ObjectLambdaConfiguration] :configuration
# Object Lambda Access Point configuration document.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_access_point_configuration_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# configuration: { # required
# supporting_access_point: "ObjectLambdaSupportingAccessPointArn", # required
# cloud_watch_metrics_enabled: false,
# allowed_features: ["GetObject-Range"], # accepts GetObject-Range, GetObject-PartNumber
# transformation_configurations: [ # required
# {
# actions: ["GetObject"], # required, accepts GetObject
# content_transformation: { # required
# aws_lambda: {
# function_arn: "FunctionArnString", # required
# function_payload: "AwsLambdaTransformationPayload",
# },
# },
# },
# ],
# },
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointConfigurationForObjectLambda AWS API Documentation
#
# @overload put_access_point_configuration_for_object_lambda(params = {})
# @param [Hash] params ({})
def put_access_point_configuration_for_object_lambda(params = {}, options = {})
  # Assemble the :put_access_point_configuration_for_object_lambda
  # request from params and dispatch it with any per-request options.
  build_request(:put_access_point_configuration_for_object_lambda, params).send_request(options)
end
# Associates an access policy with the specified access point. Each
# access point can have only one policy, so a request made to this API
# replaces any existing policy associated with the specified access
# point.
#
#
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][1] section.
#
# The following actions are related to `PutAccessPointPolicy`\:
#
# * [GetAccessPointPolicy][2]
#
# * [DeleteAccessPointPolicy][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html#API_control_PutAccessPointPolicy_Examples
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html
#
# @option params [required, String] :account_id
# The AWS account ID for owner of the bucket associated with the
# specified access point.
#
# @option params [required, String] :name
# The name of the access point that you want to associate with the
# specified policy.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the access point accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/accesspoint/<my-accesspoint-name>`.
# For example, to access the access point `reports-ap` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap`.
# The value must be URL encoded.
#
# @option params [required, String] :policy
# The policy that you want to apply to the specified access point. For
# more information about access point policies, see [Managing data
# access with Amazon S3 Access Points][1] in the *Amazon Simple Storage
# Service User Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_access_point_policy({
# account_id: "AccountId", # required
# name: "AccessPointName", # required
# policy: "Policy", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicy AWS API Documentation
#
# @overload put_access_point_policy(params = {})
# @param [Hash] params ({})
def put_access_point_policy(params = {}, options = {})
  # Assemble the :put_access_point_policy request from params and
  # dispatch it with any per-request options.
  build_request(:put_access_point_policy, params).send_request(options)
end
# Creates or replaces resource policy for an Object Lambda Access Point.
# For an example policy, see [Creating Object Lambda Access Points][1]
# in the *Amazon Simple Storage Service User Guide*.
#
# The following actions are related to
# `PutAccessPointPolicyForObjectLambda`\:
#
# * [DeleteAccessPointPolicyForObjectLambda][2]
#
# * [GetAccessPointPolicyForObjectLambda][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-create.html#olap-create-cli
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicyForObjectLambda.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicyForObjectLambda.html
#
# @option params [required, String] :account_id
# The account ID for the account that owns the specified Object Lambda
# Access Point.
#
# @option params [required, String] :name
# The name of the Object Lambda Access Point.
#
# @option params [required, String] :policy
# Object Lambda Access Point resource policy document.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_access_point_policy_for_object_lambda({
# account_id: "AccountId", # required
# name: "ObjectLambdaAccessPointName", # required
# policy: "ObjectLambdaPolicy", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicyForObjectLambda AWS API Documentation
#
# @overload put_access_point_policy_for_object_lambda(params = {})
# @param [Hash] params ({})
def put_access_point_policy_for_object_lambda(params = {}, options = {})
  # Assemble the :put_access_point_policy_for_object_lambda request from
  # params and dispatch it with any per-request options.
  build_request(:put_access_point_policy_for_object_lambda, params).send_request(options)
end
# <note markdown="1"> This action puts a lifecycle configuration to an Amazon S3 on Outposts
# bucket. To put a lifecycle configuration to an S3 bucket, see
# [PutBucketLifecycleConfiguration][1] in the *Amazon Simple Storage
# Service API*.
#
# </note>
#
# Creates a new lifecycle configuration for the S3 on Outposts bucket or
# replaces an existing lifecycle configuration. Outposts buckets only
# support lifecycle configurations that delete/expire objects after a
# certain period of time and abort incomplete multipart uploads.
#
#
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][2] section.
#
# The following actions are related to
# `PutBucketLifecycleConfiguration`\:
#
# * [GetBucketLifecycleConfiguration][3]
#
# * [DeleteBucketLifecycleConfiguration][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html#API_control_PutBucketLifecycleConfiguration_Examples
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# The name of the bucket for which to set the configuration.
#
# @option params [Types::LifecycleConfiguration] :lifecycle_configuration
# Container for lifecycle rules. You can add as many as 1,000 rules.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_bucket_lifecycle_configuration({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# lifecycle_configuration: {
# rules: [
# {
# expiration: {
# date: Time.now,
# days: 1,
# expired_object_delete_marker: false,
# },
# id: "ID",
# filter: {
# prefix: "Prefix",
# tag: {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# and: {
# prefix: "Prefix",
# tags: [
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# },
# },
# status: "Enabled", # required, accepts Enabled, Disabled
# transitions: [
# {
# date: Time.now,
# days: 1,
# storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
# },
# ],
# noncurrent_version_transitions: [
# {
# noncurrent_days: 1,
# storage_class: "GLACIER", # accepts GLACIER, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, DEEP_ARCHIVE
# },
# ],
# noncurrent_version_expiration: {
# noncurrent_days: 1,
# },
# abort_incomplete_multipart_upload: {
# days_after_initiation: 1,
# },
# },
# ],
# },
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketLifecycleConfiguration AWS API Documentation
#
# @overload put_bucket_lifecycle_configuration(params = {})
# @param [Hash] params ({})
def put_bucket_lifecycle_configuration(params = {}, options = {})
  # Assemble the :put_bucket_lifecycle_configuration request from params
  # and dispatch it with any per-request options.
  build_request(:put_bucket_lifecycle_configuration, params).send_request(options)
end
# <note markdown="1"> This action puts a bucket policy to an Amazon S3 on Outposts bucket.
# To put a policy on an S3 bucket, see [PutBucketPolicy][1] in the
# *Amazon Simple Storage Service API*.
#
# </note>
#
# Applies an Amazon S3 bucket policy to an Outposts bucket. For more
# information, see [Using Amazon S3 on Outposts][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# If you are using an identity other than the root user of the AWS
# account that owns the Outposts bucket, the calling identity must have
# the `PutBucketPolicy` permissions on the specified Outposts bucket and
# belong to the bucket owner's account in order to use this action.
#
# If you don't have `PutBucketPolicy` permissions, Amazon S3 returns a
# `403 Access Denied` error. If you have the correct permissions, but
# you're not using an identity that belongs to the bucket owner's
# account, Amazon S3 returns a `405 Method Not Allowed` error.
#
# As a security precaution, the root user of the AWS account that owns a
# bucket can always use this action, even if the policy explicitly
# denies the root user the ability to perform this action.
#
# For more information about bucket policies, see [Using Bucket Policies
# and User Policies][3].
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][4] section.
#
# The following actions are related to `PutBucketPolicy`\:
#
# * [GetBucketPolicy][5]
#
# * [DeleteBucketPolicy][6]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html#API_control_PutBucketPolicy_Examples
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# Specifies the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @option params [Boolean] :confirm_remove_self_bucket_access
# Set this parameter to true to confirm that you want to remove your
# permissions to change this bucket policy in the future.
#
# <note markdown="1"> This is not supported by Amazon S3 on Outposts buckets.
#
# </note>
#
# @option params [required, String] :policy
# The bucket policy as a JSON document.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_bucket_policy({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# confirm_remove_self_bucket_access: false,
# policy: "Policy", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketPolicy AWS API Documentation
#
# @overload put_bucket_policy(params = {})
# @param [Hash] params ({})
def put_bucket_policy(params = {}, options = {})
  # Assemble the :put_bucket_policy request from params and dispatch it
  # with any per-request options.
  build_request(:put_bucket_policy, params).send_request(options)
end
# <note markdown="1"> This action puts tags on an Amazon S3 on Outposts bucket. To put tags
# on an S3 bucket, see [PutBucketTagging][1] in the *Amazon Simple
# Storage Service API*.
#
# </note>
#
# Sets the tags for an S3 on Outposts bucket. For more information, see
# [Using Amazon S3 on Outposts][2] in the *Amazon Simple Storage Service
# User Guide*.
#
# Use tags to organize your AWS bill to reflect your own cost structure.
# To do this, sign up to get your AWS account bill with tag key values
# included. Then, to see the cost of combined resources, organize your
# billing information according to resources with the same tag key
# values. For example, you can tag several resources with a specific
# application name, and then organize your billing information to see
# the total cost of that application across several services. For more
# information, see [Cost allocation and tagging][3].
#
# <note markdown="1"> Within a bucket, if you add a tag that has the same key as an existing
# tag, the new value overwrites the old value. For more information, see
# [ Using cost allocation in Amazon S3 bucket tags][4].
#
# </note>
#
# To use this action, you must have permissions to perform the
# `s3-outposts:PutBucketTagging` action. The Outposts bucket owner has
# this permission by default and can grant this permission to others.
# For more information about permissions, see [ Permissions Related to
# Bucket Subresource Operations][5] and [Managing access permissions to
# your Amazon S3 resources][6].
#
# `PutBucketTagging` has the following special errors:
#
# * Error code: `InvalidTagError`
#
# * Description: The tag provided was not a valid tag. This error can
# occur if the tag did not pass input validation. For information
# about tag restrictions, see [ User-Defined Tag Restrictions][7]
# and [ AWS-Generated Cost Allocation Tag Restrictions][8].
#
# ^
#
# * Error code: `MalformedXMLError`
#
# * Description: The XML provided does not match the schema.
#
# ^
#
  # * Error code: `OperationAbortedError`
#
# * Description: A conflicting conditional action is currently in
# progress against this resource. Try again.
#
# ^
#
# * Error code: `InternalError`
#
# * Description: The service was unable to apply the provided tag to
# the bucket.
#
# ^
#
# All Amazon S3 on Outposts REST API requests for this action require an
# additional parameter of `x-amz-outpost-id` to be passed with the
# request and an S3 on Outposts endpoint hostname prefix instead of
# `s3-control`. For an example of the request syntax for Amazon S3 on
# Outposts that uses the S3 on Outposts endpoint hostname prefix and the
# `x-amz-outpost-id` derived using the access point ARN, see the
# [Examples][9] section.
#
# The following actions are related to `PutBucketTagging`\:
#
# * [GetBucketTagging][10]
#
# * [DeleteBucketTagging][11]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
# [3]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
# [7]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
# [8]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html
# [9]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html#API_control_PutBucketTagging_Examples
# [10]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html
# [11]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html
#
# @option params [required, String] :account_id
# The AWS account ID of the Outposts bucket.
#
# @option params [required, String] :bucket
# The Amazon Resource Name (ARN) of the bucket.
#
# For using this parameter with Amazon S3 on Outposts with the REST API,
# you must specify the name and the x-amz-outpost-id as well.
#
# For using this parameter with S3 on Outposts with the AWS SDK and CLI,
# you must specify the ARN of the bucket accessed in the format
# `arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/bucket/<my-bucket-name>`.
# For example, to access the bucket `reports` through outpost
# `my-outpost` owned by account `123456789012` in Region `us-west-2`,
# use the URL encoding of
# `arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports`.
# The value must be URL encoded.
#
# @option params [required, Types::Tagging] :tagging
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_bucket_tagging({
# account_id: "AccountId", # required
# bucket: "BucketName", # required
# tagging: { # required
# tag_set: [ # required
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# },
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketTagging AWS API Documentation
#
# @overload put_bucket_tagging(params = {})
# @param [Hash] params ({})
def put_bucket_tagging(params = {}, options = {})
  # Assemble the :put_bucket_tagging request from params and dispatch it
  # with any per-request options.
  build_request(:put_bucket_tagging, params).send_request(options)
end
# Sets the supplied tag-set on an S3 Batch Operations job.
#
# A tag is a key-value pair. You can associate S3 Batch Operations tags
# with any job by sending a PUT request against the tagging subresource
# that is associated with the job. To modify the existing tag set, you
# can either replace the existing tag set entirely, or make changes
# within the existing tag set by retrieving the existing tag set using
# [GetJobTagging][1], modify that tag set, and use this action to
# replace the tag set with the one you modified. For more information,
# see [Controlling access and labeling jobs using tags][2] in the
# *Amazon Simple Storage Service User Guide*.
#
#
#
# <note markdown="1"> * If you send this request with an empty tag set, Amazon S3 deletes
# the existing tag set on the Batch Operations job. If you use this
# method, you are charged for a Tier 1 Request (PUT). For more
# information, see [Amazon S3 pricing][3].
#
# * For deleting existing tags for your Batch Operations job, a
# [DeleteJobTagging][4] request is preferred because it achieves the
# same result without incurring charges.
#
# * A few things to consider about using tags:
#
# * Amazon S3 limits the maximum number of tags to 50 tags per job.
#
# * You can associate up to 50 tags with a job as long as they have
# unique tag keys.
#
# * A tag key can be up to 128 Unicode characters in length, and tag
# values can be up to 256 Unicode characters in length.
#
# * The key and values are case sensitive.
#
# * For tagging-related restrictions related to characters and
# encodings, see [User-Defined Tag Restrictions][5] in the *AWS
# Billing and Cost Management User Guide*.
#
# </note>
#
#
#
# To use this action, you must have permission to perform the
# `s3:PutJobTagging` action.
#
# Related actions include:
#
  # * [CreateJob][6]
#
# * [GetJobTagging][1]
#
# * [DeleteJobTagging][4]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags
# [3]: http://aws.amazon.com/s3/pricing/
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html
# [5]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
# [6]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID for the S3 Batch Operations job whose tags you want to replace.
#
# @option params [required, Array<Types::S3Tag>] :tags
# The set of tags to associate with the S3 Batch Operations job.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_job_tagging({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# tags: [ # required
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutJobTagging AWS API Documentation
#
# @overload put_job_tagging(params = {})
# @param [Hash] params ({})
def put_job_tagging(params = {}, options = {})
  # Assemble the :put_job_tagging request from params and dispatch it
  # with any per-request options.
  build_request(:put_job_tagging, params).send_request(options)
end
# Creates or modifies the `PublicAccessBlock` configuration for an AWS
# account. For more information, see [ Using Amazon S3 block public
# access][1].
#
# Related actions include:
#
# * [GetPublicAccessBlock][2]
#
# * [DeletePublicAccessBlock][3]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html
#
# @option params [required, Types::PublicAccessBlockConfiguration] :public_access_block_configuration
# The `PublicAccessBlock` configuration that you want to apply to the
# specified AWS account.
#
# @option params [required, String] :account_id
# The account ID for the AWS account whose `PublicAccessBlock`
# configuration you want to set.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_public_access_block({
# public_access_block_configuration: { # required
# block_public_acls: false,
# ignore_public_acls: false,
# block_public_policy: false,
# restrict_public_buckets: false,
# },
# account_id: "AccountId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutPublicAccessBlock AWS API Documentation
#
# @overload put_public_access_block(params = {})
# @param [Hash] params ({})
def put_public_access_block(params = {}, options = {})
  # Assemble the :put_public_access_block request from params and
  # dispatch it with any per-request options.
  build_request(:put_public_access_block, params).send_request(options)
end
# Puts an Amazon S3 Storage Lens configuration. For more information
# about S3 Storage Lens, see [Working with Amazon S3 Storage Lens][1] in
# the *Amazon Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:PutStorageLensConfiguration` action. For more information, see
# [Setting permissions to use Amazon S3 Storage Lens][2] in the *Amazon
# Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @option params [required, Types::StorageLensConfiguration] :storage_lens_configuration
# The S3 Storage Lens configuration.
#
# @option params [Array<Types::StorageLensTag>] :tags
# The tag set of the S3 Storage Lens configuration.
#
# <note markdown="1"> You can set up to a maximum of 50 tags.
#
# </note>
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_storage_lens_configuration({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# storage_lens_configuration: { # required
# id: "ConfigId", # required
# account_level: { # required
# activity_metrics: {
# is_enabled: false,
# },
# bucket_level: { # required
# activity_metrics: {
# is_enabled: false,
# },
# prefix_level: {
# storage_metrics: { # required
# is_enabled: false,
# selection_criteria: {
# delimiter: "StorageLensPrefixLevelDelimiter",
# max_depth: 1,
# min_storage_bytes_percentage: 1.0,
# },
# },
# },
# },
# },
# include: {
# buckets: ["S3BucketArnString"],
# regions: ["S3AWSRegion"],
# },
# exclude: {
# buckets: ["S3BucketArnString"],
# regions: ["S3AWSRegion"],
# },
# data_export: {
# s3_bucket_destination: { # required
# format: "CSV", # required, accepts CSV, Parquet
# output_schema_version: "V_1", # required, accepts V_1
# account_id: "AccountId", # required
# arn: "S3BucketArnString", # required
# prefix: "Prefix",
# encryption: {
# sses3: {
# },
# ssekms: {
# key_id: "SSEKMSKeyId", # required
# },
# },
# },
# },
# is_enabled: false, # required
# aws_org: {
# arn: "AwsOrgArn", # required
# },
# storage_lens_arn: "StorageLensArn",
# },
# tags: [
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutStorageLensConfiguration AWS API Documentation
#
# @overload put_storage_lens_configuration(params = {})
# @param [Hash] params ({})
def put_storage_lens_configuration(params = {}, options = {})
  # Construct the operation request, then dispatch it with any
  # caller-supplied transport options.
  build_request(:put_storage_lens_configuration, params).send_request(options)
end
# Put or replace tags on an existing Amazon S3 Storage Lens
# configuration. For more information about S3 Storage Lens, see
# [Assessing your storage activity and usage with Amazon S3 Storage Lens
# ][1] in the *Amazon Simple Storage Service User Guide*.
#
# <note markdown="1"> To use this action, you must have permission to perform the
# `s3:PutStorageLensConfigurationTagging` action. For more information,
# see [Setting permissions to use Amazon S3 Storage Lens][2] in the
# *Amazon Simple Storage Service User Guide*.
#
# </note>
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_iam_permissions.html
#
# @option params [required, String] :config_id
# The ID of the S3 Storage Lens configuration.
#
# @option params [required, String] :account_id
# The account ID of the requester.
#
# @option params [required, Array<Types::StorageLensTag>] :tags
# The tag set of the S3 Storage Lens configuration.
#
# <note markdown="1"> You can set up to a maximum of 50 tags.
#
# </note>
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.put_storage_lens_configuration_tagging({
# config_id: "ConfigId", # required
# account_id: "AccountId", # required
# tags: [ # required
# {
# key: "TagKeyString", # required
# value: "TagValueString", # required
# },
# ],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutStorageLensConfigurationTagging AWS API Documentation
#
# @overload put_storage_lens_configuration_tagging(params = {})
# @param [Hash] params ({})
def put_storage_lens_configuration_tagging(params = {}, options = {})
  # Construct the operation request, then dispatch it with any
  # caller-supplied transport options.
  build_request(:put_storage_lens_configuration_tagging, params).send_request(options)
end
# Updates an existing S3 Batch Operations job's priority. For more
# information, see [S3 Batch Operations][1] in the *Amazon Simple
# Storage Service User Guide*.
#
#
#
# Related actions include:
#
# * [CreateJob][2]
#
# * [ListJobs][3]
#
# * [DescribeJob][4]
#
# * [UpdateJobStatus][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID for the job whose priority you want to update.
#
# @option params [required, Integer] :priority
# The priority you want to assign to this job.
#
# @return [Types::UpdateJobPriorityResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateJobPriorityResult#job_id #job_id} => String
# * {Types::UpdateJobPriorityResult#priority #priority} => Integer
#
# @example Request syntax with placeholder values
#
# resp = client.update_job_priority({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# priority: 1, # required
# })
#
# @example Response structure
#
# resp.job_id #=> String
# resp.priority #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobPriority AWS API Documentation
#
# @overload update_job_priority(params = {})
# @param [Hash] params ({})
def update_job_priority(params = {}, options = {})
  # Construct the operation request, then dispatch it with any
  # caller-supplied transport options.
  build_request(:update_job_priority, params).send_request(options)
end
# Updates the status for the specified job. Use this action to confirm
# that you want to run a job or to cancel an existing job. For more
# information, see [S3 Batch Operations][1] in the *Amazon Simple
# Storage Service User Guide*.
#
#
#
# Related actions include:
#
# * [CreateJob][2]
#
# * [ListJobs][3]
#
# * [DescribeJob][4]
#
# * [UpdateJobStatus][5]
#
#
#
# [1]: https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html
# [2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html
# [3]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html
# [4]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html
# [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html
#
# @option params [required, String] :account_id
# The AWS account ID associated with the S3 Batch Operations job.
#
# @option params [required, String] :job_id
# The ID of the job whose status you want to update.
#
# @option params [required, String] :requested_job_status
# The status that you want to move the specified job to.
#
# @option params [String] :status_update_reason
# A description of the reason why you want to change the specified
# job's status. This field can be any string up to the maximum length.
#
# @return [Types::UpdateJobStatusResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateJobStatusResult#job_id #job_id} => String
# * {Types::UpdateJobStatusResult#status #status} => String
# * {Types::UpdateJobStatusResult#status_update_reason #status_update_reason} => String
#
# @example Request syntax with placeholder values
#
# resp = client.update_job_status({
# account_id: "AccountId", # required
# job_id: "JobId", # required
# requested_job_status: "Cancelled", # required, accepts Cancelled, Ready
# status_update_reason: "JobStatusUpdateReason",
# })
#
# @example Response structure
#
# resp.job_id #=> String
# resp.status #=> String, one of "Active", "Cancelled", "Cancelling", "Complete", "Completing", "Failed", "Failing", "New", "Paused", "Pausing", "Preparing", "Ready", "Suspended"
# resp.status_update_reason #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobStatus AWS API Documentation
#
# @overload update_job_status(params = {})
# @param [Hash] params ({})
def update_job_status(params = {}, options = {})
  # Construct the operation request, then dispatch it with any
  # caller-supplied transport options.
  build_request(:update_job_status, params).send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
def build_request(operation_name, params = {})
  # Resolve the handler stack for this operation before assembling the
  # request context.
  handler_list = @handlers.for(operation_name)
  request_context = Seahorse::Client::RequestContext.new(
    operation_name: operation_name,
    operation: config.api.operation(operation_name),
    client: self,
    params: params,
    config: config
  )
  # Stamp the context so downstream plugins can identify this SDK gem.
  request_context[:gem_name] = 'aws-sdk-s3control'
  request_context[:gem_version] = '1.31.0'
  Seahorse::Client::Request.new(handler_list, request_context)
end
# @api private
# @deprecated
def waiter_names
  # No waiters are defined for this service; an empty list is returned so
  # the generic client interface still responds to this query.
  []
end
# Class-level accessors used by the generated SDK client infrastructure.
class << self
  # @api private
  attr_reader :identifier
  # @api private
  # Returns the Errors module associated with this client class.
  def errors_module
    Errors
  end
end
end
end
| 44.729059 | 247 | 0.669294 |
e2bb624e75c0a270abb3915c4b1705f8cbe7ee97 | 545 | # Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::OperationalInsights::Mgmt::V2020_03_01_preview
  module Models
    #
    # Defines values for ClusterEntityStatus
    #
    module ClusterEntityStatus
      # Declare one string constant per provisioning state; each
      # constant's value equals its own name.
      %w[Creating Succeeded Failed Canceled Deleting
         ProvisioningAccount Updating].each do |state|
        const_set(state, state)
      end
    end
  end
end
| 25.952381 | 70 | 0.695413 |
e93a85d76e3284ab03af22435577ffbd9e2a2f09 | 4,805 | raise "Only JRuby is supported at this time." unless RUBY_PLATFORM == "java"
require "net/http"
require "uri"
require "digest/sha1"
# Build a path beneath the local vendor/ directory from the given
# path segments.
def vendor(*args)
  File.join("vendor", *args)
end
# Rake file task: create the vendor/ output directory on demand.
# NOTE(review): the prerequisite "vendor" appears to refer to the
# `vendor` task defined below -- confirm this ordering is intended.
directory "vendor/" => ["vendor"] do |task, args|
  mkdir task.name
end
# Download +url+ to +output+ and verify the payload against the expected
# SHA1 checksum, aborting the build on a mismatch.
def fetch(url, sha1, output)
  puts "Downloading #{url}"
  downloaded_sha1 = download(url, output)
  return if downloaded_sha1 == sha1

  fail "SHA1 does not match (expected '#{sha1}' but got '#{downloaded_sha1}')"
end # def fetch
# Ensure the file at +url+ exists under vendor/ with the expected SHA1,
# downloading (or re-downloading) it when it is missing or corrupt.
# Returns the local path of the vendored file.
def file_fetch(url, sha1)
  filename = File.basename( URI(url).path )
  # BUG FIX: the output path must interpolate the URL's basename; it was
  # previously a broken literal string, so every download landed on the
  # same bogus path.
  output = "vendor/#{filename}"
  task output => [ "vendor/" ] do
    begin
      actual_sha1 = file_sha1(output)
      if actual_sha1 != sha1
        fetch(url, sha1, output)
      end
    rescue Errno::ENOENT
      # File not present yet: fetch it for the first time.
      fetch(url, sha1, output)
    end
  end.invoke
  return output
end
# Compute the SHA1 hex digest of the file at +path+.
# Raises Errno::ENOENT when the file does not exist (callers rely on
# this to trigger a fresh download).
def file_sha1(path)
  # Digest::SHA1.file streams the file in chunks and closes it for us,
  # replacing the previous hand-rolled sysread loop.
  Digest::SHA1.file(path).hexdigest
end
# Stream +url+ to +output+ via a ".tmp" sidecar that is renamed into
# place on success, showing a progress percentage on a TTY.
# Returns the SHA1 hex digest of the downloaded bytes; the temp file is
# always cleaned up on failure.
def download(url, output)
  uri = URI(url)
  digest = Digest::SHA1.new
  tmp = "#{output}.tmp"
  Net::HTTP.start(uri.host, uri.port, :use_ssl => (uri.scheme == "https")) do |http|
    request = Net::HTTP::Get.new(uri.path)
    http.request(request) do |response|
      # BUG FIX: Net::HTTP exposes response codes as Strings, and the old
      # check was inverted ("fail if success") -- with the Integer
      # comparison it never matched at all, so HTTP errors were silently
      # written to disk. Fail for anything other than the accepted codes.
      fail "HTTP fetch failed for #{url}. #{response}" unless %w[200 301].include?(response.code)
      size = (response["content-length"].to_i || -1).to_f
      count = 0
      File.open(tmp, "w") do |fd|
        response.read_body do |chunk|
          fd.write(chunk)
          digest << chunk
          if size > 0 && $stdout.tty?
            count += chunk.bytesize
            $stdout.write(sprintf("\r%0.2f%%", count/size * 100))
          end
        end
      end
      # Clear the progress line.
      $stdout.write("\r          \r") if $stdout.tty?
    end
  end
  File.rename(tmp, output)
  return digest.hexdigest
rescue SocketError => e
  puts "Failure while downloading #{url}: #{e}"
  raise
ensure
  File.unlink(tmp) if File.exist?(tmp)
end # def download
# Extract a gzipped tarball. The caller's block maps each tar entry to an
# output path (or nil to skip it); files already on disk with the same
# size and mode are not rewritten. The tarball itself is deleted after a
# successful extraction.
def untar(tarball, &block)
  require "archive/tar/minitar"
  tgz = Zlib::GzipReader.new(File.open(tarball))
  # Pull out typesdb
  tar = Archive::Tar::Minitar::Input.open(tgz)
  tar.each do |entry|
    path = block.call(entry)
    next if path.nil?
    parent = File.dirname(path)
    mkdir_p parent unless File.directory?(parent)
    # Skip this file if the output file is the same size
    if entry.directory?
      mkdir path unless File.directory?(path)
    else
      entry_mode = entry.instance_eval { @mode } & 0777
      # BUG FIX: File.exists? was deprecated for years and removed in
      # Ruby 3.2; File.exist? is the supported spelling.
      if File.exist?(path)
        stat = File.stat(path)
        # TODO(sissel): Submit a patch to archive-tar-minitar upstream to
        # expose headers in the entry.
        entry_size = entry.instance_eval { @size }
        # If file sizes are same, skip writing.
        next if stat.size == entry_size && (stat.mode & 0777) == entry_mode
      end
      puts "Extracting #{entry.full_name} from #{tarball} #{entry_mode.to_s(8)}"
      File.open(path, "w") do |fd|
        # eof? check lets us skip empty files. Necessary because the API provided by
        # Archive::Tar::Minitar::Reader::EntryStream only mostly acts like an
        # IO object. Something about empty files in this EntryStream causes
        # IO.copy_stream to throw "can't convert nil into String" on JRuby
        # TODO(sissel): File a bug about this.
        while !entry.eof?
          chunk = entry.read(16384)
          fd.write(chunk)
        end
        #IO.copy_stream(entry, fd)
      end
      File.chmod(entry_mode, path)
    end
  end
  tar.close
  File.unlink(tarball) if File.file?(tarball)
end # def untar
# Decompress a gzip file in place: "foo.ext.gz" becomes "foo.ext".
# The compressed original is removed on success; on failure any
# partially-written output is deleted and the error re-raised.
def ungz(file)
  # BUG FIX: strip only a *trailing* ".gz" -- gsub removed every
  # occurrence, mangling names like "a.gz.tar.gz".
  outpath = file.sub(/\.gz\z/, '')
  tgz = Zlib::GzipReader.new(File.open(file))
  begin
    File.open(outpath, "w") do |out|
      IO::copy_stream(tgz, out)
    end
    File.unlink(file)
  rescue
    File.unlink(outpath) if File.file?(outpath)
    raise
  ensure
    # BUG FIX: the reader (and its underlying File) was leaked whenever
    # the copy raised; always close it.
    tgz.close
  end
end
# Process each entry of the @files Rakefile ivar: download it, verify its
# checksum, and unpack tarballs / gunzip single files into vendor/.
desc "Process any vendor files required for this plugin"
task "vendor" => [ "vendor:files" ]

namespace "vendor" do
  task "files" do
    # TODO(sissel): refactor the @files Rakefile ivar usage anywhere into
    # the vendor.json stuff.
    if @files
      @files.each do |file|
        download = file_fetch(file['url'], file['sha1'])
        # BUG FIX: escape the dots -- "." in a regexp matches any character.
        if download =~ /\.tar\.gz/
          prefix = download.gsub('.tar.gz', '').gsub('vendor/', '')
          untar(download) do |entry|
            # Honor an optional per-file manifest: skip entries it omits.
            next if !file['files'].nil? && !file['files'].include?(entry.full_name.gsub(prefix, ''))
            # BUG FIX: the output basename was previously computed only
            # when a manifest was given, so manifest-less archives crashed
            # on File.join('vendor', nil).
            File.join('vendor', entry.full_name.split("/").last)
          end
        elsif download =~ /\.gz/
          ungz(download)
        end
      end
    end
  end
end
f8aba313861c6172fb6a4c9d63b92477bf94b2d1 | 1,423 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
# Auto-generated Rails schema snapshot (load with db:schema:load).
ActiveRecord::Schema.define(version: 20150130183018) do
  # These are extensions that must be enabled in order to support this database
  enable_extension "plpgsql"
  # One row per band.
  create_table "bands", force: :cascade do |t|
    t.string "name"
    t.datetime "created_at", null: false
    t.datetime "updated_at", null: false
  end
  # Join table for the bands<->venues many-to-many association.
  # NOTE(review): no indexes on band_id/venue_id and it still carries a
  # primary key -- conventionally this would use `id: false` plus indexes;
  # confirm whether a habtm or has_many :through association is intended.
  create_table "bands_venues", force: :cascade do |t|
    t.integer "band_id"
    t.integer "venue_id"
    t.datetime "created_at", null: false
    t.datetime "updated_at", null: false
  end
  # One row per venue.
  create_table "venues", force: :cascade do |t|
    t.string "name"
    t.datetime "created_at", null: false
    t.datetime "updated_at", null: false
  end
end
| 36.487179 | 86 | 0.741391 |
876b1041c4091023ccf8bc38d929d22b5f8aa783 | 650 | #
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
# CocoaPods spec for the iOS side of this Flutter plugin.
Pod::Spec.new do |s|
  s.name             = 'animated_widgets'
  s.version          = '0.0.1'
  s.summary          = 'A new Flutter plugin.'
  s.description      = <<-DESC
A new Flutter plugin.
                       DESC
  # NOTE(review): homepage, license file, and author email are still the
  # flutter-create placeholders -- fill these in before publishing.
  s.homepage         = 'http://example.com'
  s.license          = { :file => '../LICENSE' }
  s.author           = { 'Your Company' => '[email protected]' }
  s.source           = { :path => '.' }
  s.source_files = 'Classes/**/*'
  s.public_header_files = 'Classes/**/*.h'
  # Flutter.framework is supplied by the Flutter tool at build time.
  s.dependency 'Flutter'
  s.ios.deployment_target = '8.0'
end
| 29.545455 | 83 | 0.54 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.