hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
397ced2327ab18c9e1389b0ddeb8702ef3fa2824 | 182 | require 'spec_helper'
# Controller spec: the static 'about' page should render successfully.
# NOTE(review): uses legacy RSpec 2 `should` syntax (`response.should be_success`);
# RSpec 3 would use `expect(response).to be_successful` — modernize when the
# suite's RSpec version is confirmed.
describe PageController do
describe "GET 'about'" do
it "returns http success" do
get 'about'
response.should be_success
end
end
end
| 14 | 32 | 0.675824 |
61399345bbcc1cad098e7ec2f045412e2b08821e | 26,749 | require 'pathname'
# Load the beaker sub-libraries this file depends on.
%w[command dsl].each { |lib| require "beaker/#{lib}" }
module Beaker
#Provides convienience methods for commonly run actions on hosts
module HostPrebuiltSteps
include Beaker::DSL::Patterns
NTPSERVER = 'pool.ntp.org'
SLEEPWAIT = 5
TRIES = 5
UNIX_PACKAGES = ['curl', 'ntpdate']
FREEBSD_PACKAGES = ['curl', 'perl5']
WINDOWS_PACKAGES = ['curl']
PSWINDOWS_PACKAGES = []
SLES10_PACKAGES = ['curl']
SLES_PACKAGES = ['curl', 'ntp']
DEBIAN_PACKAGES = ['curl', 'ntpdate', 'lsb-release']
CUMULUS_PACKAGES = ['curl', 'ntpdate']
ETC_HOSTS_PATH = "/etc/hosts"
ETC_HOSTS_PATH_SOLARIS = "/etc/inet/hosts"
ROOT_KEYS_SCRIPT = "https://raw.githubusercontent.com/puppetlabs/puppetlabs-sshkeys/master/templates/scripts/manage_root_authorized_keys"
ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s"
APT_CFG = %q{ Acquire::http::Proxy "http://proxy.puppetlabs.net:3128/"; }
IPS_PKG_REPO="http://solaris-11-internal-repo.delivery.puppetlabs.net"
#Run timesync on the provided hosts
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Run time synchronization on the provided hosts.
# Windows hosts use w32tm; SLES hosts use sntp; everything else uses ntpdate,
# retried up to TRIES times with SLEEPWAIT seconds between attempts.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# @option opts [String] :ntp_server NTP server to sync against (default NTPSERVER)
# @raise via report_and_raise if sync ultimately fails
def timesync host, opts
logger = opts[:logger]
ntp_server = opts[:ntp_server] ? opts[:ntp_server] : NTPSERVER
# NOTE: the block parameter shadows the method parameter `host`; inside the
# block `host` is a single Host even when an Array was passed in.
block_on host do |host|
logger.notify "Update system time sync for '#{host.name}'"
if host['platform'].include? 'windows'
# The exit code of 5 is for Windows 2008 systems where the w32tm /register command
# is not actually necessary.
host.exec(Command.new("w32tm /register"), :acceptable_exit_codes => [0,5])
host.exec(Command.new("net start w32time"), :acceptable_exit_codes => [0,2])
host.exec(Command.new("w32tm /config /manualpeerlist:#{ntp_server} /syncfromflags:manual /update"))
host.exec(Command.new("w32tm /resync"))
logger.notify "NTP date succeeded on #{host}"
else
case
when host['platform'] =~ /sles-/
ntp_command = "sntp #{ntp_server}"
else
ntp_command = "ntpdate -t 20 #{ntp_server}"
end
success=false
try = 0
# Retry loop: transient NTP failures are common on freshly provisioned VMs.
until try >= TRIES do
try += 1
if host.exec(Command.new(ntp_command), :accept_all_exit_codes => true).exit_code == 0
success=true
break
end
sleep SLEEPWAIT
end
if success
logger.notify "NTP date succeeded on #{host} after #{try} tries"
else
raise "NTP date was not successful after #{try} tries"
end
end
end
# Method-level rescue: any error (including the raise above) is reported
# through the logger and re-raised by report_and_raise.
rescue => e
report_and_raise(logger, e, "timesync (--ntp)")
end
# Validate that hosts are prepared to be used as SUTs, if packages are missing attempt to
# install them.
#
# Verifies the presence of #{HostPrebuiltSteps::UNIX_PACKAGES} on unix platform hosts,
# {HostPrebuiltSteps::SLES_PACKAGES} on SUSE platform hosts,
# {HostPrebuiltSteps::DEBIAN_PACKAGES} on debian platform hosts,
# {HostPrebuiltSteps::CUMULUS_PACKAGES} on cumulus platform hosts,
# {HostPrebuiltSteps::WINDOWS_PACKAGES} on cygwin-installed windows platform hosts,
# and {HostPrebuiltSteps::PSWINDOWS_PACKAGES} on non-cygwin windows platform hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Validate that hosts are prepared to be used as SUTs; install any missing
# baseline packages for the host's platform.
#
# NOTE: the order of `when` clauses matters — /sles-10/ must be checked
# before the broader /sles-/, and the final clause is a negated catch-all
# for generic unix platforms (aix/solaris/osx intentionally get nothing).
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def validate_host host, opts
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /sles-10/
check_and_install_packages_if_needed(host, SLES10_PACKAGES)
when host['platform'] =~ /sles-/
check_and_install_packages_if_needed(host, SLES_PACKAGES)
when host['platform'] =~ /debian/
check_and_install_packages_if_needed(host, DEBIAN_PACKAGES)
when host['platform'] =~ /cumulus/
check_and_install_packages_if_needed(host, CUMULUS_PACKAGES)
when (host['platform'] =~ /windows/ and host.is_cygwin?)
check_and_install_packages_if_needed(host, WINDOWS_PACKAGES)
when (host['platform'] =~ /windows/ and not host.is_cygwin?)
check_and_install_packages_if_needed(host, PSWINDOWS_PACKAGES)
when host['platform'] =~ /freebsd/
check_and_install_packages_if_needed(host, FREEBSD_PACKAGES)
when host['platform'] !~ /debian|aix|solaris|windows|sles-|osx-|cumulus/
check_and_install_packages_if_needed(host, UNIX_PACKAGES)
end
end
# Any failure is funneled through report_and_raise for consistent logging.
rescue => e
report_and_raise(logger, e, "validate")
end
# Installs the given packages if they aren't already on a host
#
# @param [Host] host Host to act on
# @param [Array<String>] package_list List of package names to install
# Ensure every package in +package_list+ is present on +host+, installing
# only those that are missing. The check/install is interleaved per package
# so an earlier install can satisfy a later check.
#
# @param [Host] host Host to act on
# @param [Array<String>] package_list List of package names to install
def check_and_install_packages_if_needed host, package_list
  package_list.each do |pkg|
    host.install_package pkg unless host.check_for_package pkg
  end
end
#Install a set of authorized keys using {HostPrebuiltSteps::ROOT_KEYS_SCRIPT}. This is a
#convenience method to allow for easy login to hosts after they have been provisioned with
#Beaker.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Install a set of authorized keys for root via ROOT_KEYS_SCRIPT, making it
# easy to log in to hosts after Beaker has provisioned them.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def sync_root_keys host, opts
# JJM This step runs on every system under test right now. We're anticipating
# issues on Windows and maybe Solaris. We will likely need to filter this step
# but we're deliberately taking the approach of "assume it will work, fix it
# when reality dictates otherwise"
logger = opts[:logger]
block_on host do |host|
logger.notify "Sync root authorized_keys from github on #{host.name}"
# Allow all exit code, as this operation is unlikely to cause problems if it fails.
if host['platform'] =~ /solaris|eos/
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "bash"), :accept_all_exit_codes => true)
else
# Non-solaris: prepend /usr/gnu/bin to PATH so GNU tools are preferred.
host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "env PATH=/usr/gnu/bin:$PATH bash"), :accept_all_exit_codes => true)
end
end
# Method-level rescue: report any failure and re-raise.
rescue => e
report_and_raise(logger, e, "sync_root_keys")
end
#Determine the Extra Packages for Enterprise Linux URL for the provided Enterprise Linux host.
# @param [Host, Array<Host>] host One host to act on. Will use host epel_url, epel_arch and epel_pkg
# before using defaults provided in opts.
# @return [String, String, String] The URL, arch and package name for EPL for the provided host
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [String] :epel_url Link to download
# @option opts [String] :epel_arch Architecture to download (i386, x86_64, etc), defaults to i386
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
# @raise [Exception] Raises an error if the host provided's platform != /el-(5|6)/
# Determine the Extra Packages for Enterprise Linux (EPEL) URL, arch and
# package name for the provided EL host. Host-level settings
# (host[:epel_url], host[:epel_arch], host[:epel_pkg]) take precedence over
# the corresponding values in +opts+.
#
# @param [Host] host One host to act on
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [String] :epel_url Link to download
# @option opts [String] :epel_arch Architecture to download (i386, x86_64, etc), defaults to i386
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
# @return [Array(String, String, String)] The URL, arch and package name
# @raise [RuntimeError] if the host is not EL-based or its version is not 5 or 6
def epel_info_for host, opts
  unless el_based?(host)
    # Message previously said "epel_info_for!" — there is no bang variant;
    # corrected to the actual method name.
    raise "epel_info_for not available for #{host.name} on platform #{host['platform']}"
  end
  version = host['platform'].version
  unless ['5', '6'].include?(version)
    raise "epel_info_for does not support el version #{version}, on #{host.name}"
  end
  # The url was built identically in both the el-5 and el-6 branches;
  # construct it once and pick the version-specific opts key dynamically.
  url = "#{host[:epel_url] || opts[:epel_url]}/#{version}"
  pkg = host[:epel_pkg] || opts[:"epel_#{version}_pkg"]
  return url, host[:epel_arch] || opts[:epel_arch] || 'i386', pkg
end
# Run 'apt-get update' on the provided host or hosts.
# If the platform of the provided host is not ubuntu, debian or cumulus: do nothing.
#
# @param [Host, Array<Host>] hosts One or more hosts to act upon
# Run 'apt-get update' on the provided host or hosts.
# Hosts whose platform is not ubuntu, debian or cumulus are skipped.
#
# @param [Host, Array<Host>] hosts One or more hosts to act upon
def apt_get_update hosts
  block_on hosts do |host|
    next unless host[:platform] =~ /ubuntu|debian|cumulus/
    host.exec(Command.new("apt-get update"))
  end
end
#Create a file on host or hosts at the provided file path with the provided file contents.
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [String] file_path The path at which the new file will be created on the host or hosts.
# @param [String] file_content The contents of the file to be created on the host or hosts.
# Create a file on host or hosts at +file_path+ containing +file_content+,
# by writing a local tempfile and scp-ing it over.
#
# NOTE(review): relies on Tempfile being loaded and on @options being set by
# the including class — confirm both in the consuming context.
# Also note `file.puts` appends a trailing newline to the content.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [String] file_path The path at which the new file will be created on the host or hosts.
# @param [String] file_content The contents of the file to be created on the host or hosts.
def copy_file_to_remote(host, file_path, file_content)
block_on host do |host|
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
host.do_scp_to(tempfile.path, file_path, @options)
end
end
end
# On ubuntu, debian, or cumulus host or hosts: alter apt configuration to use
# the internal Puppet Labs proxy {HostPrebuiltSteps::APT_CFG} proxy.
# On solaris-11 host or hosts: alter pkg to point to
# the internal Puppet Labs proxy {HostPrebuiltSteps::IPS_PKG_REPO}.
#
# Do nothing for other platform host or hosts.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Point apt (ubuntu/debian/cumulus) or pkg (solaris-11) at the internal
# Puppet Labs proxy/repo; other platforms are left untouched.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def proxy_config( host, opts )
logger = opts[:logger]
block_on host do |host|
case
when host['platform'] =~ /ubuntu|debian|cumulus/
# Back up any existing apt.conf before overwriting it with APT_CFG.
host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi"))
copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG)
apt_get_update(host)
when host['platform'] =~ /solaris-11/
# '|| :' makes unset-publisher a no-op success when 'solaris' isn't set.
host.exec(Command.new("/usr/bin/pkg unset-publisher solaris || :"))
host.exec(Command.new("/usr/bin/pkg set-publisher -g %s solaris" % IPS_PKG_REPO))
else
logger.debug "#{host}: repo proxy configuration not modified"
end
end
# Method-level rescue: report any failure and re-raise.
rescue => e
report_and_raise(logger, e, "proxy_config")
end
#Install EPEL on host or hosts with platform = /el-(5|6)/. Do nothing on host or hosts of other platforms.
# @param [Host, Array<Host>] host One or more hosts to act upon. Will use individual host epel_url, epel_arch
# and epel_pkg before using defaults provided in opts.
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :debug If true, print verbose rpm information when installing EPEL
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# @option opts [String] :epel_url Link to download from
# @option opts [String] :epel_arch Architecture of epel to download (i386, x86_64, etc)
# @option opts [String] :epel_6_pkg Package to download from provided link for el-6
# @option opts [String] :epel_5_pkg Package to download from provided link for el-5
# Install EPEL on el-5/el-6 hosts (skipped when epel-release is already
# installed); all other platforms are left untouched.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :debug If true, print verbose rpm information when installing EPEL
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def add_el_extras( host, opts )
#add_el_extras
#only supports el-* platforms
logger = opts[:logger]
debug_opt = opts[:debug] ? 'vh' : ''
block_on host do |host|
case
when el_based?(host) && ['5','6'].include?(host['platform'].version)
# Exit code 1 from grep means epel-release is NOT installed yet.
result = host.exec(Command.new('rpm -qa | grep epel-release'), :acceptable_exit_codes => [0,1])
if result.exit_code == 1
url, arch, pkg = epel_info_for host, opts
host.exec(Command.new("rpm -i#{debug_opt} #{url}/#{arch}/#{pkg}"))
#update /etc/yum.repos.d/epel.repo for new baseurl
# NOTE(review): Regexp.escape escapes for Ruby regexes, not for the sed
# replacement text; URLs containing sed-special characters (other than the
# ';' delimiter) may still need different escaping — confirm.
host.exec(Command.new("sed -i -e 's;#baseurl.*$;baseurl=#{Regexp.escape(url)}/\$basearch;' /etc/yum.repos.d/epel.repo"))
#remove mirrorlist
host.exec(Command.new("sed -i -e '/mirrorlist/d' /etc/yum.repos.d/epel.repo"))
host.exec(Command.new('yum clean all && yum makecache'))
end
else
logger.debug "#{host}: package repo configuration not modified"
end
end
# Method-level rescue: report any failure and re-raise.
rescue => e
report_and_raise(logger, e, "add_repos")
end
#Determine the domain name of the provided host from its /etc/resolv.conf
# @param [Host] host the host to act upon
# Determine the DNS domain of +host+ from its /etc/resolv.conf.
# A 'domain' directive wins over 'search'; the last occurrence of each
# directive in the file is used. Returns nil when neither is present.
#
# @param [Host] host the host to act upon
def get_domain_name(host)
  domain = nil
  search = nil
  contents = host.exec(Command.new("cat /etc/resolv.conf")).stdout
  contents.each_line do |line|
    case line
    when /^\s*domain\s+(\S+)/ then domain = Regexp.last_match(1)
    when /^\s*search\s+(\S+)/ then search = Regexp.last_match(1)
    end
  end
  domain || search
end
#Determine the ip address of the provided host
# @param [Host] host the host to act upon
# @deprecated use {Host#get_ip}
# Determine the ip address of the provided host.
# Thin delegation kept for backwards compatibility.
# @param [Host] host the host to act upon
# @deprecated use {Host#get_ip}
def get_ip(host)
  host.get_ip
end
#Append the provided string to the /etc/hosts file of the provided host
# @param [Host] host the host to act upon
# @param [String] etc_hosts The string to append to the /etc/hosts file
# Replace the /etc/hosts file on the provided host with +etc_hosts+.
# FreeBSD uses the host's echo_to_file helper; other platforms shell out.
#
# NOTE(review): the shell path single-quotes the content; a single quote
# inside +etc_hosts+ would break the command (callers currently only pass
# generated host entries, but confirm before accepting external input).
#
# @param [Host] host the host to act upon
# @param [String] etc_hosts The full desired contents of /etc/hosts
def set_etc_hosts(host, etc_hosts)
if host['platform'] =~ /freebsd/
host.echo_to_file(etc_hosts, '/etc/hosts')
else
host.exec(Command.new("echo '#{etc_hosts}' > /etc/hosts"))
end
end
#Make it possible to log in as root by copying the current users ssh keys to the root account
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Make it possible to log in as root/Administrator by copying the current
# user's ~/.ssh directory to the privileged account, per platform.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def copy_ssh_to_root host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Give root a copy of current user's keys, on #{host.name}"
if host['platform'] =~ /windows/ and host.is_cygwin?
host.exec(Command.new('cp -r .ssh /cygdrive/c/Users/Administrator/.'))
host.exec(Command.new('chown -R Administrator /cygdrive/c/Users/Administrator/.ssh'))
elsif host['platform'] =~ /windows/ and not host.is_cygwin?
# cmd.exe syntax: copy only when .ssh exists.
host.exec(Command.new("if exist .ssh (xcopy .ssh C:\\Users\\Administrator\\.ssh /s /e)"))
elsif host['platform'] =~ /osx/
# root's home on OS X is /var/root, not /root.
host.exec(Command.new('sudo cp -r .ssh /var/root/.'), {:pty => true})
elsif host['platform'] =~ /freebsd/
host.exec(Command.new('sudo cp -r .ssh /root/.'), {:pty => true})
else
host.exec(Command.new('sudo su -c "cp -r .ssh /root/."'), {:pty => true})
end
end
end
# Update /etc/hosts to make it possible for each provided host to reach each other host by name.
# Assumes that each provided host has host[:ip] set; in the instance where a provider sets
# host['ip'] to an address which facilitates access to the host externally, but the actual host
# addresses differ from this, we check first for the presence of a host['vm_ip'] key first,
# and use that if present.
# @param [Host, Array<Host>] hosts An array of hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def hack_etc_hosts hosts, opts
etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n"
hosts.each do |host|
etc_hosts += "#{host['vm_ip'] || host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n"
end
hosts.each do |host|
set_etc_hosts(host, etc_hosts)
end
end
# Update sshd_config on debian, ubuntu, centos, el, redhat, cumulus, and fedora boxes to allow for root login
#
# Does nothing on other platfoms.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Update sshd_config to allow root login, then restart the ssh daemon using
# the platform-appropriate service mechanism. Warns (and does nothing) on
# unsupported platforms.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def enable_root_login host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug "Update /etc/ssh/sshd_config to allow root login"
if host['platform'] =~ /osx/
# BSD sed needs the empty '' backup-suffix argument; OS X keeps its
# config at /etc/sshd_config rather than /etc/ssh/sshd_config.
host.exec(Command.new("sudo sed -i '' 's/#PermitRootLogin no/PermitRootLogin Yes/g' /etc/sshd_config"))
host.exec(Command.new("sudo sed -i '' 's/#PermitRootLogin yes/PermitRootLogin Yes/g' /etc/sshd_config"))
elsif host['platform'] =~ /freebsd/
host.exec(Command.new("sudo sed -i -e 's/#PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config"), {:pty => true} )
elsif not host.is_powershell?
host.exec(Command.new("sudo su -c \"sed -ri 's/^#?PermitRootLogin no|^#?PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config\""), {:pty => true})
else
logger.warn("Attempting to enable root login non-supported platform: #{host.name}: #{host['platform']}")
end
#restart sshd
if host['platform'] =~ /debian|ubuntu|cumulus/
host.exec(Command.new("sudo su -c \"service ssh restart\""), {:pty => true})
elsif host['platform'] =~ /centos-7|el-7|redhat-7/
# systemd-based EL7 family.
host.exec(Command.new("sudo -E systemctl restart sshd.service"), {:pty => true})
elsif host['platform'] =~ /centos|el-|redhat|fedora|eos/
host.exec(Command.new("sudo -E /sbin/service sshd reload"), {:pty => true})
elsif host['platform'] =~ /freebsd/
host.exec(Command.new("sudo /etc/rc.d/sshd restart"))
else
logger.warn("Attempting to update ssh on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
#Disable SELinux on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Disable SELinux (setenforce 0) on centos/el/redhat/fedora/eos hosts;
# warns and does nothing on other platforms.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_se_linux host, opts
  logger = opts[:logger]
  block_on host do |host|
    if host['platform'] =~ /centos|el-|redhat|fedora|eos/
      # Bug fix: this previously called @logger, but the logger comes in via
      # opts and is bound to the local `logger` (see the assignment above and
      # the sibling disable_iptables); @logger is unset in this mixin, so
      # every call here raised NoMethodError on nil.
      logger.debug("Disabling se_linux on #{host.name}")
      host.exec(Command.new("sudo su -c \"setenforce 0\""), {:pty => true})
    else
      logger.warn("Attempting to disable SELinux on non-supported platform: #{host.name}: #{host['platform']}")
    end
  end
end
#Disable iptables on centos, does nothing on other platforms
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Stop iptables on centos/el/redhat/fedora/eos hosts; warns and does
# nothing on other platforms.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def disable_iptables host, opts
  logger = opts[:logger]
  block_on host do |host|
    unless host['platform'] =~ /centos|el-|redhat|fedora|eos/
      logger.warn("Attempting to disable iptables on non-supported platform: #{host.name}: #{host['platform']}")
      next
    end
    logger.debug("Disabling iptables on #{host.name}")
    host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true})
  end
end
# Setup files for enabling requests to pass to a proxy server
# This works for the APT package manager on debian, ubuntu, and cumulus
# and YUM package manager on el, centos, fedora and redhat.
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# Configure the platform's package manager (apt or yum) to send requests
# through opts[:package_proxy]. Unsupported platforms are only logged.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
# @option opts [String] :package_proxy The proxy URL to configure
def package_proxy host, opts
logger = opts[:logger]
block_on host do |host|
logger.debug("enabling proxy support on #{host.name}")
case host['platform']
when /ubuntu/, /debian/, /cumulus/
host.exec(Command.new("echo 'Acquire::http::Proxy \"#{opts[:package_proxy]}/\";' >> /etc/apt/apt.conf.d/10proxy"))
when /^el-/, /centos/, /fedora/, /redhat/, /eos/
host.exec(Command.new("echo 'proxy=#{opts[:package_proxy]}/' >> /etc/yum.conf"))
else
logger.debug("Attempting to enable package manager proxy support on non-supported platform: #{host.name}: #{host['platform']}")
end
end
end
# Merge the two provided hashes so that an array of values is created from collisions
# @param [Hash] h1 The first hash
# @param [Hash] h2 The second hash
# @return [Hash] A merged hash with arrays of values where collisions between the two hashes occured.
# @example
# > h1 = {:PATH=>"/1st/path"}
# > h2 = {:PATH=>"/2nd/path"}
# > additive_hash_merge(h1, h2)
# => {:PATH=>["/1st/path", "/2nd/path"]}
# Merge two hashes into a hash of value-arrays, keeping ONLY keys that
# appear in both (keys are compared case-insensitively after to_s).
# Duplicate values for a key are collapsed; keys present in just one of
# the hashes are dropped — callers rely on every value being an Array.
#
# @param [Hash] h1 The first hash (its key objects appear in the result)
# @param [Hash] h2 The second hash
# @return [Hash] colliding keys mapped to the de-duplicated pair of values
# @example
#   additive_hash_merge({:PATH => "/1st/path"}, {:PATH => "/2nd/path"})
#   # => {:PATH => ["/1st/path", "/2nd/path"]}
def additive_hash_merge h1, h2
  normalized = h2.each_with_object({}) { |(k, v), acc| acc[k.to_s.upcase] = v }
  h1.each_with_object({}) do |(key, val), merged|
    lookup = key.to_s.upcase
    next unless normalized.key?(lookup)
    merged[key] = [val, normalized[lookup]].uniq
  end
end
# Create the hash of default environment from host (:host_env), global options hash (:host_env) and default PE/Foss puppet variables
# @param [Host] host The host to construct the environment hash for, host specific environment should be in :host_env in a hash
# @param [Hash] opts Hash of options, including optional global host_env to be applied to each provided host
# @return [Hash] A hash of environment variables for provided host
# Build the environment hash for a host by merging host[:host_env] with the
# global opts[:host_env]; colliding vars are joined with the host's path
# separator (':' for PATH on non-powershell hosts).
#
# NOTE(review): additive_hash_merge only returns keys present in BOTH
# hashes, and its values are Arrays — the join below depends on that.
# The `key == 'PATH'` comparison is against the String 'PATH'; if callers
# use symbol keys this branch never matches — TODO confirm key types.
#
# @param [Host] host The host to construct the environment hash for
# @param [Hash] opts Hash of options, including optional global host_env
# @return [Hash] A hash of environment variables for provided host
def construct_env host, opts
env = additive_hash_merge(host[:host_env], opts[:host_env])
env.each_key do |key|
separator = host['pathseparator']
if key == 'PATH' && (not host.is_powershell?)
separator = ':'
end
env[key] = env[key].join(separator)
end
env
end
# Add a host specific set of env vars to each provided host's ~/.ssh/environment
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# Add a host-specific set of env vars to each host's ~/.ssh/environment:
# enables PermitUserEnvironment in sshd_config, restarts sshd via the
# platform-appropriate mechanism, writes the vars, then reconnects.
#
# @param [Host, Array<Host>] host One or more hosts to act upon
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object
def set_env host, opts
logger = opts[:logger]
block_on host do |host|
env = construct_env(host, opts)
logger.debug("setting local environment on #{host.name}")
case host['platform']
when /windows/
if host.is_cygwin?
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/sshd_config"))
# we get periodic failures to restart the service, so looping these with re-attempts
repeat_fibonacci_style_for(5) do
0 == host.exec(Command.new("cygrunsrv -E sshd"), :acceptable_exit_codes => [0, 1] ).exit_code
end
repeat_fibonacci_style_for(5) do
0 == host.exec(Command.new("cygrunsrv -S sshd"), :acceptable_exit_codes => [0, 1] ).exit_code
end
env['CYGWIN'] = 'nodosfilewarning'
else
#nothing to do here
end
when /osx/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/sshd_config"))
host.exec(Command.new("launchctl unload /System/Library/LaunchDaemons/ssh.plist"))
host.exec(Command.new("launchctl load /System/Library/LaunchDaemons/ssh.plist"))
when /debian|ubuntu|cumulus/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("service ssh restart"))
when /el-7|centos-7|redhat-7|oracle-7|scientific-7|eos-7/
# NOTE: the -7 family must be matched before the generic el-/centos branch.
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("systemctl restart sshd.service"))
when /el-|centos|fedora|redhat|oracle|scientific|eos/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("/sbin/service sshd restart"))
when /sles/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("rcsshd restart"))
when /solaris/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("svcadm restart svc:/network/ssh:default"))
when /aix/
host.exec(Command.new("echo '\nPermitUserEnvironment yes' >> /etc/ssh/sshd_config"))
host.exec(Command.new("stopsrc -g ssh"))
host.exec(Command.new("startsrc -g ssh"))
when /freebsd/
host.exec(Command.new("sudo perl -pi -e 's/^#?PermitUserEnvironment no/PermitUserEnvironment yes/' /etc/ssh/sshd_config"), {:pty => true} )
host.exec(Command.new("sudo /etc/rc.d/sshd restart"))
end
if not host.is_powershell?
#ensure that ~/.ssh/environment exists
host.exec(Command.new("mkdir -p #{Pathname.new(host[:ssh_env_file]).dirname}"))
host.exec(Command.new("chmod 0600 #{Pathname.new(host[:ssh_env_file]).dirname}"))
host.exec(Command.new("touch #{host[:ssh_env_file]}"))
#add the constructed env vars to this host
host.add_env_var('PATH', '$PATH')
end
#add the env var set to this test host
env.each_pair do |var, value|
host.add_env_var(var, value)
end
# REMOVE POST BEAKER 3: backwards compatability, do some setup based upon the global type
# this is the worst and i hate it
if host[:type]
case host[:type]
when /git|foss|aio/
Class.new.extend(Beaker::DSL).configure_foss_defaults_on(host)
when /pe/
Class.new.extend(Beaker::DSL).configure_pe_defaults_on(host)
end
end
#close the host to re-establish the connection with the new sshd settings
host.close
# print out the working env
if host.is_powershell?
host.exec(Command.new("SET"))
else
host.exec(Command.new("cat #{host[:ssh_env_file]}"))
end
end
end
private
# A helper to tell whether a host is el-based
# @param [Host] host the host to act upon
#
# @return [Boolean] if the host is el_based
# Whether the host's platform variant belongs to the Enterprise-Linux
# family (centos, redhat, scientific, el, oracle).
#
# @param [Host] host the host to act upon
# @return [Boolean] true if the host is el_based
def el_based? host
  %w[centos redhat scientific el oracle].include?(host['platform'].variant)
end
end
end
| 46.358752 | 165 | 0.636472 |
033b7351cb637dc63d92117cc56ff209c89ba5d0 | 863 | #
# Be sure to run `pod lib lint CubicSpline.podspec' to ensure this is a
# CocoaPods specification for the CubicSpline Swift library.
Pod::Spec.new do |s|
s.name = 'CubicSpline'
s.version = '0.1.2'
s.summary = 'Cubic Spline in Swift'
s.description = <<-DESC
A simple cubic spline (https://en.wikipedia.org/wiki/Cubic_Hermite_spline) library written in Swift
DESC
s.homepage = 'https://github.com/vkaramov/CubicSpline'
s.license = { :type => 'MIT', :file => 'LICENSE' }
s.author = { 'vkaramov' => 'vkaramov a_t yandex d|o|t ru' }
# Git tag is derived from s.version so releases only need one bump.
s.source = { :git => 'https://github.com/vkaramov/CubicSpline.git', :tag => s.version.to_s }
s.swift_version = '4.0'
s.ios.deployment_target = '8.0'
s.osx.deployment_target = '10.10'
s.source_files = 'CubicSpline/Classes/**/*'
s.frameworks = 'Foundation'
end
| 31.962963 | 104 | 0.587486 |
6ada497cc450c14540a49db0d0ac92dfd070b1f1 | 3,374 | require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
# Specs for a Brainfuck interpreter: covers initial state, cell wrapping at
# 0/255, loops (single/nested/skipped), input handling, the custom '?'
# debug command, and syntax errors for unmatched braces.
# NOTE(review): written in RSpec 2 idioms (`its`, `stub!`, `should`,
# `should_receive`) that were removed in RSpec 3 — needs migration before
# running on a modern RSpec.
describe "Brainfuck" do
context "before execution" do
subject { Brainfuck.new('') }
its(:output) { should == "" }
its(:tape) { should == [0] }
its(:data_pointer) { should == 0 }
its(:input) { should be_nil }
end
context "after executing a simple program" do
before do
@program = "+>++>>+++++++++++++++++++++++++++++++++.<"
end
subject { Brainfuck.new(@program).execute }
its(:output) { should == "!" }
its(:tape) { should == [1, 2, 0, 33] }
its(:data_pointer) { should == 2 }
its(:input) { should == [] }
end
context "for a program that reads from input" do
before do
@program = ",>,<.>."
end
context "when executed with sufficient input" do
subject { Brainfuck.new(@program).execute([88, 121]) }
its(:output) { should == "Xy" }
its(:tape) { should == [88, 121] }
its(:data_pointer) { should == 1 }
its(:input) { should == [] }
end
it "raises an error when executed with insufficient input" do
lambda { Brainfuck.new(@program).execute([88]) }.should raise_error
end
end
# Cells are expected to wrap modulo 256 (0 - 1 == 255).
context "after executing a program that wraps cells beyond their min/max values" do
before do
@program = "->-->--+>--++"
end
subject { Brainfuck.new(@program).execute }
its(:output) { should == "" }
its(:tape) { should == [255, 254, 255, 0] }
its(:data_pointer) { should == 3 }
its(:input) { should == [] }
end
context "after executing a program that uses a single loop" do
before do
@program = "+++++++[->+++++<]>."
end
subject { Brainfuck.new(@program).execute }
its(:output) { should == "#" }
its(:tape) { should == [0, 35] }
its(:data_pointer) { should == 1 }
its(:input) { should == [] }
end
context "after executing a program that uses multiple loops" do
before do
@program = ",>,<[->>+>+<<<]>[->>>+>+<<<<]"
end
subject { Brainfuck.new(@program).execute([17, 23]) }
its(:tape) { should == [0, 0, 17, 17, 23, 23] }
end
context "after executing a program that uses nested loops" do
before do
@program = "+++[->++++[->+++<]<]"
end
subject { Brainfuck.new(@program).execute }
its(:tape) { should == [0, 0, 36] }
end
context "after executing a program with multiple loops some of which are not executed" do
before do
@program = "[][]++[->+++<]"
end
subject { Brainfuck.new(@program).execute }
its(:tape) { should == [0, 6] }
end
it "puts whatever the state method returns when a '?' command is encountered" do
bf = Brainfuck.new('?')
bf.stub!(:state).and_return("my state")
bf.should_receive(:puts).with("my state")
bf.execute
end
it "raises an error when moving the data_pointer beyond the first position" do
lambda { Brainfuck.new("<").execute }.should raise_error
end
it "raises a syntax error when executing a program with more opening braces than closing ones" do
lambda { Brainfuck.new("+[+[-]").execute }.should raise_error(Brainfuck::SyntaxError, "Unmatched '['")
end
it "raises a syntax error when executing a program with more closing braces than opening ones" do
lambda { Brainfuck.new("++]").execute }.should raise_error(Brainfuck::SyntaxError, "Unmatched ']'")
end
end
| 28.352941 | 106 | 0.590101 |
9113568aac357cd29a93538f6dc39a9097b953f6 | 7,793 | require "test_helper"
class DevelopersTest < ActionDispatch::IntegrationTest
include DevelopersHelper
include MetaTagsHelper
include PagyHelper
test "can view developer profiles" do
get developers_path
assert_select "h2", developers(:one).hero
end
test "custom meta tags are rendered" do
get developers_path
assert_title_contains "Hire Ruby on Rails developers"
assert_description_contains "looking for their"
end
test "can't view developer with invisible profile" do
developer = create_developer(search_status: :invisible)
get developer_path(developer)
assert_redirected_to root_path
end
test "can see own developer profile when invisible" do
developer = create_developer(search_status: :invisible)
sign_in developer.user
get developer_path(developer)
assert_response :ok
end
test "developers are sorted newest first" do
create_developer(hero: "Oldest")
create_developer(hero: "Newest")
get developers_path
assert_select "button.font-medium[value=newest]"
assert response.body.index("Newest") < response.body.index("Oldest")
end
test "developers can be sorted by availability" do
create_developer(hero: "Available", available_on: Date.yesterday)
get developers_path(sort: :availability)
assert_select "button.font-medium[value=availability]"
assert_select "h2", "Available"
end
test "developers can be filtered by time zone" do
create_developer(hero: "Pacific", utc_offset: PACIFIC_UTC_OFFSET)
get developers_path(utc_offsets: [PACIFIC_UTC_OFFSET])
assert_select "input[checked][type=checkbox][value=#{PACIFIC_UTC_OFFSET}][name='utc_offsets[]']"
assert_select "h2", "Pacific"
end
test "developers can be filtered by role type" do
create_developer(hero: "Part-time", role_type_attributes: {part_time_contract: true})
get developers_path(role_types: ["part_time_contract"])
assert_select "input[checked][type=checkbox][value=part_time_contract][name='role_types[]']"
assert_select "h2", "Part-time"
end
test "developers can be filtered by role level" do
create_developer(hero: "Mid", role_level_attributes: {mid: true})
get developers_path(role_levels: ["mid"])
assert_select "input[checked][type=checkbox][value=mid][name='role_levels[]']"
assert_select "h2", "Mid"
end
test "developers can be filtered by hero or bio" do
create_developer(hero: "OSS lover")
get developers_path(search_query: "OSS")
assert_select "h2", "OSS lover"
end
test "developers not interested in work can be shown" do
create_developer(hero: "Not interested", search_status: :not_interested)
get developers_path(include_not_interested: true)
assert_select "input[checked][type=checkbox][name='include_not_interested']"
assert_select "h2", "Not interested"
end
test "paginating filtered developers respects the filters" do
developers(:prospect).update!(available_on: Date.yesterday, search_status: :open)
with_pagy_default_items(1) do
get developers_path(sort: :availability)
assert_select "#developers h2", count: 1
assert_select "#mobile-filters h2", count: 1
assert_select "a[href=?]", "/developers?sort=availability&page=2"
end
end
test "cannot create new profile if already has one" do
sign_in users(:developer)
assert_no_difference "Developer.count" do
post developers_path, params: valid_developer_params
end
end
test "redirect to the edit profile when they try to enter developers/new, if they already have a profile" do
user = users(:developer)
sign_in user
get new_developer_path
assert_redirected_to edit_developer_path(user.developer)
end
test "successful profile creation" do
sign_in users(:empty)
assert_difference ["Developer.count", "Analytics::Event.count"], 1 do
post developers_path, params: valid_developer_params
end
assert_redirected_to analytics_event_path(Analytics::Event.last)
assert_equal Analytics::Event.last.url, developer_path(Developer.last)
end
test "create with nested attributes" do
user = users(:empty)
sign_in user
assert_difference "Developer.count", 1 do
params = valid_developer_params
params[:developer][:role_type_attributes] = {part_time_contract: true}
params[:developer][:role_level_attributes] = {senior: true}
post developers_path, params:
end
assert user.developer.role_type.part_time_contract?
assert user.developer.role_level.senior?
assert user.developer.avatar.attached?
end
test "custom develper meta tags are rendered" do
developer = developers(:one)
get developer_path(developer)
assert_title_contains developer.hero
assert_description_contains developer.bio
end
test "successful edit to profile" do
sign_in users(:developer)
developer = developers(:one)
get edit_developer_path(developer)
assert_select "form"
assert_select "#developer_avatar_hidden"
assert_select "#developer_cover_image_hidden"
patch developer_path(developer), params: {
developer: {
name: "New Name"
}
}
assert_redirected_to developer_path(developer)
follow_redirect!
assert_equal "New Name", developer.reload.name
end
test "edit with nested attributes" do
user = users(:developer)
sign_in user
patch developer_path(user.developer), params: {
developer: {
role_type_attributes: {
part_time_contract: true
},
role_level_attributes: {
junior: true,
mid: true
}
}
}
assert user.developer.reload.role_type.part_time_contract?
assert user.developer.reload.role_level.junior?
assert user.developer.reload.role_level.mid?
end
test "invalid profile creation" do
sign_in users(:empty)
assert_no_difference "Developer.count" do
post developers_path, params: {
developer: {
name: "Developer"
}
}
end
end
test "can edit own profile" do
sign_in users(:developer)
developer = developers(:one)
get edit_developer_path(developer)
assert_select "form"
assert_select "#developer_avatar_hidden"
assert_select "#developer_cover_image_hidden"
patch developer_path(developer), params: {
developer: {
name: "New Name"
}
}
assert_redirected_to developer_path(developer)
assert_equal "New Name", developer.reload.name
end
test "cannot edit another developer's profile" do
sign_in users(:developer)
developer = developers(:prospect)
get edit_developer_path(developer)
assert_redirected_to root_path
assert_no_changes "developer.name" do
patch developer_path(developer), params: {
developer: {
name: "New Name"
}
}
end
assert_redirected_to root_path
end
test "invalid form changes label color" do
sign_in users(:empty)
post developers_path, params: {
developer: {
name: ""
}
}
assert_select %(div.text-red-600 label[for="developer_name"])
assert_select %(div.text-red-600 label[for="developer_hero"])
assert_select %(div.text-red-600 label[for="developer_bio"])
end
test "pagination" do
get developers_path
assert_select "#developers"
end
# Canonical set of valid attributes for creating a developer, including two
# attached images and a nested location. Keep in sync with model validations.
def valid_developer_params
  {
    developer: {
      name: "Developer",
      available_on: Date.yesterday,
      hero: "A developer",
      bio: "I develop.",
      avatar: fixture_file_upload("lovelace.jpg", "image/jpeg"),
      cover_image: fixture_file_upload("mountains.jpg", "image/jpeg"),
      location_attributes: {
        city: "City"
      }
    }
  }
end
end
| 27.832143 | 110 | 0.70512 |
6ab9bdde67ed9416611ed41b8bb3e9820e7b53a3 | 503 | Pod::Spec.new do |s|
  # Pod identity and short description shown on cocoapods.org.
  s.name = 'Colours'
  s.version = '4.2'
  s.summary = '100s of beautiful, predefined Colors and Color methods. Works for iOS/OSX.'
  s.author = {
    'Ben Gordon' => '[email protected]'
  }
  # Where CocoaPods fetches the code; the tag must match s.version.
  s.source = {
    :git => 'https://github.com/bennyguitar/Colours.git',
    :tag => '4.2'
  }
  s.homepage = 'https://github.com/bennyguitar'
  s.license = 'LICENSE'
  # All Objective-C sources/headers live at the repository root.
  s.source_files = '*.{h,m}'
  # Minimum deployment targets per platform.
  s.ios.deployment_target = '5.0'
  s.osx.deployment_target = '10.7'
end
21a32f4d5147380b4f060200013bc395f66e8e08 | 995 | require 'puppet/indirector/facts/yaml'
require 'puppet/util/profiler'
require 'puppet/util/hdp'
require 'json'
require 'time'
# HDP Facts
# Facts terminus that mirrors fact submissions to HDP while keeping the
# standard YAML cache behaviour of the parent (Yaml) terminus.
class Puppet::Node::Facts::HDP < Puppet::Node::Facts::Yaml
  desc 'Save facts to HDP and then to yamlcache.'
  include Puppet::Util::HDP

  # Run +block+ under Puppet's profiler with an "HDP: "-prefixed message.
  # Puppet changed Profiler.profile's signature over time (1 arg vs
  # message + metric_id), so dispatch on the method's arity to stay
  # compatible with both APIs.
  def profile(message, metric_id, &block)
    message = 'HDP: ' + message
    arity = Puppet::Util::Profiler.method(:profile).arity
    case arity
    when 1
      Puppet::Util::Profiler.profile(message, &block)
    when 2, -2
      Puppet::Util::Profiler.profile(message, metric_id, &block)
    end
  end

  # Persist facts: write to the YAML cache first (super), then push the same
  # facts to HDP. HDP failures are logged but never abort the save.
  def save(request)
    # yaml cache goes first
    super(request)
    profile('hdp_facts#save', [:hdp, :facts, :save, request.key]) do
      begin
        Puppet.info 'Submitting facts to HDP'
        current_time = Time.now
        # clone before utc: Time#utc converts its receiver in place
        send_facts(request, current_time.clone.utc)
      rescue StandardError => e
        Puppet.err "Could not send facts to HDP: #{e}\n#{e.backtrace}"
      end
    end
  end
end
| 25.512821 | 70 | 0.660302 |
5d5c9b57067f7b46dae49a56b863a8e361067983 | 1,772 | #
# Cookbook Name:: arcgis-mission
# Recipe:: federation
#
# Copyright 2020 Esri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Federate the ArcGIS Mission Server with the Enterprise portal. Retries
# allow for the portal/server services still warming up when this runs.
arcgis_enterprise_portal 'Federate Mission Server' do
  portal_url node['arcgis']['portal']['wa_url']
  username node['arcgis']['portal']['admin_username']
  password node['arcgis']['portal']['admin_password']
  server_url node['arcgis']['mission_server']['web_context_url']
  server_admin_url node['arcgis']['mission_server']['private_url']
  server_username node['arcgis']['mission_server']['admin_username']
  server_password node['arcgis']['mission_server']['admin_password']
  # Mission Server is federated as a non-hosting server.
  is_hosting false
  retries 5
  retry_delay 30
  action :federate_server
end
# After federation, assign the 'MissionServer' role to the federated server
# so the portal routes mission workloads to it.
arcgis_enterprise_portal 'Enable MissionServer server function' do
  portal_url node['arcgis']['portal']['wa_url']
  username node['arcgis']['portal']['admin_username']
  password node['arcgis']['portal']['admin_password']
  server_url node['arcgis']['mission_server']['web_context_url']
  server_admin_url node['arcgis']['mission_server']['private_url']
  server_username node['arcgis']['mission_server']['admin_username']
  server_password node['arcgis']['mission_server']['admin_password']
  server_function 'MissionServer'
  is_hosting false
  action :enable_server_function
end
| 38.521739 | 74 | 0.757336 |
4a6aa434076e3e847d503aa677652bcd687323b5 | 9,497 | # frozen_string_literal: true
require_relative '../test_helper'
require_relative '../mock_helpers/pagy_buggy'
require_relative '../mock_helpers/app'
# in test we cannot use the Pagy::I18n.load method because
# it would freeze the Pagy::I18n::DATA hash, so i18n_load
# does the same as Pagy::I18n.load, just omitting the freeze
#
# locales: one or more locale config hashes (:locale, :filepath, :pluralize)
def i18n_load(*locales)
  Pagy::I18n::DATA.clear
  Pagy::I18n.send(:build, *locales) # private API: rebuild DATA without freezing
end
describe 'pagy/frontend' do
let(:app) { MockApp.new }
describe '#pagy_nav' do
it 'renders page 1' do
pagy = Pagy.new count: 103, page: 1
_(app.pagy_nav(pagy)).must_rematch
_(app.pagy_nav(pagy, pagy_id: 'test-nav-id', link_extra: 'link-extra')).must_rematch
end
it 'renders page 3' do
pagy = Pagy.new count: 103, page: 3
_(app.pagy_nav(pagy)).must_rematch
_(app.pagy_nav(pagy, pagy_id: 'test-nav-id', link_extra: 'link-extra')).must_rematch
end
it 'renders page 6' do
pagy = Pagy.new count: 103, page: 6
_(app.pagy_nav(pagy)).must_rematch
_(app.pagy_nav(pagy, pagy_id: 'test-nav-id', link_extra: 'link-extra')).must_rematch
end
it 'renders page 10' do
pagy = Pagy.new count: 1000, page: 10
_(app.pagy_nav(pagy)).must_rematch
_(app.pagy_nav(pagy, pagy_id: 'test-nav-id', link_extra: 'link-extra')).must_rematch
end
it 'renders with link_extras' do
pagy = Pagy.new count: 103, page: 1, link_extra: "X"
_(app.pagy_nav(pagy)).must_include '?page=2" X rel'
_(app.pagy_nav(pagy, link_extra: 'link-extra')).must_include '?page=2" X link-extra rel'
end
it 'should raise for wrong series' do
_ { app.pagy_nav(PagyBuggy.new(count: 100)) }.must_raise Pagy::InternalError
end
end
describe '#pagy_link_proc' do
it 'renders with extras' do
pagy = Pagy.new count: 103, page: 1
_(app.pagy_link_proc(pagy, link_extra: "X").call(1)).must_equal '<a href="/foo?page=1" X >1</a>'
end
end
describe '#pagy_t' do
it 'pluralizes' do
_(app.pagy_t('pagy.nav.prev')).must_equal "‹ Prev"
_(app.pagy_t('pagy.item_name', count: 0)).must_equal "items"
_(app.pagy_t('pagy.item_name', count: 1)).must_equal "item"
_(app.pagy_t('pagy.item_name', count: 10)).must_equal "items"
end
# rubocop:disable Style/FormatStringToken
it 'interpolates' do
_(app.pagy_t('pagy.info.no_items', count: 0)).must_equal "No %{item_name} found"
_(app.pagy_t('pagy.info.single_page', count: 1)).must_equal "Displaying <b>1</b> %{item_name}"
_(app.pagy_t('pagy.info.single_page', count: 10)).must_equal "Displaying <b>10</b> %{item_name}"
_(app.pagy_t('pagy.info.multiple_pages', count: 10)).must_equal "Displaying %{item_name} <b>%{from}-%{to}</b> of <b>10</b> in total"
_(app.pagy_t('pagy.info.multiple_pages', item_name: 'Products', count: 300, from: 101, to: 125)).must_equal "Displaying Products <b>101-125</b> of <b>300</b> in total"
end
# rubocop:enable Style/FormatStringToken
it 'handles missing keys' do
_(app.pagy_t('pagy.nav.not_here')).must_equal '[translation missing: "pagy.nav.not_here"]'
_(app.pagy_t('pagy.nav.gap.not_here')).must_equal '[translation missing: "pagy.nav.gap.not_here"]'
end
end
describe "Pagy::I18n" do
it 'loads custom :locale, :filepath and :pluralize' do
_(proc { i18n_load(locale: 'xx') }).must_raise Errno::ENOENT
_(proc { i18n_load(locale: 'xx', filepath: Pagy.root.join('locales', 'en.yml')) }).must_raise Pagy::I18nError
_(proc { i18n_load(locale: 'en', filepath: Pagy.root.join('locales', 'xx.yml')) }).must_raise Errno::ENOENT
custom_dictionary = Pagy.root.parent.join('test', 'files', 'custom.yml')
i18n_load(locale: 'custom', filepath: custom_dictionary)
_(Pagy::I18n.t('custom', 'pagy.nav.prev')).must_equal "‹ Custom Prev"
i18n_load(locale: 'en', pluralize: ->(_) { 'one' }) # returns always 'one' regardless the count
_(Pagy::I18n.t(nil, 'pagy.item_name', count: 0)).must_equal "item"
_(Pagy::I18n.t('en', 'pagy.item_name', count: 0)).must_equal "item"
_(Pagy::I18n.t('en', 'pagy.item_name', count: 1)).must_equal "item"
_(Pagy::I18n.t('en', 'pagy.item_name', count: 10)).must_equal "item"
i18n_load(locale: 'en') # reset for other tests
end
it 'switches :locale according to @pagy_locale' do
i18n_load({ locale: 'de' }, { locale: 'en' }, { locale: 'nl' })
app.instance_variable_set(:@pagy_locale, 'nl')
_(app.pagy_t('pagy.item_name', count: 1)).must_equal "stuk"
app.instance_variable_set(:@pagy_locale, 'en')
_(app.pagy_t('pagy.item_name', count: 1)).must_equal "item"
app.instance_variable_set(:@pagy_locale, nil)
_(app.pagy_t('pagy.item_name', count: 1)).must_equal "Eintrag"
app.instance_variable_set(:@pagy_locale, 'unknown')
_(app.pagy_t('pagy.item_name', count: 1)).must_equal "Eintrag" # silently serves the first loaded locale
i18n_load(locale: 'en') # reset for other tests
app.instance_variable_set(:@pagy_locale, nil) # reset for other tests
end
end
describe '#pagy_info' do
it 'renders without i18n key' do
_(app.pagy_info(Pagy.new(count: 0))).must_equal '<span class="pagy-info">No items found</span>'
_(app.pagy_info(Pagy.new(count: 1))).must_equal '<span class="pagy-info">Displaying <b>1</b> item</span>'
_(app.pagy_info(Pagy.new(count: 13))).must_equal '<span class="pagy-info">Displaying <b>13</b> items</span>'
_(app.pagy_info(Pagy.new(count: 100, page: 3))).must_equal '<span class="pagy-info">Displaying items <b>41-60</b> of <b>100</b> in total</span>'
end
it 'renders with existing i18n key' do
Pagy::I18n::DATA['en'][0]['pagy.info.product.one'] = 'Product'
Pagy::I18n::DATA['en'][0]['pagy.info.product.other'] = 'Products'
_(app.pagy_info(Pagy.new(count: 0, i18n_key: 'pagy.info.product'))).must_equal '<span class="pagy-info">No Products found</span>'
_(app.pagy_info(Pagy.new(count: 1, i18n_key: 'pagy.info.product'))).must_equal '<span class="pagy-info">Displaying <b>1</b> Product</span>'
_(app.pagy_info(Pagy.new(count: 13, i18n_key: 'pagy.info.product'))).must_equal '<span class="pagy-info">Displaying <b>13</b> Products</span>'
_(app.pagy_info(Pagy.new(count: 100, i18n_key: 'pagy.info.product', page: 3))).must_equal '<span class="pagy-info">Displaying Products <b>41-60</b> of <b>100</b> in total</span>'
_(app.pagy_info(Pagy.new(count: 0), i18n_key: 'pagy.info.product')).must_equal '<span class="pagy-info">No Products found</span>'
_(app.pagy_info(Pagy.new(count: 1), i18n_key: 'pagy.info.product')).must_equal '<span class="pagy-info">Displaying <b>1</b> Product</span>'
_(app.pagy_info(Pagy.new(count: 13), i18n_key: 'pagy.info.product')).must_equal '<span class="pagy-info">Displaying <b>13</b> Products</span>'
_(app.pagy_info(Pagy.new(count: 100, page: 3), i18n_key: 'pagy.info.product')).must_equal '<span class="pagy-info">Displaying Products <b>41-60</b> of <b>100</b> in total</span>'
i18n_load(locale: 'en') # reset for other tests
end
it 'overrides the item_name and set pagy_id' do
_(app.pagy_info(Pagy.new(count: 0), pagy_id: 'pagy-info', item_name: 'Widgets')).must_equal '<span id="pagy-info" class="pagy-info">No Widgets found</span>'
_(app.pagy_info(Pagy.new(count: 1), pagy_id: 'pagy-info', item_name: 'Widget')).must_equal '<span id="pagy-info" class="pagy-info">Displaying <b>1</b> Widget</span>'
_(app.pagy_info(Pagy.new(count: 13), pagy_id: 'pagy-info', item_name: 'Widgets')).must_equal '<span id="pagy-info" class="pagy-info">Displaying <b>13</b> Widgets</span>'
_(app.pagy_info(Pagy.new(count: 100, page: 3), pagy_id: 'pagy-info', item_name: 'Widgets')).must_equal '<span id="pagy-info" class="pagy-info">Displaying Widgets <b>41-60</b> of <b>100</b> in total</span>'
end
end
describe '#pagy_url_for' do
it 'renders basic url' do
pagy = Pagy.new count: 1000, page: 3
_(app.pagy_url_for(pagy, 5)).must_equal '/foo?page=5'
_(app.pagy_url_for(pagy, 5, absolute: true)).must_equal 'http://example.com:3000/foo?page=5'
end
it 'renders url with params' do
pagy = Pagy.new count: 1000, page: 3, params: { a: 3, b: 4 }
_(app.pagy_url_for(pagy, 5)).must_equal "/foo?page=5&a=3&b=4"
_(app.pagy_url_for(pagy, 5, absolute: true)).must_equal "http://example.com:3000/foo?page=5&a=3&b=4"
end
it 'renders url with fragment' do
pagy = Pagy.new count: 1000, page: 3, fragment: '#fragment'
_(app.pagy_url_for(pagy, 6)).must_equal '/foo?page=6#fragment'
_(app.pagy_url_for(pagy, 6, absolute: true)).must_equal 'http://example.com:3000/foo?page=6#fragment'
end
it 'renders url with params and fragment' do
pagy = Pagy.new count: 1000, page: 3, params: { a: 3, b: 4 }, fragment: '#fragment'
_(app.pagy_url_for(pagy, 5)).must_equal "/foo?page=5&a=3&b=4#fragment"
_(app.pagy_url_for(pagy, 5, absolute: true)).must_equal "http://example.com:3000/foo?page=5&a=3&b=4#fragment"
end
end
describe '#pagy_get_params' do
it 'overrides params' do
overridden = MockApp::Overridden.new(params: { delete_me: 'delete_me', a: 5 })
pagy = Pagy.new count: 1000, page: 3, params: { b: 4 }, fragment: '#fragment'
_(overridden.pagy_url_for(pagy, 5)).must_equal "/foo?a=5&b=4&page=5&add_me=add_me#fragment"
end
end
end
| 56.529762 | 211 | 0.66442 |
26af54de9165a0cb2215bec35480037594a89476 | 46,439 | require 'active_resource/connection'
require 'cgi'
require 'set'
module ActiveResource
# ActiveResource::Base is the main class for mapping RESTful resources as models in a Rails application.
#
# For an outline of what Active Resource is capable of, see link:files/vendor/rails/activeresource/README.html.
#
# == Automated mapping
#
# Active Resource objects represent your RESTful resources as manipulatable Ruby objects. To map resources
# to Ruby objects, Active Resource only needs a class name that corresponds to the resource name (e.g., the class
# Person maps to the resources people, very similarly to Active Record) and a +site+ value, which holds the
# URI of the resources.
#
# class Person < ActiveResource::Base
# self.site = "http://api.people.com:3000/"
# end
#
# Now the Person class is mapped to RESTful resources located at <tt>http://api.people.com:3000/people/</tt>, and
# you can now use Active Resource's lifecycles methods to manipulate resources. In the case where you already have
# an existing model with the same name as the desired RESTful resource you can set the +element_name+ value.
#
# class PersonResource < ActiveResource::Base
# self.site = "http://api.people.com:3000/"
# self.element_name = "person"
# end
#
#
# == Lifecycle methods
#
# Active Resource exposes methods for creating, finding, updating, and deleting resources
# from REST web services.
#
# ryan = Person.new(:first => 'Ryan', :last => 'Daigle')
# ryan.save # => true
# ryan.id # => 2
# Person.exists?(ryan.id) # => true
# ryan.exists? # => true
#
# ryan = Person.find(1)
# # Resource holding our newly created Person object
#
# ryan.first = 'Rizzle'
# ryan.save # => true
#
# ryan.destroy # => true
#
# As you can see, these are very similar to Active Record's lifecycle methods for database records.
# You can read more about each of these methods in their respective documentation.
#
# === Custom REST methods
#
# Since simple CRUD/lifecycle methods can't accomplish every task, Active Resource also supports
# defining your own custom REST methods. To invoke them, Active Resource provides the <tt>get</tt>,
# <tt>post</tt>, <tt>put</tt> and <tt>\delete</tt> methods where you can specify a custom REST method
# name to invoke.
#
# # POST to the custom 'register' REST method, i.e. POST /people/new/register.xml.
# Person.new(:name => 'Ryan').post(:register)
# # => { :id => 1, :name => 'Ryan', :position => 'Clerk' }
#
# # PUT an update by invoking the 'promote' REST method, i.e. PUT /people/1/promote.xml?position=Manager.
# Person.find(1).put(:promote, :position => 'Manager')
# # => { :id => 1, :name => 'Ryan', :position => 'Manager' }
#
# # GET all the positions available, i.e. GET /people/positions.xml.
# Person.get(:positions)
# # => [{:name => 'Manager'}, {:name => 'Clerk'}]
#
# # DELETE to 'fire' a person, i.e. DELETE /people/1/fire.xml.
# Person.find(1).delete(:fire)
#
# For more information on using custom REST methods, see the
# ActiveResource::CustomMethods documentation.
#
# == Validations
#
# You can validate resources client side by overriding validation methods in the base class.
#
# class Person < ActiveResource::Base
# self.site = "http://api.people.com:3000/"
# protected
# def validate
# errors.add("last", "has invalid characters") unless last =~ /[a-zA-Z]*/
# end
# end
#
# See the ActiveResource::Validations documentation for more information.
#
# == Authentication
#
# Many REST APIs will require authentication, usually in the form of basic
# HTTP authentication. Authentication can be specified by:
#
# === HTTP Basic Authentication
# * putting the credentials in the URL for the +site+ variable.
#
# class Person < ActiveResource::Base
# self.site = "http://ryan:[email protected]:3000/"
# end
#
# * defining +user+ and/or +password+ variables
#
# class Person < ActiveResource::Base
# self.site = "http://api.people.com:3000/"
# self.user = "ryan"
# self.password = "password"
# end
#
# For obvious security reasons, it is probably best if such services are available
# over HTTPS.
#
# Note: Some values cannot be provided in the URL passed to site. e.g. email addresses
# as usernames. In those situations you should use the separate user and password option.
#
# === Certificate Authentication
#
# * End point uses an X509 certificate for authentication. <tt>See ssl_options=</tt> for all options.
#
# class Person < ActiveResource::Base
# self.site = "https://secure.api.people.com/"
# self.ssl_options = {:cert => OpenSSL::X509::Certificate.new(File.open(pem_file))
# :key => OpenSSL::PKey::RSA.new(File.open(pem_file)),
# :ca_path => "/path/to/OpenSSL/formatted/CA_Certs",
# :verify_mode => OpenSSL::SSL::VERIFY_PEER}
# end
#
# == Errors & Validation
#
# Error handling and validation is handled in much the same manner as you're used to seeing in
# Active Record. Both the response code in the HTTP response and the body of the response are used to
# indicate that an error occurred.
#
# === Resource errors
#
# When a GET is requested for a resource that does not exist, the HTTP <tt>404</tt> (Resource Not Found)
# response code will be returned from the server which will raise an ActiveResource::ResourceNotFound
# exception.
#
# # GET http://api.people.com:3000/people/999.xml
# ryan = Person.find(999) # 404, raises ActiveResource::ResourceNotFound
#
# <tt>404</tt> is just one of the HTTP error response codes that Active Resource will handle with its own exception. The
# following HTTP response codes will also result in these exceptions:
#
# * 200..399 - Valid response, no exception (other than 301, 302)
# * 301, 302 - ActiveResource::Redirection
# * 400 - ActiveResource::BadRequest
# * 401 - ActiveResource::UnauthorizedAccess
# * 403 - ActiveResource::ForbiddenAccess
# * 404 - ActiveResource::ResourceNotFound
# * 405 - ActiveResource::MethodNotAllowed
# * 409 - ActiveResource::ResourceConflict
# * 410 - ActiveResource::ResourceGone
# * 422 - ActiveResource::ResourceInvalid (rescued by save as validation errors)
# * 401..499 - ActiveResource::ClientError
# * 500..599 - ActiveResource::ServerError
# * Other - ActiveResource::ConnectionError
#
# These custom exceptions allow you to deal with resource errors more naturally and with more precision
# rather than returning a general HTTP error. For example:
#
# begin
# ryan = Person.find(my_id)
# rescue ActiveResource::ResourceNotFound
# redirect_to :action => 'not_found'
# rescue ActiveResource::ResourceConflict, ActiveResource::ResourceInvalid
# redirect_to :action => 'new'
# end
#
# === Validation errors
#
# Active Resource supports validations on resources and will return errors if any these validations fail
# (e.g., "First name can not be blank" and so on). These types of errors are denoted in the response by
# a response code of <tt>422</tt> and an XML or JSON representation of the validation errors. The save operation will
# then fail (with a <tt>false</tt> return value) and the validation errors can be accessed on the resource in question.
#
# ryan = Person.find(1)
# ryan.first # => ''
# ryan.save # => false
#
# # When
# # PUT http://api.people.com:3000/people/1.xml
# # or
# # PUT http://api.people.com:3000/people/1.json
# # is requested with invalid values, the response is:
# #
# # Response (422):
# # <errors type="array"><error>First cannot be empty</error></errors>
# # or
# # {"errors":["First cannot be empty"]}
# #
#
# ryan.errors.invalid?(:first) # => true
# ryan.errors.full_messages # => ['First cannot be empty']
#
# Learn more about Active Resource's validation features in the ActiveResource::Validations documentation.
#
# === Timeouts
#
# Active Resource relies on HTTP to access RESTful APIs and as such is inherently susceptible to slow or
# unresponsive servers. In such cases, your Active Resource method calls could \timeout. You can control the
# amount of time before Active Resource times out with the +timeout+ variable.
#
# class Person < ActiveResource::Base
# self.site = "http://api.people.com:3000/"
# self.timeout = 5
# end
#
# This sets the +timeout+ to 5 seconds. You can adjust the +timeout+ to a value suitable for the RESTful API
# you are accessing. It is recommended to set this to a reasonably low value to allow your Active Resource
# clients (especially if you are using Active Resource in a Rails application) to fail-fast (see
# http://en.wikipedia.org/wiki/Fail-fast) rather than cause cascading failures that could incapacitate your
# server.
#
# When a \timeout occurs, an ActiveResource::TimeoutError is raised. You should rescue from
# ActiveResource::TimeoutError in your Active Resource method calls.
#
# Internally, Active Resource relies on Ruby's Net::HTTP library to make HTTP requests. Setting +timeout+
# sets the <tt>read_timeout</tt> of the internal Net::HTTP instance to the same value. The default
# <tt>read_timeout</tt> is 60 seconds on most Ruby implementations.
class Base
##
# :singleton-method:
# The logger for diagnosing and tracing Active Resource calls.
cattr_accessor :logger
# Controls the top-level behavior of JSON serialization
cattr_accessor :include_root_in_json, :instance_writer => false
class << self
# Gets the URI of the REST resources to map for this class. The site variable is required for
# Active Resource's mapping to work.
def site
# Not using superclass_delegating_reader because don't want subclasses to modify superclass instance
#
# With superclass_delegating_reader
#
# Parent.site = 'http://[email protected]'
# Subclass.site # => 'http://[email protected]'
# Subclass.site.user = 'david'
# Parent.site # => 'http://[email protected]'
#
# Without superclass_delegating_reader (expected behaviour)
#
# Parent.site = 'http://[email protected]'
# Subclass.site # => 'http://[email protected]'
# Subclass.site.user = 'david' # => TypeError: can't modify frozen object
#
if defined?(@site)
@site
elsif superclass != Object && superclass.site
superclass.site.dup.freeze
end
end
# Sets the URI of the REST resources to map for this class to the value in the +site+ argument.
# The site variable is required for Active Resource's mapping to work.
def site=(site)
@connection = nil
if site.nil?
@site = nil
else
@site = create_site_uri_from(site)
@user = URI.decode(@site.user) if @site.user
@password = URI.decode(@site.password) if @site.password
end
end
# Gets the \proxy variable if a proxy is required
def proxy
# Not using superclass_delegating_reader. See +site+ for explanation
if defined?(@proxy)
@proxy
elsif superclass != Object && superclass.proxy
superclass.proxy.dup.freeze
end
end
# Sets the URI of the http proxy to the value in the +proxy+ argument.
def proxy=(proxy)
@connection = nil
@proxy = proxy.nil? ? nil : create_proxy_uri_from(proxy)
end
# Gets the \user for REST HTTP authentication.
def user
# Not using superclass_delegating_reader. See +site+ for explanation
if defined?(@user)
@user
elsif superclass != Object && superclass.user
superclass.user.dup.freeze
end
end
# Sets the \user for REST HTTP authentication.
def user=(user)
@connection = nil
@user = user
end
# Gets the \password for REST HTTP authentication.
def password
# Not using superclass_delegating_reader. See +site+ for explanation
if defined?(@password)
@password
elsif superclass != Object && superclass.password
superclass.password.dup.freeze
end
end
# Sets the \password for REST HTTP authentication.
def password=(password)
@connection = nil
@password = password
end
# Sets the format that attributes are sent and received in from a mime type reference:
#
# Person.format = :json
# Person.find(1) # => GET /people/1.json
#
# Person.format = ActiveResource::Formats::XmlFormat
# Person.find(1) # => GET /people/1.xml
#
# Default format is <tt>:xml</tt>.
def format=(mime_type_reference_or_format)
format = mime_type_reference_or_format.is_a?(Symbol) ?
ActiveResource::Formats[mime_type_reference_or_format] : mime_type_reference_or_format
write_inheritable_attribute(:format, format)
connection.format = format if site
end
# Returns the current format, default is ActiveResource::Formats::XmlFormat.
def format
read_inheritable_attribute(:format) || ActiveResource::Formats[:xml]
end
# Sets the number of seconds after which requests to the REST API should time out.
def timeout=(timeout)
@connection = nil
@timeout = timeout
end
# Gets the number of seconds after which requests to the REST API should time out.
def timeout
if defined?(@timeout)
@timeout
elsif superclass != Object && superclass.timeout
superclass.timeout
end
end
# Options that will get applied to an SSL connection.
#
# * <tt>:key</tt> - An OpenSSL::PKey::RSA or OpenSSL::PKey::DSA object.
# * <tt>:cert</tt> - An OpenSSL::X509::Certificate object as client certificate
      # * <tt>:ca_file</tt> - Path to a CA certification file in PEM format. The file can contain several CA certificates.
# * <tt>:ca_path</tt> - Path of a CA certification directory containing certifications in PEM format.
      # * <tt>:verify_mode</tt> - Flags for the server certification verification at the beginning of the SSL/TLS session. (OpenSSL::SSL::VERIFY_NONE or OpenSSL::SSL::VERIFY_PEER is acceptable)
# * <tt>:verify_callback</tt> - The verify callback for the server certification verification.
# * <tt>:verify_depth</tt> - The maximum depth for the certificate chain verification.
# * <tt>:cert_store</tt> - OpenSSL::X509::Store to verify peer certificate.
# * <tt>:ssl_timeout</tt> -The SSL timeout in seconds.
def ssl_options=(opts={})
@connection = nil
@ssl_options = opts
end
# Returns the SSL options hash.
def ssl_options
if defined?(@ssl_options)
@ssl_options
elsif superclass != Object && superclass.ssl_options
superclass.ssl_options
end
end
# An instance of ActiveResource::Connection that is the base \connection to the remote service.
# The +refresh+ parameter toggles whether or not the \connection is refreshed at every request
# or not (defaults to <tt>false</tt>).
def connection(refresh = false)
if defined?(@connection) || superclass == Object
@connection = Connection.new(site, format) if refresh || @connection.nil?
@connection.proxy = proxy if proxy
@connection.user = user if user
@connection.password = password if password
@connection.timeout = timeout if timeout
@connection.ssl_options = ssl_options if ssl_options
@connection
else
superclass.connection
end
end
def headers
@headers ||= {}
end
# Do not include any modules in the default element name. This makes it easier to seclude ARes objects
# in a separate namespace without having to set element_name repeatedly.
attr_accessor_with_default(:element_name) { to_s.split("::").last.underscore } #:nodoc:
attr_accessor_with_default(:collection_name) { element_name.pluralize } #:nodoc:
attr_accessor_with_default(:primary_key, 'id') #:nodoc:
# Gets the \prefix for a resource's nested URL (e.g., <tt>prefix/collectionname/1.xml</tt>)
# This method is regenerated at runtime based on what the \prefix is set to.
def prefix(options={})
default = site.path
default << '/' unless default[-1..-1] == '/'
# generate the actual method based on the current site path
self.prefix = default
prefix(options)
end
# An attribute reader for the source string for the resource path \prefix. This
# method is regenerated at runtime based on what the \prefix is set to.
def prefix_source
prefix # generate #prefix and #prefix_source methods first
prefix_source
end
# Sets the \prefix for a resource's nested URL (e.g., <tt>prefix/collectionname/1.xml</tt>).
# Default value is <tt>site.path</tt>.
def prefix=(value = '/')
# Replace :placeholders with '#{embedded options[:lookups]}'
prefix_call = value.gsub(/:\w+/) { |key| "\#{options[#{key}]}" }
# Clear prefix parameters in case they have been cached
@prefix_parameters = nil
# Redefine the new methods.
code, line = <<-end_code, __LINE__ + 1
def prefix_source() "#{value}" end
def prefix(options={}) "#{prefix_call}" end
end_code
silence_warnings { instance_eval code, __FILE__, line }
rescue
logger.error "Couldn't set prefix: #{$!}\n #{code}"
raise
end
alias_method :set_prefix, :prefix= #:nodoc:
alias_method :set_element_name, :element_name= #:nodoc:
alias_method :set_collection_name, :collection_name= #:nodoc:
# Gets the element path for the given ID in +id+. If the +query_options+ parameter is omitted, Rails
# will split from the \prefix options.
#
# ==== Options
# +prefix_options+ - A \hash to add a \prefix to the request for nested URLs (e.g., <tt>:account_id => 19</tt>
# would yield a URL like <tt>/accounts/19/purchases.xml</tt>).
# +query_options+ - A \hash to add items to the query string for the request.
#
# ==== Examples
# Post.element_path(1)
# # => /posts/1.xml
#
# Comment.element_path(1, :post_id => 5)
# # => /posts/5/comments/1.xml
#
# Comment.element_path(1, :post_id => 5, :active => 1)
# # => /posts/5/comments/1.xml?active=1
#
# Comment.element_path(1, {:post_id => 5}, {:active => 1})
# # => /posts/5/comments/1.xml?active=1
#
def element_path(id, prefix_options = {}, query_options = nil)
prefix_options, query_options = split_options(prefix_options) if query_options.nil?
"#{prefix(prefix_options)}#{collection_name}/#{_encode_www_form_component(id.to_s)}.#{format.extension}#{query_string(query_options)}"
end
# Gets the collection path for the REST resources. If the +query_options+ parameter is omitted, Rails
# will split from the +prefix_options+.
#
# ==== Options
# * +prefix_options+ - A hash to add a prefix to the request for nested URL's (e.g., <tt>:account_id => 19</tt>
# would yield a URL like <tt>/accounts/19/purchases.xml</tt>).
# * +query_options+ - A hash to add items to the query string for the request.
#
# ==== Examples
# Post.collection_path
# # => /posts.xml
#
# Comment.collection_path(:post_id => 5)
# # => /posts/5/comments.xml
#
# Comment.collection_path(:post_id => 5, :active => 1)
# # => /posts/5/comments.xml?active=1
#
# Comment.collection_path({:post_id => 5}, {:active => 1})
# # => /posts/5/comments.xml?active=1
#
def collection_path(prefix_options = {}, query_options = nil)
prefix_options, query_options = split_options(prefix_options) if query_options.nil?
"#{prefix(prefix_options)}#{collection_name}.#{format.extension}#{query_string(query_options)}"
end
alias_method :set_primary_key, :primary_key= #:nodoc:
# Builds a resource from +attributes+ and immediately asks the remote
# service to save it — equivalent to calling Person.new(...) followed by
# #save. Returns the new resource either way; when the save fails
# validation the instance comes back with valid? => false and
# new? => true.
#
# ==== Examples
#   Person.create(:name => 'Jeremy', :email => '[email protected]', :enabled => true)
#   dhh = Person.create(:name => 'David', :email => '[email protected]', :enabled => true)
#   dhh.valid? # => true
#   dhh.new?   # => false
def create(attributes = {})
  resource = self.new(attributes)
  resource.save
  resource
end
# Core finder, used much like Active Record's +find+.
#
# The first argument is the scope — how many resources come back:
# * <tt>:one</tt>   - a single resource (requires <tt>:from</tt>).
# * <tt>:first</tt> - the first resource found.
# * <tt>:last</tt>  - the last resource found.
# * <tt>:all</tt>   - every resource matching the request.
# Anything else is treated as a record id.
#
# ==== Options
# * <tt>:from</tt>   - path or custom method resources are fetched from.
# * <tt>:params</tt> - query and prefix (nested URL) parameters.
#
# ==== Examples
#   Person.find(1)                                        # => GET /people/1.xml
#   Person.find(:all, :params => { :title => "CEO" })     # => GET /people.xml?title=CEO
#   Person.find(:first, :from => :managers)               # => GET /people/managers.xml
#   Person.find(:one, :from => "/companies/1/manager.xml")
#   StreetAddress.find(1, :params => { :person_id => 1 }) # => GET /people/1/street_addresses/1.xml
def find(*arguments)
  scope = arguments.shift
  options = arguments.shift || {}

  case scope
  when :all
    find_every(options)
  when :first
    find_every(options).first
  when :last
    find_every(options).last
  when :one
    find_one(options)
  else
    find_single(scope, options)
  end
end
# Deletes the resource with the given +id+ from the remote service.
# +options+ supply prefix and query parameters for the element path.
#
# ==== Examples
#   Event.delete(2)           # sends DELETE /events/2
#   Event.delete(params[:id]) # sends DELETE /events/5
def delete(id, options = {})
  path = element_path(id, options)
  connection.delete(path)
end
# Asserts the existence of a resource via a HEAD request. Returns
# <tt>true</tt> when the service answers 200, <tt>false</tt> when it
# reports the resource missing (404) or gone (410), and +nil+ when +id+
# itself is blank.
#
# ==== Examples
#   Note.create(:title => 'Hello, world.', :body => 'Nothing more for now...')
#   Note.exists?(1)    # => true
#   Note.exists?(1349) # => false
def exists?(id, options = {})
  if id
    prefix_options, query_options = split_options(options[:params])
    path = element_path(id, prefix_options, query_options)
    response = connection.head(path, headers)
    # Only a literal 200 counts as "exists"; other 2xx codes do not.
    response.code.to_i == 200
  end
rescue ActiveResource::ResourceNotFound, ActiveResource::ResourceGone
  false
end
private
# Fetches every matching resource. A Symbol :from is treated as a custom
# GET method, a String :from as a literal path; otherwise the default
# collection path is derived from the split params.
def find_every(options)
  params = options[:params]

  case from = options[:from]
  when Symbol
    instantiate_collection(get(from, params))
  when String
    path = "#{from}#{query_string(params)}"
    instantiate_collection(connection.get(path, headers) || [])
  else
    prefix_options, query_options = split_options(params)
    records = connection.get(collection_path(prefix_options, query_options), headers) || []
    instantiate_collection(records, prefix_options)
  end
end
# Fetches a single resource from a one-off URL; :from must be a Symbol
# (custom method) or a String (literal path). Returns nil otherwise.
def find_one(options)
  params = options[:params]

  case from = options[:from]
  when Symbol
    instantiate_record(get(from, params))
  when String
    path = "#{from}#{query_string(params)}"
    instantiate_record(connection.get(path, headers))
  end
end
# Fetches a single resource by id from the default element URL.
def find_single(scope, options)
  prefix_options, query_options = split_options(options[:params])
  record = connection.get(element_path(scope, prefix_options, query_options), headers)
  instantiate_record(record, prefix_options)
end
# Replaces each raw attribute hash in +collection+ with a resource
# instance (mutates and returns +collection+ itself).
def instantiate_collection(collection, prefix_options = {})
  collection.map! { |attrs| instantiate_record(attrs, prefix_options) }
end

# Builds a single resource from +record+, remembering the prefix
# options that were used to locate it.
def instantiate_record(record, prefix_options = {})
  resource = new(record)
  resource.prefix_options = prefix_options
  resource
end
# Returns a URI for +site+, duplicating it when it is already a URI so
# later mutation cannot affect the caller's object.
def create_site_uri_from(site)
  return site.dup if site.is_a?(URI)
  URI.parse(site)
end

# Returns a URI for +proxy+, duplicating it when it is already a URI.
def create_proxy_uri_from(proxy)
  return proxy.dup if proxy.is_a?(URI)
  URI.parse(proxy)
end
# Memoized Set of symbols for the dynamic segments of prefix_source
# (e.g. "/accounts/:account_id/" yields #<Set: {:account_id}>).
def prefix_parameters
  @prefix_parameters ||= begin
    keys = prefix_source.scan(/:\w+/)
    keys.map { |key| key[1..-1].to_sym }.to_set
  end
end

# Renders +options+ as a "?key=value" query string, or nil when there is
# nothing to append.
def query_string(options)
  return nil if options.nil? || options.empty?
  "?#{options.to_query}"
end
# Splits an option hash into [prefix_options, query_options]: keys that
# name prefix (nested URL) parameters go in the first hash, everything
# else in the second. Blank keys are skipped; all keys are symbolized.
def split_options(options = {})
  prefix_options = {}
  query_options = {}

  (options || {}).each do |key, value|
    next if key.blank?
    bucket = prefix_parameters.include?(key.to_sym) ? prefix_options : query_options
    bucket[key.to_sym] = value
  end

  [prefix_options, query_options]
end
# Version-gated definition of _encode_www_form_component: Ruby >= 1.9
# ships URI.encode_www_form_component; on 1.8 fall back to a manual
# application/x-www-form-urlencoded encoder.
# NOTE(review): RUBY_VERSION is compared as a String — fine for
# "1.8"/"1.9" but not a general version comparison.
if RUBY_VERSION >= '1.9'
define_method :_encode_www_form_component do |string|
URI.encode_www_form_component(string)
end
else
define_method :_encode_www_form_component do |string|
# Percent-escape every byte outside the unreserved set; space -> '+'.
string.gsub(/[^*\-.0-9A-Z_a-z]/) do |char|
if char == ' '
'+'
else
# On 1.8, String#[] yields the byte's Integer value, as %02X expects;
# '%%%02X' formats to e.g. "%2F".
'%%%02X' % char[0]
end
end
end
end
end
# Raw attribute storage (string keys) and the prefix (nested URL)
# parameters captured when the record was created or fetched.
attr_accessor :attributes #:nodoc:
attr_accessor :prefix_options #:nodoc:
# Constructor method for \new resources; the optional +attributes+ parameter takes a \hash
# of attributes for the \new resource.
#
# ==== Examples
# my_course = Course.new
# my_course.name = "Western Civilization"
# my_course.lecturer = "Don Trotter"
# my_course.save
#
# my_other_course = Course.new(:name => "Philosophy: Reason and Being", :lecturer => "Ralph Cling")
# my_other_course.save
def initialize(attributes = {})
@attributes = {}
@prefix_options = {}
load(attributes)
end
# Returns a \clone of the resource that hasn't been assigned an +id+ yet and
# is treated as a \new resource.
#
# ryan = Person.find(1)
# not_ryan = ryan.clone
# not_ryan.new? # => true
#
# Any active resource member attributes will NOT be cloned, though all other
# attributes are. This is to prevent the conflict between any +prefix_options+
# that refer to the original parent resource and the newly cloned parent
# resource that does not exist.
#
# ryan = Person.find(1)
# ryan.address = StreetAddress.find(1, :person_id => ryan.id)
# ryan.hash = {:not => "an ARes instance"}
#
# not_ryan = ryan.clone
# not_ryan.new? # => true
# not_ryan.address # => NoMethodError
# not_ryan.hash # => {:not => "an ARes instance"}
def clone
# Clone all attributes except the pk and any nested ARes
cloned = attributes.reject {|k,v| k == self.class.primary_key || v.is_a?(ActiveResource::Base)}.inject({}) do |attrs, (k, v)|
attrs[k] = v.clone
attrs
end
# Form the new resource - bypass initialize of resource with 'new' as that will call 'load' which
# attempts to convert hashes into member objects and arrays into collections of objects. We want
# the raw objects to be cloned so we bypass load by directly setting the attributes hash.
resource = self.class.new({})
resource.prefix_options = self.prefix_options
# send is used because instance_variable_set may be private on old Rubies.
resource.send :instance_variable_set, '@attributes', cloned
resource
end
# A method to determine if the resource a \new object (i.e., it has not been POSTed to the remote service yet).
#
# ==== Examples
# not_new = Computer.create(:brand => 'Apple', :make => 'MacBook', :vendor => 'MacMall')
# not_new.new? # => false
#
# is_new = Computer.new(:brand => 'IBM', :make => 'Thinkpad', :vendor => 'IBM')
# is_new.new? # => true
#
# is_new.save
# is_new.new? # => false
#
def new?
id.nil?
end
alias :new_record? :new?
# Gets the <tt>\id</tt> attribute of the resource.
def id
attributes[self.class.primary_key]
end
# Sets the <tt>\id</tt> attribute of the resource (writes the primary key
# entry of the attributes hash).
def id=(id)
attributes[self.class.primary_key] = id
end
# Allows Active Resource objects to be used as parameters in Action Pack URL generation.
def to_param
id && id.to_s
end
# Test for equality. Resource are equal if and only if +other+ is the same object or
# is an instance of the same class, is not <tt>new?</tt>, and has the same +id+.
#
# ==== Examples
# ryan = Person.create(:name => 'Ryan')
# jamie = Person.create(:name => 'Jamie')
#
# ryan == jamie
# # => false (Different name attribute and id)
#
# ryan_again = Person.new(:name => 'Ryan')
# ryan == ryan_again
# # => false (ryan_again is new?)
#
# ryans_clone = Person.create(:name => 'Ryan')
# ryan == ryans_clone
# # => false (Different id attributes)
#
# ryans_twin = Person.find(ryan.id)
# ryan == ryans_twin
# # => true
#
def ==(other)
other.equal?(self) || (other.instance_of?(self.class) && other.id == id && other.prefix_options == prefix_options)
end
# Tests for equality (delegates to ==).
def eql?(other)
self == other
end
# Delegates to id in order to allow two resources of the same type and \id to work with something like:
# [Person.find(1), Person.find(2)] & [Person.find(1), Person.find(4)] # => [Person.find(1)]
def hash
id.hash
end
# Returns an unsaved duplicate that shares this record's attributes
# hash and prefix options.
#
# ==== Examples
#   next_invoice = my_invoice.dup
#   next_invoice.new?          # => true
#   next_invoice.save
#   next_invoice == my_invoice # => false (different id attributes)
def dup
  copy = self.class.new
  copy.attributes = @attributes
  copy.prefix_options = @prefix_options
  copy
end
# Persists the resource: POSTs (create) when the record is new, PUTs
# (update) otherwise. When the response carries a body it is assumed to
# be the final serialized object and is loaded back into the record
# (picking up server-side attributes such as +created_at+).
#
# ==== Examples
#   my_company = Company.new(:name => 'RoleModel Software', :owner => 'Ken Auer', :size => 2)
#   my_company.save # sends POST /companies/ (create)
#   my_company.size = 10
#   my_company.save # sends PUT /companies/1 (update)
def save
  if new?
    create
  else
    update
  end
end
# Removes this record from the remote service via DELETE.
#
# ==== Examples
#   my_person = Person.find(my_id)
#   my_person.destroy
#   Person.find(my_id) # 404 (Resource Not Found)
def destroy
  path = element_path
  connection.delete(path, self.class.headers)
end
# True when this record has been persisted and is still present on the
# remote service; false for unsaved records or ones deleted remotely
# since this object was instantiated.
#
# ==== Examples
#   that_guy = Person.find(:first)
#   that_guy.exists?                 # => true
#   Person.new(:name => 'P').exists? # => false
def exists?
  return false if new?
  self.class.exists?(to_param, :params => prefix_options)
end
# Serializes the record to XML via Active Support's Hash#to_xml, so the
# usual options apply:
# * <tt>:indent</tt>        - indent level (default 2).
# * <tt>:dasherize</tt>     - underscores -> dashes in element names.
# * <tt>:camelize</tt>      - camel-case element names.
# * <tt>:skip_instruct</tt> - omit the XML declaration.
# The document root defaults to the element name and can be overridden
# with <tt>:root</tt>.
#
# ==== Examples
#   my_group.to_xml
#   # => <?xml version="1.0" encoding="UTF-8"?>
#   #    <subsidiary_group> [...] </subsidiary_group>
#   my_group.to_xml(:skip_instruct => true)
#   # => <subsidiary_group> [...] </subsidiary_group>
def to_xml(options={})
  defaults = { :root => self.class.element_name }
  attributes.to_xml(defaults.merge(options))
end
# Coerces to a hash for JSON encoding.
#
# ==== Options
# The +options+ are passed to the +to_json+ method on each
# attribute, so the same options as the +to_json+ methods in
# Active Support.
#
# * <tt>:only</tt> - Only include the specified attribute or list of
# attributes in the serialized output. Attribute names must be specified
# as strings.
# * <tt>:except</tt> - Do not include the specified attribute or list of
# attributes in the serialized output. Attribute names must be specified
# as strings.
#
# ==== Examples
# person = Person.new(:first_name => "Jim", :last_name => "Smith")
# person.to_json
# # => {"first_name": "Jim", "last_name": "Smith"}
#
# person.to_json(:only => ["first_name"])
# # => {"first_name": "Jim"}
#
# person.to_json(:except => ["first_name"])
# # => {"last_name": "Smith"}
def as_json(options = nil)
attributes.as_json(options)
end
# Returns the serialized string representation of the resource in the configured
# serialization format specified in ActiveResource::Base.format. The options
# applicable depend on the configured encoding format.
#
# NOTE(review): `case` compares with #=== — when Base.format returns the
# XmlFormat/JsonFormat *module itself* (rather than an instance),
# `Module === Module` is false and control falls through to the generic
# branch, losing the :root option for XML. Confirm which object
# Base.format actually returns here.
def encode(options={})
case self.class.format
when ActiveResource::Formats[:xml]
self.class.format.encode(attributes, {:root => self.class.element_name}.merge(options))
when ActiveResource::Formats::JsonFormat
if ActiveResource::Base.include_root_in_json
self.class.format.encode({self.class.element_name => attributes}, options)
else
self.class.format.encode(attributes, options)
end
else
self.class.format.encode(attributes, options)
end
end
# Re-fetches this record from the remote service and replaces the local
# attributes with the server's current values. Returns self.
#
# ==== Examples
#   my_branch.name # => "Wislon Raod"
#   my_branch.reload
#   my_branch.name # => "Wilson Road"
def reload
  fresh = self.class.find(to_param, :params => @prefix_options)
  load(fresh.attributes)
end
# A method to manually load attributes from a \hash. Recursively loads collections of
# resources. This method is called in +initialize+ and +create+ when a \hash of attributes
# is provided.
#
# ==== Examples
# my_attrs = {:name => 'J&J Textiles', :industry => 'Cloth and textiles'}
# my_attrs = {:name => 'Marty', :colors => ["red", "green", "blue"]}
#
# the_supplier = Supplier.find(:first)
# the_supplier.name # => 'J&M Textiles'
# the_supplier.load(my_attrs)
# the_supplier.name('J&J Textiles')
#
# # These two calls are the same as Supplier.new(my_attrs)
# my_supplier = Supplier.new
# my_supplier.load(my_attrs)
#
# # These three calls are the same as Supplier.create(my_attrs)
# your_supplier = Supplier.new
# your_supplier.load(my_attrs)
# your_supplier.save
def load(attributes)
raise ArgumentError, "expected an attributes Hash, got #{attributes.inspect}" unless attributes.is_a?(Hash)
# Prefix (nested URL) params are peeled off before attribute assignment.
@prefix_options, attributes = split_options(attributes)
attributes.each do |key, value|
@attributes[key.to_s] =
case value
when Array
# Arrays of hashes become collections of nested resources; scalars
# are kept (duplicated when possible).
resource = find_or_create_resource_for_collection(key)
value.map do |attrs|
if attrs.is_a?(String) || attrs.is_a?(Numeric)
# NOTE: duplicable? is an Active Support core extension.
attrs.duplicable? ? attrs.dup : attrs
else
resource.new(attrs)
end
end
when Hash
resource = find_or_create_resource_for(key)
resource.new(value)
else
value.dup rescue value
end
end
self
end
# For checking <tt>respond_to?</tt> without searching the attributes (which is faster).
alias_method :respond_to_without_attributes?, :respond_to?
# A method to determine if an object responds to a message (e.g., a method call). In Active Resource, a Person object with a
# +name+ attribute can answer <tt>true</tt> to <tt>my_person.respond_to?(:name)</tt>, <tt>my_person.respond_to?(:name=)</tt>, and
# <tt>my_person.respond_to?(:name?)</tt>.
#
# NOTE(review): String#last and String#first(-1) below are Active
# Support core extensions (not plain Ruby) — confirm AS is loaded.
def respond_to?(method, include_priv = false)
method_name = method.to_s
if attributes.nil?
return super
elsif attributes.has_key?(method_name)
return true
elsif ['?','='].include?(method_name.last) && attributes.has_key?(method_name.first(-1))
return true
end
# super must be called at the end of the method, because the inherited respond_to?
# would return true for generated readers, even if the attribute wasn't present
super
end
protected
# The shared connection for this resource's class (optionally refreshed).
def connection(refresh = false)
self.class.connection(refresh)
end
# Update the resource on the remote service.
def update
connection.put(element_path(prefix_options), encode, self.class.headers).tap do |response|
load_attributes_from_response(response)
end
end
# Create (i.e., \save to the remote service) the \new resource.
def create
connection.post(collection_path, encode, self.class.headers).tap do |response|
self.id = id_from_response(response)
load_attributes_from_response(response)
end
end
# Reloads attributes from the response body, but only when the server
# actually sent one (non-zero Content-Length and non-blank body).
def load_attributes_from_response(response)
if response['Content-Length'] != "0" && response.body.strip.size > 0
load(self.class.format.decode(response.body))
end
end
# Extracts the new record's id from the Location header of a typical
# create response (last path segment, file extension stripped). Returns
# nil when no Location header or no match.
def id_from_response(response)
  location = response['Location']
  return nil unless location
  match = location.match(%r{/([^/]*?)(\.\w+)?$})
  match && match[1]
end
# Path of this element on the remote service; +options+ override the
# stored prefix options.
def element_path(options = nil)
self.class.element_path(to_param, options || prefix_options)
end
# Path of this resource's collection; +options+ override the stored
# prefix options.
def collection_path(options = nil)
self.class.collection_path(options || prefix_options)
end
private
# Tries to find a resource for a given collection name; if it fails, then the resource is created
def find_or_create_resource_for_collection(name)
find_or_create_resource_for(name.to_s.singularize)
end
# Walks +module_names+ (all entries but the last) from Object downwards
# and returns the constant +resource_name+ from the innermost namespace
# that defines it. Raises NameError when no namespace defines it.
def find_resource_in_modules(resource_name, module_names)
  receiver = Object
  namespaces = module_names[0, module_names.size - 1].map do |module_name|
    receiver = receiver.const_get(module_name)
  end
  match = namespaces.reverse.detect { |ns| ns.const_defined?(resource_name) }
  raise NameError unless match
  match.const_get(resource_name)
end
# Tries to find a resource for a given name; if it fails, then the resource is created
def find_or_create_resource_for(name)
resource_name = name.to_s.camelize
ancestors = self.class.name.split("::")
if ancestors.size > 1
find_resource_in_modules(resource_name, ancestors)
else
self.class.const_get(resource_name)
end
rescue NameError
# Lookup failed: reuse a constant already nested under this class, or
# mint a fresh anonymous ActiveResource::Base subclass for it.
if self.class.const_defined?(resource_name)
resource = self.class.const_get(resource_name)
else
resource = self.class.const_set(resource_name, Class.new(ActiveResource::Base))
end
# The nested resource inherits this class's prefix and site.
resource.prefix = self.class.prefix
resource.site = self.class.site
resource
end
# Delegates to the class-level split_options (which is private, hence
# __send__).
def split_options(options = {})
self.class.__send__(:split_options, options)
end
# Dynamic attribute access: "name=" writes, "name?" reads (truthiness),
# and a bare "name" reads when the attribute exists, otherwise defers to
# super. NOTE(review): String#last / String#first(-1) are Active Support
# core extensions — confirm AS is loaded.
def method_missing(method_symbol, *arguments) #:nodoc:
method_name = method_symbol.to_s
case method_name.last
when "="
attributes[method_name.first(-1)] = arguments.first
when "?"
attributes[method_name.first(-1)]
else
attributes.has_key?(method_name) ? attributes[method_name] : super
end
end
end
end
| 39.255283 | 184 | 0.621547 |
6aed271f9a99cce5eacd747088893df2f8095024 | 6,402 | require 'nokogiri'
require 'ostruct'
require 'webrick/cookie'
module Anemone
class Page
# The URL of the page
attr_reader :url
# The raw HTTP response body of the page
attr_reader :body
# Headers of the HTTP response
attr_reader :headers
# URL of the page this one redirected to, if any
attr_reader :redirect_to
# Exception object, if one was raised during HTTP#fetch_page
attr_reader :error
# OpenStruct for user-stored data
attr_accessor :data
# Integer response code of the page
attr_accessor :code
# Boolean indicating whether or not this page has been visited in PageStore#shortest_paths!
attr_accessor :visited
# Depth of this page from the root of the crawl. This is not necessarily the
# shortest path; use PageStore#shortest_paths! to find that value.
attr_accessor :depth
# URL of the page that brought us to this page
attr_accessor :referer
# Response time of the request for this page in milliseconds
attr_accessor :response_time
# Storage for the original HTTP request that generated this response
attr_accessor :request
#
# Create a new page
#
# +url+ is the page's URI; +params+ carries the HTTP response details
# (:code, :headers, :body, :redirect_to, :referer, :depth, ...).
#
def initialize(url, params = {})
@url = url
@data = OpenStruct.new
@code = params[:code]
@headers = params[:headers] || {}
# Guarantee content-type is always present (as an array, matching the
# usual Net::HTTP to_hash shape).
@headers['content-type'] ||= ['']
@aliases = Array(params[:aka]).compact
@referer = params[:referer]
@depth = params[:depth] || 0
@redirect_to = to_absolute(params[:redirect_to])
@response_time = params[:response_time]
@body = params[:body]
@error = params[:error]
# A page counts as fetched as soon as it has a response code.
@fetched = !params[:code].nil?
end
#
# Array of distinct A tag HREFs from the page
#
# MODIFIED: Dig URLs from elements other than "A" refs
#
def links
return @links unless @links.nil?
@links = []
return @links if !doc
# First extract normal, direct links
etypes = %W{a frame iframe}
doc.css(*etypes).each do |r|
u = r['src'] || r['href']
next if u.nil? or u.empty?
abs = to_absolute(URI(u)) rescue next
@links << abs if in_domain?(abs)
end
# Now create links from other content URLs
etypes = %W{img script link form}
doc.css(*etypes).each do |r|
u = r['src'] || r['href'] || r['action']
next if u.nil? or u.empty?
# Remove any query string
u,tmp = u.split('?',2)
# Back off to the containing directory
u.gsub!(/(.*\/)[^\/]+$/, "\\1")
abs = to_absolute(URI(u)) rescue next
@links << abs if in_domain?(abs)
end
# Finally, add every ancestor directory of each discovered link so the
# crawler also visits intermediate paths.
nlinks = []
@links.each do |u|
bits = u.path.split('/')
while(bits.length > 0)
bits.pop
nlinks << to_absolute(URI(bits.join('/'))) rescue next
end
end
@links.push(nlinks)
@links.flatten!
@links.uniq!
@links
end
#
# Nokogiri document for the HTML body
#
# NOTE(review): the trailing "rescue nil" swallows any parse error, and
# a nil result is never cached (re-parsed on every call).
#
def doc
return @doc if @doc
@doc = Nokogiri::HTML(@body) if @body && html? rescue nil
end
#
# Force link extraction, then drop the parsed Nokogiri document and the
# raw response body so the page can be kept around cheaply.
#
def discard_doc!
  links # populate @links before the document goes away
  @doc = nil
  @body = nil
end
#
# Was the page successfully fetched?
# +true+ if the page was fetched with no error, +false+ otherwise.
# (Set in initialize from the presence of a response code.)
#
def fetched?
@fetched
end
#
# Array of cookies received with this page as WEBrick::Cookie objects.
# Returns [] when the Set-Cookie header is missing or unparseable
# (the rescue swallows the parse failure).
#
def cookies
WEBrick::Cookie.parse_set_cookies(@headers['Set-Cookie']) rescue []
end
#
# The content-type returned by the HTTP request for this page. Header
# storage may hold either a bare string or an array of values, so the
# first entry is unwrapped.
#
def content_type
  value = headers['content-type']
  value.kind_of?(::Array) ? value.first : value
end
#
# Returns +true+ if the page is an HTML (or XHTML) document, +false+
# otherwise.
#
# Fix: the '+' in 'application/xhtml+xml' was previously unescaped, so
# the regexp parsed it as a repetition ('l+') and XHTML pages could
# never match.
#
def html?
  !!(content_type =~ %r{^(text/html|application/xhtml\+xml)\b})
end
#
# Returns +true+ if the page is a HTTP redirect (status 300-307),
# +false+ otherwise. 308 was not in the original range and remains
# deliberately excluded.
#
def redirect?
  case @code
  when 300..307 then true
  else false
  end
end
#
# Returns +true+ if the page was not found (HTTP 404), +false+
# otherwise.
#
def not_found?
  @code == 404
end
#
# Converts relative URL *link* into an absolute URL based on the
# location of the page
#
# NOTE(review): URI.encode was deprecated in Ruby 2.7 and removed in
# 3.0; this code targets older Rubies — confirm before upgrading the
# runtime.
#
def to_absolute(link)
return nil if link.nil?
# remove anchor
link = URI.encode(link.to_s.gsub(/#[a-zA-Z0-9_-]*$/,''))
relative = URI(link)
absolute = @url.merge(relative)
# Normalize a bare authority ("http://host") to the root path.
absolute.path = '/' if absolute.path.empty?
return absolute
end
#
# Returns +true+ when +uri+ shares this page's host, +false+ otherwise.
#
def in_domain?(uri)
  @url.host == uri.host
end
# Positional state for Marshal; order must match marshal_load exactly.
def marshal_dump
[@url, @headers, @data, @body, @links, @code, @visited, @depth, @referer, @redirect_to, @response_time, @fetched]
end
# Restores state dumped by marshal_dump (same positional order).
def marshal_load(ary)
@url, @headers, @data, @body, @links, @code, @visited, @depth, @referer, @redirect_to, @response_time, @fetched = ary
end
# String-keyed hash form for external storage; headers and data are
# Marshal-serialized, URIs stringified. Counterpart of from_hash.
def to_hash
{'url' => @url.to_s,
'headers' => Marshal.dump(@headers),
'data' => Marshal.dump(@data),
'body' => @body,
'links' => links.map(&:to_s),
'code' => @code,
'visited' => @visited,
'depth' => @depth,
'referer' => @referer.to_s,
'redirect_to' => @redirect_to.to_s,
'response_time' => @response_time,
'fetched' => @fetched}
end
# Rebuilds a Page from a to_hash dump by setting ivars directly.
# SECURITY NOTE(review): Marshal.load executes arbitrary object
# deserialization — only feed this hashes produced by to_hash from a
# trusted store.
# NOTE(review): @referer is restored as a String here while elsewhere it
# may be a URI — confirm callers tolerate both.
def self.from_hash(hash)
page = self.new(URI(hash['url']))
{'@headers' => Marshal.load(hash['headers']),
'@data' => Marshal.load(hash['data']),
'@body' => hash['body'],
'@links' => hash['links'].map { |link| URI(link) },
'@code' => hash['code'].to_i,
'@visited' => hash['visited'],
'@depth' => hash['depth'].to_i,
'@referer' => hash['referer'],
'@redirect_to' => URI(hash['redirect_to']),
'@response_time' => hash['response_time'].to_i,
'@fetched' => hash['fetched']
}.each do |var, value|
page.instance_variable_set(var, value)
end
page
end
end
end
| 26.786611 | 123 | 0.579975 |
d58e57df2f1b0f719836709cef1909461828aaab | 11,585 | # -*- coding: binary -*-
module Msf
module EvasiveTCP
attr_accessor :_send_size, :_send_delay, :evasive
# Disables Nagle's algorithm (TCP_NODELAY) on this socket so small
# segments are sent immediately. Any failure (unsupported platform,
# closed socket) is silently ignored.
def denagle
  setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
rescue ::Exception
end
# Evasive write: when @evasive is set, the buffer is sent in chunks of
# at most @_send_size bytes (default: whole buffer), flushing after each
# chunk and optionally sleeping @_send_delay seconds between chunks.
# Returns the total number of bytes reported written.
def write(buf, opts={})
return super(buf, opts) if not @evasive
ret = 0
idx = 0
len = @_send_size || buf.length
while(idx < buf.length)
if(@_send_delay and idx > 0)
# IO.select with no sets doubles as a sub-second sleep.
::IO.select(nil, nil, nil, @_send_delay)
end
pkt = buf[idx, len]
res = super(pkt, opts)
flush()
idx += len
# super may return nil; only accumulate real byte counts.
ret += res if res
end
ret
end
end
###
#
# This module provides methods for establish a connection to a remote host and
# communicating with it.
#
###
module Exploit::Remote::Tcp
#
# Initializes an instance of an exploit module that exploits a
# vulnerability in a TCP server.
#
# Registers the basic target options (RHOST/RPORT), the advanced
# SSL/proxy/source-binding options, and the TCP evasion knobs.
#
def initialize(info = {})
  super

  register_options(
    [
      Opt::RHOST,
      Opt::RPORT
    ], Msf::Exploit::Remote::Tcp
  )

  register_advanced_options(
    [
      OptBool.new('SSL', [ false, 'Negotiate SSL for outgoing connections', false]),
      OptEnum.new('SSLVersion', [ false, 'Specify the version of SSL that should be used', 'SSL3', ['SSL2', 'SSL3', 'TLS1']]),
      OptEnum.new('SSLVerifyMode', [ false, 'SSL verification method', 'PEER', %W{CLIENT_ONCE FAIL_IF_NO_PEER_CERT NONE PEER}]),
      OptString.new('SSLCipher', [ false, 'String for SSL cipher - "DHE-RSA-AES256-SHA" or "ADH"']),
      Opt::Proxies,
      Opt::CPORT,
      Opt::CHOST,
      OptInt.new('ConnectTimeout', [ true, 'Maximum number of seconds to establish a TCP connection', 10])
    ], Msf::Exploit::Remote::Tcp
  )

  register_evasion_options(
    [
      # Typo fix in display text: 'Maxiumum' -> 'Maximum' (matches the
      # TcpServer mixin's wording for the same option).
      OptInt.new('TCP::max_send_size', [false, 'Maximum tcp segment size. (0 = disable)', 0]),
      OptInt.new('TCP::send_delay', [false, 'Delays inserted before every send. (0 = disable)', 0])
    ], Msf::Exploit::Remote::Tcp
  )
end
#
# Establishes a TCP connection to the specified RHOST/RPORT
#
# @see Rex::Socket::Tcp
# @see Rex::Socket::Tcp.create
def connect(global = true, opts={})
dossl = false
if(opts.has_key?('SSL'))
dossl = opts['SSL']
else
dossl = ssl
# Auto-enable SSL when targeting 443 and the user never touched the
# SSL option.
if (datastore.default?('SSL') and rport.to_i == 443)
dossl = true
end
end
nsock = Rex::Socket::Tcp.create(
'PeerHost' => opts['RHOST'] || rhost,
'PeerPort' => (opts['RPORT'] || rport).to_i,
'LocalHost' => opts['CHOST'] || chost || "0.0.0.0",
'LocalPort' => (opts['CPORT'] || cport || 0).to_i,
'SSL' => dossl,
'SSLVersion' => opts['SSLVersion'] || ssl_version,
'Proxies' => proxies,
'Timeout' => (opts['ConnectTimeout'] || connect_timeout || 10).to_i,
'Context' =>
{
'Msf' => framework,
'MsfExploit' => self,
})
# enable evasions on this socket
set_tcp_evasions(nsock)
# Set this socket to the global socket as necessary
self.sock = nsock if (global)
# Add this socket to the list of sockets created by this exploit
add_socket(nsock)
return nsock
end
# Enables the configured TCP evasions (segment-size capping and
# per-send delays) on +socket+. No-op when both evasion options are
# zero or when the socket has already been extended.
def set_tcp_evasions(socket)
  max_size = datastore['TCP::max_send_size'].to_i
  delay    = datastore['TCP::send_delay'].to_i

  return if max_size == 0 && delay == 0
  return if socket.respond_to?('evasive')

  socket.extend(EvasiveTCP)

  if max_size > 0
    socket._send_size = datastore['TCP::max_send_size']
    socket.denagle
    socket.evasive = true
  end

  if delay > 0
    socket._send_delay = datastore['TCP::send_delay']
    socket.evasive = true
  end
end
# Runs the payload handler against +nsock+. When the handler claims the
# socket, ownership transfers to it: we drop our global reference and
# deregister the socket so module cleanup will not close it.
def handler(nsock = self.sock)
# If the handler claims the socket, then we don't want it to get closed
# during cleanup
if ((rv = super) == Handler::Claimed)
if (nsock == self.sock)
self.sock = nil
end
# Remove this socket from the list of sockets so that it will not be
# aborted.
remove_socket(nsock)
end
return rv
end
#
# Closes the TCP connection and forgets about it. IOErrors from an
# already-dead socket are ignored.
#
def disconnect(nsock = self.sock)
  begin
    if nsock
      nsock.shutdown
      nsock.close
    end
  rescue IOError
  end

  # Drop the global reference when we just closed the global socket.
  self.sock = nil if nsock == sock

  # Remove this socket from the list of sockets created by this exploit
  remove_socket(nsock)
end
#
# Performs cleanup, disconnects the socket if necessary
#
def cleanup
super
disconnect
end
##
#
# Wrappers for getters
#
##
#
# Returns the target host
#
def rhost
datastore['RHOST']
end
#
# Returns the remote port
#
def rport
datastore['RPORT']
end
#
# Returns the local host
#
def lhost
datastore['LHOST']
end
#
# Returns the local port
#
def lport
datastore['LPORT']
end
#
# Returns the local host for outgoing connections (source bind address)
#
def chost
datastore['CHOST']
end
#
# Returns the local port for outgoing connections (source bind port)
#
def cport
datastore['CPORT']
end
#
# Returns the boolean indicating SSL
#
def ssl
datastore['SSL']
end
#
# Returns the string indicating SSLVersion
#
def ssl_version
datastore['SSLVersion']
end
#
# Returns the proxy configuration
#
def proxies
datastore['Proxies']
end
#
# Returns the TCP connection timeout
#
def connect_timeout
datastore['ConnectTimeout']
end
protected
attr_accessor :sock
end
###
#
# This mixin provides a generic interface for running a TCP server of some
# sort that is designed to exploit clients. Exploits that include this mixin
# automatically take a passive stance.
#
###
module Exploit::Remote::TcpServer
# Initializes the TCP-server mixin: forces a passive stance and
# registers the listener options (SRVHOST/SRVPORT, incoming SSL) plus
# advanced and evasion options.
# NOTE(review): the evasion options are registered under
# Msf::Exploit::Remote::Tcp (not TcpServer) — confirm intentional, they
# mirror the client mixin's options.
def initialize(info = {})
super(update_info(info,
'Stance' => Msf::Exploit::Stance::Passive))
register_options(
[
OptBool.new('SSL', [ false, 'Negotiate SSL for incoming connections', false]),
OptEnum.new('SSLVersion', [ false, 'Specify the version of SSL that should be used', 'SSL3', ['SSL2', 'SSL3', 'TLS1']]),
OptPath.new('SSLCert', [ false, 'Path to a custom SSL certificate (default is randomly generated)']),
OptAddress.new('SRVHOST', [ true, "The local host to listen on. This must be an address on the local machine or 0.0.0.0", '0.0.0.0' ]),
OptPort.new('SRVPORT', [ true, "The local port to listen on.", 8080 ]),
], Msf::Exploit::Remote::TcpServer)
register_advanced_options(
[
OptString.new('ListenerComm', [ false, 'The specific communication channel to use for this service']),
OptBool.new('SSLCompression', [ false, 'Enable SSL/TLS-level compression', false ])
], Msf::Exploit::Remote::TcpServer)
register_evasion_options(
[
OptInt.new('TCP::max_send_size', [false, 'Maximum tcp segment size. (0 = disable)', 0]),
OptInt.new('TCP::send_delay', [false, 'Delays inserted before every send. (0 = disable)', 0])
], Msf::Exploit::Remote::Tcp
)
end
#
# Starts the listening service, gives subclasses a chance to run their
# primer hook, then blocks until the service shuts down.
#
def exploit
  start_service
  print_status("Server started.")

  # Call the exploit primer
  primer

  self.service.wait
end
#
# Primer method to call after starting service but before handling connections
# (intended for subclasses to override; default is a no-op).
#
def primer
end
#
# Stops the service, if one was created.
#
def cleanup
super
if(service)
stop_service()
print_status("Server stopped.")
end
end
#
# Called when a client connects. (Override in subclasses; no-op here.)
#
def on_client_connect(client)
end
#
# Called when a client has data available for reading. (Override point.)
#
def on_client_data(client)
end
#
# Called when a client has disconnected. (Override point.)
#
def on_client_close(client)
end
#
# Starts the service.
#
def start_service(*args)
begin
comm = datastore['ListenerComm']
if comm == "local"
comm = ::Rex::Socket::Comm::Local
else
comm = nil
end
self.service = Rex::Socket::TcpServer.create(
'LocalHost' => srvhost,
'LocalPort' => srvport,
'SSL' => ssl,
'SSLCert' => ssl_cert,
'SSLCompression' => opts['SSLCompression'] || ssl_compression,
'Comm' => comm,
'Context' =>
{
'Msf' => framework,
'MsfExploit' => self,
})
self.service.on_client_connect_proc = Proc.new { |client|
on_client_connect(client)
}
self.service.on_client_data_proc = Proc.new { |client|
on_client_data(client)
}
self.service.on_client_close_proc = Proc.new { |client|
on_client_close(client)
}
# Start the listening service
self.service.start
rescue ::Errno::EACCES => e
if (srvport.to_i < 1024)
print_line(" ")
print_error("Could not start the TCP server: #{e}.")
print_error(
"This module is configured to use a privileged TCP port (#{srvport}). " +
"On Unix systems, only the root user account is allowed to bind to privileged ports." +
"Please run the framework as root to use this module."
)
print_error(
"On Microsoft Windows systems, this error is returned when a process attempts to "+
"listen on a host/port combination that is already in use. For example, Windows XP "+
"will return this error if a process attempts to bind() over the system SMB/NetBIOS services."
)
print_line(" ")
end
raise e
end
end
#
# Stops the service.
#
def stop_service
if (service)
begin
self.service.deref if self.service.kind_of?(Rex::Service)
if self.service.kind_of?(Rex::Socket)
self.service.close
self.service.stop
end
self.service = nil
rescue ::Exception
end
end
end
#
# Returns the local host that is being listened on.
#
def srvhost
datastore['SRVHOST']
end
#
# Returns the local port that is being listened on.
#
def srvport
datastore['SRVPORT']
end
#
# Returns the SSL option
#
def ssl
datastore['SSL']
end
#
# Returns the SSLCert option
#
def ssl_cert
datastore['SSLCert']
end
# @return [Bool] enable SSL/TLS-level compression
def ssl_compression
datastore['SSLCompression']
end
#
# Re-generates the payload, substituting the current RHOST and RPORT with
# the supplied client host and port from the socket.
#
def regenerate_payload(cli, arch = nil, platform = nil, target = nil)
ohost = datastore['RHOST']
oport = datastore['RPORT']
p = nil
begin
# Update the datastore with the supplied client peerhost/peerport
datastore['RHOST'] = cli.peerhost
datastore['RPORT'] = cli.peerport
if ((p = super(arch, platform, target)) == nil)
print_error("Failed to generate payload")
return nil
end
# Allow the payload to start a new handler
add_handler({
'RHOST' => datastore['RHOST'],
'RPORT' => datastore['RPORT']
})
ensure
datastore['RHOST'] = ohost
datastore['RPORT'] = oport
end
p
end
protected
attr_accessor :service # :nodoc:
end
end
| 22.45155 | 143 | 0.607769 |
b992febea039f776cfa1fc5b2fa1a690c08fc5e6 | 432 | class SectionsController < ApplicationController
def index
unless params[:section]
redirect_to sections_path(section: Section.first.name)
return
end
@sections = Section.all
section = Section.find_by(name: params[:section])
@food_items = section.food_items
end
def show
@sections = Section.all
@section = Section.find(params[:id])
@food_items = @section.food_items
end
end
| 21.6 | 60 | 0.6875 |
7a0f2efda6c3e963b89edb166f4b4a44a80ee43b | 274 | class CreateEmployments < ActiveRecord::Migration
def change
create_table :employments do |t|
t.string :company_id
t.string :employee_id
t.timestamps
end
add_index :employments, :company_id
add_index :employments, :employee_id
end
end
| 21.076923 | 49 | 0.708029 |
abdd67be3b275a2a510ccef8da27a3722a702dd2 | 11,596 | require 'spec_helper'
# Minimal Mongoid model used throughout this spec: two taggable fields with
# different separators, plus a default scope so scoped tag queries can be
# exercised.
class Foo
  include Mongoid::Document
  include Mongoid::TagsArentHard
  field :label
  # `account` drives the default scope below.
  field :account, default: 'a'
  default_scope ->{ where(account: 'a') }
  # Default separator is ","; colors overrides it with ";".
  taggable_with :tags
  taggable_with :colors, separator: ";"
end
describe Mongoid::TagsArentHard do
  let(:foo) { Foo.new }
  # Run every example for both fields so the default (",") and the custom
  # (";") separators are both covered.
  {tags: ",", colors: ";"}.each do |_name, _separator|
    describe ".taggable_with" do
      it "defines a getter for '#{_name}'" do
        foo.send(_name).should be_kind_of(Mongoid::TagsArentHard::Tags)
        foo.send(_name).should eql([])
      end
      it "defines a setter for '#{_name}' (string)" do
        # Whitespace around the separator should be stripped.
        foo.send("#{_name}=", "foo #{_separator} bar")
        foo.send(_name).should eql(["foo","bar"])
      end
      it "defines a setter for '#{_name}' (array)" do
        foo.send("#{_name}=", ["foo", "bar"])
        foo.send(_name).should eql(["foo","bar"])
      end
      it "does not raise an error when reading frozen objects" do
        expect {
          foo.send("#{_name}=", ["foo", "bar"])
          foo.freeze
          foo.send(_name)
        }.to_not raise_error
      end
    end
    describe '#save' do
      it "saves the #{_name} correctly" do
        # Round-trip through persistence to prove the custom field type
        # serializes and deserializes.
        foo.send("#{_name}=", "foo#{_separator}bar")
        foo.save!
        foo.reload
        foo.send(_name).should eql(["foo","bar"])
      end
    end
    describe '+=' do
      it "adds and replaces using a string" do
        foo.send("#{_name}=", ["foo", "bar"])
        foo.send(_name).should eql(["foo","bar"])
        fooa = foo.send(_name)
        fooa += "a#{_separator}b"
        fooa.should eql(["foo","bar", "a", "b"])
      end
      it "adds and replaces using an array" do
        foo.send("#{_name}=", ["foo", "bar"])
        foo.send(_name).should eql(["foo","bar"])
        fooa = foo.send(_name)
        fooa += ["a", "b"]
        fooa.should eql(["foo","bar", "a", "b"])
      end
      it "adds and replaces using a Tags object" do
        foo.send("#{_name}=", ["foo", "bar"])
        foo.send(_name).should eql(["foo","bar"])
        fooa = foo.send(_name)
        fooa += Mongoid::TagsArentHard::Tags.new(["a", "b"], {})
        fooa.should eql(["foo","bar", "a", "b"])
      end
    end
describe 'changes' do
it "tracks changes correctly" do
foo.save!
foo.reload
foo.push(tags: "foo")
puts foo.inspect
changes = foo.changes
changes[_name.to_s].should eql([[], ["foo", "bar"]])
end
end
    # Scoped queries must respect the model's default scope (account: 'a').
    context "default scope" do
      before(:each) do
        @foo1 = Foo.create!(_name => "a#{_separator}b#{_separator}c", :account => 'b')
        @foo2 = Foo.create!(_name => "b#{_separator}c#{_separator}f", :account => 'a')
      end
      describe "all_#{_name}" do
        it "returns tags per account scope" do
          # Only @foo2 matches the default scope, so only its three tags
          # are visible.
          results = Foo.send("all_#{_name}")
          results.length.should be(3)
          results.should include 'b'
          results.should include 'c'
          results.should include 'f'
        end
      end
    end
    context 'class scopes' do
      before(:each) do
        @foo1 = Foo.create!(_name => "a#{_separator}b#{_separator}c", :label => 'test')
        @foo2 = Foo.create!(_name => "b#{_separator}c#{_separator}f")
        @foo3 = Foo.create!(_name => "d#{_separator}e#{_separator}f")
      end
      describe "all_#{_name}" do
        it "returns all unique tag names as an array" do
          results = Foo.send("all_#{_name}")
          results.length.should be(6)
          results.should include 'a'
          results.should include 'b'
          results.should include 'c'
          results.should include 'd'
          results.should include 'e'
          results.should include 'f'
        end
        it "returns all unique tag names within the given scope" do
          results = Foo.where(label: 'test').send("all_#{_name}")
          results.length.should be(3)
          results.should include 'a'
          results.should include 'b'
          results.should include 'c'
        end
      end
      # Each query scope accepts splatted args, an array, or a separated
      # string; the examples below cover all three call styles.
      describe "with_#{_name}" do
        it "returns all models with a specific #{_name} (splatted)" do
          results = Foo.send("with_#{_name}", "a")
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_#{_name}", "b")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
        end
        it "returns all models with a specific #{_name} (arrayed)" do
          results = Foo.send("with_#{_name}", ["a"])
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_#{_name}", ["b"])
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
        end
      end
      describe "with_any_#{_name}" do
        it "returns all models with any #{_name} (splatted)" do
          results = Foo.send("with_any_#{_name}", "a")
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_any_#{_name}", "b")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results = Foo.send("with_any_#{_name}", "a", "e")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo3)
        end
        it "returns all models with any #{_name} (arrayed)" do
          results = Foo.send("with_any_#{_name}", ["a"])
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_any_#{_name}", ["b"])
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results = Foo.send("with_any_#{_name}", ["a", "e"])
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo3)
        end
        it "returns all models with any #{_name} (string)" do
          results = Foo.send("with_any_#{_name}", "a")
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_any_#{_name}", "b")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          # NOTE(review): the string form always uses "," here, not
          # _separator — verify that string args are intentionally
          # comma-split regardless of the field's separator.
          results = Foo.send("with_any_#{_name}", "a,e")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo3)
        end
      end
      describe "with_all_#{_name}" do
        it "returns all models with all #{_name} (splatted)" do
          results = Foo.send("with_all_#{_name}", "a")
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_all_#{_name}", "b")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results = Foo.send("with_all_#{_name}", "a", "e")
          results.should have(0).foos
          results = Foo.send("with_all_#{_name}", "b", "f")
          results.should have(1).foo
          results.should include(@foo2)
        end
        it "returns all models with all #{_name} (arrayed)" do
          results = Foo.send("with_all_#{_name}", ["a"])
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_all_#{_name}", ["b"])
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results = Foo.send("with_all_#{_name}", ["a", "e"])
          results.should have(0).foos
          results = Foo.send("with_all_#{_name}", ["b", "f"])
          results.should have(1).foo
          results.should include(@foo2)
        end
        it "returns all models with all #{_name} (string)" do
          results = Foo.send("with_all_#{_name}", "a")
          results.should have(1).foo
          results.should include(@foo1)
          results = Foo.send("with_all_#{_name}", "b")
          results.should have(2).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results = Foo.send("with_all_#{_name}", "a,e")
          results.should have(0).foos
          results = Foo.send("with_all_#{_name}", "b,f")
          results.should have(1).foo
          results.should include(@foo2)
        end
      end
      describe "without_any_#{_name}" do
        it "returns all models without any #{_name} (splatted)" do
          results = Foo.send("without_any_#{_name}", "a")
          results.should_not have(1).foo
          results.should_not include(@foo1)
          results = Foo.send("without_any_#{_name}", "b")
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo2)
          results = Foo.send("without_any_#{_name}", "a", "e")
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo3)
          results = Foo.send("without_any_#{_name}", 'a', 'z')
          results.should have(2).foos
          results.should include(@foo2)
          results.should include(@foo3)
          results = Foo.send("without_any_#{_name}", 'z')
          results.should have(3).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results.should include(@foo3)
        end
        it "returns all models without any #{_name} (arrayed)" do
          results = Foo.send("without_any_#{_name}", ["a"])
          results.should_not have(1).foo
          results.should_not include(@foo1)
          results = Foo.send("without_any_#{_name}", ["b"])
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo2)
          results = Foo.send("without_any_#{_name}", ["a", "e"])
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo3)
          results = Foo.send("without_any_#{_name}", ['a', 'z'])
          results.should have(2).foos
          results.should include(@foo2)
          results.should include(@foo3)
          results = Foo.send("without_any_#{_name}", ['z'])
          results.should have(3).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results.should include(@foo3)
        end
        it "returns all models without any #{_name} (string)" do
          results = Foo.send("without_any_#{_name}", "a")
          results.should_not have(1).foo
          results.should_not include(@foo1)
          results = Foo.send("without_any_#{_name}", "b")
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo2)
          results = Foo.send("without_any_#{_name}", "a,e")
          results.should_not have(2).foos
          results.should_not include(@foo1)
          results.should_not include(@foo3)
          results = Foo.send("without_any_#{_name}", 'a,z')
          results.should have(2).foos
          results.should include(@foo2)
          results.should include(@foo3)
          results = Foo.send("without_any_#{_name}", 'z')
          results.should have(3).foos
          results.should include(@foo1)
          results.should include(@foo2)
          results.should include(@foo3)
        end
      end
    end
  end
end
| 30.515789 | 87 | 0.55959 |
f7fe617fd98938d2339e5d08005f32ee85eec1a1 | 3,539 | require 'rails_helper'
RSpec.describe TestCollection::AnswerSearchKey, type: :service do
let(:test_collection) { create(:test_collection, :completed) }
let(:org_id) { test_collection.organization_id }
let(:test_id) { test_collection.id }
let(:question_choice_id) { nil }
let(:answer_number) { nil }
let(:audience_id) { nil }
let(:question_type) { nil }
let(:dont_include_test_answer_wrapper) { nil }
subject do
TestCollection::AnswerSearchKey.new(
question: question,
question_type: question_type,
question_choice_id: question_choice_id,
answer_number: answer_number,
audience_id: audience_id,
dont_include_test_answer_wrapper: dont_include_test_answer_wrapper,
)
end
context 'with scale question answer' do
let(:question) { test_collection.question_items.scale_questions.sample }
let(:answer_number) { 2 }
it 'returns question key' do
expect(subject.for_test(test_id)).to eq(
"test_answer(test_#{test_id}_question_#{question.id}_answer_2)",
)
end
it 'returns org-wide key' do
expect(subject.for_organization(org_id)).to eq(
"test_answer(organization_#{org_id}_#{question.question_type}_answer_2)",
)
end
context 'with audience' do
let(:audience_id) { 18 }
it 'returns key with audience' do
expect(subject.for_test(test_id)).to eq(
"test_answer(test_#{test_id}_question_#{question.id}_answer_2_audience_18)",
)
end
end
context 'with idea' do
it 'returns question key' do
expect(subject.for_test(test_id, 5)).to eq(
"test_answer(test_#{test_id}_idea_5_question_#{question.id}_answer_2)",
)
end
end
context 'with dont_include_test_answer_wrapper true' do
let!(:dont_include_test_answer_wrapper) { true }
it 'just returns key' do
expect(subject.for_test(test_id)).to eq(
"test_#{test_id}_question_#{question.id}_answer_2",
)
end
end
end
context 'with multiple choice question answer' do
let(:question) { create(:question_item, question_type: :question_single_choice, parent_collection: test_collection) }
let(:question_choice_id) { question.question_choices.sample.id }
it 'returns question key' do
expect(subject.for_test(test_id)).to eq(
"test_answer(test_#{test_id}_question_#{question.id}_answer_#{question_choice_id})",
)
end
it 'returns org-wide key' do
expect(subject.for_organization(org_id)).to eq(
"test_answer(organization_#{org_id}_#{question.question_type}_answer_#{question_choice_id})",
)
end
context 'with audience' do
let(:audience_id) { 18 }
it 'returns key with audience' do
expect(subject.for_test(test_id)).to eq(
"test_answer(test_#{test_id}_question_#{question.id}_answer_#{question_choice_id}_audience_18)",
)
end
end
context 'with idea' do
it 'returns key' do
expect(subject.for_test(test_id, 7)).to eq(
"test_answer(test_#{test_id}_idea_7_question_#{question.id}_answer_#{question_choice_id})",
)
end
end
end
context 'with empty question and question type' do
let!(:question) { nil }
let(:question_type) { :question_useful }
let(:answer_number) { 1 }
it 'returns org-wide key' do
expect(subject.for_organization(org_id)).to eq(
"test_answer(organization_#{org_id}_#{question_type}_answer_1)",
)
end
end
end
| 30.773913 | 121 | 0.674202 |
62900aa0d7bc48a44213f37857bd2214f0795c91 | 958 | require 'fluent/plugin/parser'
module Fluent::Plugin
  # JSON parser that flattens nested structures into joined keys, e.g.
  # {"a" => {"b" => 1}} becomes {"a<sep>b" => 1}, where <sep> is the
  # configurable +separator+ (default ".").
  class SimpleJsonParser < JSONParser
    Fluent::Plugin.register_parser("simple_json", self)

    # Key-path separator used when flattening nested records.
    config_param :separator, :string, default: '.'

    # Parses one JSON document and yields [time, flattened_record].
    # Yields [nil, nil] on malformed input, mirroring JSONParser's contract.
    def parse(text)
      r = @load_proc.call(text)
      time, record = convert_values(parse_time(r), r)
      record = record_flatten(record)
      yield time, record
    rescue @error_class, EncodingError
      yield nil, nil
    end

    # Recursively flattens Hashes/Arrays into a single-level Hash. Hash keys
    # are joined with +@separator+; Array elements are keyed by index.
    def record_flatten(record, parent = nil)
      flattend_record = {}
      record.each_with_index do |obj, i|
        if obj.is_a?(Array)
          # Hash#each_with_index yields [[key, value], index].
          k, v = obj
        else
          k, v = i, obj
        end
        key = parent ? "#{parent}#{@separator}#{k}" : k
        # to_s handles integer Array indexes at the top level, which used to
        # crash on String#gsub; '.' inside original keys is normalized to
        # the configured separator.
        key = key.to_s.gsub('.', @separator)
        if v.is_a? Enumerable
          flattend_record.merge!(record_flatten(v, key))
        else
          flattend_record[key] = v
        end
      end
      flattend_record
    end
  end
end
| 23.365854 | 56 | 0.584551 |
28be8bbb983e1bc2a616bb2c443b02deb8ecdd02 | 2,374 | # frozen_string_literal: true
require 'test_helper'
require 'helpers/mocked_instrumentation_service'
class HTML::PipelineTest < Minitest::Test
  Pipeline = HTML::Pipeline

  # Minimal filter whose effect is easy to observe: reverses the input.
  class TestFilter
    def self.call(input, _context, _result)
      input.reverse
    end
  end

  def setup
    @context = {}
    @result_class = Hash
    @pipeline = Pipeline.new [TestFilter], @context, @result_class
  end

  def test_filter_instrumentation
    service = MockedInstrumentationService.new
    events = service.subscribe 'call_filter.html_pipeline'
    @pipeline.instrumentation_service = service
    filter(body = 'hello')
    # The third popped element (result) was previously captured into an
    # unused local `res`; it is discarded now to avoid warnings.
    event, payload, _ = events.pop
    assert event, 'event expected'
    assert_equal 'call_filter.html_pipeline', event
    assert_equal TestFilter.name, payload[:filter]
    assert_equal @pipeline.class.name, payload[:pipeline]
    assert_equal body.reverse, payload[:result][:output]
  end

  def test_pipeline_instrumentation
    service = MockedInstrumentationService.new
    events = service.subscribe 'call_pipeline.html_pipeline'
    @pipeline.instrumentation_service = service
    filter(body = 'hello')
    event, payload, _ = events.pop
    assert event, 'event expected'
    assert_equal 'call_pipeline.html_pipeline', event
    assert_equal @pipeline.filters.map(&:name), payload[:filters]
    assert_equal @pipeline.class.name, payload[:pipeline]
    assert_equal body.reverse, payload[:result][:output]
  end

  def test_default_instrumentation_service
    service = 'default'
    Pipeline.default_instrumentation_service = service
    pipeline = Pipeline.new [], @context, @result_class
    assert_equal service, pipeline.instrumentation_service
  ensure
    # Always reset the class-level default so other tests are unaffected.
    Pipeline.default_instrumentation_service = nil
  end

  def test_setup_instrumentation
    assert_nil @pipeline.instrumentation_service
    service = MockedInstrumentationService.new
    events = service.subscribe 'call_pipeline.html_pipeline'
    @pipeline.setup_instrumentation name = 'foo', service
    assert_equal service, @pipeline.instrumentation_service
    assert_equal name, @pipeline.instrumentation_name
    filter(body = 'foo')
    event, payload, _ = events.pop
    assert event, 'expected event'
    assert_equal name, payload[:pipeline]
    assert_equal body.reverse, payload[:result][:output]
  end

  # Convenience wrapper: runs the pipeline under test on +input+.
  def filter(input)
    @pipeline.call(input)
  end
end
| 30.831169 | 66 | 0.747262 |
1cabc943fdad461fe09ac8481cbfa15307f2fbd2 | 293 | module Tagliani
class Configuration
  # Elasticsearch connection settings with library defaults.
  # Defined as a constant assignment rather than the
  # `class Elasticsearch < Struct.new(...)` anti-pattern, which creates a
  # useless anonymous intermediate class in the ancestry.
  Elasticsearch = Struct.new(:url, :index, :refresh, :log) do
    # Callers always build this with no arguments; defaults are applied here.
    def initialize
      super("http://localhost:9200", "tagliani", false, false)
    end
  end
end
end | 24.416667 | 67 | 0.583618 |
b9eef57a096fe6c436337b03ef1bcdb3a95c9e68 | 1,028 | require 'spec_helper'
describe NetSuite::Actions::Delete do
  # Mock the SOAP transport for the whole group; no live NetSuite calls.
  before(:all) { savon.mock! }
  after(:all) { savon.unmock! }
  context 'Customer' do
    let(:customer) do
      NetSuite::Records::Customer.new(:internal_id => '980', :entity_id => 'Shutter Fly', :company_name => 'Shutter Fly, Inc.')
    end
    # Expect a delete call carrying the customer's internalId/type in the
    # baseRef attributes, and reply with a canned success payload.
    before do
      savon.expects(:delete).with(:message => {
        'platformMsgs:baseRef' => {},
        :attributes! => {
          'platformMsgs:baseRef' => {
            'internalId' => '980',
            'type' => 'customer',
            'xsi:type' => 'platformCore:RecordRef'
          }
        }
      }).returns(File.read('spec/support/fixtures/delete/delete_customer.xml'))
    end
    it 'makes a valid request to the NetSuite API' do
      NetSuite::Actions::Delete.call(customer)
    end
    it 'returns a valid Response object' do
      response = NetSuite::Actions::Delete.call(customer)
      response.should be_kind_of(NetSuite::Response)
      response.should be_success
    end
  end
end
| 27.783784 | 127 | 0.604086 |
116eaa80a3bc381c8d53ef5d82525c9e388ec8b0 | 206 | class Gawker < Cask
url 'http://sourceforge.net/projects/gawker/files/latest/download'
homepage 'http://gawker.sourceforge.net/Gawker.html'
version 'latest'
sha256 :no_check
link 'Gawker.app'
end
| 25.75 | 68 | 0.747573 |
# Thin wrapper around an Array that adds a flexible #sum.
# (The class line was corrupted by extraction metadata; restored here.)
class MyArray
  attr_reader :array

  def initialize(array)
    @array = array
  end

  # Sums the elements, starting from +initial_value+. When a block is given,
  # each element is transformed by the block before being added.
  def sum(initial_value = 0)
    return array.reduce(initial_value, :+) unless block_given?

    array.reduce(initial_value) { |acc, element| acc + yield(element) }
  end
end
| 16.125 | 57 | 0.627907 |
62941c1643a222c42f3b5fd7ec775a342b6071dc | 225 | namespace :national_circumstances do
desc 'Imports national_circumstances'
task import: :environment do
TimedLogger.log('import national circumstances') do
ImportNationalCircumstances.new.call
end
end
end
| 25 | 55 | 0.777778 |
62009ef0d1b2903033576ecb99e2f812a35fe54e | 3,008 | # This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# This file is the source Rails uses to define your schema when running `bin/rails
# db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
# be faster and is potentially less error prone than running all of your
# migrations from scratch. Old migrations may fail to apply correctly if those
# migrations use external dependencies or application code.
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2021_06_09_183105) do
  # NOTE(review): this block is generated by db:schema:dump — comments added
  # here will be lost on the next dump; structural changes belong in
  # migrations.
  # These are extensions that must be enabled in order to support this database
  enable_extension "plpgsql"
  # Join table: chords <-> measures.
  create_table "chord_measures", force: :cascade do |t|
    t.bigint "chord_id", null: false
    t.bigint "measure_id", null: false
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
    t.index ["chord_id"], name: "index_chord_measures_on_chord_id"
    t.index ["measure_id"], name: "index_chord_measures_on_measure_id"
  end
  create_table "chords", force: :cascade do |t|
    t.string "name"
    t.string "root"
    t.string "quality"
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
  end
  create_table "measures", force: :cascade do |t|
    t.bigint "song_id", null: false
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
    t.index ["song_id"], name: "index_measures_on_song_id"
  end
  # Join table: songs <-> time_signatures.
  # NOTE(review): unlike the other join tables, these FK columns are
  # nullable and have no add_foreign_key constraints — confirm whether that
  # is intentional in the source migrations.
  create_table "song_time_sigs", force: :cascade do |t|
    t.bigint "song_id"
    t.bigint "time_signature_id"
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
    t.index ["song_id"], name: "index_song_time_sigs_on_song_id"
    t.index ["time_signature_id"], name: "index_song_time_sigs_on_time_signature_id"
  end
  create_table "songs", force: :cascade do |t|
    t.string "title"
    t.string "author"
    t.integer "tempo"
    t.bigint "user_id", null: false
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
    t.index ["user_id"], name: "index_songs_on_user_id"
  end
  create_table "time_signatures", force: :cascade do |t|
    t.string "name"
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
  end
  create_table "users", force: :cascade do |t|
    t.string "username"
    t.datetime "created_at", precision: 6, null: false
    t.datetime "updated_at", precision: 6, null: false
  end
  add_foreign_key "chord_measures", "chords"
  add_foreign_key "chord_measures", "measures"
  add_foreign_key "measures", "songs"
  add_foreign_key "songs", "users"
end
| 38.564103 | 86 | 0.72141 |
28353b7ff69de1106f4b58a3cfd5bd6316a099de | 1,711 | class Komposition < Formula
desc "Video editor built for screencasters"
homepage "https://github.com/owickstrom/komposition"
url "https://github.com/owickstrom/komposition/archive/v0.2.0.tar.gz"
sha256 "cedb41c68866f8d6a87579f566909fcd32697b03f66c0e2a700a94b6a9263b88"
license "MPL-2.0"
revision 3
head "https://github.com/owickstrom/komposition.git"
bottle do
cellar :any
sha256 "dc76316ff64beb2d4756ba554844a57d546a0bbe8a300ce1879a6cddcb72ebf8" => :catalina
sha256 "e78904afced48a6365ec5cec4b9e97ecccf4bf81401c5576a0c3b21fa1078264" => :mojave
sha256 "137747b62de4e68164bceccd009beb65606ae6ba2c94fbe9a72b0eee50ae0961" => :high_sierra
end
depends_on "cabal-install" => :build
depends_on "[email protected]" => :build
depends_on "pkg-config" => :build
depends_on "ffmpeg"
depends_on "gobject-introspection"
depends_on "gst-libav"
depends_on "gst-plugins-base"
depends_on "gst-plugins-good"
depends_on "gstreamer"
depends_on "gstreamer"
depends_on "gtk+3"
depends_on "sox"
uses_from_macos "libffi"
# fix a constraint issue with protolude
# remove once new version with
# https://github.com/owickstrom/komposition/pull/102 is included
patch do
url "https://github.com/owickstrom/komposition/commit/e6c575cf8eddc3be30471df9a9dd92b3cb9f70c1.diff?full_index=1"
sha256 "bdf561d07f1b8d41a4c030e121becab3b70882da8ccee53c1e91c6c0931fee0c"
end
def install
system "cabal", "v2-update"
system "cabal", "v2-install", *std_cabal_v2_args
end
test do
output = shell_output "#{bin}/komposition doesnotexist 2>&1"
assert_match "[ERROR] Opening existing project failed: ProjectDirectoryDoesNotExist \"doesnotexist\"", output
end
end
| 34.22 | 117 | 0.77031 |
bbb6de07c003ecf4b6c88745255d45e9a14f3f6d | 778 | module CanTango::PermitStore
module Parser
sweetload :Permit, :PermitMode, :Rule
def self.create_for meth, action, target
parser_class(target).new method, action, target
end
protected
def self.parser_class target
parser_name(target).constantize
end
def self.parser_name target
"#{ns}::#{parser_type(target).to_s.camelize}"
end
def self.ns
"CanTango::PermitStore::Parser::Rule"
end
def self.parser_type target
case target.to_s
when /\/(.*)\//
:regex
when /^\^(\w+)/ # a category is prefixed with a '^<D-^>s'
:category
when /\w+#\w+=.+/
:relationship
when /\w+#\w+/
:ownership
else
:default
end
end
end
end | 20.473684 | 63 | 0.57455 |
1c211feaf52bc14ce992d7551c93d51b91173c22 | 27,384 | module Sass
module Selector
# An operator-separated sequence of
# {SimpleSequence simple selector sequences}.
class Sequence < AbstractSequence
# Sets the line of the Sass template on which this selector was declared.
# This also sets the line for all child selectors.
#
# @param line [Integer]
# @return [Integer]
def line=(line)
members.each {|m| m.line = line if m.is_a?(SimpleSequence)}
@line = line
end
# Sets the name of the file in which this selector was declared,
# or `nil` if it was not declared in a file (e.g. on stdin).
# This also sets the filename for all child selectors.
#
# @param filename [String, nil]
# @return [String, nil]
def filename=(filename)
members.each {|m| m.filename = filename if m.is_a?(SimpleSequence)}
filename
end
# The array of {SimpleSequence simple selector sequences}, operators, and
# newlines. The operators are strings such as `"+"` and `">"` representing
# the corresponding CSS operators, or interpolated SassScript. Newlines
# are also newline strings; these aren't semantically relevant, but they
# do affect formatting.
#
# @return [Array<SimpleSequence, String|Array<Sass::Tree::Node, String>>]
attr_reader :members
# @param seqs_and_ops [Array<SimpleSequence, String|Array<Sass::Tree::Node, String>>]
# See \{#members}
def initialize(seqs_and_ops)
@members = seqs_and_ops
end
# Resolves the {Parent} selectors within this selector
# by replacing them with the given parent selector,
# handling commas appropriately.
#
# @param super_cseq [CommaSequence] The parent selector
# @param implicit_parent [Boolean] Whether the the parent
# selector should automatically be prepended to the resolved
# selector if it contains no parent refs.
# @return [CommaSequence] This selector, with parent references resolved
# @raise [Sass::SyntaxError] If a parent selector is invalid
def resolve_parent_refs(super_cseq, implicit_parent)
members = @members.dup
nl = (members.first == "\n" && members.shift)
contains_parent_ref = contains_parent_ref?
return CommaSequence.new([self]) if !implicit_parent && !contains_parent_ref
unless contains_parent_ref
old_members, members = members, []
members << nl if nl
members << SimpleSequence.new([Parent.new], false)
members += old_members
end
CommaSequence.new(Sass::Util.paths(members.map do |sseq_or_op|
next [sseq_or_op] unless sseq_or_op.is_a?(SimpleSequence)
sseq_or_op.resolve_parent_refs(super_cseq).members
end).map do |path|
path_members = path.map do |seq_or_op|
next seq_or_op unless seq_or_op.is_a?(Sequence)
seq_or_op.members
end
if path_members.length == 2 && path_members[1][0] == "\n"
path_members[0].unshift path_members[1].shift
end
Sequence.new(path_members.flatten)
end)
end
# Returns whether there's a {Parent} selector anywhere in this sequence.
#
# @return [Boolean]
def contains_parent_ref?
members.any? do |sseq_or_op|
next false unless sseq_or_op.is_a?(SimpleSequence)
next true if sseq_or_op.members.first.is_a?(Parent)
sseq_or_op.members.any? do |sel|
sel.is_a?(Pseudo) && sel.selector && sel.selector.contains_parent_ref?
end
end
end
# Non-destructively extends this selector with the extensions specified in a hash
# (which should come from {Sass::Tree::Visitors::Cssize}).
#
# @param extends [Sass::Util::SubsetMap{Selector::Simple =>
# Sass::Tree::Visitors::Cssize::Extend}]
# The extensions to perform on this selector
# @param parent_directives [Array<Sass::Tree::DirectiveNode>]
# The directives containing this selector.
# @param replace [Boolean]
# Whether to replace the original selector entirely or include
# it in the result.
# @param seen [Set<Array<Selector::Simple>>]
# The set of simple sequences that are currently being replaced.
# @param original [Boolean]
# Whether this is the original selector being extended, as opposed to
# the result of a previous extension that's being re-extended.
# @return [Array<Sequence>] A list of selectors generated
# by extending this selector with `extends`.
# These correspond to a {CommaSequence}'s {CommaSequence#members members array}.
# @see CommaSequence#do_extend
def do_extend(extends, parent_directives, replace, seen, original)
extended_not_expanded = members.map do |sseq_or_op|
next [[sseq_or_op]] unless sseq_or_op.is_a?(SimpleSequence)
extended = sseq_or_op.do_extend(extends, parent_directives, replace, seen)
# The First Law of Extend says that the generated selector should have
# specificity greater than or equal to that of the original selector.
# In order to ensure that, we record the original selector's
# (`extended.first`) original specificity.
extended.first.add_sources!([self]) if original && !invisible?
extended.map {|seq| seq.members}
end
weaves = Sass::Util.paths(extended_not_expanded).map {|path| weave(path)}
trim(weaves).map {|p| Sequence.new(p)}
end
# Unifies this with another selector sequence to produce a selector
# that matches (a subset of) the intersection of the two inputs.
#
# @param other [Sequence]
# @return [CommaSequence, nil] The unified selector, or nil if unification failed.
# @raise [Sass::SyntaxError] If this selector cannot be unified.
#   This will only ever occur when a dynamic selector, such as {Parent} or
#   {Interpolation}, is used in unification. Since these selectors should be
#   resolved by the time extension and unification happen, this exception
#   will only ever be raised as a result of programmer error
def unify(other)
  this_base = members.last
  that_base = other.members.last
  return unless this_base.is_a?(SimpleSequence) && that_base.is_a?(SimpleSequence)

  unified = that_base.unify(this_base)
  return unless unified

  # Interleave everything before the two bases, appending the merged base
  # to the second prefix so it survives the weave.
  woven = weave([members[0...-1], other.members[0...-1] + [unified]])
  CommaSequence.new(woven.map {|path| Sequence.new(path)})
end
# Returns whether or not this selector matches all elements
# that the given selector matches (as well as possibly more).
#
# @example
#   (.foo).superselector?(.foo.bar) #=> true
#   (.foo).superselector?(.bar) #=> false
# @param seq [Sequence]
# @return [Boolean]
def superselector?(seq)
  _superselector?(members, seq.members)
end
# @see AbstractSequence#to_s
def to_s(opts = {})
@members.map {|m| m.is_a?(String) ? m : m.to_s(opts)}.join(" ").gsub(/ ?\n ?/, "\n")
end
# Returns a string representation of the sequence.
# This is basically the selector string.
#
# @return [String]
def inspect
members.map {|m| m.inspect}.join(" ")
end
# Add to the {SimpleSequence#sources} sets of the child simple sequences.
# This destructively modifies this sequence's members array, but not the
# child simple sequences.
#
# @param sources [Set<Sequence>]
def add_sources!(sources)
  # Combinator strings are kept as-is; each simple sequence is replaced by
  # a copy that also carries +sources+.
  members.map! {|m| m.is_a?(SimpleSequence) ? m.with_more_sources(sources) : m}
end
# Converts the subject operator "!", if it exists, into a ":has()"
# selector.
#
# @return [Sequence]
def subjectless
  pre_subject = []
  has = []
  subject = nil
  members.each do |sseq_or_op|
    if subject
      # Everything after the subject becomes the :has() argument.
      has << sseq_or_op
    elsif sseq_or_op.is_a?(String) || !sseq_or_op.subject?
      pre_subject << sseq_or_op
    else
      # Found the subject; duplicate it (and its members array) so the
      # original sequence is not mutated when the flag is cleared below.
      subject = sseq_or_op.dup
      subject.members = sseq_or_op.members.dup
      subject.subject = false
      has = []
    end
  end
  # No "!" operator anywhere: nothing to convert.
  return self unless subject
  unless has.empty?
    subject.members << Pseudo.new(:class, 'has', nil, CommaSequence.new([Sequence.new(has)]))
  end
  Sequence.new(pre_subject + [subject])
end
private
# Conceptually, this expands "parenthesized selectors". That is, if we
# have `.A .B {@extend .C}` and `.D .C {...}`, this conceptually expands
# into `.D .C, .D (.A .B)`, and this function translates `.D (.A .B)` into
# `.D .A .B, .A .D .B`. For thoroughness, `.A.D .B` would also be
# required, but including merged selectors results in exponential output
# for very little gain.
#
# @param path [Array<Array<SimpleSequence or String>>]
#   A list of parenthesized selector groups.
# @return [Array<Array<SimpleSequence or String>>] A list of fully-expanded selectors.
def weave(path)
  # This function works by moving through the selector path left-to-right,
  # building all possible prefixes simultaneously.
  prefixes = [[]]

  path.each do |current|
    next if current.empty?
    current = current.dup
    # The final compound selector of each group keeps its position; only
    # the part before it is interleaved with the accumulated prefixes.
    last_current = [current.pop]
    prefixes = prefixes.map do |prefix|
      sub = subweave(prefix, current)
      # nil means the two pieces cannot be interleaved; drop this prefix.
      next [] unless sub
      sub.map {|seqs| seqs + last_current}
    end.flatten(1)
  end
  prefixes
end
# This interweaves two lists of selectors,
# returning all possible orderings of them (including using unification)
# that maintain the relative ordering of the input arrays.
#
# For example, given `.foo .bar` and `.baz .bang`,
# this would return `.foo .bar .baz .bang`, `.foo .bar.baz .bang`,
# `.foo .baz .bar .bang`, `.foo .baz .bar.bang`, `.foo .baz .bang .bar`,
# and so on until `.baz .bang .foo .bar`.
#
# Semantically, for selectors A and B, this returns all selectors `AB_i`
# such that the union over all i of elements matched by `AB_i X` is
# identical to the intersection of all elements matched by `A X` and all
# elements matched by `B X`. Some `AB_i` are elided to reduce the size of
# the output.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Array<Array<SimpleSequence or String>>]
def subweave(seq1, seq2)
  # Trivial cases: weaving with an empty sequence is the identity.
  return [seq2] if seq1.empty?
  return [seq1] if seq2.empty?

  seq1, seq2 = seq1.dup, seq2.dup
  # Leading/trailing combinators must be reconcilable or no weave exists.
  return unless (init = merge_initial_ops(seq1, seq2))
  return unless (fin = merge_final_ops(seq1, seq2))

  # Make sure there's only one root selector in the output.
  root1 = has_root?(seq1.first) && seq1.shift
  root2 = has_root?(seq2.first) && seq2.shift
  if root1 && root2
    return unless (root = root1.unify(root2))
    seq1.unshift root
    seq2.unshift root
  elsif root1
    seq2.unshift root1
  elsif root2
    seq1.unshift root2
  end

  seq1 = group_selectors(seq1)
  seq2 = group_selectors(seq2)
  # Elements common to both sequences (up to unification) anchor the
  # interleaving; everything between anchors is freely ordered by chunks.
  lcs = Sass::Util.lcs(seq2, seq1) do |s1, s2|
    next s1 if s1 == s2
    next unless s1.first.is_a?(SimpleSequence) && s2.first.is_a?(SimpleSequence)
    next s2 if parent_superselector?(s1, s2)
    next s1 if parent_superselector?(s2, s1)
    next unless must_unify?(s1, s2)
    next unless (unified = Sequence.new(s1).unify(Sequence.new(s2)))
    unified.members.first.members if unified.members.length == 1
  end

  diff = [[init]]

  until lcs.empty?
    diff << chunks(seq1, seq2) {|s| parent_superselector?(s.first, lcs.first)} << [lcs.shift]
    seq1.shift
    seq2.shift
  end
  diff << chunks(seq1, seq2) {|s| s.empty?}
  diff += fin.map {|sel| sel.is_a?(Array) ? sel : [sel]}
  diff.reject! {|c| c.empty?}

  # A selector with two subject (`!`) markers can never be valid.
  Sass::Util.paths(diff).map {|p| p.flatten}.reject {|p| path_has_two_subjects?(p)}
end
# Extracts initial selector combinators (`"+"`, `">"`, `"~"`, and `"\n"`)
# from two sequences and merges them together into a single array of
# selector combinators.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Array<String>, nil] If there are no operators in the merged
#   sequence, this will be the empty array. If the operators cannot be
#   merged, this will be nil.
def merge_initial_ops(seq1, seq2)
  # Destructively peel leading combinator strings off both sequences.
  ops1, ops2 = [], []
  ops1 << seq1.shift while seq1.first.is_a?(String)
  ops2 << seq2.shift while seq2.first.is_a?(String)

  # A leading newline is purely cosmetic; remember it and strip it.
  newline = false
  newline ||= !!ops1.shift if ops1.first == "\n"
  newline ||= !!ops2.shift if ops2.first == "\n"

  # If neither sequence is a subsequence of the other, they cannot be
  # merged successfully
  lcs = Sass::Util.lcs(ops1, ops2)
  return unless lcs == ops1 || lcs == ops2
  (newline ? ["\n"] : []) + (ops1.size > ops2.size ? ops1 : ops2)
end
# Extracts final selector combinators (`"+"`, `">"`, `"~"`) and the
# selectors to which they apply from two sequences and merges them
# together into a single array.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Array<SimpleSequence or String or
#   Array<Array<SimpleSequence or String>>]
#   If there are no trailing combinators to be merged, this will be the
#   empty array. If the trailing combinators cannot be merged, this will
#   be nil. Otherwise, this will contained the merged selector. Array
#   elements are [Sass::Util#paths]-style options; conceptually, an "or"
#   of multiple selectors.
# @comment
#   rubocop:disable MethodLength
def merge_final_ops(seq1, seq2, res = [])
  # Destructively peel trailing combinator strings off both sequences.
  ops1, ops2 = [], []
  ops1 << seq1.pop while seq1.last.is_a?(String)
  ops2 << seq2.pop while seq2.last.is_a?(String)

  # Not worth the headache of trying to preserve newlines here. The most
  # important use of newlines is at the beginning of the selector to wrap
  # across lines anyway.
  ops1.reject! {|o| o == "\n"}
  ops2.reject! {|o| o == "\n"}

  return res if ops1.empty? && ops2.empty?
  if ops1.size > 1 || ops2.size > 1
    # If there are multiple operators, something hacky's going on. If one
    # is a supersequence of the other, use that, otherwise give up.
    lcs = Sass::Util.lcs(ops1, ops2)
    return unless lcs == ops1 || lcs == ops2
    res.unshift(*(ops1.size > ops2.size ? ops1 : ops2).reverse)
    return res
  end

  # This code looks complicated, but it's actually just a bunch of special
  # cases for interactions between different combinators.
  op1, op2 = ops1.first, ops2.first
  if op1 && op2
    sel1 = seq1.pop
    sel2 = seq2.pop
    if op1 == '~' && op2 == '~'
      # "~ ~": keep only the more general sibling, or enumerate orderings.
      if sel1.superselector?(sel2)
        res.unshift sel2, '~'
      elsif sel2.superselector?(sel1)
        res.unshift sel1, '~'
      else
        merged = sel1.unify(sel2)
        res.unshift [
          [sel1, '~', sel2, '~'],
          [sel2, '~', sel1, '~'],
          ([merged, '~'] if merged)
        ].compact
      end
    elsif (op1 == '~' && op2 == '+') || (op1 == '+' && op2 == '~')
      # "~" and "+" mixed: "+" is the stricter constraint.
      if op1 == '~'
        tilde_sel, plus_sel = sel1, sel2
      else
        tilde_sel, plus_sel = sel2, sel1
      end

      if tilde_sel.superselector?(plus_sel)
        res.unshift plus_sel, '+'
      else
        merged = plus_sel.unify(tilde_sel)
        res.unshift [
          [tilde_sel, '~', plus_sel, '+'],
          ([merged, '+'] if merged)
        ].compact
      end
    elsif op1 == '>' && %w(~ +).include?(op2)
      # ">" binds tighter: emit the sibling side, requeue the ">" side.
      res.unshift sel2, op2
      seq1.push sel1, op1
    elsif op2 == '>' && %w(~ +).include?(op1)
      res.unshift sel1, op1
      seq2.push sel2, op2
    elsif op1 == op2
      merged = sel1.unify(sel2)
      return unless merged
      res.unshift merged, op1
    else
      # Unknown selector combinators can't be unified
      return
    end
    return merge_final_ops(seq1, seq2, res)
  elsif op1
    # Only seq1 ends in a combinator; ">" may absorb a redundant parent.
    seq2.pop if op1 == '>' && seq2.last && seq2.last.superselector?(seq1.last)
    res.unshift seq1.pop, op1
    return merge_final_ops(seq1, seq2, res)
  else # op2
    seq1.pop if op2 == '>' && seq1.last && seq1.last.superselector?(seq2.last)
    res.unshift seq2.pop, op2
    return merge_final_ops(seq1, seq2, res)
  end
end
# @comment
#   rubocop:enable MethodLength

# Takes initial subsequences of `seq1` and `seq2` and returns all
# orderings of those subsequences. The initial subsequences are determined
# by a block.
#
# Destructively removes the initial subsequences of `seq1` and `seq2`.
#
# For example, given `(A B C | D E)` and `(1 2 | 3 4 5)` (with `|`
# denoting the boundary of the initial subsequence), this would return
# `[(A B C 1 2), (1 2 A B C)]`. The sequences would then be `(D E)` and
# `(3 4 5)`.
#
# @param seq1 [Array]
# @param seq2 [Array]
# @yield [a] Used to determine when to cut off the initial subsequences.
#   Called repeatedly for each sequence until it returns true.
# @yieldparam a [Array] A final subsequence of one input sequence after
#   cutting off some initial subsequence.
# @yieldreturn [Boolean] Whether or not to cut off the initial subsequence
#   here.
# @return [Array<Array>] All possible orderings of the initial subsequences.
def chunks(seq1, seq2)
  lead1 = []
  lead1 << seq1.shift until yield seq1
  lead2 = []
  lead2 << seq2.shift until yield seq2

  if lead1.empty?
    lead2.empty? ? [] : [lead2]
  elsif lead2.empty?
    [lead1]
  else
    [lead1 + lead2, lead2 + lead1]
  end
end
# Groups a sequence into subsequences. The subsequences are determined by
# strings; adjacent non-string elements will be put into separate groups,
# but any element adjacent to a string will be grouped with that string.
#
# For example, `(A B "C" D E "F" G "H" "I" J)` will become `[(A) (B "C" D)
# (E "F" G "H" "I" J)]`.
#
# @param seq [Array]
# @return [Array<Array>]
def group_selectors(seq)
  groups = []
  remaining = seq.dup
  until remaining.empty?
    group = []
    loop do
      group << remaining.shift
      # Keep absorbing while we just consumed a string (and more remains)
      # or the next element is a string.
      break unless (!remaining.empty? && group.last.is_a?(String)) ||
                   remaining.first.is_a?(String)
    end
    groups << group
  end
  groups
end
# Given two selector sequences, returns whether `seq1` is a
# superselector of `seq2`; that is, whether `seq1` matches every
# element `seq2` matches.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Boolean]
def _superselector?(seq1, seq2)
  # Newlines are purely cosmetic and never affect matching.
  seq1 = seq1.reject {|e| e == "\n"}
  seq2 = seq2.reject {|e| e == "\n"}
  # Selectors with leading or trailing operators are neither
  # superselectors nor subselectors.
  return if seq1.last.is_a?(String) || seq2.last.is_a?(String) ||
    seq1.first.is_a?(String) || seq2.first.is_a?(String)
  # More complex selectors are never superselectors of less complex ones
  return if seq1.size > seq2.size
  return seq1.first.superselector?(seq2.last, seq2[0...-1]) if seq1.size == 1

  # Find the first compound in seq2 (other than the last) that seq1's head
  # covers; the rest of seq1 must then cover the rest of seq2.
  _, si = seq2.each_with_index.find do |e, i|
    return if i == seq2.size - 1
    next if e.is_a?(String)
    seq1.first.superselector?(e, seq2[0...i])
  end
  return unless si

  if seq1[1].is_a?(String)
    return unless seq2[si + 1].is_a?(String)
    # .foo ~ .bar is a superselector of .foo + .bar
    return unless seq1[1] == "~" ? seq2[si + 1] != ">" : seq1[1] == seq2[si + 1]

    # .foo > .baz is not a superselector of .foo > .bar > .baz or .foo >
    # .bar .baz, despite the fact that .baz is a superselector of .bar >
    # .baz and .bar .baz. Same goes for + and ~.
    return if seq1.length == 3 && seq2.length > 3

    return _superselector?(seq1[2..-1], seq2[si + 2..-1])
  elsif seq2[si + 1].is_a?(String)
    # A bare descendant on the left only covers ">" on the right.
    return unless seq2[si + 1] == ">"
    return _superselector?(seq1[1..-1], seq2[si + 2..-1])
  else
    return _superselector?(seq1[1..-1], seq2[si + 1..-1])
  end
end
# Like \{#_superselector?}, but compares the selectors in the
# context of parent selectors, as though they shared an implicit
# base simple selector. For example, `B` is not normally a
# superselector of `B A`, since it doesn't match `A` elements.
# However, it is a parent superselector, since `B X` is a
# superselector of `B A X`.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Boolean]
def parent_superselector?(seq1, seq2)
  # A placeholder acts as the shared sentinel "X" appended to both sides.
  base = Sass::Selector::SimpleSequence.new([Sass::Selector::Placeholder.new('<temp>')],
                                            false)
  _superselector?(seq1 + [base], seq2 + [base])
end
# Returns whether two selectors must be unified to produce a valid
# combined selector. This is true when both selectors contain the same
# unique simple selector such as an id.
#
# @param seq1 [Array<SimpleSequence or String>]
# @param seq2 [Array<SimpleSequence or String>]
# @return [Boolean]
def must_unify?(seq1, seq2)
  # Collect every unique simple selector (e.g. an id) appearing in seq1.
  uniques = Set.new
  seq1.each do |sseq|
    next if sseq.is_a?(String)
    sseq.members.each {|sel| uniques << sel if sel.unique?}
  end
  return false if uniques.empty?

  # Unification is mandatory iff seq2 repeats one of those selectors.
  seq2.any? do |sseq|
    next false if sseq.is_a?(String)
    sseq.members.any? {|sel| sel.unique? && uniques.include?(sel)}
  end
end
# Removes redundant selectors from between multiple lists of
# selectors. This takes a list of lists of selector sequences;
# each individual list is assumed to have no redundancy within
# itself. A selector is only removed if it's redundant with a
# selector in another list.
#
# "Redundant" here means that one selector is a superselector of
# the other. The more specific selector is removed.
#
# @param seqses [Array<Array<Array<SimpleSequence or String>>>]
# @return [Array<Array<SimpleSequence or String>>]
def trim(seqses)
  # Avoid truly horrific quadratic behavior. TODO: I think there
  # may be a way to get perfect trimming without going quadratic.
  return seqses.flatten(1) if seqses.size > 100

  # Keep the results in a separate array so we can be sure we aren't
  # comparing against an already-trimmed selector. This ensures that two
  # identical selectors don't mutually trim one another.
  result = seqses.dup

  # This is n^2 on the sequences, but only comparing between
  # separate sequences should limit the quadratic behavior.
  seqses.each_with_index do |seqs1, i|
    result[i] = seqs1.reject do |seq1|
      # The maximum specificity of the sources that caused [seq1] to be
      # generated. In order for [seq1] to be removed, there must be
      # another selector that's a superselector of it *and* that has
      # specificity greater or equal to this.
      max_spec = _sources(seq1).map do |seq|
        spec = seq.specificity
        spec.is_a?(Range) ? spec.max : spec
      end.max || 0

      result.any? do |seqs2|
        next if seqs1.equal?(seqs2)
        # Second Law of Extend: the specificity of a generated selector
        # should never be less than the specificity of the extending
        # selector.
        #
        # See https://github.com/nex3/sass/issues/324.
        seqs2.any? do |seq2|
          # NOTE(review): _specificity is defined outside this file chunk;
          # it appears to return an Integer or a Range -- confirm.
          spec2 = _specificity(seq2)
          spec2 = spec2.begin if spec2.is_a?(Range)
          spec2 >= max_spec && _superselector?(seq2, seq1)
        end
      end
    end
  end
  result.flatten(1)
end
# Hash component used by AbstractSequence; newlines are excluded since
# they are cosmetic only (see #to_s).
def _hash
  members.reject {|m| m == "\n"}.hash
end

# Structural equality counterpart to #_hash, also ignoring newlines.
def _eql?(other)
  other.members.reject {|m| m == "\n"}.eql?(members.reject {|m| m == "\n"})
end
# Returns whether a woven selector path contains more than one subject
# (`!`) simple sequence; such a selector can never be valid.
def path_has_two_subjects?(path)
  subjects = 0
  path.each do |sseq_or_op|
    next unless sseq_or_op.is_a?(SimpleSequence) && sseq_or_op.subject?
    subjects += 1
    return true if subjects > 1
  end
  false
end
# Collects the union of the {SimpleSequence#sources} sets along a woven
# selector path. Combinator strings contribute nothing.
#
# @param seq [Array<SimpleSequence or String>]
# @return [Set<Sequence>]
def _sources(seq)
  s = Set.new
  # `each` rather than `map`: the block is run purely for its side effect
  # of merging into the accumulator.
  seq.each {|sseq_or_op| s.merge sseq_or_op.sources if sseq_or_op.is_a?(SimpleSequence)}
  s
end
# Renders the intermediate extend data structure for debugging. Each
# position's alternatives are shown comma-separated in parentheses unless
# there is a single space-free choice.
#
# @param extended_not_expanded [Array<Array<Array<SimpleSequence or String>>>]
# @return [String]
def extended_not_expanded_to_s(extended_not_expanded)
  extended_not_expanded.map do |choices|
    choices = choices.map do |sel|
      next sel.first.to_s if sel.size == 1
      "#{sel.join ' '}"
    end
    # Bug fix: the original tested `choices.include?(' ')`, i.e. whether the
    # *array* contained a literal " " string (never true for joined selector
    # strings). The intent is to check the single choice itself for spaces
    # so multi-compound choices still get parentheses.
    next choices.first if choices.size == 1 && !choices.first.include?(' ')
    "(#{choices.join ', '})"
  end.join ' '
end
# Returns whether +candidate+ is a simple sequence containing a
# ":root" pseudo selector.
def has_root?(candidate)
  return false unless candidate.is_a?(SimpleSequence)
  candidate.members.any? do |simple|
    simple.is_a?(Pseudo) && simple.normalized_name == "root"
  end
end
end
end
end
| 41.117117 | 100 | 0.576541 |
01f5e9a43ef4803bcedb334a7e6febdac8f031d7 | 651 | # encoding: utf-8
module FFaker
  # Fake French phone numbers: landline/work numbers use area prefixes 1-5,
  # mobiles use 6-7, each rendered with one of the country-prefix styles
  # ("+33", "0033" or the domestic "0").
  module PhoneNumberFR
    extend ModuleUtils
    extend self

    COUNTRY_PREFIX = %w[+33 0033 0]
    AREA_PREFIX = %w[1 2 3 4 5]
    MOBILE_PREFIX = %w[6 7]
    PHONE_NUMBER = ['########', ' ## ## ## ##']

    # Picks a home/work or a mobile number with equal probability.
    def phone_number
      if rand(2).zero?
        home_work_phone_number
      else
        mobile_phone_number
      end
    end

    def home_work_phone_number
      FFaker.numerify("#{COUNTRY_PREFIX.sample}#{AREA_PREFIX.sample}#{PHONE_NUMBER.sample}")
    end

    def mobile_phone_number
      FFaker.numerify("#{COUNTRY_PREFIX.sample}#{MOBILE_PREFIX.sample}#{PHONE_NUMBER.sample}")
    end
  end
end
| 22.448276 | 93 | 0.652842 |
2855b215f2b34a3f15b36dc5332268c0a0b6ecce | 1,110 | class Ilmbase < Formula
desc "OpenEXR ILM Base libraries (high dynamic-range image file format)"
homepage "https://www.openexr.com/"
url "https://download.savannah.nongnu.org/releases/openexr/ilmbase-2.2.1.tar.gz"
sha256 "cac206e63be68136ef556c2b555df659f45098c159ce24804e9d5e9e0286609e"
bottle do
cellar :any
# sha256 "63e5e1e6f1e17b8aa264b14cb1e8120a7e606b4f6173c85c945ea35ab911f8bc" => :mojave
sha256 "5621509767a95332eff8e26f7fe80c6bce9c3c271fa8521e234263b3c3d67454" => :high_sierra
sha256 "7b40da5907be805067a7af87b5a5af2dac9e446478de06316a059fa9c4f9a9c0" => :sierra
sha256 "402fe7453b9ca2c4c4a3fbdb9557156819e26c959e18c096dcceab8b1b6ce9a5" => :el_capitan
end
def install
system "./configure", "--disable-dependency-tracking",
"--prefix=#{prefix}"
system "make", "install"
pkgshare.install %w[Half HalfTest Iex IexMath IexTest IlmThread Imath ImathTest]
end
test do
cd pkgshare/"IexTest" do
system ENV.cxx, "-I#{include}/OpenEXR", "-I./", "-c",
"testBaseExc.cpp", "-o", testpath/"test"
end
end
end
| 38.275862 | 93 | 0.728829 |
d598fd02b762c8ba205c0b614c2813e7edc5281a | 3,248 | # frozen_string_literal: true
require 'test_helper'
# Tests for Krane::StatsD::MeasureMethods, which wraps methods so that
# their execution time is emitted as a StatsD duration metric.
class StatsDTest < Krane::TestCase
  include StatsD::Instrument::Assertions

  # Harness class: instruments several methods and supplies statsd_tags.
  class TestMeasureClass
    extend(Krane::StatsD::MeasureMethods)

    def thing_to_measure
      123
    end
    measure_method :thing_to_measure

    def measure_with_custom_metric; end
    # Second argument overrides the default "<method>.duration" metric name.
    measure_method :measure_with_custom_metric, "customized"

    def measured_method_raises
      raise ArgumentError
    end
    measure_method :measured_method_raises

    # Tags attached to every metric emitted by instances of this class.
    def statsd_tags
      { test: true }
    end
  end

  # Harness class without a statsd_tags method, to exercise the untagged path.
  class TestMeasureNoTags
    extend(Krane::StatsD::MeasureMethods)
    def thing_to_measure; end
    measure_method :thing_to_measure
  end

  def test_measuring_non_existent_method_raises
    assert_raises_message(NotImplementedError, "Cannot instrument undefined method bogus_method") do
      TestMeasureClass.measure_method(:bogus_method)
    end
  end

  # Instrumentation must be transparent to callers.
  def test_measure_method_does_not_change_the_return_value
    assert_equal(123, TestMeasureClass.new.thing_to_measure)
  end

  def test_measure_method_uses_expected_name_and_tags
    metrics = capture_statsd_calls(client: Krane::StatsD.client) do
      TestMeasureClass.new.thing_to_measure
    end
    assert_predicate(metrics, :one?, "Expected 1 metric, got #{metrics.length}")
    assert_equal("Krane.thing_to_measure.duration", metrics.first.name)
    assert_equal(["test:true"], metrics.first.tags)
  end

  def test_measure_method_with_custom_metric_name
    metrics = capture_statsd_calls(client: Krane::StatsD.client) do
      TestMeasureClass.new.measure_with_custom_metric
    end
    assert_predicate(metrics, :one?, "Expected 1 metric, got #{metrics.length}")
    assert_equal("Krane.customized", metrics.first.name)
    assert_equal(["test:true"], metrics.first.tags)
  end

  def test_measure_method_with_statsd_tags_undefined
    metrics = capture_statsd_calls(client: Krane::StatsD.client) do
      TestMeasureNoTags.new.thing_to_measure
    end
    assert_predicate(metrics, :one?, "Expected 1 metric, got #{metrics.length}")
    assert_equal("Krane.thing_to_measure.duration", metrics.first.name)
    assert_nil(metrics.first.tags)
  end

  # An exception inside the measured method must still emit the metric,
  # with an extra "error:true" tag, and re-raise.
  def test_measure_method_that_raises_with_hash_tags
    metrics = capture_statsd_calls(client: Krane::StatsD.client) do
      tester = TestMeasureClass.new
      tester.expects(:statsd_tags).returns(test: true)
      assert_raises(ArgumentError) do
        tester.measured_method_raises
      end
    end
    assert_predicate(metrics, :one?, "Expected 1 metric, got #{metrics.length}")
    assert_equal("Krane.measured_method_raises.duration", metrics.first.name)
    assert_equal(["test:true", "error:true"], metrics.first.tags)
  end

  # Same as above, but statsd_tags returns an array instead of a hash.
  def test_measure_method_that_raises_with_array_tags
    metrics = capture_statsd_calls(client: Krane::StatsD.client) do
      tester = TestMeasureClass.new
      tester.expects(:statsd_tags).returns(["test:true"])
      assert_raises(ArgumentError) do
        tester.measured_method_raises
      end
    end
    assert_predicate(metrics, :one?, "Expected 1 metric, got #{metrics.length}")
    assert_equal("Krane.measured_method_raises.duration", metrics.first.name)
    assert_equal(["test:true", "error:true"], metrics.first.tags)
  end
end
| 33.142857 | 100 | 0.754618 |
e228c9f3928656ff296259ccdcbb7ca31484dea3 | 6,915 | # lchg.rb - line changes detection library
#
# Copyright (C) 2010 Tanaka Akira <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'tempfile'
require 'escape'
module Lchg
  # Yields each "changed" line of +enum+ together with up to +context+
  # unchanged neighbour lines, mimicking diff's context output.
  #
  # enum    - enumerable of elements (here: encoded line records)
  # scanner - optional callable invoked with every element for side effects
  # pred    - callable; a truthy return marks the element as "changed"
  # context - number of unchanged elements kept around each changed one
  #
  # The block receives |index, pred_result, element|.
  def Lchg.each_context(enum, scanner, pred, context=3, &block)
    buf = []
    after_match = false
    i = 0
    enum.each {|elt|
      scanner.call(elt) if scanner
      not_exist = pred.call(elt)
      if not_exist
        # Flush pending leading context, then emit the changed element.
        flush_context_buffer(buf, i, &block)
        block.call(i, not_exist, elt)
        after_match = true
      elsif after_match
        if buf.length == context
          # Trailing context after a match is complete; emit it.
          flush_context_buffer(buf, i, &block)
          after_match = false
        end
        buf << [not_exist, elt]
      else
        # Keep only the most recent +context+ elements as leading context.
        buf.shift if buf.length == context
        buf << [not_exist, elt]
      end
      i += 1
    }
    # Emit any trailing context left over after the final match.
    flush_context_buffer(buf, i, &block) if after_match
  end

  # Helper for each_context: yields every buffered [pred_result, element]
  # pair with its absolute index, then empties the buffer. +upto+ is the
  # index just past the last buffered element.
  def Lchg.flush_context_buffer(buf, upto, &block)
    base = upto - buf.length
    buf.each_with_index {|(t, e), j|
      block.call(base + j, t, e)
    }
    buf.clear
  end

  # Encodes a (line number, line text) pair as "NUM HEX" so arbitrary line
  # content survives being piped through sort(1).
  def Lchg.encode_pair(num, str)
    hex = str.unpack("H*")[0]
    return "#{num} #{hex}"
  end

  # Inverse of encode_pair. Returns nil for nil input (end of stream).
  def Lchg.decode_pair(line)
    return nil if line == nil
    num, hex = line.chomp.split(/ /)
    return [num.to_i, [hex].pack("H*")]
  end

  # Writes each line of the file at +path+ to a tempfile as an encoded
  # "NUM HEX" record; the caller is responsible for closing the tempfile.
  def Lchg.encode_lines(path)
    tf = Tempfile.open("lchg-a")
    File.open(path) {|f|
      f.each_with_index {|line, i|
        line.chomp!
        tf.puts encode_pair(i, line)
      }
    }
    tf.flush
    tf
  end

  # Sorts an encoded file by content (the second, hex field) so identical
  # lines from two files become adjacent when merged.
  def Lchg.sort_by_content(path)
    tf = Tempfile.open("lchg-b")
    command = ["sort", "-k", "2", path]
    IO.popen("#{Escape.shell_command command}") {|f|
      while line = f.gets
        tf.puts line
      end
    }
    tf.flush
    tf
  end

  # Iterates two content-sorted encoded files in lockstep and yields runs of
  # records sharing the same content: |buf1, buf2| where buf1 holds the
  # [num, content] pairs from tf1 and buf2 those from tf2.
  def Lchg.sync_each(tf1, tf2)
    numline1 = decode_pair(tf1.gets)
    numline2 = decode_pair(tf2.gets)
    prev = nil
    buf1 = []
    buf2 = []
    while numline1 || numline2
      # Always advance the side with the lexicographically smaller content.
      if numline2 == nil || (numline1 != nil && numline1[1] <= numline2[1])
        if !prev || prev == numline1[1]
          prev = numline1[1]
          buf1 << numline1
        else
          # Content changed: emit the finished run and start a new one.
          yield buf1, buf2
          prev = numline1[1]
          buf1 = [numline1]
          buf2 = []
        end
        numline1 = decode_pair(tf1.gets)
      else
        if !prev || prev == numline2[1]
          prev = numline2[1]
          buf2 << numline2
        else
          yield buf1, buf2
          prev = numline2[1]
          buf1 = []
          buf2 = [numline2]
        end
        numline2 = decode_pair(tf2.gets)
      end
    end
    if !buf1.empty? || !buf2.empty?
      yield buf1, buf2
    end
  end

  # Marks every record of two content-sorted files as deleted ("-"), added
  # ("+") or common (" "), writing the marked records to two new tempfiles.
  # Returns [number_of_deletions, number_of_additions, marked1, marked2].
  def Lchg.add_mark(src1, src2)
    src1.rewind
    src2.rewind
    dst1 = Tempfile.open("lchg-c")
    dst2 = Tempfile.open("lchg-c")
    numdel = 0
    numadd = 0
    sync_each(src1, src2) {|buf1, buf2|
      if buf1.empty?
        # Content only present in the second file: additions.
        buf2.each {|num, line|
          numadd += 1
          dst2.puts encode_pair(num, "+"+line)
        }
      elsif buf2.empty?
        # Content only present in the first file: deletions.
        buf1.each {|num, line|
          numdel += 1
          dst1.puts encode_pair(num, "-"+line)
        }
      else
        # Content present in both files: unchanged on both sides.
        buf1.each {|num, line|
          dst1.puts encode_pair(num, " "+line)
        }
        buf2.each {|num, line|
          dst2.puts encode_pair(num, " "+line)
        }
      end
    }
    dst1.flush
    dst2.flush
    [numdel, numadd, dst1, dst2]
  end

  # Restores original file order by sorting the marked records numerically
  # on their leading line number.
  def Lchg.sort_by_linenum(path)
    tf = Tempfile.open("lchg-d")
    command = ["sort", "-n", path]
    IO.popen("#{Escape.shell_command command}") {|f|
      while line = f.gets
        tf.puts line
      end
    }
    tf.flush
    tf
  end

  # Prints the changed lines of a marked, line-ordered file to +out+ with
  # surrounding context, separated by "@@ N @@" markers at gaps.
  def Lchg.output_changes(header, tf, out, scanner=nil)
    out.puts '==================================================================='
    out.puts header
    tf.rewind
    last = -1
    if scanner
      # Adapt the user scanner to decoded (1-based line, mark, text) form.
      scanner2 = lambda {|line|
        linenumz, line = decode_pair(line)
        /\A./ =~ line
        scanner.call(linenumz+1, $&, $')
      }
    end
    # " 20" is the encoded unchanged marker (hex 20 == the " " mark), so the
    # predicate flags every added/removed record as "changed".
    each_context(tf, scanner2, lambda {|line| / 20/ !~ line }) {|i, t, line|
      num, str = decode_pair(line)
      out.puts "@@ #{i} @@" if last + 1 != num
      out.puts str
      last = num
    }
  end

  # Computes a line-change report between path1 and path2, writing it to
  # +out+. Returns true when any additions or deletions were found.
  def Lchg.diff(path1, path2, out, header1="--- #{path1}\n", header2="+++ #{path2}\n", scanner=nil)
    tf1a = encode_lines(path1)
    tf1b = sort_by_content(tf1a.path)
    tf1a.close(true)
    tf2a = encode_lines(path2)
    tf2b = sort_by_content(tf2a.path)
    tf2a.close(true)
    numdel, numadd, tf1c, tf2c = add_mark(tf1b, tf2b)
    tf1b.close(true)
    tf2b.close(true)
    tf1d = sort_by_linenum(tf1c.path)
    tf1c.close(true)
    tf2d = sort_by_linenum(tf2c.path)
    tf2c.close(true)
    if numadd != 0
      new_scanner = lambda {|linenum, mark, line| scanner.call(:new, linenum, mark, line) } if scanner
      Lchg.output_changes(header2, tf2d, out, new_scanner)
    end
    out.puts if numdel != 0 && numadd != 0
    if numdel != 0
      old_scanner = lambda {|linenum, mark, line| scanner.call(:old, linenum, mark, line) } if scanner
      Lchg.output_changes(header1, tf1d, out, old_scanner)
    end
    numadd != 0 || numdel != 0
  end
end
| 28.109756 | 102 | 0.58496 |
ffb14c159436c8712256a71d70c48b6b3af94868 | 409 | require 'spec_helper'
describe AgreementsController do
describe "#show" do
describe "response" do
before { get :show }
subject { response }
it { should be_success }
it { should render_template 'show' }
end
describe "route" do
subject { { :get => "/agreement" } }
it { should route_to(:controller => "agreements", :action => "show") }
end
end
end
| 17.782609 | 76 | 0.599022 |
4a9e227eb6018e689b5e95620d8a26b28cdaf51b | 11,430 | require 'test_helper'
class UsersControllerTest < ActionController::TestCase
# Be sure to include AuthenticatedTestHelper in test/test_helper.rb instead
# Then, you can remove it from this and the units test.
include AuthenticatedTestHelper
fixtures :all
# Signup page should carry the expected title.
def test_title
  get :new
  assert_select 'title', text: 'Signup', count: 1
end

# A user that never completed a profile can cancel registration, which
# removes the account and logs them out.
test 'cancel registration' do
  user = Factory :brand_new_user
  refute user.person
  login_as user
  assert_equal user.id, session[:user_id]
  assert_difference('User.count', -1) do
    post :cancel_registration
  end
  assert_redirected_to :root
  assert_nil session[:user_id]
end

# whoami without a session is a JSON 404.
test 'whoami_no_login' do
  get :whoami, format: :json
  assert_response :not_found
end

# whoami with a session redirects to the current person's page.
test 'whoami_login' do
  person = Factory :person
  login_as person.user
  get :whoami, format: :json
  assert_response :redirect
  assert_redirected_to person_path(person)
end

# Cancelling registration must be a no-op for users that already have a
# profile; they stay logged in.
test 'cancel registration doesnt destroy user with profile' do
  person = Factory :person
  login_as person.user
  assert_equal person.user.id, session[:user_id]
  assert_no_difference('User.count') do
    post :cancel_registration
  end
  assert_redirected_to :root
  assert_equal person.user.id, session[:user_id]
end

# The "activation required" info page is publicly reachable.
def test_activation_required_link
  get :activation_required
  assert_response :success
end
# Destroying a user account is an admin-only operation.
test 'should destroy only by admin' do
  user_without_profile = Factory :brand_new_user
  user = Factory :user
  login_as user
  # A regular user cannot delete another account.
  assert_difference('User.count', 0) do
    delete :destroy, params: { id: user_without_profile }
  end
  logout
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  assert_difference('User.count', -1) do
    delete :destroy, params: { id: user_without_profile }
  end
end

# Even admins cannot delete a user that has an associated profile.
test 'should not destroy user with profile' do
  person = Factory :person
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  assert_no_difference('User.count') do
    delete :destroy, params: { id: person.user }
  end
end

# Resending the activation email is admin-only; a successful resend both
# enqueues a mail and records a MessageLog entry.
test 'resend activation email only by admin' do
  person = Factory(:person)
  user = Factory :brand_new_user, person: person
  assert !user.active?
  login_as Factory(:user)
  # Non-admin attempt: nothing sent, nothing logged, error flashed.
  assert_enqueued_emails(0) do
    assert_no_difference('MessageLog.count') do
      post :resend_activation_email, params: { id: user }
    end
  end
  assert_empty person.activation_email_logs
  assert_not_nil flash[:error]
  flash.clear
  logout
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  assert_enqueued_emails(1) do
    assert_difference('MessageLog.count') do
      post :resend_activation_email, params: { id: user }
    end
  end
  assert_nil flash[:error]
  assert_equal 1,person.activation_email_logs.count
end
# bulk_destroy is admin-only; anonymous requests are redirected with an error.
test 'only admin can bulk_destroy' do
  user1 = Factory :user
  user2 = Factory :user
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  assert_difference('User.count', -1) do
    post :bulk_destroy, params: { ids: [user1.id] }
  end
  logout
  # Logged out: the second bulk destroy must not remove anything.
  assert_difference('User.count', 0) do
    post :bulk_destroy, params: { ids: [user2.id] }
  end
  assert_redirected_to :root
  assert_not_nil flash[:error]
end

# Bulk destroy removes the users and their dependent records.
test 'bulk destroy' do
  user1 = Factory :user
  user2 = Factory :user
  Factory :favourite_group, user: user1
  Factory :favourite_group, user: user2
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  # destroy also dependencies
  assert_difference('User.count', -2) do
    assert_difference('FavouriteGroup.count', -2) do
      post :bulk_destroy, params: { ids: [user1.id, user2.id] }
    end
  end
end

# Only the ids passed in params are destroyed; other users are untouched.
test 'bulk destroy only ids in params' do
  user1 = Factory :user
  user2 = Factory :user
  Factory :favourite_group, user: user1
  Factory :favourite_group, user: user2
  admin = Factory(:user, person_id: Factory(:admin).id)
  login_as admin
  # destroy also dependencies
  assert_difference('User.count', -1) do
    assert_difference('FavouriteGroup.count', -1) do
      post :bulk_destroy, params: { ids: [user1.id] }
    end
  end
end
# The signup page shows a system message only when no users exist yet
# (i.e. the very first registration becomes the admin).
def test_system_message_on_signup_no_users
  get :new
  assert_response :success
  assert_select 'div.alert', count: 0
  User.destroy_all
  get :new
  assert_response :success
  assert_select 'div.alert', count: 1
end

# A valid signup creates a user and redirects.
def test_should_allow_signup
  assert_difference 'User.count' do
    create_user
    assert_response :redirect
  end
end
# Signing up without a login must fail and record a validation error.
def test_should_require_login_on_signup
  assert_no_difference 'User.count' do
    create_user(login: nil)
    # errors[:login] is an array and arrays are truthy even when empty, so
    # a bare `assert` on it could never fail; assert on its contents.
    assert assigns(:user).errors[:login].any?
  end
end
# Signing up without a password must fail and record a validation error.
def test_should_require_password_on_signup
  assert_no_difference 'User.count' do
    create_user(password: nil)
    # errors[:password] is an array and arrays are truthy even when empty,
    # so a bare `assert` on it could never fail; assert on its contents.
    assert assigns(:user).errors[:password].any?
  end
end
# Signing up without a password confirmation must fail with a validation error.
def test_should_require_password_confirmation_on_signup
  assert_no_difference 'User.count' do
    create_user(password_confirmation: nil)
    # errors[:password_confirmation] is an array and arrays are truthy even
    # when empty, so a bare `assert` could never fail; check its contents.
    assert assigns(:user).errors[:password_confirmation].any?
  end
end
# Visiting the activation link activates the account and clears any
# pending activation-email log entries.
test 'should activate user' do
  user = Factory(:not_activated_person).user
  refute user.active?
  # Record some activation-email logs that activation should clean up.
  MessageLog.log_activation_email(user.person)
  MessageLog.log_activation_email(user.person)
  assert_equal 2, MessageLog.activation_email_logs(user.person).count
  assert_difference('MessageLog.count',-2) do
    get :activate, params: { activation_code: user.activation_code }
  end
  assert_empty MessageLog.activation_email_logs(user.person)
  assert_redirected_to person_path(user.person)
  refute_nil flash[:notice]
  assert User.find(user.id).active?
end

# Activation without a code must not succeed (no notice shown).
def test_should_not_activate_user_without_key
  get :activate
  assert_nil flash[:notice]
end

# Activation with a blank code must not succeed either.
def test_should_not_activate_user_with_blank_key
  get :activate, params: { activation_code: '' }
  assert_nil flash[:notice]
end
def test_can_edit_self
login_as :quentin
get :edit, params: { id: users(:quentin) }
assert_response :success
# TODO: is there a better way to test the layout used?
assert_select '#navbar' # check its using the right layout
end
def test_cant_edit_some_else
login_as :quentin
get :edit, params: { id: users(:aaron) }
assert_redirected_to root_url
end
def test_associated_with_person
u = Factory(:brand_new_user)
login_as u
assert_nil u.person
p = Factory(:brand_new_person)
post :update, params: { id: u.id, user: { id: u.id, person_id: p.id, email: p.email } }
assert_nil flash[:error]
assert_equal p, User.find(u.id).person
end
def test_update_password
login_as :quentin
u = users(:quentin)
pwd = 'b' * User::MIN_PASSWORD_LENGTH
post :update, params: { id: u.id, user: { id: u.id, password: pwd, password_confirmation: pwd } }
assert_nil flash[:error]
assert User.authenticate('quentin', pwd)
end
test 'reset code cleared after updating password' do
user = Factory(:user)
user.reset_password
user.save!
login_as(user)
pwd = 'a' * User::MIN_PASSWORD_LENGTH
post :update, params: { id: user.id, user: { id: user.id, password: pwd, password_confirmation: pwd } }
user.reload
assert_nil user.reset_password_code
assert_nil user.reset_password_code_until
end
test 'admin can impersonate' do
login_as :quentin
assert User.current_user, users(:quentin)
get :impersonate, params: { id: users(:aaron) }
assert_redirected_to root_path
assert User.current_user, users(:aaron)
end
test 'admin redirected back impersonating non-existent user' do
login_as :quentin
assert User.current_user, users(:quentin)
get :impersonate, params: { id: (User.last.id + 1) }
assert_redirected_to admin_path
assert User.current_user, users(:quentin)
assert flash[:error]
end
test 'non admin cannot impersonate' do
login_as :aaron
assert User.current_user, users(:aaron)
get :impersonate, params: { id: users(:quentin) }
assert flash[:error]
assert User.current_user, users(:aaron)
end
test 'should handle no current_user when edit user' do
logout
get :edit, params: { id: users(:aaron), user: {} }
assert_redirected_to :root
assert_not_nil flash[:error]
end
test 'reset password with valid code' do
user = Factory(:user)
user.reset_password
user.save!
refute_nil(user.reset_password_code)
refute_nil(user.reset_password_code_until)
get :reset_password, params: { reset_code: user.reset_password_code }
assert_redirected_to edit_user_path(user)
assert_equal 'You can change your password here', flash[:notice]
assert_nil flash[:error]
end
test 'reset password with invalid code' do
get :reset_password, params: { reset_code: 'xxx' }
assert_redirected_to root_path
assert_nil flash[:notice]
refute_nil flash[:error]
assert_equal 'Invalid password reset code', flash[:error]
end
test 'reset password with no code' do
get :reset_password
assert_redirected_to root_path
assert_nil flash[:notice]
refute_nil flash[:error]
assert_equal 'Invalid password reset code', flash[:error]
end
test 'reset password with expired code' do
user = Factory(:user)
user.reset_password
user.reset_password_code_until = 5.days.ago
user.save!
get :reset_password, params: { reset_code: user.reset_password_code }
assert_redirected_to root_path
assert_nil flash[:notice]
refute_nil flash[:error]
assert_equal 'Your password reset code has expired', flash[:error]
end
test 'terms and conditions checkbox' do
with_config_value :terms_enabled,true do
assert User.any?
get :new
assert_response :success
assert_select "form.new_user input#tc_agree[type=checkbox]", count:1
assert_select "form.new_user input.btn[type=submit][disabled]", count:1
assert_select "form.new_user input.btn[type=submit]:not([disabled])", count:0
end
end
# First user is the admin user that sets it up, so no T & C's to agree to
test 'no terms and conditions checkbox for first user' do
with_config_value :terms_enabled,true do
User.destroy_all
refute User.any?
get :new
assert_response :success
assert_select "form.new_user input#tc_agree[type=checkbox]", count:0
assert_select "form.new_user input.btn[type=submit][disabled]", count:0
assert_select "form.new_user input.btn[type=submit]:not([disabled])", count:1
end
end
test "no terms and conditions if disabled" do
with_config_value :terms_enabled,false do
assert User.any?
get :new
assert_response :success
assert_select "form.new_user input#tc_agree[type=checkbox]", count:0
assert_select "form.new_user input.btn[type=submit][disabled]", count:0
assert_select "form.new_user input.btn[type=submit]:not([disabled])", count:1
end
end
protected
def create_user(options = {})
pwd = 'a' * User::MIN_PASSWORD_LENGTH
post :create, params: { user: { login: 'quire', email: '[email protected]',
password: pwd, password_confirmation: pwd }.merge(options), person: { first_name: 'fred' } }
end
end
| 28.718593 | 118 | 0.699913 |
26801e4ad68ee1d167731a9433bec9ac0522bc93 | 5,182 | class Ghostscript < Formula
desc "Interpreter for PostScript and PDF"
homepage "https://www.ghostscript.com/"
url "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/download/gs9540/ghostpdl-9.54.0.tar.gz"
sha256 "63e54cddcdf48ea296b6315353f86b8a622d4e46959b10d536297e006b85687b"
license "AGPL-3.0-or-later"
# We check the tags from the `head` repository because the GitHub tags are
# formatted ambiguously, like `gs9533` (corresponding to version 9.53.3).
livecheck do
url :head
regex(/^ghostpdl[._-]v?(\d+(?:\.\d+)+)$/i)
end
bottle do
sha256 arm64_big_sur: "5aa59b2287f35c9f8c0b20d8561c7f0b7c4217d5ba43a41ade1f2e31db1833e0"
sha256 big_sur: "cfe91b44577a206ac0ef1106c5c3681d6eef7559e176cefd3452621d5b5bf974"
sha256 catalina: "b821f9923f8579229634edaa454cb127836f1af97f724a8941ec76c12896b4cd"
sha256 mojave: "23048bf2ec8c47dfe9a58476c7927464ea732af1b0d4fc5787ebef04bfa4f76e"
end
head do
# Can't use shallow clone. Doing so = fatal errors.
url "https://git.ghostscript.com/ghostpdl.git"
depends_on "autoconf" => :build
depends_on "automake" => :build
depends_on "libtool" => :build
end
depends_on "pkg-config" => :build
depends_on "fontconfig"
depends_on "freetype"
depends_on "jbig2dec"
depends_on "jpeg"
depends_on "libidn"
depends_on "libpng"
depends_on "libtiff"
depends_on "little-cms2"
depends_on "openjpeg"
uses_from_macos "expat"
uses_from_macos "zlib"
on_macos do
patch :DATA # Uncomment macOS-specific make vars
end
on_linux do
depends_on "gcc"
end
fails_with gcc: "5"
# https://sourceforge.net/projects/gs-fonts/
resource "fonts" do
url "https://downloads.sourceforge.net/project/gs-fonts/gs-fonts/8.11%20%28base%2035%2C%20GPL%29/ghostscript-fonts-std-8.11.tar.gz"
sha256 "0eb6f356119f2e49b2563210852e17f57f9dcc5755f350a69a46a0d641a0c401"
end
def install
# Fix vendored tesseract build error: 'cstring' file not found
# Remove when possible to link to system tesseract
ENV.append_to_cflags "-stdlib=libc++" if ENV.compiler == :clang
# Fix VERSION file incorrectly included as C++20 <version> header
# Remove when possible to link to system tesseract
rm "tesseract/VERSION"
# Delete local vendored sources so build uses system dependencies
rm_rf "expat"
rm_rf "freetype"
rm_rf "jbig2dec"
rm_rf "jpeg"
rm_rf "lcms2mt"
rm_rf "libpng"
rm_rf "openjpeg"
rm_rf "tiff"
rm_rf "zlib"
args = %W[
--prefix=#{prefix}
--disable-compile-inits
--disable-cups
--disable-gtk
--with-system-libtiff
--without-x
]
if build.head?
system "./autogen.sh", *args
else
system "./configure", *args
end
# Install binaries and libraries
system "make", "install"
ENV.deparallelize { system "make", "install-so" }
(pkgshare/"fonts").install resource("fonts")
(man/"de").rmtree
end
test do
ps = test_fixtures("test.ps")
assert_match "Hello World!", shell_output("#{bin}/ps2ascii #{ps}")
end
end
__END__
diff --git i/base/unix-dll.mak w/base/unix-dll.mak
index f50c09c00adb..8855133b400c 100644
--- i/base/unix-dll.mak
+++ w/base/unix-dll.mak
@@ -89,18 +89,33 @@ GPDL_SONAME_MAJOR_MINOR=$(GPDL_SONAME_BASE)$(GS_SOEXT)$(SO_LIB_VERSION_SEPARATOR
# similar linkers it must containt the trailing "="
# LDFLAGS_SO=-shared -Wl,$(LD_SET_DT_SONAME)$(LDFLAGS_SO_PREFIX)$(GS_SONAME_MAJOR)
# MacOS X
-#GS_SOEXT=dylib
-#GS_SONAME=$(GS_SONAME_BASE).$(GS_SOEXT)
-#GS_SONAME_MAJOR=$(GS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_SOEXT)
-#GS_SONAME_MAJOR_MINOR=$(GS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_VERSION_MINOR).$(GS_SOEXT)
+GS_SOEXT=dylib
+GS_SONAME=$(GS_SONAME_BASE).$(GS_SOEXT)
+GS_SONAME_MAJOR=$(GS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_SOEXT)
+GS_SONAME_MAJOR_MINOR=$(GS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_VERSION_MINOR).$(GS_SOEXT)
#LDFLAGS_SO=-dynamiclib -flat_namespace
-#LDFLAGS_SO_MAC=-dynamiclib -install_name $(GS_SONAME_MAJOR_MINOR)
+GS_LDFLAGS_SO=-dynamiclib -install_name $(GS_SONAME_MAJOR_MINOR)
#LDFLAGS_SO=-dynamiclib -install_name $(FRAMEWORK_NAME)
+PCL_SONAME=$(PCL_SONAME_BASE).$(GS_SOEXT)
+PCL_SONAME_MAJOR=$(PCL_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_SOEXT)
+PCL_SONAME_MAJOR_MINOR=$(PCL_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_VERSION_MINOR).$(GS_SOEXT)
+PCL_LDFLAGS_SO=-dynamiclib -install_name $(PCL_SONAME_MAJOR_MINOR)
+
+XPS_SONAME=$(XPS_SONAME_BASE).$(GS_SOEXT)
+XPS_SONAME_MAJOR=$(XPS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_SOEXT)
+XPS_SONAME_MAJOR_MINOR=$(XPS_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_VERSION_MINOR).$(GS_SOEXT)
+XPS_LDFLAGS_SO=-dynamiclib -install_name $(XPS_SONAME_MAJOR_MINOR)
+
+GPDL_SONAME=$(GPDL_SONAME_BASE).$(GS_SOEXT)
+GPDL_SONAME_MAJOR=$(GPDL_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_SOEXT)
+GPDL_SONAME_MAJOR_MINOR=$(GPDL_SONAME_BASE).$(GS_VERSION_MAJOR).$(GS_VERSION_MINOR).$(GS_SOEXT)
+GPDL_LDFLAGS_SO=-dynamiclib -install_name $(GPDL_SONAME_MAJOR_MINOR)
+
GS_SO=$(BINDIR)/$(GS_SONAME)
GS_SO_MAJOR=$(BINDIR)/$(GS_SONAME_MAJOR)
GS_SO_MAJOR_MINOR=$(BINDIR)/$(GS_SONAME_MAJOR_MINOR)
PCL_SO=$(BINDIR)/$(PCL_SONAME)
| 33.649351 | 135 | 0.736395 |
f75b3cc715c26b0fb277953cb1ab0fb67d291bd0 | 313 | require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../shared/concat.rb', __FILE__)
require 'strscan'
describe "StringScanner#<<" do
it_behaves_like :strscan_concat, :<<
end
describe "StringScanner#<< when passed a Fixnum" do
it_behaves_like :strscan_concat_fixnum, :<<
end
| 26.083333 | 58 | 0.744409 |
f8deb9661107874a93d06dc7cc6cbab464c60a35 | 7,466 | require 'set'
require 'core_classes/as_set'
require 'core_classes/stdlib_classes'
module CoreClasses
extend AsSet
private
def self.ruby_222
Set.new(
[
ARGF.class,
ArgumentError,
Array,
BasicObject,
Bignum,
Binding,
Class,
Comparable,
Complex,
Data,
Dir,
EOFError,
Encoding,
Encoding::CompatibilityError,
Encoding::Converter,
Encoding::ConverterNotFoundError,
Encoding::InvalidByteSequenceError,
Encoding::UndefinedConversionError,
EncodingError,
Enumerable,
Enumerator,
Enumerator::Generator,
Enumerator::Lazy,
Enumerator::Yielder,
Errno,
Errno::E2BIG,
Errno::EACCES,
Errno::EADDRINUSE,
Errno::EADDRNOTAVAIL,
Errno::EADV,
Errno::EAFNOSUPPORT,
Errno::EAGAIN,
Errno::EALREADY,
Errno::EBADE,
Errno::EBADF,
Errno::EBADFD,
Errno::EBADMSG,
Errno::EBADR,
Errno::EBADRQC,
Errno::EBADSLT,
Errno::EBFONT,
Errno::EBUSY,
Errno::ECANCELED,
Errno::ECHILD,
Errno::ECHRNG,
Errno::ECOMM,
Errno::ECONNABORTED,
Errno::ECONNREFUSED,
Errno::ECONNRESET,
Errno::EDEADLK,
Errno::EDESTADDRREQ,
Errno::EDOM,
Errno::EDOTDOT,
Errno::EDQUOT,
Errno::EEXIST,
Errno::EFAULT,
Errno::EFBIG,
Errno::EHOSTDOWN,
Errno::EHOSTUNREACH,
Errno::EHWPOISON,
Errno::EIDRM,
Errno::EILSEQ,
Errno::EINPROGRESS,
Errno::EINTR,
Errno::EINVAL,
Errno::EIO,
Errno::EISCONN,
Errno::EISDIR,
Errno::EISNAM,
Errno::EKEYEXPIRED,
Errno::EKEYREJECTED,
Errno::EKEYREVOKED,
Errno::EL2HLT,
Errno::EL2NSYNC,
Errno::EL3HLT,
Errno::EL3RST,
Errno::ELIBACC,
Errno::ELIBBAD,
Errno::ELIBEXEC,
Errno::ELIBMAX,
Errno::ELIBSCN,
Errno::ELNRNG,
Errno::ELOOP,
Errno::EMEDIUMTYPE,
Errno::EMFILE,
Errno::EMLINK,
Errno::EMSGSIZE,
Errno::EMULTIHOP,
Errno::ENAMETOOLONG,
Errno::ENAVAIL,
Errno::ENETDOWN,
Errno::ENETRESET,
Errno::ENETUNREACH,
Errno::ENFILE,
Errno::ENOANO,
Errno::ENOBUFS,
Errno::ENOCSI,
Errno::ENODATA,
Errno::ENODEV,
Errno::ENOENT,
Errno::ENOEXEC,
Errno::ENOKEY,
Errno::ENOLCK,
Errno::ENOLINK,
Errno::ENOMEDIUM,
Errno::ENOMEM,
Errno::ENOMSG,
Errno::ENONET,
Errno::ENOPKG,
Errno::ENOPROTOOPT,
Errno::ENOSPC,
Errno::ENOSR,
Errno::ENOSTR,
Errno::ENOSYS,
Errno::ENOTBLK,
Errno::ENOTCONN,
Errno::ENOTDIR,
Errno::ENOTEMPTY,
Errno::ENOTNAM,
Errno::ENOTRECOVERABLE,
Errno::ENOTSOCK,
Errno::ENOTTY,
Errno::ENOTUNIQ,
Errno::ENXIO,
Errno::EOPNOTSUPP,
Errno::EOVERFLOW,
Errno::EOWNERDEAD,
Errno::EPERM,
Errno::EPFNOSUPPORT,
Errno::EPIPE,
Errno::EPROTO,
Errno::EPROTONOSUPPORT,
Errno::EPROTOTYPE,
Errno::ERANGE,
Errno::EREMCHG,
Errno::EREMOTE,
Errno::EREMOTEIO,
Errno::ERESTART,
Errno::ERFKILL,
Errno::EROFS,
Errno::ESHUTDOWN,
Errno::ESOCKTNOSUPPORT,
Errno::ESPIPE,
Errno::ESRCH,
Errno::ESRMNT,
Errno::ESTALE,
Errno::ESTRPIPE,
Errno::ETIME,
Errno::ETIMEDOUT,
Errno::ETOOMANYREFS,
Errno::ETXTBSY,
Errno::EUCLEAN,
Errno::EUNATCH,
Errno::EUSERS,
Errno::EXDEV,
Errno::EXFULL,
Errno::NOERROR,
Exception,
FalseClass,
Fiber,
FiberError,
File,
File::Constants,
File::Stat,
FileTest,
Fixnum,
Float,
FloatDomainError,
GC,
GC::Profiler,
Gem,
Gem::BasicSpecification,
Gem::CommandLineError,
Gem::ConflictError,
Gem::Dependency,
Gem::DependencyError,
Gem::DependencyRemovalException,
Gem::DependencyResolutionError,
Gem::Deprecate,
Gem::DocumentError,
Gem::EndOfYAMLException,
Gem::ErrorReason,
Gem::Exception,
Gem::FilePermissionError,
Gem::FormatException,
Gem::GemNotFoundException,
Gem::GemNotInHomeException,
Gem::ImpossibleDependenciesError,
Gem::InstallError,
Gem::InvalidSpecificationException,
Gem::LoadError,
Gem::OperationNotSupportedError,
Gem::PathSupport,
Gem::Platform,
Gem::PlatformMismatch,
Gem::RemoteError,
Gem::RemoteInstallationCancelled,
Gem::RemoteInstallationSkipped,
Gem::RemoteSourceException,
Gem::Requirement,
Gem::Requirement::BadRequirementError,
Gem::RubyVersionMismatch,
Gem::SourceFetchProblem,
Gem::SpecificGemNotFoundException,
Gem::Specification,
Gem::StringSink,
Gem::StringSource,
Gem::StubSpecification,
Gem::StubSpecification::StubLine,
Gem::SystemExitException,
Gem::UnsatisfiableDependencyError,
Gem::VerificationError,
Gem::Version,
Hash,
IO,
IO::EAGAINWaitReadable,
IO::EAGAINWaitWritable,
IO::EINPROGRESSWaitReadable,
IO::EINPROGRESSWaitWritable,
IO::WaitReadable,
IO::WaitWritable,
IOError,
IndexError,
Integer,
Interrupt,
Kernel,
KeyError,
LoadError,
LocalJumpError,
Marshal,
MatchData,
Math,
Math::DomainError,
Method,
Module,
Monitor,
MonitorMixin,
MonitorMixin::ConditionVariable,
MonitorMixin::ConditionVariable::Timeout,
Mutex,
NameError,
NilClass,
NoMemoryError,
NoMethodError,
NotImplementedError,
Numeric,
Object,
ObjectSpace,
ObjectSpace::WeakMap,
Proc,
Process,
Process::GID,
Process::Status,
Process::Sys,
Process::Tms,
Process::UID,
Process::Waiter,
Random,
Range,
RangeError,
Rational,
RbConfig,
Regexp,
RegexpError,
RubyVM,
RubyVM::Env,
RubyVM::InstructionSequence,
RuntimeError,
ScriptError,
SecurityError,
Signal,
SignalException,
StandardError,
StopIteration,
String,
Struct,
Symbol,
SyntaxError,
SystemCallError,
SystemExit,
SystemStackError,
Thread,
Thread::Backtrace,
Thread::Backtrace::Location,
Thread::ConditionVariable,
Thread::Queue,
Thread::SizedQueue,
ThreadError,
ThreadGroup,
Time,
TracePoint,
TrueClass,
TypeError,
UnboundMethod,
UncaughtThrowError,
ZeroDivisionError
]
)
end
end
| 23.77707 | 49 | 0.538039 |
7a6fdeae6fb6e3885164bac17efd764c8db804ad | 1,902 | class Harfbuzz < Formula
desc "OpenType text shaping engine"
homepage "https://github.com/harfbuzz/harfbuzz"
url "https://github.com/harfbuzz/harfbuzz/releases/download/2.6.7/harfbuzz-2.6.7.tar.xz"
sha256 "49e481d06cdff97bf68d99fa26bdf785331f411614485d892ea4c78eb479b218"
bottle do
cellar :any
sha256 "744c939d61d7fd5e1512a672396c12817318489898585802664ce8dcbc742ff9" => :catalina
sha256 "987af143e178686a03141215e5023cf2186fc898802afea9932f9bca396e411a" => :mojave
sha256 "3a2eb5612dcb5e1e336942da2ea1ff16c70e78bda1d91e3cf5162bd1a89a3e09" => :high_sierra
end
head do
url "https://github.com/behdad/harfbuzz.git"
depends_on "autoconf" => :build
depends_on "automake" => :build
depends_on "libtool" => :build
depends_on "ragel" => :build
end
depends_on "gobject-introspection" => :build
depends_on "pkg-config" => :build
depends_on "cairo"
depends_on "freetype"
depends_on "glib"
depends_on "graphite2"
depends_on "icu4c"
resource "ttf" do
url "https://github.com/behdad/harfbuzz/raw/fc0daafab0336b847ac14682e581a8838f36a0bf/test/shaping/fonts/sha1sum/270b89df543a7e48e206a2d830c0e10e5265c630.ttf"
sha256 "9535d35dab9e002963eef56757c46881f6b3d3b27db24eefcc80929781856c77"
end
def install
args = %W[
--disable-dependency-tracking
--prefix=#{prefix}
--enable-introspection=yes
--enable-static
--with-cairo=yes
--with-coretext=yes
--with-freetype=yes
--with-glib=yes
--with-gobject=yes
--with-graphite2=yes
--with-icu=yes
]
system "./autogen.sh" if build.head?
system "./configure", *args
system "make", "install"
end
test do
resource("ttf").stage do
shape = `echo 'സ്റ്റ്' | #{bin}/hb-shape 270b89df543a7e48e206a2d830c0e10e5265c630.ttf`.chomp
assert_equal "[glyph201=0+1183|U0D4D=0+0]", shape
end
end
end
| 30.190476 | 161 | 0.715563 |
d584f2d38a94c1fd83537c802da5efdedbe2f7c4 | 20,204 | # Test for acts_as_category
#
# There are several ways to execute this test:
#
# 1. Open this file on a Mac in TextMate and press APPLE + R
# 2. Go to "vendor/plugins/acts_as_category/test" and run "rake test" in a terminal window
# 3. Run "rake test:plugins" in a terminal window to execute tests of all plugins
#
# For further information see http://blog.funkensturm.de/downloads
require 'test/unit'
require 'rubygems'
require 'active_record'
require 'action_view'
ActiveRecord::Base.establish_connection(:adapter => "sqlite3", :dbfile => ":memory:")
$stdout = StringIO.new # Prevent ActiveRecord's annoying schema statements
def setup_db
ActiveRecord::Base.logger
ActiveRecord::Schema.define(:version => 1) do
create_table "categories", :force => true do |t|
t.integer "my_parent_id"
t.integer "my_position"
t.boolean "my_hidden"
t.integer "my_children_count"
t.integer "my_ancestors_count"
t.integer "my_descendants_count"
end
end
end
def teardown_db
ActiveRecord::Base.connection.tables.each do |table|
ActiveRecord::Base.connection.drop_table(table)
end
end
setup_db # Because the plugin needs an existing table before initialization (e.g. for attr_readonly)
$:.unshift File.dirname(__FILE__) + '/../lib' # make "lib" known to "require"
require 'active_record/acts/category'
require 'active_record/acts/category_content'
require 'acts_as_category_helper'
require File.dirname(__FILE__) + '/../init' # Initialize Plugin
class Category < ActiveRecord::Base
acts_as_category :foreign_key => 'my_parent_id',
:position => 'my_position',
:hidden => 'my_hidden',
:children_count => 'my_children_count',
:ancestors_count => 'my_ancestors_count',
:descendants_count => 'my_descendants_count'
end
teardown_db # Because CategoryTest's setup method won't execute setup_db otherwise
class CategoryTest < Test::Unit::TestCase
# Test category trees:
#
# r1 r2 r3
# \_ r11 \_ r21
# \_ r111 \ \_ r211
# \_ r22
# \_ r221
def setup
setup_db
assert @r1 = Category.create! # id 1
assert @r2 = Category.create! # id 2
assert @r3 = Category.create! # id 3
assert @r11 = Category.create!(:my_parent_id => @r1.id) # id 4
assert @r21 = Category.create!(:my_parent_id => @r2.id) # id 5
assert @r22 = Category.create!(:my_parent_id => @r2.id) # id 6
assert @r111 = Category.create!(:my_parent_id => @r11.id) # id 7
assert @r211 = Category.create!(:my_parent_id => @r21.id) # id 8
assert @r221 = Category.create!(:my_parent_id => @r22.id) # id 9
assert @r1 = Category.find(1)
assert @r2 = Category.find(2)
assert @r3 = Category.find(3)
assert @r11 = Category.find(4)
assert @r21 = Category.find(5)
assert @r22 = Category.find(6)
assert @r111 = Category.find(7)
assert @r211 = Category.find(8)
assert @r221 = Category.find(9)
Category.permissions.clear
end
def teardown
teardown_db
end
def check_cache # This is merely a method used by certain tests
Category.find(:all).each { |c|
# Note that "children_count" is a built-in Rails functionality and must not be tested here
assert_equal c.ancestors.size, c.my_ancestors_count
assert_equal c.descendants.size, c.my_descendants_count
}
end
def test_cache_columns
check_cache
end
def test_permissions_class_variable
Category.permissions = nil
assert_equal [], Category.permissions
Category.permissions = [nil]
assert_equal [], Category.permissions
Category.permissions = [0]
assert_equal [], Category.permissions
Category.permissions = 'string'
assert_equal [], Category.permissions
Category.permissions = [1]
assert_equal [1], Category.permissions
Category.permissions = [1,2,3]
assert_equal [1,2,3], Category.permissions
Category.permissions = [1,'string',3]
assert_equal [1,3], Category.permissions
Category.permissions.clear
assert_equal [], Category.permissions
end
def test_where_permitted_sql_query
assert_equal ' (my_hidden IS NULL OR my_hidden=0) ', Category.where_permitted
assert_equal ' AND (my_hidden IS NULL OR my_hidden=0) ', Category.where_permitted(true)
Category.permissions = [1,2,3]
assert_equal ' (my_hidden IS NULL OR my_hidden=0 OR id IN (1,2,3)) ', Category.where_permitted
assert_equal ' AND (my_hidden IS NULL OR my_hidden=0 OR id IN (1,2,3)) ', Category.where_permitted(true)
end
def test_attr_readonly
assert @r1.my_children_count = 99
assert @r1.my_ancestors_count = 99
assert @r1.my_descendants_count = 99
assert @r1.save
assert @r1 = Category.find(1)
assert_equal 1, @r1.my_children_count
# See http://github.com/funkensturm/acts_as_category/commit/e00904a06fd27e013424c55c105342aff20fc375
#assert_equal 0, @r1.my_ancestors_count
#assert_equal 2, @r1.my_descendants_count
assert @r1.update_attribute('my_children_count', 99)
assert @r1.update_attribute('my_ancestors_count', 99)
assert @r1.update_attribute('my_descendants_count', 99)
assert @r1 = Category.find(1)
assert_equal 1, @r1.my_children_count
#assert_equal 0, @r1.my_ancestors_count
#assert_equal 2, @r1.my_descendants_count
end
def test_permitted?
assert @r3.permitted?
assert @r3.update_attribute('my_hidden', true)
assert [email protected]?
Category.permissions = [@r3.id]
assert @r3.permitted?
Category.permissions.clear
assert [email protected]?
assert @r3.update_attribute('my_hidden', false)
assert @r3.permitted?
assert @r2.permitted?
assert @r21.permitted?
assert @r211.permitted?
assert @r211.update_attribute('my_hidden', true)
assert @r2.permitted?
assert @r21.permitted?
assert [email protected]?
Category.permissions = [@r211.id]
assert @r2.permitted?
assert @r21.permitted?
assert @r211.permitted?
Category.permissions.clear
Category.permissions = [99]
assert @r2.permitted?
assert @r21.permitted?
assert [email protected]?
assert @r211.update_attribute('my_hidden', false)
assert @r2.permitted?
assert @r21.permitted?
assert @r211.permitted?
assert @r21.update_attribute('my_hidden', true)
assert @r2.permitted?
assert [email protected]?
assert [email protected]_hidden
assert [email protected]?
Category.permissions = [@r21.id]
assert @r2.permitted?
assert @r21.permitted?
assert @r211.permitted?
Category.permissions.clear
assert @r2.update_attribute('my_hidden', true)
assert @r21.update_attribute('my_hidden', false)
assert [email protected]?
assert [email protected]?
assert [email protected]?
Category.permissions = [@r21.id, @r211.id]
assert [email protected]?
assert [email protected]?
assert [email protected]?
Category.permissions = [@r2.id]
assert @r2.permitted?
assert @r21.permitted?
assert @r211.permitted?
end
def test_children
assert_equal [@r11], @r1.children
assert_equal [@r21, @r22], @r2.children
assert_equal [], @r3.children
assert_equal [@r111], @r11.children
assert_equal [], @r111.children
assert_equal [@r211], @r21.children
assert_equal [@r221], @r22.children
assert_equal [], @r211.children
assert_equal [], @r221.children
end
def test_children_permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal [@r21, @r22], @r2.orig_children
assert_equal [@r11], @r1.children
assert_equal [@r21], @r2.children
assert_equal [], @r3.children
assert_equal [@r111], @r11.children
assert_equal [], @r111.children
assert_equal [@r211], @r21.children
assert_equal [], @r22.children
assert_equal [], @r211.children
assert_equal [], @r221.children
end
def test_children_ids
assert_equal [4], @r1.children_ids
assert_equal [5, 6], @r2.children_ids
assert_equal [], @r3.children_ids
assert_equal [7], @r11.children_ids
assert_equal [], @r111.children_ids
assert_equal [8], @r21.children_ids
assert_equal [9], @r22.children_ids
assert_equal [], @r211.children_ids
assert_equal [], @r221.children_ids
end
def test_children_ids_permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal [4], @r1.children_ids
assert_equal [5], @r2.children_ids
assert_equal [], @r3.children_ids
assert_equal [7], @r11.children_ids
assert_equal [], @r111.children_ids
assert_equal [8], @r21.children_ids
assert_equal [], @r22.children_ids
assert_equal [], @r211.children_ids
assert_equal [], @r221.children_ids
end
def test_children_size
assert_equal 1, @r1.children.size
assert_equal 2, @r2.children.size
assert_equal 0, @r3.children.size
assert_equal 1, @r11.children.size
assert_equal 0, @r111.children.size
assert_equal 1, @r21.children.size
assert_equal 1, @r22.children.size
assert_equal 0, @r211.children.size
assert_equal 0, @r221.children.size
assert @r111.update_attribute('my_hidden', true)
assert @r22.update_attribute('my_hidden', true)
assert_equal 1, @r1.children.size
assert_equal 1, @r2.children.size
assert_equal 0, @r3.children.size
assert_equal 0, @r11.children.size
assert_equal 0, @r111.children.size
assert_equal 1, @r21.children.size
assert_equal 0, @r22.children.size
assert_equal 0, @r211.children.size
assert_equal 0, @r221.children.size
end
def test_parent
assert_nil @r1.parent
assert_nil @r2.parent
assert_nil @r3.parent
assert_equal @r1, @r11.parent
assert_equal @r11, @r111.parent
assert_equal @r2, @r21.parent
assert_equal @r2, @r22.parent
assert_equal @r21, @r211.parent
assert_equal @r22, @r221.parent
end
def test_ancestors
assert_equal [], @r1.ancestors
assert_equal [], @r2.ancestors
assert_equal [], @r3.ancestors
assert_equal [@r1], @r11.ancestors
assert_equal [@r2], @r21.ancestors
assert_equal [@r2], @r22.ancestors
assert_equal [@r11, @r1], @r111.ancestors
assert_equal [@r21, @r2], @r211.ancestors
assert_equal [@r22, @r2], @r221.ancestors
end
def test_ancestors_ids
assert_equal [], @r1.ancestors_ids
assert_equal [], @r2.ancestors_ids
assert_equal [], @r3.ancestors_ids
assert_equal [1], @r11.ancestors_ids
assert_equal [2], @r21.ancestors_ids
assert_equal [2], @r22.ancestors_ids
assert_equal [4, 1], @r111.ancestors_ids
assert_equal [5, 2], @r211.ancestors_ids
assert_equal [6, 2], @r221.ancestors_ids
end
def test_descendants
assert_equal [@r11, @r111], @r1.descendants
assert_equal [@r21, @r211, @r22, @r221], @r2.descendants
assert_equal [], @r3.descendants
assert_equal [@r111], @r11.descendants
assert_equal [@r211], @r21.descendants
assert_equal [@r221], @r22.descendants
assert_equal [], @r111.descendants
assert_equal [], @r211.descendants
assert_equal [], @r221.descendants
end
def test_descendants_permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal [@r11, @r111], @r1.descendants
assert_equal [@r21, @r211], @r2.descendants
assert_equal [], @r3.descendants
assert_equal [@r111], @r11.descendants
assert_equal [], @r111.descendants
assert_equal [@r211], @r21.descendants
assert_equal [], @r22.descendants
assert_equal [], @r211.descendants
assert_equal [], @r221.descendants
end
def test_descendants_ids
assert_equal [4, 7], @r1.descendants_ids
assert_equal [5, 8, 6, 9], @r2.descendants_ids
assert_equal [], @r3.descendants_ids
assert_equal [7], @r11.descendants_ids
assert_equal [8], @r21.descendants_ids
assert_equal [9], @r22.descendants_ids
assert_equal [9], @r22.descendants_ids
assert_equal [9], @r22.descendants_ids
assert_equal [], @r111.descendants_ids
assert_equal [], @r211.descendants_ids
assert_equal [], @r221.descendants_ids
assert_equal [], @r221.descendants_ids
end
def test_descendants_ids_permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal [4, 7], @r1.descendants_ids
assert_equal [5, 8], @r2.descendants_ids
assert_equal [], @r3.descendants_ids
assert_equal [7], @r11.descendants_ids
assert_equal [], @r111.descendants_ids
assert_equal [8], @r21.descendants_ids
assert_equal [], @r22.descendants_ids
assert_equal [], @r211.descendants_ids
assert_equal [], @r221.descendants_ids
end
def test_root
assert_equal @r1, @r1.root
assert_equal @r1, @r11.root
assert_equal @r1, @r111.root
assert_equal @r2, @r21.root
assert_equal @r2, @r211.root
assert_equal @r2, @r22.root
assert_equal @r2, @r221.root
assert_equal @r3, @r3.root
end
def test_root_permissions # i. e. ignoring permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal @r2, @r2.root
assert_equal @r2, @r22.root
assert_equal @r2, @r221.root
end
def test_root?
assert @r1.root?
assert @r3.root?
assert [email protected]?
end
def test_root_permissions # i. e. ignoring permissions
assert @r3.update_attribute('my_hidden', true)
assert @r1.root?
assert @r3.root?
assert [email protected]?
end
def test_roots
assert_equal [@r1, @r2, @r3], Category.roots
assert_equal [@r1, @r2, @r3], Category.roots(true)
end
def test_roots_permissions
assert @r2.update_attribute('my_hidden', true)
assert_equal [@r1, @r3], Category.roots
end
def test_roots_permissions_override
assert @r2.update_attribute('my_hidden', true)
assert_equal [@r1, @r2, @r3], Category.roots(true)
end
def test_siblings
assert_equal [@r2, @r3], @r1.siblings
assert_equal [@r1, @r3], @r2.siblings
assert_equal [@r1, @r2], @r3.siblings
assert_equal [], @r11.siblings
assert_equal [@r22], @r21.siblings
assert_equal [@r21], @r22.siblings
assert_equal [], @r111.siblings
assert_equal [], @r211.siblings
assert_equal [], @r221.siblings
end
def test_siblings_permissions
assert @r2.update_attribute('my_hidden', true)
assert_equal [@r3], @r1.siblings
assert_equal [@r1, @r3], @r2.siblings
assert_equal [@r1], @r3.siblings
assert_equal [], @r11.siblings
assert_equal [], @r21.siblings
assert_equal [], @r22.siblings
assert_equal [], @r111.siblings
assert_equal [], @r211.siblings
assert_equal [], @r221.siblings
assert @r22.update_attribute('my_hidden', true)
assert_equal [], @r21.siblings
assert_equal [], @r22.siblings
assert_equal [], @r111.siblings
assert_equal [], @r211.siblings
assert_equal [], @r221.siblings
assert @r2.update_attribute('my_hidden', false)
assert_equal [], @r22.siblings
end
def test_self_and_siblings
assert_equal [@r1, @r2, @r3], @r1.self_and_siblings
assert_equal [@r1, @r2, @r3], @r2.self_and_siblings
assert_equal [@r1, @r2, @r3], @r3.self_and_siblings
assert_equal [@r11], @r11.self_and_siblings
assert_equal [@r21, @r22], @r21.self_and_siblings
assert_equal [@r21, @r22], @r22.self_and_siblings
assert_equal [@r111], @r111.self_and_siblings
assert_equal [@r211], @r211.self_and_siblings
assert_equal [@r221], @r221.self_and_siblings
end
def test_self_and_siblings_permissions
assert @r22.update_attribute('my_hidden', true)
assert_equal [@r1, @r2, @r3], @r1.self_and_siblings
assert_equal [@r1, @r2, @r3], @r2.self_and_siblings
assert_equal [@r1, @r2, @r3], @r3.self_and_siblings
assert_equal [@r11], @r11.self_and_siblings
assert_equal [@r21], @r21.self_and_siblings
assert_equal [@r21], @r22.self_and_siblings
assert_equal [@r111], @r111.self_and_siblings
assert_equal [@r211], @r211.self_and_siblings
assert_equal [], @r221.self_and_siblings
end
def test_dependent_destroy_and_cache
assert_equal 9, Category.count
assert @r1.destroy
assert_equal 6, Category.count
check_cache
assert @r211.destroy
assert_equal 5, Category.count
check_cache
assert @r21.destroy
assert_equal 4, Category.count
check_cache
assert @r22.destroy
assert_equal 2, Category.count
check_cache
assert @r2.destroy
assert @r3.destroy
assert_equal 0, Category.count
check_cache
end
def test_insert_and_cache
teardown_db
setup_db
assert @r1 = Category.create!
check_cache
assert @r2 = Category.create!
check_cache
Category.new().save
assert @r3 = Category.find(3)
check_cache
assert @r11 = Category.create!(:my_parent_id => @r1.id)
check_cache
Category.new(:my_parent_id => @r2.id).save
assert @r21 = Category.find(5)
check_cache
assert @r22 = Category.create!(:my_parent_id => @r2.id)
check_cache
Category.new(:my_parent_id => @r11.id).save
assert @r111 = Category.find(7)
check_cache
assert @r211 = Category.create!(:my_parent_id => @r21.id)
check_cache
assert @r221 = Category.create!(:my_parent_id => @r22.id)
check_cache
@r12 = @r1.children.create
check_cache
assert @r12
assert_equal @r12.parent, @r1
assert @r1 = Category.find(1)
assert_equal 2, @r1.children.size
assert @r1.children.include?(@r12)
assert @r1.children.include?(@r11)
check_cache
end
def test_update_where_root_becomes_child
@r1.update_attributes(:my_parent_id => @r21.id)
check_cache
end
def test_update_where_child_becomes_root
@r111.update_attributes(:my_parent_id =>nil)
check_cache
end
def test_update_where_child_switches_within_branch
@r22.update_attributes(:my_parent_id => @r211.id)
check_cache
end
def test_update_where_child_switches_branch
@r221.update_attributes(:my_parent_id => @r11.id)
check_cache
end
  # my_parent_id must be a positive integer id; floats, zero and strings
  # are rejected by validation.
  def test_invalid_parent_id_type
    assert !Category.new(:my_parent_id => 0.0).save
    assert !Category.new(:my_parent_id => 1.5).save
    assert !Category.new(:my_parent_id => 0).save
    assert !Category.new(:my_parent_id => 'string').save
  end

  # A parent id that points at no existing row fails validation;
  # create! surfaces it as RecordInvalid.
  def test_non_existant_foreign_key
    assert !Category.new(:my_parent_id => 9876543210).save
    assert_raise(ActiveRecord::RecordInvalid) { Category.create!(:my_parent_id => 9876543210) }
  end

  # A node may not be its own parent — both via update_attributes and via
  # direct attribute assignment followed by save.
  def test_category_becomes_its_own_parent
    assert !@r1.update_attributes(:my_parent_id => @r1.id)
    assert @r2.my_parent_id = @r2.id
    assert !@r2.save
  end

  # A node may not be reparented under any of its own descendants
  # (that would create a cycle).
  def test_category_becomes_parent_of_descendant
    assert !@r1.update_attributes(:my_parent_id => @r11.id)
    assert !@r1.update_attributes(:my_parent_id => @r111.id)
    assert !@r11.update_attributes(:my_parent_id => @r111.id)
    assert @r2.my_parent_id = @r21.id
    assert !@r2.save
  end

  # update_positions takes {'aac_sortable_tree_<parent_id>' => [ordered ids]}
  # and must receive exactly the full sibling set of an existing parent;
  # partial lists, extra ids, unknown parents and unknown ids all raise.
  def test_update_positions
    Category.update_positions({'aac_sortable_tree_0' => [3,1,2]})
    assert_equal 1, Category.find(3).my_position
    assert_equal 2, Category.find(1).my_position
    assert_equal 3, Category.find(2).my_position
    Category.update_positions({'aac_sortable_tree_2' => [6,5]})
    assert_equal 1, Category.find(6).my_position
    assert_equal 2, Category.find(5).my_position
    assert_raise(::ArgumentError) { Category.update_positions({'aac_sortable_tree_2' => [1]}) }
    assert_raise(::ArgumentError) { Category.update_positions({'aac_sortable_tree_2' => [1,2,3]}) }
    assert_raise(::ArgumentError) { Category.update_positions({'aac_sortable_tree_2' => [5,6,7]}) }
    assert_raise(::ArgumentError) { Category.update_positions({'aac_sortable_tree_9876543210' => [1]}) }
    assert_raise(::ArgumentError) { Category.update_positions({'aac_sortable_tree_1' => [9876543210]}) }
  end
def get
assert_equal @r1, Category.get(1)
assert @r1.update_attribute('my_hidden', true)
assert_nil Category.get(1)
assert_nil Category.get(4)
assert_nil Category.get(7)
assert_equal @r2, Category.get(2)
assert_equal @r3, Category.get(3)
end
end
| 33.561462 | 108 | 0.691942 |
ab3c117e20d0f308cdef51d8f9012f35c10fc897 | 3,407 | # frozen_string_literal: true
require 'dry/monads/result'
require 'dry/transaction/result_matcher'
require 'dry/transaction/stack'
module Dry
  module Transaction
    # Instance-level behaviour mixed into transaction classes: builds the
    # step stack, runs it, and supports per-call listeners and step args.
    module InstanceMethods
      include Dry::Monads::Result::Mixin

      # Steps with their resolved operations attached.
      attr_reader :steps
      # Operations injected at construction time, keyed by step name.
      attr_reader :operations
      # Listener(s) subscribed to step events, if any.
      attr_reader :listeners
      # The composed callable stack built from +steps+.
      attr_reader :stack

      # @param steps [Array] step definitions (defaults to the class-level steps)
      # @param listeners [Hash, Object, nil] listener(s) to subscribe immediately
      # @param operations [Hash] operation objects keyed by step name
      def initialize(steps: (self.class.steps), listeners: nil, **operations)
        @steps = steps.map { |step|
          operation = resolve_operation(step, **operations)
          step.with(operation: operation)
        }
        @operations = operations
        @stack = Stack.new(@steps)
        subscribe(listeners) unless listeners.nil?
      end

      # Runs the transaction. With a block, yields a result matcher DSL;
      # without one, returns a Success/Failure monad.
      def call(input = nil, &block)
        assert_step_arity

        result = stack.(Success(input))

        if block
          ResultMatcher.(result, &block)
        else
          result.or { |step_failure|
            # Unwrap the value from the StepFailure and return it directly
            Failure(step_failure.value)
          }
        end
      end

      # Subscribe listeners to steps. A Hash maps step names to listeners;
      # anything else is subscribed to every step.
      def subscribe(listeners)
        @listeners = listeners

        if listeners.is_a?(Hash)
          listeners.each do |step_name, listener|
            steps.detect { |step| step.name == step_name }.subscribe(listener)
          end
        else
          steps.each do |step|
            step.subscribe(listeners)
          end
        end
      end

      # Returns a copy of the transaction with extra call-time arguments
      # bound to the named steps. The receiver is not mutated.
      def with_step_args(**step_args)
        assert_valid_step_args(step_args)

        new_steps = steps.map { |step|
          if step_args[step.name]
            step.with(call_args: step_args[step.name])
          else
            step
          end
        }

        self.class.new(steps: new_steps, listeners: listeners, **operations)
      end

      private

      # Step names are exposed as methods (see method_missing below).
      def respond_to_missing?(name, _include_private = false)
        steps.any? { |step| step.name == name }
      end

      # Allows calling an individual step's operation directly by name.
      def method_missing(name, *args, &block)
        step = steps.detect { |s| s.name == name }
        super unless step

        operation = operations[step.name]
        raise NotImplementedError, "no operation +#{step.operation_name}+ defined for step +#{step.name}+" unless operation

        operation.(*args, &block)
      end

      # Resolution order matters: an injected operation wins for internal
      # steps, then an instance method of the same name, then any callable
      # injected operation; a non-callable injection or a missing one raises.
      def resolve_operation(step, **operations)
        if step.internal? && operations[step.name]
          operations[step.name]
        elsif methods.include?(step.name) || private_methods.include?(step.name)
          method(step.name)
        elsif operations[step.name].respond_to?(:call)
          operations[step.name]
        elsif operations[step.name]
          raise InvalidStepError.new(step.name)
        else
          raise MissingStepError.new(step.name)
        end
      end

      # Rejects step args addressed to unknown step names.
      def assert_valid_step_args(step_args)
        step_args.each_key do |step_name|
          unless steps.any? { |step| step.name == step_name }
            raise ArgumentError, "+#{step_name}+ is not a valid step name"
          end
        end
      end

      # Negative arity means optional/rest args; ~arity is the minimum count.
      def assert_step_arity
        steps.each do |step|
          num_args_required = step.arity >= 0 ? step.arity : ~step.arity
          num_args_supplied = step.call_args.length + 1 # add 1 for main `input`
          if num_args_required > num_args_supplied
            raise ArgumentError, "not enough arguments supplied for step +#{step.name}+"
          end
        end
      end
    end
  end
end
| 28.157025 | 123 | 0.601409 |
1a4f0a9399835284b9e2dfa8049f9c39fec27e6e | 357 | # Be sure to restart your server when you modify this file.
# Default cookie store, kept for reference:
# Rails.application.config.session_store :cookie_store, key: '_rails_41_app_session'
# When the mongo driver is selected, point MongoStore at a per-environment database.
MongoStore::Session.database = Mongo::Connection.new.db("rails41_app_#{Rails.env}") if ENV['MONGO_SESSION_STORE_ORM'] == "mongo"
# Select the session store from the ORM under test, e.g. "mongoid" -> :mongoid_store.
Rails.application.config.session_store :"#{ENV['MONGO_SESSION_STORE_ORM']}_store"
| 59.5 | 128 | 0.789916 |
d5d415d2ddf4990e4d313716b5ea32548b095b39 | 2,479 | # frozen_string_literal: true
# Shrinks several string columns and converts facilities.description to text
# (via a temporary description_c column so existing data is preserved).
class UpdateVarcharFields < ActiveRecord::Migration[4.2]
  def self.up
    change_column :facilities, :url_name, :string, limit: 50, null: false
    change_column :facilities, :account, :string, limit: 50, null: false
    add_column :facilities, :description_c, :text, null: true
    execute "UPDATE facilities SET description_c = description"
    remove_column :facilities, :description
    rename_column :facilities, :description_c, :description
    change_column :accounts, :type, :string, limit: 50, null: false
    change_column :accounts, :account_number, :string, limit: 50, null: false
    change_column :accounts, :description, :string, limit: 50, null: false
    change_column :order_statuses, :name, :string, limit: 50, null: false
    change_column :products, :type, :string, limit: 50, null: false
    change_column :products, :name, :string, limit: 200, null: false
    change_column :products, :url_name, :string, limit: 50, null: false
    change_column :products, :unit_size, :string, limit: 50, null: true
    change_column :products, :relay_ip, :string, limit: 15, null: true
    change_column :price_groups, :name, :string, limit: 50, null: false
    change_column :price_group_members, :type, :string, limit: 50, null: false
    change_column :price_policies, :type, :string, limit: 50, null: false
  end

  def self.down
    change_column :facilities, :account, :string, limit: 100, null: false
    # FIX: was `:varchar` — not a Rails migration column type, so the rollback
    # raised on adapters that validate types. A limited :string is the
    # equivalent VARCHAR(4000).
    add_column :facilities, :description_v, :string, limit: 4000, null: true
    execute "UPDATE facilities SET description_v = description"
    remove_column :facilities, :description
    rename_column :facilities, :description_v, :description
    change_column :accounts, :type, :string, limit: 200, null: false
    change_column :accounts, :account_number, :string, limit: 100, null: false
    change_column :accounts, :description, :string, limit: 200, null: false
    change_column :order_statuses, :name, :string, limit: 200, null: false
    change_column :products, :type, :string, limit: 200, null: false
    change_column :price_groups, :name, :string, limit: 200, null: false
    change_column :price_group_members, :type, :string, limit: 200, null: false
    change_column :price_policies, :type, :string, limit: 200, null: false
    # NOTE(review): down does not restore facilities.url_name or the
    # products name/url_name/unit_size/relay_ip changes made in up —
    # pre-existing asymmetry, deliberately left as-is.
  end
end
| 43.491228 | 80 | 0.674062 |
621e0b930617a4ff057c27061b206ab2ebd6070b | 1,357 | # Configure Spree Preferences
#
# Note: Initializing preferences available within the Admin will overwrite any changes that were made through the user interface when you restart.
# If you would like users to be able to update a setting with the Admin it should NOT be set here.
#
# Note: If a preference is set here it will be stored within the cache & database upon initialization.
# Just removing an entry from this initializer will not make the preference value go away.
# Instead you must either set a new value or remove entry, clear cache, and remove database entry.
#
# In order to initialize a setting do:
# config.setting_name = 'new value'
Spree.config do |config|
  # Example:
  # Uncomment to stop tracking inventory levels in the application
  # config.track_inventory_levels = false
end

# Configure Spree Dependencies
#
# Note: If a dependency is set here it will NOT be stored within the cache & database upon initialization.
# Just removing an entry from this initializer will make the dependency value go away.
#
Spree.dependencies do |dependencies|
  # Example:
  # Uncomment to change the default Service handling adding Items to Cart
  # dependencies.cart_add_item_service = 'MyNewAwesomeService'
end

# Generator template: the user class is interpolated at `rails generate` time.
Spree.user_class = <%= (options[:user_class].blank? ? 'Spree::LegacyUser' : options[:user_class]).inspect %>
| 43.774194 | 146 | 0.759027 |
7a0696d6edee6aaf58bbba541e69e6052594d183 | 3,893 | require "active_support/core_ext/integer/time"
Rails.application.configure do
  # Settings specified here will take precedence over those in config/application.rb.

  # Code is not reloaded between requests.
  config.cache_classes = true

  # Eager load code on boot. This eager loads most of Rails and
  # your application in memory, allowing both threaded web servers
  # and those relying on copy on write to perform better.
  # Rake tasks automatically ignore this option for performance.
  config.eager_load = true

  # Full error reports are disabled and caching is turned on.
  config.consider_all_requests_local = false
  config.action_controller.perform_caching = true

  # Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"]
  # or in config/master.key. This key is used to decrypt credentials (and other encrypted files).
  # config.require_master_key = true

  # Disable serving static files from the `/public` folder by default since
  # Apache or NGINX already handles this.
  config.public_file_server.enabled = ENV["RAILS_SERVE_STATIC_FILES"].present?

  # Compress CSS using a preprocessor.
  # config.assets.css_compressor = :sass

  # Do not fallback to assets pipeline if a precompiled asset is missed.
  config.assets.compile = false

  # Enable serving of images, stylesheets, and JavaScripts from an asset server.
  # config.asset_host = "http://assets.example.com"

  # Specifies the header that your server uses for sending files.
  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for Apache
  # config.action_dispatch.x_sendfile_header = "X-Accel-Redirect" # for NGINX

  # Store uploaded files on the local file system (see config/storage.yml for options).
  # NOTE(review): local disk storage does not survive ephemeral/production
  # deploys with multiple hosts — confirm this is intended for production.
  config.active_storage.service = :local

  # Mount Action Cable outside main process or domain.
  # config.action_cable.mount_path = nil
  # config.action_cable.url = "wss://example.com/cable"
  # config.action_cable.allowed_request_origins = [ "http://example.com", /http:\/\/example.*/ ]

  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
  # config.force_ssl = true

  # Include generic and useful information about system operation, but avoid logging too much
  # information to avoid inadvertent exposure of personally identifiable information (PII).
  config.log_level = :info

  # Prepend all log lines with the following tags.
  config.log_tags = [ :request_id ]

  # Use a different cache store in production.
  # config.cache_store = :mem_cache_store

  # Use a real queuing backend for Active Job (and separate queues per environment).
  # config.active_job.queue_adapter = :resque
  # config.active_job.queue_name_prefix = "Project_production"

  config.action_mailer.perform_caching = false

  # Ignore bad email addresses and do not raise email delivery errors.
  # Set this to true and configure the email server for immediate delivery to raise delivery errors.
  # config.action_mailer.raise_delivery_errors = false

  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
  # the I18n.default_locale when a translation cannot be found).
  config.i18n.fallbacks = true

  # Don't log any deprecations.
  config.active_support.report_deprecations = false

  # Use default logging formatter so that PID and timestamp are not suppressed.
  config.log_formatter = ::Logger::Formatter.new

  # Use a different logger for distributed setups.
  # require "syslog/logger"
  # config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new "app-name")

  # Log to STDOUT when the platform (e.g. Docker/Heroku) collects stdout.
  if ENV["RAILS_LOG_TO_STDOUT"].present?
    logger = ActiveSupport::Logger.new(STDOUT)
    logger.formatter = config.log_formatter
    config.logger = ActiveSupport::TaggedLogging.new(logger)
  end

  # Do not dump schema after migrations.
  config.active_record.dump_schema_after_migration = false
end
| 41.414894 | 100 | 0.759312 |
39fe95656f254c22e35bc5c07da8796e4eda7946 | 1,452 | #-- copyright
# OpenProject is a project management system.
# Copyright (C) 2012-2013 the OpenProject Foundation (OPF)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2013 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See doc/COPYRIGHT.rdoc for more details.
#++
FactoryGirl.define do
  # Builds an AvailableProjectStatus with its two required associations.
  factory :available_project_status, class: AvailableProjectStatus do |status|
    reported_project_status { |record| record.association :reported_project_status }
    project_type { |record| record.association :project_type }
  end
end
| 41.485714 | 91 | 0.756887 |
1cb94c178ea02938ec751c1b406c8e82a63d79e0 | 6,322 | require 'spec_helper'
describe FundAmerica::Offering do
  # The same eleven assertions about FundAmerica's paginated resource-list
  # envelope were copy-pasted verbatim for #list, #billing_logs and
  # #investment_payments; extracted into one shared example group.
  # Including contexts must provide +resource_list+ (via `let`).
  shared_examples 'a paginated resource list' do
    it 'must have object in response' do
      expect(resource_list['object']).not_to be nil
    end

    it 'must have object as resource_list' do
      expect(resource_list['object']).to eq('resource_list')
    end

    it 'must have total_resources in response' do
      expect(resource_list['total_resources']).not_to be nil
    end

    it 'must have total_resources as 0 or more' do
      expect(resource_list['total_resources']).to be >= 0
    end

    it 'must have page in response' do
      expect(resource_list['page']).not_to be nil
    end

    it 'must have page as 1 or more' do
      expect(resource_list['page']).to be >= 1
    end

    it 'must have per in response' do
      expect(resource_list['per']).not_to be nil
    end

    it 'must have more in response' do
      expect(resource_list['more']).not_to be nil
    end

    it 'must have more as boolean value' do
      expect(resource_list['more']).to be_boolean
    end

    it 'must have resources in response' do
      expect(resource_list['resources']).not_to be nil
    end

    it 'must have resources as array' do
      expect(resource_list['resources']).to be_instance_of(Array)
    end
  end

  context '#list' do
    before(:all) do
      @response = FundAmerica::Offering.list
    end

    it_behaves_like 'a paginated resource list' do
      let(:resource_list) { @response }
    end
  end

  context '#create' do
    before(:all) do
      # Timestamp keeps emails/names unique across repeated runs
      # against the sandbox API.
      t = Time.now.to_i
      options = {
        :city => 'Las Vegas',
        :country => 'US',
        :email => "test#{t}@test.com",
        :name => "Test U#{t}",
        :phone => '1234567890',
        :postal_code => '89123',
        :region => 'NV',
        :street_address_1 => '123 street',
        :tax_id_number => '123123123',
        :type => "person",
        :date_of_birth => '1980-01-01'
      }
      @entity = FundAmerica::Entity.create(options)
      offering_options = {
        :amount => '5000000',
        :description => 'A really big deal.',
        :max_amount => '5500000',
        :min_amount => '4500000',
        :name => "Offering #{t}",
        :entity_id => @entity['id']
      }
      @offering = FundAmerica::Offering.create(offering_options)
    end

    it 'must have a response' do
      expect(@offering).not_to be nil
    end

    it 'must have an id' do
      expect(@offering['id']).not_to be nil
    end

    it 'must have object as offering' do
      expect(@offering['object']).to eq('offering')
    end

    context '#details' do
      before(:all) do
        @offering_details = FundAmerica::Offering.details(@offering['id'])
      end

      it 'must have a response' do
        expect(@offering_details).not_to be nil
      end

      it 'must have an id' do
        expect(@offering_details['id']).not_to be nil
      end

      it 'must match the id in the request' do
        expect(@offering_details['id']).to eq(@offering['id'])
      end
    end

    context '#update' do
      before(:all) do
        @offering_updated = FundAmerica::Offering.update(@offering['id'], {:accept_investments => true})
      end

      it 'must have a response' do
        expect(@offering_updated).not_to be nil
      end

      it 'must have an id' do
        expect(@offering_updated['id']).not_to be nil
      end

      it 'must match the id in the request' do
        expect(@offering_updated['id']).to eq(@offering['id'])
      end

      it 'must show the updated value in response' do
        expect(@offering_updated['accept_investments']).to be true
      end
    end

    context '#billing_logs' do
      before(:all) do
        @billing_logs = FundAmerica::Offering.billing_logs(@offering['id'])
      end

      it_behaves_like 'a paginated resource list' do
        let(:resource_list) { @billing_logs }
      end
    end

    context '#investment_payments' do
      before(:all) do
        @investment_payments = FundAmerica::Offering.investment_payments(@offering['id'])
      end

      it_behaves_like 'a paginated resource list' do
        let(:resource_list) { @investment_payments }
      end
    end
  end
end
| 26.902128 | 104 | 0.608826 |
e2fa992a31de9f156e33c8067be9dc35574ac30a | 1,568 | class Diskus < Formula
desc "Minimal, fast alternative to 'du -sh'"
homepage "https://github.com/sharkdp/diskus"
url "https://github.com/sharkdp/diskus/archive/v0.6.0.tar.gz"
sha256 "661687edefa3218833677660a38ccd4e2a3c45c4a66055c5bfa4667358b97500"
license "Apache-2.0"
bottle do
rebuild 1
sha256 cellar: :any_skip_relocation, arm64_monterey: "2829a12accf1b84833f9663b55e45d0851ee27e6ac2835c6f4c3bb5366f4507d"
sha256 cellar: :any_skip_relocation, arm64_big_sur: "2b3c800cb9f9387eabe0820cf588be1c10cb00204bb6838c067a0f35c691ab8a"
sha256 cellar: :any_skip_relocation, monterey: "3456e115758dccffee255f9fe8f37258c0c22a168902683f07e4ce4a24ec54b6"
sha256 cellar: :any_skip_relocation, big_sur: "56ca7dc82e101e1dbb7090fe1567c924bd967f4d3df8b138caa45182fd76e446"
sha256 cellar: :any_skip_relocation, catalina: "3c4f0aafa14c810b36b2d5dbeb6998bdc866aad7b531f12f14508ee2e8b1c46d"
sha256 cellar: :any_skip_relocation, mojave: "16deb101df03efdcc20f56ed24d2e9608e8733e3bf9f552935ccc73428ac96a3"
sha256 cellar: :any_skip_relocation, high_sierra: "e603cd7daf7d48e0f139b584ef77f4f59949470e4e2d0ee0f430ac229fe444ea"
sha256 cellar: :any_skip_relocation, x86_64_linux: "7eb0588350167e38dc14e6ba328e08faca5940c4659ef2e84d3e80533bc14617"
end
depends_on "rust" => :build
def install
system "cargo", "install", *std_cargo_args
end
test do
(testpath/"test.txt").write("Hello World")
output = shell_output("#{bin}/diskus #{testpath}/test.txt")
assert_match "4096", output
end
end
| 49 | 123 | 0.793367 |
e2f1c974f1986df9811257300bbccdaf27d9df8d | 157 | require 'aruba/platform'
# Deprecation shim: warn, then load the replacement so old requires keep working.
Aruba.platform.deprecated('The use of "aruba/jruby" is deprecated. Use "aruba/config/jruby" instead')
require 'aruba/config/jruby'
| 31.4 | 101 | 0.77707 |
7a729bf380a4878bd86aa7daf73d16ad637bd475 | 2,570 | # frozen_string_literal: true
# Shared contract for "platforms" report plugins: given stubbed Puppet
# environments and PuppetDB class/fact data, the plugin must aggregate
# class usage counts per OS platform.
# @param plugin      the plugin object under test
# @param plugin_name [String] used in example descriptions
RSpec.shared_examples 'platforms_spec' do |plugin, plugin_name|
  # Test doubles standing in for the PuppetDB session and Puppet internals.
  let(:puppetdb_session) { double }
  let(:puppet_db) { double }
  let(:list) { [OpenStruct.new(name: 'production', modules: [Puppet::Module.new('modulea', '/foo/bar', 'production'), Puppet::Module.new('moduleb', '/foo/baz', 'production')])] }
  let(:puppet_lookup) { double }
  let(:classes) { double }
  let(:facts) { double }

  it 'has a valid schema' do
    schema = plugin.schema
    expect(schema).to be_a_valid_schema
  end

  it "#{plugin_name}.run without errors" do
    # Stub Puppet's environment lookup and mark every module as a Forge module.
    allow(Puppet).to receive(:lookup).with(:environments).and_return(puppet_lookup)
    allow(puppet_lookup).to receive(:list).and_return(list)
    list[0].modules.each do |elem|
      allow(elem).to receive(:forge_module?).and_return(true)
    end
    allow(puppetdb_session).to receive(:puppet_db).and_return(puppet_db)
    # PuppetDB query 1: which classes are applied on which node.
    allow(puppet_db).to receive(:request).with('', 'resources[certname, title] { type = "Class" }').and_return(classes)
    allow(classes).to receive(:data).and_return(
      [
        {
          'title' => 'ModuleA::Foo',
          'certname' => 'aaa',
        },
        {
          'title' => 'ModuleB::Bar',
          'certname' => 'bbb',
        },
      ],
    )
    # PuppetDB query 2: each node's osfamily fact, joined by certname.
    allow(puppet_db).to receive(:request).with('', 'facts[certname, value] { name = "osfamily" }').and_return(facts)
    allow(facts).to receive(:data).and_return(
      [
        {
          'value' => 'linux',
          'certname' => 'aaa',
        },
        {
          'value' => 'windows',
          'certname' => 'bbb',
        },
      ],
    )
    # One node per class, so each (class, platform) pair counts 1.
    expect(plugin.run(puppetdb_session)).to eq([
      class_platforms: [
        {
          count: 1,
          name: 'ModuleA::Foo',
          platform: 'linux',
        },
        {
          count: 1,
          name: 'ModuleB::Bar',
          platform: 'windows',
        },
      ],
    ])
  end

  it 'generates an example' do
    expect { plugin.example }.not_to raise_error
  end
end
| 36.714286 | 178 | 0.443191 |
287e423f7009308bc02fee2cf9ac33d039a40694 | 295 | require "rails_helper"
describe MappingsBatch do
  # shoulda-matchers one-liners covering the model's declared validations.
  describe "validations" do
    it { is_expected.to validate_presence_of(:user) }
    it { is_expected.to validate_presence_of(:site) }
    it { is_expected.to validate_inclusion_of(:state).in_array(MappingsBatch::PROCESSING_STATES) }
  end
end
| 29.5 | 98 | 0.766102 |
bf51f68c13de676b44ac28ea058eb1cbb1d3950d | 2,965 | # The Kenney UI Space pack has transparent panels with optional cut corners.
# This is a normal nine slice where each corner has two options.
class GlassPanel < Zif::NinePanel
  SPRITES_PATH = 'sprites/kenney-uipack-space/danhealy-modified'.freeze
  CUT_CORNER = "#{SPRITES_PATH}/glass_cut_corner.png".freeze
  ROUND_CORNER = "#{SPRITES_PATH}/glass_round_corner.png".freeze
  # Pixel size of each corner/edge slice.
  WIDTH = 16

  # The current cut configuration: four booleans ordered
  # [lower-left, lower-right, upper-right, upper-left].
  attr_accessor :cuts

  # @param width       [Integer] total panel width in pixels
  # @param height      [Integer] total panel height in pixels
  # @param cut_corners [Array<Boolean>] which corners use the cut artwork (see #cuts)
  # @param name        [String] sprite name
  def initialize(width, height, cut_corners=[false, false, false, false], name=Zif.random_name('glass_panel'))
    super(name)

    # Only the coordinates that never change are set here; positions that
    # depend on the panel size are assigned by #resize below.
    # The eight near-identical Zif::Sprite.new.tap blocks were collapsed
    # into the build_sprite helper.
    self.upper_left_corner  = build_sprite(x: 0, w: WIDTH, h: WIDTH)
    self.upper_right_corner = build_sprite(w: WIDTH, h: WIDTH, flip_h: true)
    self.lower_right_corner = build_sprite(y: 0, w: WIDTH, h: WIDTH, flip_h: true, flip_v: true)
    self.lower_left_corner  = build_sprite(x: 0, y: 0, w: WIDTH, h: WIDTH, flip_v: true)

    self.upper_edge = build_sprite(x: WIDTH, h: WIDTH, path: "#{SPRITES_PATH}/glass_side.png")
    self.right_edge = build_sprite(y: WIDTH, w: WIDTH, path: "#{SPRITES_PATH}/glass_side_right.png")
    self.lower_edge = build_sprite(x: WIDTH, y: 0, h: WIDTH, path: "#{SPRITES_PATH}/glass_side.png", flip_v: true)
    self.left_edge  = build_sprite(x: 0, y: WIDTH, w: WIDTH, path: "#{SPRITES_PATH}/glass_side_right.png", flip_h: true)

    @fill = build_sprite(x: WIDTH, y: WIDTH, path: "#{SPRITES_PATH}/glass_center.png")

    resize(width, height)
    change_cuts(cut_corners)
  end

  # Swap each corner between the cut and round artwork per +cut_corners+.
  def change_cuts(cut_corners)
    @cuts = cut_corners
    lower_left_corner.path  = cut_corners[0] ? CUT_CORNER : ROUND_CORNER
    lower_right_corner.path = cut_corners[1] ? CUT_CORNER : ROUND_CORNER
    upper_right_corner.path = cut_corners[2] ? CUT_CORNER : ROUND_CORNER
    upper_left_corner.path  = cut_corners[3] ? CUT_CORNER : ROUND_CORNER
  end

  # Reposition/stretch the slices that depend on the panel width.
  def resize_width(width)
    return if @w == width

    @w = width
    upper_right_corner.x = @w - WIDTH
    lower_right_corner.x = @w - WIDTH
    upper_edge.w = @w - 2 * WIDTH
    right_edge.x = @w - WIDTH
    lower_edge.w = @w - 2 * WIDTH
    @fill.w = @w - 2 * WIDTH
  end

  # Reposition/stretch the slices that depend on the panel height.
  def resize_height(height)
    return if @h == height

    @h = height
    upper_left_corner.y = @h - WIDTH
    upper_right_corner.y = @h - WIDTH
    upper_edge.y = @h - WIDTH
    right_edge.h = @h - 2 * WIDTH
    left_edge.h = @h - 2 * WIDTH
    @fill.h = @h - 2 * WIDTH
  end

  private

  # Builds one slice sprite, assigning only the attributes that were given.
  # Attributes left nil are intentionally not touched so the sprite keeps
  # Zif::Sprite defaults until #resize positions it.
  def build_sprite(x: nil, y: nil, w: nil, h: nil, path: nil, flip_h: false, flip_v: false)
    Zif::Sprite.new.tap do |s|
      s.x = x unless x.nil?
      s.y = y unless y.nil?
      s.w = w unless w.nil?
      s.h = h unless h.nil?
      s.path = path unless path.nil?
      s.flip_horizontally = true if flip_h
      s.flip_vertically = true if flip_v
    end
  end
end
| 26.008772 | 110 | 0.609781 |
ac56dd0403455349244920d9ea5d425ae473bbe8 | 4,151 | require 'triglav_client'
# Thin wrapper around the generated TriglavClient API: handles token
# authentication, transparent re-authentication, and error translation.
class Client
  # Base error. +cause+ carries the underlying TriglavClient exception.
  class Error < StandardError
    attr_reader :cause

    def initialize(message, cause)
      @cause = cause
      super(message)
    end
  end
  class AuthenticationError < Error; end
  class ConnectionError < Error; end

  attr_reader :url, :username, :password, :authenticator

  def initialize(
    url: 'http://localhost:7800',
    username: 'triglav_test',
    password: 'triglav_test',
    authenticator: 'local',
    timeout: nil,
    debugging: nil
  )
    @url = url
    @username = username
    @password = password
    @authenticator = authenticator
    @timeout = timeout
    @debugging = debugging
    initialize_current_token
    authenticate
  end

  # Send messages
  #
  # @param [Array] events array of event messages
  #
  # {
  #   resource_uri: "hdfs://host:port/path/to/resource",
  #   resource_unit: 'daily',
  #   resource_time: Time.at.to_i,
  #   resource_timezone: "+09:00",
  #   payload: {free: "text"}.to_json,
  # }
  #
  # @see TriglavAgent::MessageRequest
  def send_messages(events)
    messages_api = TriglavClient::MessagesApi.new(api_client)
    handle_error { messages_api.send_messages(events) }
  end

  # Fetch messages
  #
  # @param [Integer] offset
  # @param [Integer] limit
  # @param [Array] resource_uris
  # @return [Array] array of messages
  # @see TriglavClient::MessageEachResponse
  #   id, resource_uri, resource_unit, resource_time, resource_timezone, payload
  def fetch_messages(offset, limit: 100, resource_uris: [])
    messages_api = TriglavClient::MessagesApi.new(api_client)
    handle_error { messages_api.list_messages(offset, {limit: limit, resource_uris: resource_uris}) }
  end

  private

  # Lazily builds and memoizes the configured TriglavClient::ApiClient.
  def api_client
    return @api_client if @api_client
    config = TriglavClient::Configuration.new do |config|
      uri = URI.parse(url)
      config.scheme = uri.scheme
      config.host = "#{uri.host}:#{uri.port}"
      config.timeout = @timeout if @timeout
      config.debugging = @debugging if @debugging
    end
    @api_client = TriglavClient::ApiClient.new(config)
  end

  # Exchanges the credential for an access token and installs it.
  def authenticate
    auth_api = TriglavClient::AuthApi.new(api_client)
    credential = TriglavClient::Credential.new(
      username: username, password: password, authenticator: authenticator
    )
    handle_auth_error do
      result = auth_api.create_token(credential)
      token = {access_token: result.access_token}
      update_current_token(token)
    end
  end

  # The api_client keeps a reference to this String; update_current_token
  # mutates it in place so the client always sends the latest token.
  def initialize_current_token
    @current_token = {
      access_token: (api_client.config.api_key['Authorization'] = String.new),
    }
  end

  def update_current_token(token)
    @current_token[:access_token].replace(token[:access_token])
  end

  # Error translation during authentication: no retry here, a bad
  # credential is surfaced as AuthenticationError immediately.
  def handle_auth_error(&block)
    begin
      yield
    rescue TriglavClient::ApiError => e
      if e.code == 0
        # FIX: was "#{triglav_url}" — an undefined method, so a connection
        # failure raised NameError instead of the intended ConnectionError.
        raise ConnectionError.new("Could not connect to #{url}", e)
      elsif e.message == 'Unauthorized'.freeze
        raise AuthenticationError.new("Failed to authenticate on triglav API.".freeze, e)
      else
        raise Error.new(e.message, e)
      end
    end
  end

  # Error translation for normal API calls: on Unauthorized, re-authenticate
  # once per failure and retry the block.
  # NOTE(review): if the API keeps answering Unauthorized with a token that
  # authenticates fine, this retries indefinitely — consider a retry cap.
  def handle_error(&block)
    begin
      yield
    rescue TriglavClient::ApiError => e
      if e.code == 0
        # FIX: was "#{triglav_url}" (undefined method), same as above.
        raise ConnectionError.new("Could not connect to #{url}", e)
      elsif e.message == 'Unauthorized'.freeze
        authenticate
        retry
      else
        raise Error.new(e.message, e)
      end
    end
  end
end
| 28.047297 | 101 | 0.675259 |
612fd3b78f8812a19e948fdaa1475acd0084e8db | 15,990 | #!/usr/bin/env ruby
require File.expand_path(File.join(File.dirname(__FILE__), "test_helper"))
describe BinData::Record do
  # Record is an abstract base — it must not be instantiable by name
  # through the type registry.
  it "is not registered" do
    lambda {
      BinData::RegisteredClasses.lookup("Record")
    }.must_raise BinData::UnRegisteredTypeError
  end
end
# Definition-time validation: malformed record declarations must fail with
# precise, line-accurate errors.
# NOTE(review): must_raise_on_line comes from test_helper and presumably
# asserts the error class, the line number within the definition, and the
# message — confirm there.
describe BinData::Record, "when defining with errors" do
  it "fails on non registered types" do
    lambda {
      class BadTypeRecord < BinData::Record
        non_registered_type :a
      end
    }.must_raise_on_line TypeError, 2, "unknown type 'non_registered_type' in BadTypeRecord"
  end
  it "gives correct error message for non registered nested types" do
    lambda {
      class BadNestedTypeRecord < BinData::Record
        array :a, type: :non_registered_type
      end
    }.must_raise_on_line TypeError, 2, "unknown type 'non_registered_type' in BadNestedTypeRecord"
  end
  it "gives correct error message for non registered nested types in blocks" do
    lambda {
      class BadNestedTypeInBlockRecord < BinData::Record
        array :a do
          non_registered_type
        end
      end
    }.must_raise_on_line TypeError, 3, "unknown type 'non_registered_type' in BinData::Array"
  end
  it "fails on nested choice when missing names" do
    lambda {
      class MissingChoiceNamesRecord < BinData::Record
        choice do
          int8 :a
          int8
        end
      end
    }.must_raise_on_line SyntaxError, 4, "fields must either all have names, or none must have names in BinData::Choice"
  end
  it "fails on malformed names" do
    lambda {
      class MalformedNameRecord < BinData::Record
        int8 :a
        int8 "45"
      end
    }.must_raise_on_line NameError, 3, "field '45' is an illegal fieldname in MalformedNameRecord"
  end
  it "fails on duplicate names" do
    lambda {
      class DuplicateNameRecord < BinData::Record
        int8 :a
        int8 :b
        int8 :a
      end
    }.must_raise_on_line SyntaxError, 4, "duplicate field 'a' in DuplicateNameRecord"
  end
  it "fails on reserved names" do
    lambda {
      class ReservedNameRecord < BinData::Record
        int8 :a
        int8 :invert # from Hash.instance_methods
      end
    }.must_raise_on_line NameError, 3, "field 'invert' is a reserved name in ReservedNameRecord"
  end
  it "fails when field name shadows an existing method" do
    lambda {
      class ExistingNameRecord < BinData::Record
        int8 :object_id
      end
    }.must_raise_on_line NameError, 2, "field 'object_id' shadows an existing method in ExistingNameRecord"
  end
  it "fails on unknown endian" do
    lambda {
      class BadEndianRecord < BinData::Record
        endian 'a bad value'
      end
    }.must_raise_on_line ArgumentError, 2, "unknown value for endian 'a bad value' in BadEndianRecord"
  end
  it "fails when endian is after a field" do
    lambda {
      class BadEndianPosRecord < BinData::Record
        string :a
        endian :little
      end
    }.must_raise_on_line SyntaxError, 3, "endian must be called before defining fields in BadEndianPosRecord"
  end
  it "fails when search_prefix is after a field" do
    lambda {
      class BadSearchPrefixPosRecord < BinData::Record
        string :a
        search_prefix :pre
      end
    }.must_raise_on_line SyntaxError, 3, "search_prefix must be called before defining fields in BadSearchPrefixPosRecord"
  end
end
# Fields declared with an empty/nil/absent name are "anonymous": they take
# part in reading and writing but are not exposed as accessors or in
# snapshots.
describe BinData::Record, "with anonymous fields" do
  class AnonymousRecord < BinData::Record
    int8 'a', initial_value: 10
    int8 ''
    int8 nil
    int8
    int8 value: :a
  end
  let(:obj) { AnonymousRecord.new }
  it "only shows non anonymous fields" do
    obj.field_names.must_equal [:a]
  end
  it "does not include anonymous fields in snapshot" do
    obj.a = 5
    obj.snapshot.must_equal({a: 5})
  end
  it "writes anonymous fields" do
    str = "\001\002\003\004\005"
    obj.read(str)
    obj.a.clear
    # After clearing, :a falls back to initial_value (10) and the last
    # anonymous field mirrors it via value: :a; the middle three bytes
    # keep the values that were read.
    obj.to_binary_s.must_equal_binary "\012\002\003\004\012"
  end
end
# hide'd fields stay readable/writable through their accessors but are
# omitted from field_names and snapshots.
describe BinData::Record, "with hidden fields" do
  class HiddenRecord < BinData::Record
    hide :b, :c
    int8 :a
    int8 'b', initial_value: 10
    int8 :c
    int8 :d, value: :b
  end
  let(:obj) { HiddenRecord.new }
  it "only shows fields that aren't hidden" do
    obj.field_names.must_equal [:a, :d]
  end
  it "accesses hidden fields directly" do
    obj.b.must_equal 10
    obj.c = 15
    obj.c.must_equal 15
    obj.must_respond_to :b=
  end
  it "does not include hidden fields in snapshot" do
    obj.b = 5
    obj.snapshot.must_equal({a: 0, d: 5})
  end
end
# Core record behaviour with several fields: sizing, clearing, ordered
# read/write, snapshots and introspection.
describe BinData::Record, "with multiple fields" do
  class MultiFieldRecord < BinData::Record
    int8 :a
    int8 :b
  end
  let(:obj) { MultiFieldRecord.new(a: 1, b: 2) }
  it "returns num_bytes" do
    obj.a.num_bytes.must_equal 1
    obj.b.num_bytes.must_equal 1
    obj.num_bytes.must_equal 2
  end
  it "identifies accepted parameters" do
    BinData::Record.accepted_parameters.all.must_include :hide
    BinData::Record.accepted_parameters.all.must_include :endian
  end
  it "clears" do
    obj.a = 6
    obj.clear
    assert obj.clear?
  end
  it "clears individual elements" do
    obj.a = 6
    obj.b = 7
    obj.a.clear
    assert obj.a.clear?
    refute obj.b.clear?
  end
  it "writes ordered" do
    obj.to_binary_s.must_equal_binary "\x01\x02"
  end
  it "reads ordered" do
    obj.read("\x03\x04")
    obj.a.must_equal 3
    obj.b.must_equal 4
  end
  it "returns a snapshot" do
    snap = obj.snapshot
    snap.a.must_equal 1
    snap.b.must_equal 2
    snap.must_equal({ a: 1, b: 2 })
  end
  it "returns field_names" do
    obj.field_names.must_equal [:a, :b]
  end
  it "fails on unknown method call" do
    lambda { obj.does_not_exist }.must_raise NoMethodError
  end
end
# Nested structs: custom parameters flow into children (the_val), nested
# hide works, and offsets/bulk assignment behave per level.
describe BinData::Record, "with nested structs" do
  class NestedStructRecord < BinData::Record
    int8 :a, initial_value: 6
    struct :b, the_val: :a do
      hide :w
      int8 :w, initial_value: 3
      int8 :x, value: :the_val
    end
    struct :c do
      int8 :y, value: -> { b.w }
      int8 :z
    end
  end
  let(:obj) { NestedStructRecord.new }
  it "includes nested field names" do
    obj.field_names.must_equal [:a, :b, :c]
  end
  it "hides nested field names" do
    obj.b.field_names.must_equal [:x]
  end
  it "accesses nested fields" do
    obj.a.must_equal 6
    obj.b.w.must_equal 3
    obj.b.x.must_equal 6
    obj.c.y.must_equal 3
  end
  # abs_offset is measured from the start of the whole record...
  it "returns correct abs_offset" do
    obj.abs_offset.must_equal 0
    obj.b.abs_offset.must_equal 1
    obj.b.w.abs_offset.must_equal 1
    obj.c.abs_offset.must_equal 3
    obj.c.z.abs_offset.must_equal 4
  end
  # ...while rel_offset is measured from the start of the enclosing struct.
  it "returns correct rel_offset" do
    obj.rel_offset.must_equal 0
    obj.b.rel_offset.must_equal 1
    obj.b.w.rel_offset.must_equal 0
    obj.c.rel_offset.must_equal 3
    obj.c.z.rel_offset.must_equal 1
  end
  it "assigns nested fields" do
    obj.assign(a: 2, b: {w: 4})
    obj.a.must_equal 2
    obj.b.w.must_equal 4
    obj.b.x.must_equal 2
    obj.c.y.must_equal 4
  end
end
# An array declared with a block uses the block as its element type.
describe BinData::Record, "with nested array of primitives" do
  class NestedPrimitiveArrayRecord < BinData::Record
    array :a, initial_length: 3 do
      uint8 value: -> { index }
    end
  end
  let(:obj) { NestedPrimitiveArrayRecord.new }
  it "uses block as :type" do
    obj.snapshot.must_equal({a: [0, 1, 2]})
  end
end
# A multi-field block becomes an anonymous struct element type.
describe BinData::Record, "with nested array of structs" do
  class NestedStructArrayRecord < BinData::Record
    array :a do
      uint8 :b
      uint8 :c
    end
  end
  let(:obj) { NestedStructArrayRecord.new }
  it "uses block as struct for :type" do
    obj.a[0].b = 2
    obj.snapshot.must_equal({a: [{b: 2, c: 0}]})
  end
end
# Choice alternatives may be keyed implicitly (0, 1, ...), by explicit
# integer keys, or by string names; selection picks among them.
describe BinData::Record, "with nested choice with implied keys" do
  class NestedChoiceWithImpliedKeysRecord < BinData::Record
    choice :a, selection: 1 do
      uint8 value: 1
      uint8 value: 2
    end
  end
  let(:obj) { NestedChoiceWithImpliedKeysRecord.new }
  specify { obj.a.must_equal 2 }
end
describe BinData::Record, "with nested choice with explicit keys" do
  class NestedChoiceWithKeysRecord < BinData::Record
    choice :a, selection: 5 do
      uint8 3, value: 1
      uint8 5, value: 2
    end
  end
  let(:obj) { NestedChoiceWithKeysRecord.new }
  specify { obj.a.must_equal 2 }
end
describe BinData::Record, "with nested choice with names" do
  class NestedChoiceWithNamesRecord < BinData::Record
    choice :a, selection: "b" do
      uint8 "b", value: 1
      uint8 "c", value: 2
    end
  end
  let(:obj) { NestedChoiceWithNamesRecord.new }
  specify { obj.a.must_equal 1 }
end
# A record-level endian declaration applies to every endian-sensitive
# field, can be overridden per field (uint32be) and per nested struct
# (endian :big).
describe BinData::Record, "with an endian defined" do
  class RecordWithEndian < BinData::Record
    endian :little
    uint16 :a
    float :b
    array :c, initial_length: 2 do
      int8
    end
    choice :d, selection: 1 do
      uint16
      uint32
    end
    struct :e do
      uint16 :f
      uint32be :g
    end
    struct :h do
      endian :big
      struct :i do
        uint16 :j
      end
    end
  end
  let(:obj) { RecordWithEndian.new }
  it "uses correct endian" do
    obj.a = 1
    obj.b = 2.0
    obj.c[0] = 3
    obj.c[1] = 4
    obj.d = 5
    obj.e.f = 6
    obj.e.g = 7
    obj.h.i.j = 8
    # Pack directives mirror each field's declared endianness:
    # v = uint16le, e = floatle, CC = two int8s, V = uint32le,
    # v = uint16le, N = uint32be, n = uint16be.
    expected = [1, 2.0, 3, 4, 5, 6, 7, 8].pack('veCCVvNn')
    obj.to_binary_s.must_equal_binary expected
  end
end
# search_prefix lets a short type name resolve to a prefixed class
# (sprefix -> ASprefix). Prefixes are inherited from enclosing scopes;
# all four cases below resolve to ASprefix (a missing XSprefix falls
# through to the outer prefix, and an inner prefix beats the outer one).
describe BinData::Record, "with search_prefix" do
  class ASprefix < BinData::Int8; end
  class BSprefix < BinData::Int8; end
  class RecordWithSearchPrefix < BinData::Record
    search_prefix :a
    sprefix :f
  end
  class RecordWithParentSearchPrefix < BinData::Record
    search_prefix :a
    struct :s do
      sprefix :f
    end
  end
  class RecordWithNestedSearchPrefix < BinData::Record
    search_prefix :a
    struct :s do
      search_prefix :x
      sprefix :f
    end
  end
  class RecordWithPrioritisedNestedSearchPrefix < BinData::Record
    search_prefix :b
    struct :s do
      search_prefix :a
      sprefix :f
    end
  end
  it "uses search_prefix" do
    obj = RecordWithSearchPrefix.new
    obj.f.class.name.must_equal "ASprefix"
  end
  it "uses parent search_prefix" do
    obj = RecordWithParentSearchPrefix.new
    obj.s.f.class.name.must_equal "ASprefix"
  end
  it "uses nested search_prefix" do
    obj = RecordWithNestedSearchPrefix.new
    obj.s.f.class.name.must_equal "ASprefix"
  end
  it "uses prioritised nested search_prefix" do
    obj = RecordWithPrioritisedNestedSearchPrefix.new
    obj.s.f.class.name.must_equal "ASprefix"
  end
end
# endian :big_and_little generates two concrete classes (...Be and ...Le);
# the declaring class itself stays unregistered and needs an :endian
# argument to be instantiated directly.
describe BinData::Record, "with endian :big_and_little" do
  class RecordWithBnLEndian < BinData::Record
    endian :big_and_little
    int16 :a, value: 1
  end
  it "is not registered" do
    lambda {
      BinData::RegisteredClasses.lookup("RecordWithBnLEndian")
    }.must_raise BinData::UnRegisteredTypeError
  end
  it "creates big endian version" do
    obj = RecordWithBnLEndianBe.new
    obj.to_binary_s.must_equal_binary "\x00\x01"
  end
  it "creates little endian version" do
    obj = RecordWithBnLEndianLe.new
    obj.to_binary_s.must_equal_binary "\x01\x00"
  end
  it "requires :endian as argument" do
    lambda {
      RecordWithBnLEndian.new
    }.must_raise ArgumentError
  end
  it "accepts :endian as argument" do
    obj = RecordWithBnLEndian.new(endian: :little)
    obj.to_binary_s.must_equal_binary "\x01\x00"
  end
end
# The dual-endian expansion also applies search_prefix when resolving
# field type names (bnl_int -> NsBNLIntBe / NsBNLIntLe).
describe BinData::Record, "with endian :big_and_little and search_prefix" do
  class NsBNLIntBe < BinData::Int16be; end
  class NsBNLIntLe < BinData::Int16le; end
  class RecordWithBnLEndianAndSearchPrefix < BinData::Record
    endian :big_and_little
    search_prefix :ns
    bnl_int :a, value: 1
  end
  it "creates big endian version" do
    obj = RecordWithBnLEndianAndSearchPrefixBe.new
    obj.to_binary_s.must_equal_binary "\x00\x01"
  end
  it "creates little endian version" do
    obj = RecordWithBnLEndianAndSearchPrefixLe.new
    obj.to_binary_s.must_equal_binary "\x01\x00"
  end
end
# Subclassing a big_and_little record keeps the dual-endian behaviour and
# the inherited fields.
describe BinData::Record, "with endian :big_and_little when subclassed" do
  class ARecordWithBnLEndian < BinData::Record
    endian :big_and_little
    int16 :a, value: 1
  end
  class BRecordWithBnLEndian < ARecordWithBnLEndian
    int16 :b, value: 2
  end
  it "is not registered" do
    lambda {
      BinData::RegisteredClasses.lookup("BRecordWithBnLEndian")
    }.must_raise BinData::UnRegisteredTypeError
  end
  it "creates big endian version" do
    obj = BRecordWithBnLEndianBe.new
    obj.to_binary_s.must_equal_binary "\x00\x01\x00\x02"
  end
  it "creates little endian version" do
    obj = BRecordWithBnLEndianLe.new
    obj.to_binary_s.must_equal_binary "\x01\x00\x02\x00"
  end
  it "requires :endian as argument" do
    lambda {
      BRecordWithBnLEndian.new
    }.must_raise ArgumentError
  end
  it "accepts :endian as argument" do
    obj = BRecordWithBnLEndian.new(endian: :little)
    obj.to_binary_s.must_equal_binary "\x01\x00\x02\x00"
  end
end
# A record may reference its own type, forming a linked-list style
# structure; the has_nxt byte plus :onlyif terminates the chain.
describe BinData::Record, "defined recursively" do
  class RecursiveRecord < BinData::Record
    endian :big
    uint16 :val
    uint8 :has_nxt, value: -> { nxt.clear? ? 0 : 1 }
    recursive_record :nxt, onlyif: -> { has_nxt > 0 }
  end
  it "can be created" do
    RecursiveRecord.new
  end
  it "reads" do
    str = "\x00\x01\x01\x00\x02\x01\x00\x03\x00"
    obj = RecursiveRecord.read(str)
    obj.val.must_equal 1
    obj.nxt.val.must_equal 2
    obj.nxt.nxt.val.must_equal 3
  end
  it "is assignable on demand" do
    obj = RecursiveRecord.new
    obj.val = 13
    obj.nxt.val = 14
    obj.nxt.nxt.val = 15
  end
  it "writes" do
    obj = RecursiveRecord.new
    obj.val = 5
    obj.nxt.val = 6
    obj.nxt.nxt.val = 7
    obj.to_binary_s.must_equal_binary "\x00\x05\x01\x00\x06\x01\x00\x07\x00"
  end
end
# mandatory_parameter must be supplied at construction time...
describe BinData::Record, "with custom mandatory parameters" do
  class MandatoryRecord < BinData::Record
    mandatory_parameter :arg1
    uint8 :a, value: :arg1
  end
  it "raises error if mandatory parameter is not supplied" do
    lambda { MandatoryRecord.new }.must_raise ArgumentError
  end
  it "uses mandatory parameter" do
    obj = MandatoryRecord.new(arg1: 5)
    obj.a.must_equal 5
  end
end
# ...while default_parameter provides a value the caller may override.
describe BinData::Record, "with custom default parameters" do
  class DefaultRecord < BinData::Record
    default_parameter arg1: 5
    uint8 :a, value: :arg1
    uint8 :b
  end
  it "uses default parameter" do
    obj = DefaultRecord.new
    obj.a.must_equal 5
  end
  it "overrides default parameter" do
    obj = DefaultRecord.new(arg1: 7)
    obj.a.must_equal 7
  end
  it "accepts values" do
    obj = DefaultRecord.new(b: 2)
    obj.b.must_equal 2
  end
  # Two-hash form: first hash is field values, second is parameters.
  it "accepts values and parameters" do
    obj = DefaultRecord.new({b: 2}, arg1: 3)
    obj.a.must_equal 3
    obj.b.must_equal 2
  end
end
# :onlyif includes or excludes a field dynamically; excluded fields do not
# occupy bytes and are absent from snapshots.
describe BinData::Record, "with :onlyif" do
  class OnlyIfRecord < BinData::Record
    uint8 :a, initial_value: 3
    uint8 :b, initial_value: 5, onlyif: -> { a == 3 }
    uint8 :c, initial_value: 7, onlyif: -> { a != 3 }
  end
  let(:obj) { OnlyIfRecord.new }
  it "initial state" do
    obj.num_bytes.must_equal 2
    obj.snapshot.must_equal({a: 3, b: 5})
    obj.to_binary_s.must_equal_binary "\x03\x05"
  end
  it "identifies if fields are included" do
    obj.a?.must_equal true
    obj.b?.must_equal true
    obj.c?.must_equal false
  end
  # Reading a != 3 flips which field the second byte lands in.
  it "reads as expected" do
    obj.read("\x01\x02")
    obj.snapshot.must_equal({a: 1, c: 2})
  end
end
# Subclassing appends fields: each child inherits its parent's fields and
# adds its own, without mutating the parent.
describe BinData::Record, "derived classes" do
  class ParentRecord < BinData::Record
    uint8 :a
  end
  class Child1Record < ParentRecord
    uint8 :b
  end
  class Child2Record < Child1Record
    uint8 :c
  end
  it "does not affect parent" do
    ParentRecord.new.field_names.must_equal [:a]
  end
  it "inherits fields for first child" do
    Child1Record.new.field_names.must_equal [:a, :b]
  end
  it "inherits fields for second child" do
    Child2Record.new.field_names.must_equal [:a, :b, :c]
  end
end
| 23.549337 | 122 | 0.677173 |
5d427ae1f6849513e10eb28019a0dbc0df24a0d5 | 992 | # Warn when there is a big PR
# Dangerfile: automated pull-request hygiene checks.
# (Uses || rather than `or`: `or` has very low precedence and is reserved
# for control flow, not boolean logic.)

# Warn on large PRs — either many touched lines or a big net addition.
net_new = git.insertions - git.deletions
warn('Big PR, try to keep changes smaller if you can') if git.lines_of_code > 800 || net_new > 500

# Encourage writing up some reasoning about the PR, rather than just leaving a title.
warn 'Please provide a summary in the Pull Request description' if github.pr_body.length < 3

# Make it more obvious that a PR is a work in progress and shouldn't be merged yet.
wip = github.pr_labels.any? { |label| label.include?('WIP') } || github.pr_title.include?('[WIP]')
dnm = github.pr_labels.any? { |label| label.include?('DO NOT MERGE') } || github.pr_title.include?('[DO NOT MERGE]')

warn('PR is classed as Work in Progress') if wip
warn('At the authors request please DO NOT MERGE this PR') if dnm

# All PRs must target the dev branch.
fail 'Please re-submit this PR to dev, we may have already fixed your issue.' if github.branch_for_base != 'dev'
03622cbce6bbb2b362a0a5242b923f4dca3d0c89 | 5,133 | #
# Be sure to run `pod spec lint CocoapodsLib.podspec' to ensure this is a
# valid spec and to remove all comments including this before submitting the spec.
#
# To learn more about Podspec attributes see https://guides.cocoapods.org/syntax/podspec.html
# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/
#
Pod::Spec.new do |spec|
# ――― Spec Metadata ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# These will help people to find your library, and whilst it
# can feel like a chore to fill in it's definitely to your advantage. The
# summary should be tweet-length, and the description more in depth.
#
spec.name = "CocoapodsLib"
spec.version = "2.0.0"
spec.summary = "It's demo."
# This description is used to generate tags and improve search results.
# * Think: What does it do? Why did you write it? What is the focus?
# * Try to keep it short, snappy and to the point.
# * Write the description between the DESC delimiters below.
# * Finally, don't worry about the indent, CocoaPods strips it!
spec.description = <<-DESC
It's demo for create cocoapods library.
DESC
spec.homepage = "https://github.com/ray00178/CocoapodsDemo"
# spec.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif"
# ――― Spec License ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Licensing your code is important. See https://choosealicense.com for more info.
# CocoaPods will detect a license file if there is a named LICENSE*
# Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'.
#
spec.license = { :type => "MIT", :file => "LICENSE" }
# ――― Author Metadata ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Specify the authors of the library, with email addresses. Email addresses
# of the authors are extracted from the SCM log. E.g. $ git log. CocoaPods also
# accepts just a name if you'd rather not provide an email address.
#
# Specify a social_media_url where others can refer to, for example a twitter
# profile URL.
#
spec.author = { "Ray" => "[email protected]" }
# Or just: spec.author = "Ray"
# spec.authors = { "Ray" => "[email protected]" }
# spec.social_media_url = "https://twitter.com/Ray"
# ――― Platform Specifics ――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# If this Pod runs only on iOS or OS X, then specify the platform and
# the deployment target. You can optionally include the target after the platform.
#
# spec.platform = :ios
spec.platform = :ios, "9.0"
spec.ios.deployment_target = "9.0"
spec.swift_versions = '4.2'
# When using multiple platforms
# spec.osx.deployment_target = "10.7"
# spec.watchos.deployment_target = "2.0"
# spec.tvos.deployment_target = "9.0"
# ――― Source Location ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Specify the location from where the source should be retrieved.
# Supports git, hg, bzr, svn and HTTP.
#
spec.source = { :git => "https://github.com/ray00178/CocoapodsDemo.git", :tag => "#{spec.version}" }
# ――― Source Code ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# CocoaPods is smart about how it includes source code. For source files
# giving a folder will include any swift, h, m, mm, c & cpp files.
# For header files it will include any header in the folder.
# Not including the public_header_files will make all headers public.
#
spec.source_files = "CocoapodsDemo", "CocoapodsDemo/**/*.{h,m}"
# spec.public_header_files = "Classes/**/*.h"
# ――― Resources ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# A list of resources included with the Pod. These are copied into the
# target bundle with a build phase script. Anything else will be cleaned.
# You can preserve files from being cleaned, please don't preserve
# non-essential files like tests, examples and documentation.
#
# spec.resource = "icon.png"
# spec.resources = "Resources/*.png"
# spec.preserve_paths = "FilesToSave", "MoreFilesToSave"
# ――― Project Linking ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Link your library with frameworks, or libraries. Libraries do not include
# the lib prefix of their name.
#
# spec.framework = "SomeFramework"
# spec.frameworks = "SomeFramework", "AnotherFramework"
# spec.library = "iconv"
# spec.libraries = "iconv", "xml2"
# ――― Project Settings ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# If your library depends on compiler flags you can set them in the xcconfig hash
# where they will only apply to your library. If you depend on other Podspecs
# you can include multiple dependencies to ensure it works.
# spec.requires_arc = true
# spec.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
# spec.dependency "JSONKit", "~> 1.4"
end
| 37.195652 | 108 | 0.59887 |
ff65d39f8fcb9b89436efd85bea54a3f8dd1ff27 | 213 | require "good_migrations/version"
require "good_migrations/load_error"
require "good_migrations/patches_autoloader"
require "good_migrations/prevents_app_load"
require "good_migrations/railtie" if defined?(Rails)
| 35.5 | 52 | 0.859155 |
ab1cbff00cebe86807296a8a53f8d97f9e66d8a1 | 1,051 | require File.expand_path('../boot', __FILE__)
require 'rails/all'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(:default, Rails.env)
module Dummy
  class Application < Rails::Application
    # Keep the pre-Rails-5.2 behaviour of storing booleans as integers
    # (1/0) in SQLite so existing data keeps deserialising consistently.
    config.active_record.sqlite3.represent_boolean_as_integer = true
    # Settings in config/environments/* take precedence over those specified here.
    # Application configuration should go into files in config/initializers
    # -- all .rb files in that directory are automatically loaded.
    # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
    # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
    # config.time_zone = 'Central Time (US & Canada)'
    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
    # config.i18n.default_locale = :de
  end
end
| 42.04 | 99 | 0.729781 |
1ce43f4d066c5c0b5b9ed467afe0992dcbf54b7b | 635 | # == Schema Information
#
# Table name: gods
#
# id :bigint(8) not null, primary key
# name :string
# popularity :integer
# pantheon :string
# url :string
# created_at :datetime not null
# updated_at :datetime not null
#
require 'open-uri'
class God < ApplicationRecord
  # A god may appear in several pantheons; the name only needs to be
  # unique within its pantheon.
  validates :name, uniqueness: { scope: [:pantheon] }
  before_save :update_popularity
  private
  # Scrapes the "Popularity index" figure from the god's source page (if
  # a URL is present) and caches it on the record before saving.
  def update_popularity
    return if self.url.blank?
    # URI.open (from open-uri) only handles http/https/ftp URIs. Unlike
    # Kernel#open it cannot be tricked into spawning a subprocess via a
    # "|command" string, so a hostile url value can't execute code.
    raw = URI.open(self.url).read
    index = raw.match(/Popularity index.+ (\d+)/i)
    return if index.blank?
    self.popularity = index[1].to_i
  end
end
| 19.242424 | 53 | 0.633071 |
f814153e30603feef0a5fc1bfa34184c6e610028 | 1,366 | # frozen_string_literal: true
require_relative 'lib/solidus_webhooks/version'
Gem::Specification.new do |spec|
  # Gem identity.
  spec.name     = 'solidus_webhooks'
  spec.version  = SolidusWebhooks::VERSION
  spec.authors  = ['Elia Schito']
  spec.email    = '[email protected]'
  spec.summary  = 'Webhooks support for Solidus'
  spec.homepage = 'https://github.com/solidusio-contrib/solidus_webhooks#readme'
  spec.license  = 'BSD-3-Clause'

  spec.metadata['homepage_uri']    = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/solidusio-contrib/solidus_webhooks'
  spec.metadata['changelog_uri']   = 'https://github.com/solidusio-contrib/solidus_webhooks/releases'

  spec.required_ruby_version = Gem::Requirement.new('~> 2.5')

  # Ship every git-tracked file except the test suites; executables live
  # in exe/ and code under lib/.
  tracked_files = Dir.chdir(__dir__) { `git ls-files -z`.split("\x0") }
  test_pattern  = %r{^(test|spec|features)/}
  spec.files         = tracked_files.grep_v(test_pattern)
  spec.test_files    = tracked_files.grep(test_pattern)
  spec.bindir        = "exe"
  spec.executables   = tracked_files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  spec.add_dependency 'solidus_core', ['>= 2.0.0', '< 3']
  spec.add_dependency 'solidus_support', '~> 0.5'
  spec.add_development_dependency 'solidus_dev_support', '~> 2.1'
end
| 37.944444 | 99 | 0.715959 |
28be7db286f94d74ee2f5e5a39a785b0c45076c6 | 380 | class ReferenceDocumentFromRevision < ActiveRecord::Migration[5.2]
def change
add_reference :versioned_revisions,
:document,
foreign_key: { to_table: :versioned_documents,
on_delete: :restrict },
index: true,
null: false # rubocop:disable Rails/NotNullColumn
end
end
| 34.545455 | 67 | 0.571053 |
4a039e96b99a7e291ec8d334479ab1cd168b97e3 | 3,398 | module Xcake
# This namespace provides all of methods for
# the DSL where configurations are specified.
#
# Classes for the DSL which want to either
# specifiy build settings or scheme launch arguments
# (i.e The Project or Targets) include this namespace.
#
# @example
# class Application
# include Xcake::Configurable
# end
#
module Configurable
private
attr_accessor :configurations
public
# @return [Array<Configuration>] list of all configurations
#
def all_configurations
if @configurations.nil?
@configurations = []
if parent_configurable && parent_configurable.all_configurations
copy_parent_configurations
else
debug_configuration :Debug
release_configuration :Release
end
end
@configurations
end
private
def copy_parent_configurations
return unless parent_configurable
parent_configurable.all_configurations.each do |c|
configuration(c.name, c.type)
end
end
public
# @param [Array<Configuration>] new list of configurations to set
#
def all_configurations=(configurations)
@configurations = configurations
end
# @return [Array<Configuration>] list of configurations of a type
#
def configurations_of_type(type)
return [] if @configurations.nil?
@configurations.select do |c|
c.type == type
end
end
# This either finds a release configuration
# with the same name or creates one.
#
# @deprecated Please use `configuration <name>, :debug`
#
# @return [Configuration] the new or existing debug configuration
#
def debug_configuration(name = nil, &block)
configuration(name, :debug, &block)
end
# This either finds a release configuration
# with the same name or creates one.
#
# @deprecated Please use `configuration <name>, :release`
#
# @return [Configuration] the new or existing release configuration
#
def release_configuration(name = nil, &block)
configuration(name, :release, &block)
end
# This either finds a configuration
# with the same name and type or creates one.
#
# @return [Configuration] the new or existing configuration
#
def configuration(name, type)
default_settings = default_settings_for_type(type)
configurations = configurations_of_type(type)
build_configuration = if name.nil?
configurations.first
else
configurations.detect do |c|
c.name == name.to_s
end
end
if build_configuration.nil?
name = type.to_s.capitalize if name.nil?
build_configuration = Configuration.new(name) do |b|
b.type = type
b.settings.merge!(default_settings)
yield(b) if block_given?
end
@configurations ||= []
@configurations << build_configuration
end
build_configuration
end
private
def parent_configurable
nil
end
def default_settings_for_type(type)
case type
when :debug
default_debug_settings
when :release
default_release_settings
end
end
end
end
| 24.271429 | 72 | 0.625662 |
1164f50db1d65207c89feb54c0a56663119a9f9a | 633 | module PokeApi
# Ability object handling all data fetched from /ability
class Ability < NamedApiResource
attr_reader :is_main_series,
:generation,
:names,
:effect_entries,
:effect_changes,
:flavor_text_entries,
:pokemon
def initialize(data)
assign_data(data)
end
private
def custom_endpoint_object
{
effect_entries: Common::VerboseEffect,
effect_changes: AbilityEffectChange,
flavor_text_entries: AbilityFlavorText,
pokemon: AbilityPokemon
}
end
end
end
| 22.607143 | 58 | 0.600316 |
bbfc18a75318f7a2f58519cc5b79acbbddde459d | 51,404 | # frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "google/cloud/errors"
require "google/cloud/dataproc/v1beta2/clusters_pb"
module Google
module Cloud
module Dataproc
module V1beta2
module ClusterController
##
# Client for the ClusterController service.
#
# The ClusterControllerService provides methods to manage clusters
# of Compute Engine instances.
#
class Client
# @private
attr_reader :cluster_controller_stub
##
# Configure the ClusterController Client class.
#
# See {::Google::Cloud::Dataproc::V1beta2::ClusterController::Client::Configuration}
# for a description of the configuration fields.
#
# ## Example
#
# To modify the configuration for all ClusterController clients:
#
# ::Google::Cloud::Dataproc::V1beta2::ClusterController::Client.configure do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def self.configure
@configure ||= begin
namespace = ["Google", "Cloud", "Dataproc", "V1beta2"]
parent_config = while namespace.any?
parent_name = namespace.join "::"
parent_const = const_get parent_name
break parent_const.configure if parent_const&.respond_to? :configure
namespace.pop
end
default_config = Client::Configuration.new parent_config
default_config.rpcs.create_cluster.timeout = 300.0
default_config.rpcs.create_cluster.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["UNAVAILABLE"]
}
default_config.rpcs.update_cluster.timeout = 300.0
default_config.rpcs.update_cluster.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["UNAVAILABLE"]
}
default_config.rpcs.delete_cluster.timeout = 300.0
default_config.rpcs.delete_cluster.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["UNAVAILABLE"]
}
default_config.rpcs.get_cluster.timeout = 300.0
default_config.rpcs.get_cluster.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["INTERNAL", "DEADLINE_EXCEEDED", "UNAVAILABLE"]
}
default_config.rpcs.list_clusters.timeout = 300.0
default_config.rpcs.list_clusters.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["INTERNAL", "DEADLINE_EXCEEDED", "UNAVAILABLE"]
}
default_config.rpcs.diagnose_cluster.timeout = 300.0
default_config.rpcs.diagnose_cluster.retry_policy = {
initial_delay: 0.1,
max_delay: 60.0,
multiplier: 1.3,
retry_codes: ["UNAVAILABLE"]
}
default_config
end
yield @configure if block_given?
@configure
end
##
# Configure the ClusterController Client instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Client.configure}.
#
# See {::Google::Cloud::Dataproc::V1beta2::ClusterController::Client::Configuration}
# for a description of the configuration fields.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def configure
yield @config if block_given?
@config
end
##
# Create a new ClusterController client object.
#
# ## Examples
#
# To create a new ClusterController client with the default
# configuration:
#
# client = ::Google::Cloud::Dataproc::V1beta2::ClusterController::Client.new
#
# To create a new ClusterController client with a custom
# configuration:
#
# client = ::Google::Cloud::Dataproc::V1beta2::ClusterController::Client.new do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the ClusterController client.
# @yieldparam config [Client::Configuration]
#
# Construct a new ClusterController client.
#
# The configuration may be customized via the optional block before any
# credentials are resolved or channels are opened.
#
# @yield [config] Configure the ClusterController client.
# @yieldparam config [Client::Configuration]
def initialize
  # gRPC and the generated service stubs are loaded lazily so that merely
  # requiring this library does not force grpc to load.
  # See https://github.com/googleapis/toolkit/issues/446
  require "gapic/grpc"
  require "google/cloud/dataproc/v1beta2/clusters_services_pb"

  # Per-client configuration derived from the class-level defaults; the
  # caller may tweak it before the connection is established.
  @config = Configuration.new Client.configure
  yield @config if block_given?

  # Resolve credentials: explicit config value wins, otherwise fall back to
  # application-default credentials for the configured scope.
  creds = @config.credentials
  creds ||= Credentials.default scope: @config.scope
  # A keyfile path (String) or keyfile contents (Hash) must still be wrapped
  # in a credentials object.
  creds = Credentials.new creds, scope: @config.scope if creds.is_a?(String) || creds.is_a?(Hash)

  @quota_project_id = @config.quota_project
  @quota_project_id ||= creds.quota_project_id if creds.respond_to? :quota_project_id

  # The long-running-operations client shares this client's endpoint and
  # credentials.
  @operations_client = Operations.new do |config|
    config.credentials = creds
    config.endpoint = @config.endpoint
  end

  @cluster_controller_stub = ::Gapic::ServiceStub.new(
    ::Google::Cloud::Dataproc::V1beta2::ClusterController::Stub,
    credentials: creds,
    endpoint: @config.endpoint,
    channel_args: @config.channel_args,
    interceptors: @config.interceptors
  )
end
##
# Get the associated client for long-running operations.
#
# Assigned during construction; used by the RPC helpers to wrap the raw
# longrunning operations returned by cluster mutations.
#
# @return [::Google::Cloud::Dataproc::V1beta2::ClusterController::Operations]
#
attr_reader :operations_client
# Service calls
##
# Creates a cluster in a project. The returned
# {::Google::Longrunning::Operation#metadata Operation.metadata} will be
# [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
#
# @overload create_cluster(request, options = nil)
# Pass arguments to `create_cluster` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::CreateClusterRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::CreateClusterRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload create_cluster(project_id: nil, region: nil, cluster: nil, request_id: nil)
# Pass arguments to `create_cluster` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param cluster [::Google::Cloud::Dataproc::V1beta2::Cluster, ::Hash]
# Required. The cluster to create.
# @param request_id [::String]
# Optional. A unique id used to identify the request. If the server
# receives two {::Google::Cloud::Dataproc::V1beta2::CreateClusterRequest CreateClusterRequest} requests with the same
# id, then the second request will be ignored and the
# first {::Google::Longrunning::Operation google.longrunning.Operation} created and stored in the backend
# is returned.
#
# It is recommended to always set this value to a
# [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
#
# The id must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). The maximum length is 40 characters.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_cluster request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::CreateClusterRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.create_cluster.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers.
  # ||= keeps any caller-supplied telemetry header intact.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header (x-goog-request-params): tells the backend which
  # project/region should handle the call.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.create_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.create_cluster.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cluster_controller_stub.call_rpc :create_cluster, request, options: options do |response, operation|
    # Wrap the raw longrunning operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    # Non-local return: exits create_cluster from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates a cluster in a project. The returned
# {::Google::Longrunning::Operation#metadata Operation.metadata} will be
# [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
#
# @overload update_cluster(request, options = nil)
# Pass arguments to `update_cluster` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::UpdateClusterRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::UpdateClusterRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload update_cluster(project_id: nil, region: nil, cluster_name: nil, cluster: nil, graceful_decommission_timeout: nil, update_mask: nil, request_id: nil)
# Pass arguments to `update_cluster` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project the
# cluster belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param cluster_name [::String]
# Required. The cluster name.
# @param cluster [::Google::Cloud::Dataproc::V1beta2::Cluster, ::Hash]
# Required. The changes to the cluster.
# @param graceful_decommission_timeout [::Google::Protobuf::Duration, ::Hash]
#   Optional. Timeout for graceful YARN decommissioning. Graceful
# decommissioning allows removing nodes from the cluster without
# interrupting jobs in progress. Timeout specifies how long to wait for jobs
# in progress to finish before forcefully removing nodes (and potentially
# interrupting jobs). Default timeout is 0 (for forceful decommission), and
# the maximum allowed timeout is 1 day (see JSON representation of
# [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
#
# Only supported on Dataproc image versions 1.2 and higher.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Required. Specifies the path, relative to `Cluster`, of
# the field to update. For example, to change the number of workers
# in a cluster to 5, the `update_mask` parameter would be
# specified as `config.worker_config.num_instances`,
# and the `PATCH` request body would specify the new value, as follows:
#
# {
# "config":{
# "workerConfig":{
# "numInstances":"5"
# }
# }
# }
#
# Similarly, to change the number of preemptible workers in a cluster to 5,
# the `update_mask` parameter would be
# `config.secondary_worker_config.num_instances`, and the `PATCH` request
# body would be set as follows:
#
# {
# "config":{
# "secondaryWorkerConfig":{
# "numInstances":"5"
# }
# }
# }
# <strong>Note:</strong> currently only the following fields can be updated:
#
# <table>
# <tr>
# <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
# </tr>
# <tr>
# <td>labels</td><td>Updates labels</td>
# </tr>
# <tr>
# <td>config.worker_config.num_instances</td><td>Resize primary worker
# group</td>
# </tr>
# <tr>
# <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
# worker group</td>
# </tr>
# <tr>
# <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
# duration</td>
# </tr>
# <tr>
# <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
# deletion timestamp</td>
# </tr>
# <tr>
# <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
# duration</td>
# </tr>
# <tr>
# <td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change
# autoscaling policies</td>
# </tr>
# </table>
# @param request_id [::String]
# Optional. A unique id used to identify the request. If the server
# receives two {::Google::Cloud::Dataproc::V1beta2::UpdateClusterRequest UpdateClusterRequest} requests with the same
# id, then the second request will be ignored and the
# first {::Google::Longrunning::Operation google.longrunning.Operation} created and stored in the
# backend is returned.
#
# It is recommended to always set this value to a
# [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
#
# The id must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). The maximum length is 40 characters.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_cluster request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::UpdateClusterRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.update_cluster.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header: update targets a specific cluster, so cluster_name is
  # part of the routing params alongside project and region.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region,
    "cluster_name" => request.cluster_name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.update_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.update_cluster.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cluster_controller_stub.call_rpc :update_cluster, request, options: options do |response, operation|
    # Wrap the raw longrunning operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    # Non-local return: exits update_cluster from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a cluster in a project. The returned
# {::Google::Longrunning::Operation#metadata Operation.metadata} will be
# [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
#
# @overload delete_cluster(request, options = nil)
# Pass arguments to `delete_cluster` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::DeleteClusterRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::DeleteClusterRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload delete_cluster(project_id: nil, region: nil, cluster_name: nil, cluster_uuid: nil, request_id: nil)
# Pass arguments to `delete_cluster` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param cluster_name [::String]
# Required. The cluster name.
# @param cluster_uuid [::String]
# Optional. Specifying the `cluster_uuid` means the RPC should fail
# (with error NOT_FOUND) if cluster with specified UUID does not exist.
# @param request_id [::String]
# Optional. A unique id used to identify the request. If the server
# receives two {::Google::Cloud::Dataproc::V1beta2::DeleteClusterRequest DeleteClusterRequest} requests with the same
# id, then the second request will be ignored and the
# first {::Google::Longrunning::Operation google.longrunning.Operation} created and stored in the
# backend is returned.
#
# It is recommended to always set this value to a
# [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
#
# The id must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and hyphens (-). The maximum length is 40 characters.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_cluster request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::DeleteClusterRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.delete_cluster.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header identifying the cluster being deleted.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region,
    "cluster_name" => request.cluster_name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.delete_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.delete_cluster.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cluster_controller_stub.call_rpc :delete_cluster, request, options: options do |response, operation|
    # Wrap the raw longrunning operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    # Non-local return: exits delete_cluster from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets the resource representation for a cluster in a project.
#
# @overload get_cluster(request, options = nil)
# Pass arguments to `get_cluster` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::GetClusterRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::GetClusterRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload get_cluster(project_id: nil, region: nil, cluster_name: nil)
# Pass arguments to `get_cluster` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param cluster_name [::String]
# Required. The cluster name.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::Dataproc::V1beta2::Cluster]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::Dataproc::V1beta2::Cluster]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_cluster request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::GetClusterRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.get_cluster.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header identifying the cluster being fetched.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region,
    "cluster_name" => request.cluster_name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.get_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.get_cluster.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  # Unary call: the raw Cluster proto is returned directly (no LRO wrapping).
  @cluster_controller_stub.call_rpc :get_cluster, request, options: options do |response, operation|
    yield response, operation if block_given?
    # Non-local return: exits get_cluster from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists all regions/\\{region}/clusters in a project alphabetically.
#
# @overload list_clusters(request, options = nil)
# Pass arguments to `list_clusters` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::ListClustersRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::ListClustersRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_clusters(project_id: nil, region: nil, filter: nil, page_size: nil, page_token: nil)
# Pass arguments to `list_clusters` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param filter [::String]
# Optional. A filter constraining the clusters to list. Filters are
# case-sensitive and have the following syntax:
#
# field = value [AND [field = value]] ...
#
# where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
# and `[KEY]` is a label key. **value** can be `*` to match all values.
# `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
# `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
# contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
# contains the `DELETING` and `ERROR` states.
# `clusterName` is the name of the cluster provided at creation time.
# Only the logical `AND` operator is supported; space-separated items are
# treated as having an implicit `AND` operator.
#
# Example filter:
#
# status.state = ACTIVE AND clusterName = mycluster
# AND labels.env = staging AND labels.starred = *
# @param page_size [::Integer]
# Optional. The standard List page size.
# @param page_token [::String]
# Optional. The standard List page token.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::Dataproc::V1beta2::Cluster>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::Dataproc::V1beta2::Cluster>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def list_clusters request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::ListClustersRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.list_clusters.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header: listing is scoped to a project/region, not a cluster.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.list_clusters.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.list_clusters.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cluster_controller_stub.call_rpc :list_clusters, request, options: options do |response, operation|
    # Wrap the page response in a PagedEnumerable so callers can iterate
    # across page boundaries transparently.
    response = ::Gapic::PagedEnumerable.new @cluster_controller_stub, :list_clusters, request, response, operation, options
    yield response, operation if block_given?
    # Non-local return: exits list_clusters from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets cluster diagnostic information. The returned
# {::Google::Longrunning::Operation#metadata Operation.metadata} will be
# [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
# After the operation completes,
# {::Google::Longrunning::Operation#response Operation.response}
# contains
# {::Google::Protobuf::Empty Empty}.
#
# @overload diagnose_cluster(request, options = nil)
# Pass arguments to `diagnose_cluster` via a request object, either of type
# {::Google::Cloud::Dataproc::V1beta2::DiagnoseClusterRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Dataproc::V1beta2::DiagnoseClusterRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload diagnose_cluster(project_id: nil, region: nil, cluster_name: nil)
# Pass arguments to `diagnose_cluster` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param project_id [::String]
# Required. The ID of the Google Cloud Platform project that the cluster
# belongs to.
# @param region [::String]
# Required. The Dataproc region in which to handle the request.
# @param cluster_name [::String]
# Required. The cluster name.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def diagnose_cluster request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Accept either a request proto or an equivalent plain Hash.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Dataproc::V1beta2::DiagnoseClusterRequest

  # Converts hash and nil to an options object
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Customize the options with defaults
  metadata = @config.rpcs.diagnose_cluster.metadata.to_h

  # Set x-goog-api-client and x-goog-user-project headers
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Dataproc::V1beta2::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header identifying the cluster being diagnosed.
  header_params = {
    "project_id" => request.project_id,
    "region" => request.region,
    "cluster_name" => request.cluster_name
  }
  request_params_header = header_params.map { |k, v| "#{k}=#{v}" }.join("&")
  metadata[:"x-goog-request-params"] ||= request_params_header

  # Layered defaults: per-RPC settings take precedence over client-wide ones.
  options.apply_defaults timeout: @config.rpcs.diagnose_cluster.timeout,
                         metadata: metadata,
                         retry_policy: @config.rpcs.diagnose_cluster.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cluster_controller_stub.call_rpc :diagnose_cluster, request, options: options do |response, operation|
    # Wrap the raw longrunning operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    # Non-local return: exits diagnose_cluster from inside the block.
    return response
  end
rescue ::GRPC::BadStatus => e
  # Translate gRPC-level failures into the Google Cloud error hierarchy.
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Configuration class for the ClusterController API.
#
# This class represents the configuration for ClusterController,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Cloud::Dataproc::V1beta2::ClusterController::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# # Examples
#
# To modify the global config, setting the timeout for create_cluster
# to 20 seconds, and all remaining timeouts to 10 seconds:
#
# ::Google::Cloud::Dataproc::V1beta2::ClusterController::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.create_cluster.timeout = 20.0
# end
#
# To apply the above configuration only to a new client:
#
# client = ::Google::Cloud::Dataproc::V1beta2::ClusterController::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.create_cluster.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"dataproc.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
#    *  (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
class Configuration
  extend ::Gapic::Config

  # Service address; override e.g. for regional endpoints or emulators.
  config_attr :endpoint, "dataproc.googleapis.com", ::String
  # Credentials may be supplied in several forms; the validation block
  # accepts any of the documented types (see the class docs above).
  config_attr :credentials, nil do |value|
    allowed = [::String, ::Hash, ::Proc, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    # gRPC channel types are only valid when gRPC itself is loaded.
    allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope, nil, ::String, ::Array, nil
  config_attr :lib_name, nil, ::String, nil
  config_attr :lib_version, nil, ::String, nil
  # Disable client-side service-config resolution by default.
  config_attr(:channel_args, { "grpc.service_config_disable_resolution"=>1 }, ::Hash, nil)
  config_attr :interceptors, nil, ::Array, nil
  config_attr :timeout, nil, ::Numeric, nil
  config_attr :metadata, nil, ::Hash, nil
  config_attr :retry_policy, nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil

  # @private
  # Unset values fall back to the parent configuration (lookup is handled
  # by ::Gapic::Config).
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?
    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    # Memoized; the Rpcs object chains to the parent's Rpcs when present.
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if @parent_config&.respond_to? :rpcs
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the ClusterController API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  #  *  `timeout` (*type:* `Numeric`) - The call timeout in seconds
  #  *  `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
  #  *  `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
  #     include the following keys:
  #      *  `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  #      *  `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  #      *  `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  #      *  `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  #         trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `create_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_cluster
    ##
    # RPC-specific configuration for `update_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_cluster
    ##
    # RPC-specific configuration for `delete_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_cluster
    ##
    # RPC-specific configuration for `get_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_cluster
    ##
    # RPC-specific configuration for `list_clusters`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_clusters
    ##
    # RPC-specific configuration for `diagnose_cluster`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :diagnose_cluster

    # @private
    # Each per-RPC config inherits from the corresponding parent RPC config
    # when one is supplied.
    def initialize parent_rpcs = nil
      create_cluster_config = parent_rpcs&.create_cluster if parent_rpcs&.respond_to? :create_cluster
      @create_cluster = ::Gapic::Config::Method.new create_cluster_config
      update_cluster_config = parent_rpcs&.update_cluster if parent_rpcs&.respond_to? :update_cluster
      @update_cluster = ::Gapic::Config::Method.new update_cluster_config
      delete_cluster_config = parent_rpcs&.delete_cluster if parent_rpcs&.respond_to? :delete_cluster
      @delete_cluster = ::Gapic::Config::Method.new delete_cluster_config
      get_cluster_config = parent_rpcs&.get_cluster if parent_rpcs&.respond_to? :get_cluster
      @get_cluster = ::Gapic::Config::Method.new get_cluster_config
      list_clusters_config = parent_rpcs&.list_clusters if parent_rpcs&.respond_to? :list_clusters
      @list_clusters = ::Gapic::Config::Method.new list_clusters_config
      diagnose_cluster_config = parent_rpcs&.diagnose_cluster if parent_rpcs&.respond_to? :diagnose_cluster
      @diagnose_cluster = ::Gapic::Config::Method.new diagnose_cluster_config
      yield self if block_given?
    end
  end
end
end
end
end
end
end
end
| 52.186802 | 172 | 0.558206 |
08d94c3fbaed8aab06a6ac1bfe5c7e52845a24d5 | 512 | #
# Cookbook Name:: newrelic_lwrp_test
# Recipe:: agent_php
#
# Copyright (c) 2016, David Joos
#
# Bring up a web stack for the agent to hook into.
include_recipe 'apache2'
include_recipe 'php'
# Install and configure the New Relic PHP agent via the cookbook's LWRP.
# All values come from node attributes so they can be overridden per-node.
newrelic_agent_php 'Install' do
license node['newrelic']['license']
service_name node['newrelic']['php_agent']['web_server']['service_name']
config_file node['newrelic']['php_agent']['php_config']
enable_module node['newrelic']['php_agent']['enable_module']
execute_php5enmod node['newrelic']['php_agent']['execute_php5enmod']
# 'external' means the newrelic-daemon is managed outside the PHP process.
startup_mode 'external'
end
| 26.947368 | 74 | 0.744141 |
bb91d483e16e4ada09c3e6bde22d7928e1704b97 | 886 | # Require any additional compass plugins here.
require 'modular-scale'
# Set this to the root of your project when deployed:
http_path = "/"
# Project asset directories, relative to the project root.
css_dir = "stylesheets"
sass_dir = "sass"
images_dir = "images"
javascripts_dir = "javascripts"
# You can select your preferred output style here (can be overridden via the command line):
# output_style = :expanded or :nested or :compact or :compressed
# To enable relative paths to assets via compass helper functions. Uncomment:
# relative_assets = true
# To disable debugging comments that display the original location of your selectors. Uncomment:
# line_comments = false
# If you prefer the indented syntax, you might want to regenerate this
# project again passing --syntax sass, or you can uncomment this:
# preferred_syntax = :sass
# and then run:
# sass-convert -R --from scss --to sass sass scss && rm -rf sass && mv scss sass
| 32.814815 | 96 | 0.75395 |
ab2345b32cc0f56250ad434f273f73168be2f532 | 136 | module Authentication
# Base mailer for the Authentication engine: sets the default sender
# address and wraps all messages in the 'mailer' layout.
class ApplicationMailer < ActionMailer::Base
default from: '[email protected]'
layout 'mailer'
end
end
| 19.428571 | 46 | 0.742647 |
e20a8c52ff740a7118195bb40c38bfa973905508 | 16,782 | =begin
#NSX-T Manager API
#VMware NSX-T Manager REST API
OpenAPI spec version: 2.5.1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 2.4.19
=end
require 'date'
module NSXT
# Generated model for an NSX-T edge fabric node (swagger-codegen output).
class EdgeNode
# Link to this resource
attr_accessor :_self
# The server will populate this field when returning the resource. Ignored on PUT and POST.
attr_accessor :_links
# Schema for this resource
attr_accessor :_schema
# The _revision property describes the current revision of the resource. To prevent clients from overwriting each other's changes, PUT operations must include the current _revision of the resource, which clients should obtain by issuing a GET operation. If the _revision provided in a PUT request is missing or stale, the operation will be rejected.
attr_accessor :_revision
# Indicates system owned resource
attr_accessor :_system_owned
# Defaults to ID if not set
attr_accessor :display_name
# Description of this resource
attr_accessor :description
# Opaque identifiers meaningful to the API user
attr_accessor :tags
# ID of the user who created this resource
attr_accessor :_create_user
# Protection status is one of the following: PROTECTED - the client who retrieved the entity is not allowed to modify it. NOT_PROTECTED - the client who retrieved the entity is allowed to modify it REQUIRE_OVERRIDE - the client who retrieved the entity is a super user and can modify it, but only when providing the request header X-Allow-Overwrite=true. UNKNOWN - the _protection field could not be determined for this entity.
attr_accessor :_protection
# Timestamp of resource creation
attr_accessor :_create_time
# Timestamp of last modification
attr_accessor :_last_modified_time
# ID of the user who last modified this resource
attr_accessor :_last_modified_user
# Unique identifier of this resource
attr_accessor :id
# Fabric node type, for example 'HostNode', 'EdgeNode' or 'PublicCloudGatewayNode'
attr_accessor :resource_type
# Discovered IP Addresses of the fabric node, version 4 or 6
attr_accessor :discovered_ip_addresses
# IP Addresses of the Node, version 4 or 6. This property is mandatory for all nodes except for automatic deployment of edge virtual machine node. For automatic deployment, the ip address from management_port_subnets property will be considered.
attr_accessor :ip_addresses
# ID of the Node maintained on the Node and used to recognize the Node
attr_accessor :external_id
# Fully qualified domain name of the fabric node
attr_accessor :fqdn
# Reports the current configuration of the SSH, DHS, NTP and host name on this edge node. The deployment_config property is used during deployment and this counterpart property shows current values.
attr_accessor :node_settings
# When this configuration is specified, edge fabric node of deployment_type VIRTUAL_MACHINE will be deployed and registered with MP.
attr_accessor :deployment_config
# List of logical router ids to which this edge node is allocated.
attr_accessor :allocation_list
# Supported edge deployment type.
attr_accessor :deployment_type
# Validates that a value belongs to a fixed set of allowed enum values.
# The incoming +allowable_values+ are coerced to match +datatype+
# (Integer/Float); any other datatype keeps them verbatim.
#
# NOTE(review): @datatype is never assigned, so the +datatype+ reader
# always returns nil — this mirrors the original generated code.
class EnumAttributeValidator
  attr_reader :datatype
  attr_reader :allowable_values

  # @param datatype [String, Class] name of the target scalar type
  # @param allowable_values [Array] raw allowed values, coerced per datatype
  def initialize(datatype, allowable_values)
    coercion =
      case datatype.to_s
      when /Integer/i then :to_i
      when /Float/i then :to_f
      end
    @allowable_values = allowable_values.map do |raw|
      coercion ? raw.public_send(coercion) : raw
    end
  end

  # A nil (or false) value counts as valid — the attribute is simply unset.
  def valid?(value)
    !value || allowable_values.include?(value)
  end
end
# Attribute mapping from ruby-style variable name to JSON key.
# Every EdgeNode attribute serializes under its own name, so the map is
# the identity over the attribute list (insertion order preserved).
def self.attribute_map
  %i[
    _self _links _schema _revision _system_owned display_name
    description tags _create_user _protection _create_time
    _last_modified_time _last_modified_user id resource_type
    discovered_ip_addresses ip_addresses external_id fqdn
    node_settings deployment_config allocation_list deployment_type
  ].each_with_object({}) { |attr, map| map[attr] = attr }
end
# Attribute type mapping.
# Maps each attribute to the type string consumed by #_deserialize when
# rebuilding a model from an API response hash.
def self.swagger_types
{
:'_self' => :'SelfResourceLink',
:'_links' => :'Array<ResourceLink>',
:'_schema' => :'String',
:'_revision' => :'Integer',
:'_system_owned' => :'BOOLEAN',
:'display_name' => :'String',
:'description' => :'String',
:'tags' => :'Array<Tag>',
:'_create_user' => :'String',
:'_protection' => :'String',
:'_create_time' => :'Integer',
:'_last_modified_time' => :'Integer',
:'_last_modified_user' => :'String',
:'id' => :'String',
:'resource_type' => :'String',
:'discovered_ip_addresses' => :'Array<String>',
:'ip_addresses' => :'Array<String>',
:'external_id' => :'String',
:'fqdn' => :'String',
:'node_settings' => :'EdgeNodeSettings',
:'deployment_config' => :'EdgeNodeDeploymentConfig',
:'allocation_list' => :'Array<String>',
:'deployment_type' => :'String'
}
end
# Initializes the object from an attribute hash.
#
# Accepts both String and Symbol keys. Array-valued attributes are only
# assigned when the provided value is actually an Array (mirroring the
# generated per-attribute guards). Assignment goes through the writers,
# so the validating setters (display_name=, description=,
# deployment_type=) still enforce their constraints.
#
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
  return unless attributes.is_a?(Hash)

  # convert string keys to symbols
  attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }

  # attributes whose swagger type is Array<...>; non-array input is skipped
  array_valued = [:_links, :tags, :discovered_ip_addresses, :ip_addresses, :allocation_list]

  self.class.attribute_map.each_key do |attr|
    next unless attributes.has_key?(attr)

    value = attributes[attr]
    if array_valued.include?(attr)
      self.send("#{attr}=", value) if value.is_a?(Array)
    else
      self.send("#{attr}=", value)
    end
  end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
if !@display_name.nil? && @display_name.to_s.length > 255
invalid_properties.push('invalid value for "display_name", the character length must be smaller than or equal to 255.')
end
if [email protected]? && @description.to_s.length > 1024
invalid_properties.push('invalid value for "description", the character length must be smaller than or equal to 1024.')
end
if @resource_type.nil?
invalid_properties.push('invalid value for "resource_type", resource_type cannot be nil.')
end
invalid_properties
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
return false if !@display_name.nil? && @display_name.to_s.length > 255
return false if [email protected]? && @description.to_s.length > 1024
return false if @resource_type.nil?
deployment_type_validator = EnumAttributeValidator.new('String', ['VIRTUAL_MACHINE', 'PHYSICAL_MACHINE', 'UNKNOWN'])
return false unless deployment_type_validator.valid?(@deployment_type)
true
end
# Custom attribute writer method with validation
# @param [Object] display_name Value to be assigned
# @raise [ArgumentError] when longer than 255 characters (nil is allowed)
def display_name=(display_name)
  too_long = !display_name.nil? && display_name.to_s.length > 255
  fail ArgumentError, 'invalid value for "display_name", the character length must be smaller than or equal to 255.' if too_long

  @display_name = display_name
end
# Custom attribute writer method with validation
# @param [Object] description Value to be assigned
# @raise [ArgumentError] when longer than 1024 characters (nil is allowed)
def description=(description)
  too_long = !description.nil? && description.to_s.length > 1024
  fail ArgumentError, 'invalid value for "description", the character length must be smaller than or equal to 1024.' if too_long

  @description = description
end
# Custom attribute writer method checking allowed values (enum).
# @param [Object] deployment_type Object to be assigned
# @raise [ArgumentError] when the value is not one of the allowed enum values
def deployment_type=(deployment_type)
  validator = EnumAttributeValidator.new('String', ['VIRTUAL_MACHINE', 'PHYSICAL_MACHINE', 'UNKNOWN'])
  unless validator.valid?(deployment_type)
    # Bug fix: the message was single-quoted, so '#{validator.allowable_values}'
    # appeared literally instead of being interpolated into the error text.
    fail ArgumentError, "invalid value for \"deployment_type\", must be one of #{validator.allowable_values}."
  end
  @deployment_type = deployment_type
end
# Checks equality by comparing each attribute.
# Delegates to the attribute list in attribute_map, which covers exactly
# the fields compared by the generated implementation.
# @param [Object] o Object to be compared
def ==(o)
  return true if self.equal?(o)

  self.class == o.class &&
    self.class.attribute_map.keys.all? { |attr| send(attr) == o.send(attr) }
end
# @see the `==` method
# @param [Object] o Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# Uses the same attribute order as attribute_map, matching the generated
# literal list, so hashes are stable across both implementations.
# @return [Fixnum] Hash code
def hash
  self.class.attribute_map.keys.map { |attr| send(attr) }.hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
# $1 is the element type captured by the Array<...> regex above.
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
end # or else data not found in attributes(hash), not an issue as the data can be optional
end
self
end
# Deserializes the data based on type
# Scalar types are coerced directly; Array<...> and Hash<...> recurse on
# their element types; anything else is treated as an NSXT model name.
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :BOOLEAN
# accepts common truthy spellings; everything else is false
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
# look up the generated model class by name and build it recursively
temp_model = NSXT.const_get(type).new
temp_model.build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash.
# nil attributes are omitted; values are converted via #_to_hash.
# @return [Hash] Returns the object in the form of hash
def to_hash
  self.class.attribute_map.each_with_object({}) do |(attr, param), result|
    value = send(attr)
    result[param] = _to_hash(value) unless value.nil?
  end
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# Arrays are compacted (nils dropped) and converted element-wise; hashes
# keep their keys and convert values recursively.
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
  case value
  when Array
    value.compact.map { |item| _to_hash(item) }
  when Hash
    value.each_with_object({}) { |(k, v), out| out[k] = _to_hash(v) }
  else
    value.respond_to?(:to_hash) ? value.to_hash : value
  end
end
end
end
| 34.602062 | 508 | 0.641163 |
038d0968ee21f0772260722b87fe6459b24c2987 | 4,824 | # -*- coding: utf-8 -*-
# frozen_string_literal: true
require 'diceBot/SwordWorld'
# Dice bot for Sword World 2.0 (ソードワールド2.0), built on the base
# SwordWorld rating-table implementation.
class SwordWorld2_0 < SwordWorld
# Game system identifier
ID = 'SwordWorld2.0'
# Game system display name
NAME = 'ソードワールド2.0'
# Kana reading used to sort the game system name
SORT_KEY = 'そおとわあると2.0'
# ダイスボットの使い方
HELP_MESSAGE = <<INFO_MESSAGE_TEXT
自動的成功、成功、失敗、自動的失敗の自動判定を行います。
・レーティング表 (Kx)
"Kキーナンバー+ボーナス"の形で記入します。
ボーナスの部分に「K20+K30」のようにレーティングを取ることは出来ません。
また、ボーナスは複数取ることが出来ます。
レーティング表もダイスロールと同様に、他のプレイヤーに隠れてロールすることも可能です。
例)K20 K10+5 k30 k10+10 Sk10-1 k10+5+2
・クリティカル値の設定
クリティカル値は"[クリティカル値]"で指定します。
指定しない場合はクリティカル値10とします。
クリティカル処理が必要ないときは13などとしてください。(防御時などの対応)
またタイプの軽減化のために末尾に「@クリティカル値」でも処理するようにしました。
例)K20[10] K10+5[9] k30[10] k10[9]+10 k10-5@9
・レーティング表の半減 (HKx)
レーティング表の先頭または末尾に"H"をつけると、レーティング表を振って最終結果を半減させます。
クリティカル値を指定しない場合、クリティカルなしと扱われます。
例)HK20 K20h HK10-5@9 K10-5@9H K20gfH
・ダイス目の修正(運命変転やクリティカルレイ用)
末尾に「$修正値」でダイス目に修正がかかります。
$+1と修正表記ならダイス目に+修正、$9のように固定値ならダイス目をその出目に差し替え。
クリティカルした場合でも固定値や修正値の適用は最初の一回だけです。
例)K20$+1 K10+5$9 k10-5@9$+2 k10[9]+10$9
・首切り刀用レーティング上昇 r10
例)K20r10 K30+24@8R10 K40+24@8$12r10
・グレイテストフォーチュンは末尾に gf
例)K20gf K30+24@8GF K40+24@8$12r10gf
・超越判定用に2d6ロールに 2D6@10 書式でクリティカル値付与が可能に。
例)2D6@10 2D6@10+11>=30
・成長 (Gr)
末尾に数字を付加することで、複数回の成長をまとめて行えます。
例)Gr3
・防御ファンブル表 (FT)
防御ファンブル表を出すことができます。
・絡み効果表 (TT)
絡み効果表を出すことができます。
INFO_MESSAGE_TEXT
setPrefixes(['H?K\d+.*', 'Gr(\d+)?', 'FT', 'TT'])
# SW2.0 uses rating table version 2 (overriding whatever the parent
# dicebot's initializer sets).
def initialize
  super()
  @rating_table = 2
end
# Dispatches SW2.0-specific commands: growth rolls (Gr / GrN), the
# defense fumble table (FT) and the tangle table (TT). Anything else
# falls through to the base SwordWorld rating-table handling.
def rollDiceCommand(command)
  case command
  when /^Gr(\d+)/i
    growth(Regexp.last_match(1).to_i)
  when /^Gr/i
    growth
  when 'FT'
    get_fumble_table
  when 'TT'
    get_tangle_table
  else
    super(command)
  end
end
# Extracts the rate-up modifier "r[n]" (首切り刀用レーティング上昇) from a
# rating command string.
# @param string [String] command text
# @return [(Integer, String)] the rate-up value (0 when absent) and the
#   string with every r[n] occurrence removed
def getRateUpFromString(string)
  pattern = /r\[(\d+)\]/i
  match = pattern.match(string)
  return 0, string unless match

  return match[1].to_i, string.gsub(pattern, '')
end
# Extends the base parser: also strips the "gf" (greatest fortune) flag
# from the command, records it in +values+ and echoes it in the output.
def getAdditionalString(string, output)
output, values = super(string, output)
isGratestFortune, string = getGratestFortuneFromString(string)
values['isGratestFortune'] = isGratestFortune
output += "gf" if isGratestFortune
return output, values
end
# With greatest fortune active, both d6 show the same face: roll a single
# d6, double the total and duplicate the dice text. Otherwise defer to
# the normal 2d6 roll in the parent class.
def rollDice(values)
unless values['isGratestFortune']
return super(values)
end
dice, diceText = roll(1, 6)
dice *= 2
diceText = "#{diceText},#{diceText}"
return dice, diceText
end
# Detects and strips the "gf" (greatest fortune) suffix from a command.
# @param string [String] command text
# @return [(Boolean, String)] whether gf was present, and the string
#   with every gf occurrence removed
def getGratestFortuneFromString(string)
  pattern = /gf/i
  return false, string unless pattern === string

  return true, string.gsub(pattern, '')
end
# SW2.0 supports a critical threshold on plain 2d6 rolls (2D6@10 syntax).
def is2dCritical
true
end
# SW2.0 transcendence (超成功) support: when the 2d6 total reaches the
# critical threshold, a 2 is queued so the roll continues. On the very
# first roll (loop_count == 0) totals of 2 and 12 never explode.
# @param critical [Integer, nil] critical threshold; nil or <= 2 disables
# @param dice_new [Integer] the 2d6 total just rolled
# @param dice_arry [Array] roll queue; mutated in place
# @param loop_count [Integer] number of rolls so far
def check2dCritical(critical, dice_new, dice_arry, loop_count)
  return if critical.nil? || critical <= 2
  return if loop_count == 0 && [2, 12].include?(dice_new)

  dice_arry.push(2) if dice_new >= critical
end
# Adds the SW2.0 "超成功" (super success) verdict on top of the normal nD6
# judgement: when the command carried a @critical value, the dice total
# reached it, and the final total is at least 41, report super success.
def check_nD6(total, dice_total, dice_list, cmp_op, target)
result = super(total, dice_total, dice_list, cmp_op, target)
return result unless result.nil?
string = bcdice.getOriginalMessage
superSuccessValue = 41
if /@(\d+)/ === string
critical = Regexp.last_match(1).to_i
if dice_total >= critical
if total >= superSuccessValue
return " > 超成功"
end
end
end
return result
end
# Performs +count+ growth rolls and joins their results with " | ".
def growth(count = 1)
  results = (1..count).map { growth_step }
  results.join(' | ')
end
# One growth roll: two d6, each mapped to an ability; the player may pick
# either when they differ, so both are shown ("or"), otherwise just one.
def growth_step
d1, = roll(1, 6)
d2, = roll(1, 6)
a1 = get_ability_by_dice(d1)
a2 = get_ability_by_dice(d2)
return a1 != a2 ? "[#{d1},#{d2}]->(#{a1} or #{a2})" : "[#{d1},#{d2}]->(#{a1})"
end
# Maps a d6 face (1..6) to the corresponding ability name.
def get_ability_by_dice(dice)
  %w(器用度 敏捷度 筋力 生命力 知力 精神力)[dice - 1]
end
# Rolls on the defense fumble table (防御ファンブル表) via the shared
# 1d6 table helper and formats the result.
def get_fumble_table()
table = [
'この表を2回振り、その両方を適用する。(同じ出目による影響は累積しない)。この自動失敗により得られる経験点は、+50点される',
'ダメージに、攻撃者を強化している「剣のかけら」の数が追加される',
'ダメージに、攻撃者の「レベル」が追加される',
'ダメージ決定を2回行い、より高い方を採用する',
'合算ダメージを2倍する',
'防護点無効'
]
text, num = get_table_by_1d6(table)
return "防御ファンブル表(#{num}) → #{text}"
end
# Rolls on the tangle effect table (絡み効果表) via the shared 1d6 table
# helper and formats the result.
def get_tangle_table()
table = [
'頭や顔:牙や噛みつきなどにおける命中力判定及び、魔法の行使やブレスに-2のペナルティ修正を受ける',
'武器や盾:武器の使用不可、又は盾の回避力修正及び防護点を無効化する',
'腕や手:武器や爪などにおける命中力判定に-2のペナルティ修正、盾を持つ腕方の腕ならその盾の回避力修正及び防護点を無効化する',
'脚や足:移動不可、更に回避力判定に-2のペナルティ修正を受ける ※両足に絡んでも累積しない',
'胴体:生命・精神抵抗力を基準値に用いる判定を除き、あらゆる行為判定に-1のペナルティ修正を受ける',
'特殊:尻尾や翼などに命中。絡められた部位を使用する判定において-2のペナルティ修正、またはそこが使えていたことによるボーナス修正を失う ※存在しない場合は決め直し'
]
text, num = get_table_by_1d6(table)
return "絡み効果表(#{num}) → #{text}"
end
end
| 21.535714 | 89 | 0.65796 |
d53a4e634980116f319c6dc1016f2c6cac960a7f | 1,526 | #!/usr/bin/env rspec
require 'spec_helper'
require 'puppet/indirector/node/puppetdb'
require 'puppet/util/puppetdb/command_names'
require 'json'
require 'date'
require 'time'
# Specs for the PuppetDB node terminus: destroying a node must submit a
# "deactivate node" command to PuppetDB over HTTP.
describe Puppet::Node::Puppetdb do
CommandDeactivateNode = Puppet::Util::Puppetdb::CommandNames::CommandDeactivateNode
before :each do
# route all Node indirection calls through the terminus under test
Puppet::Node.indirection.stubs(:terminus).returns(subject)
end
let(:node) { "something.example.com" }
let(:producer_timestamp) { Puppet::Util::Puppetdb.to_wire_time(Time.now) }
def destroy
Puppet::Node.indirection.destroy(node)
end
describe "#destroy" do
let(:response) { Net::HTTPOK.new('1.1', 200, 'OK') }
let(:http) { mock 'http' }
before :each do
Puppet::Network::HttpPool.expects(:connection).returns http
end
it "should POST a '#{CommandDeactivateNode}' command" do
response.stubs(:body).returns '{"uuid": "a UUID"}'
# verify the command payload names the right certname and carries a
# producer_timestamp no later than now
http.expects(:post).with do |uri,body,headers|
req = JSON.parse(body)
req["certname"] == node &&
extract_producer_timestamp(req) <= Time.now.to_i
end.returns response
destroy
end
it "should log a deprecation warning if one is returned from PuppetDB" do
response['x-deprecation'] = 'A horrible deprecation warning!'
response.stubs(:body).returns '{"uuid": "a UUID"}'
Puppet.expects(:deprecation_warning).with do |msg|
msg =~ /A horrible deprecation warning!/
end
http.stubs(:post).returns response
destroy
end
end
end
| 26.310345 | 85 | 0.67038 |
e8a0345d90622f1fbcfbb64e7f8be81dbd451e76 | 451 | require 'rails_helper'
# Specs in this file have access to a helper object that includes
# the CageSearchesHelper. For example:
#
# describe CageSearchesHelper do
# describe "string concat" do
# it "concats two strings with spaces" do
# expect(helper.concat_strings("this","that")).to eq("this that")
# end
# end
# end
# Placeholder spec generated by rails; still pending real examples.
RSpec.describe CageSearchesHelper, type: :helper do
pending "add some examples to (or delete) #{__FILE__}"
end
| 28.1875 | 71 | 0.718404 |
f8c4ca69d57aefba7c6764f10850c5488c87bae1 | 345 | require 'route_command'
module Booty
module Products
# Route command handling GET /products/new: renders the new-product form.
class NewCommand < Booty::RouteCommand
handles :uri => /^\/products\/new$/, :method => :GET
# @param view_engine rendering engine injected by the framework
# NOTE(review): @view_engine is stored but not referenced in #run —
# presumably consumed by the RouteCommand base class; verify.
def initialize(view_engine)
@view_engine = view_engine
end
# Responds with the static /products/new template.
def run(request)
HtmlResponse.new(:template => "/products/new")
end
end
end
end
| 19.166667 | 58 | 0.623188 |
f79b0cad6e97bfac01a5e053772a1e233e20521d | 46 | def slowest_examples
new(*options.keys)
end
| 11.5 | 20 | 0.782609 |
e9337e705fb098cbea1f17734e1272a6a30c6c90 | 87 | class CardReading < ApplicationRecord
# Join model linking a card to a reading.
# NOTE(review): inferred from the two belongs_to associations — confirm
# against the schema.
belongs_to :reading
belongs_to :card
end
| 17.4 | 37 | 0.770115 |
28f098cb594b6597e5c9b61108201bdfcafae01c | 134 | class InactiveTags < ActiveRecord::Migration[4.2]
# Adds an "inactive" flag to tags; rows default to active (inactive: false).
def change
  add_column :tags, :inactive, :boolean, default: false
end
end
| 22.333333 | 60 | 0.716418 |
912397641fc765529896b20d474383b99dad2b3f | 18,874 | require_relative '../../../test_helper'
module OmniAuth
module Strategies
class OpenIDConnectTest < StrategyTestCase
# Defaults: HTTPS on 443 with the conventional OAuth endpoint paths.
def test_client_options_defaults
assert_equal 'https', strategy.options.client_options.scheme
assert_equal 443, strategy.options.client_options.port
assert_equal '/authorize', strategy.options.client_options.authorization_endpoint
assert_equal '/token', strategy.options.client_options.token_endpoint
end
# The request phase must redirect to the authorize endpoint with a fresh
# nonce and state (both 32 hex chars).
def test_request_phase
expected_redirect = /^https:\/\/example\.com\/authorize\?client_id=1234&nonce=\w{32}&response_type=code&scope=openid&state=\w{32}$/
strategy.options.issuer = 'example.com'
strategy.options.client_options.host = 'example.com'
strategy.expects(:redirect).with(regexp_matches(expected_redirect))
strategy.request_phase
end
# With discovery enabled, the logout path redirects to the provider's
# discovered end_session_endpoint.
def test_logout_phase_with_discovery
expected_redirect = %r{^https:\/\/example\.com\/logout$}
strategy.options.client_options.host = 'example.com'
strategy.options.discovery = true
issuer = stub('OpenIDConnect::Discovery::Issuer')
issuer.stubs(:issuer).returns('https://example.com/')
::OpenIDConnect::Discovery::Provider.stubs(:discover!).returns(issuer)
config = stub('OpenIDConnect::Discovery::Provder::Config')
config.stubs(:authorization_endpoint).returns('https://example.com/authorization')
config.stubs(:token_endpoint).returns('https://example.com/token')
config.stubs(:userinfo_endpoint).returns('https://example.com/userinfo')
config.stubs(:jwks_uri).returns('https://example.com/jwks')
config.stubs(:end_session_endpoint).returns('https://example.com/logout')
::OpenIDConnect::Discovery::Provider::Config.stubs(:discover!).with('https://example.com/').returns(config)
request.stubs(:path_info).returns('/auth/openidconnect/logout')
strategy.expects(:redirect).with(regexp_matches(expected_redirect))
strategy.other_phase
end
# As above, but a configured post_logout_redirect_uri must be appended
# (URL-encoded) to the end-session redirect.
def test_logout_phase_with_discovery_and_post_logout_redirect_uri
expected_redirect = 'https://example.com/logout?post_logout_redirect_uri=https%3A%2F%2Fmysite.com'
strategy.options.client_options.host = 'example.com'
strategy.options.discovery = true
strategy.options.post_logout_redirect_uri = 'https://mysite.com'
issuer = stub('OpenIDConnect::Discovery::Issuer')
issuer.stubs(:issuer).returns('https://example.com/')
::OpenIDConnect::Discovery::Provider.stubs(:discover!).returns(issuer)
config = stub('OpenIDConnect::Discovery::Provder::Config')
config.stubs(:authorization_endpoint).returns('https://example.com/authorization')
config.stubs(:token_endpoint).returns('https://example.com/token')
config.stubs(:userinfo_endpoint).returns('https://example.com/userinfo')
config.stubs(:jwks_uri).returns('https://example.com/jwks')
config.stubs(:end_session_endpoint).returns('https://example.com/logout')
::OpenIDConnect::Discovery::Provider::Config.stubs(:discover!).with('https://example.com/').returns(config)
request.stubs(:path_info).returns('/auth/openidconnect/logout')
strategy.expects(:redirect).with(expected_redirect)
strategy.other_phase
end
# Without an end_session_endpoint the logout path falls through to the app.
def test_logout_phase
strategy.options.issuer = 'example.com'
strategy.options.client_options.host = 'example.com'
request.stubs(:path_info).returns('/auth/openidconnect/logout')
strategy.expects(:call_app!)
strategy.other_phase
end
# Pass-through request params (login_hint, ui_locales, claims_locales)
# must be forwarded to the authorize URL.
def test_request_phase_with_params
expected_redirect = /^https:\/\/example\.com\/authorize\?claims_locales=es&client_id=1234&login_hint=john.doe%40example.com&nonce=\w{32}&response_type=code&scope=openid&state=\w{32}&ui_locales=en$/
strategy.options.issuer = 'example.com'
strategy.options.client_options.host = 'example.com'
request.stubs(:params).returns('login_hint' => '[email protected]', 'ui_locales' => 'en', 'claims_locales' => 'es')
strategy.expects(:redirect).with(regexp_matches(expected_redirect))
strategy.request_phase
end
# With discovery enabled, the request phase must populate the issuer and
# all client endpoints from the discovered provider config.
def test_request_phase_with_discovery
  expected_redirect = /^https:\/\/example\.com\/authorization\?client_id=1234&nonce=\w{32}&response_type=code&scope=openid&state=\w{32}$/
  strategy.options.client_options.host = 'example.com'
  strategy.options.discovery = true

  issuer = stub('OpenIDConnect::Discovery::Issuer')
  issuer.stubs(:issuer).returns('https://example.com/')
  ::OpenIDConnect::Discovery::Provider.stubs(:discover!).returns(issuer)

  config = stub('OpenIDConnect::Discovery::Provder::Config')
  config.stubs(:authorization_endpoint).returns('https://example.com/authorization')
  config.stubs(:token_endpoint).returns('https://example.com/token')
  config.stubs(:userinfo_endpoint).returns('https://example.com/userinfo')
  config.stubs(:jwks_uri).returns('https://example.com/jwks')
  ::OpenIDConnect::Discovery::Provider::Config.stubs(:discover!).with('https://example.com/').returns(config)

  strategy.expects(:redirect).with(regexp_matches(expected_redirect))
  strategy.request_phase

  # Fix: minitest's assert_equal takes (expected, actual); the original
  # had the arguments reversed here (unlike the rest of this file), which
  # produces misleading failure messages.
  assert_equal 'https://example.com/', strategy.options.issuer
  assert_equal 'https://example.com/authorization', strategy.options.client_options.authorization_endpoint
  assert_equal 'https://example.com/token', strategy.options.client_options.token_endpoint
  assert_equal 'https://example.com/userinfo', strategy.options.client_options.userinfo_endpoint
  assert_equal 'https://example.com/jwks', strategy.options.client_options.jwks_uri
  assert_nil strategy.options.client_options.end_session_endpoint
end
# uid defaults to sub, honors a known uid_field override, and falls back
# to sub when the configured field does not exist on the userinfo.
def test_uid
assert_equal user_info.sub, strategy.uid
strategy.options.uid_field = 'preferred_username'
assert_equal user_info.preferred_username, strategy.uid
strategy.options.uid_field = 'something'
assert_equal user_info.sub, strategy.uid
end
# Happy-path callback: matching state/nonce, token exchange and userinfo
# fetch all succeed.
# NOTE(review): the session/params defaults are never used in the body —
# presumably leftovers; confirm nothing invokes this method with args.
def test_callback_phase(session = {}, params = {})
code = SecureRandom.hex(16)
state = SecureRandom.hex(16)
nonce = SecureRandom.hex(16)
request.stubs(:params).returns('code' => code, 'state' => state)
request.stubs(:path_info).returns('')
strategy.options.issuer = 'example.com'
strategy.options.client_signing_alg = :RS256
strategy.options.client_jwk_signing_key = File.read('test/fixtures/jwks.json')
id_token = stub('OpenIDConnect::ResponseObject::IdToken')
id_token.stubs(:verify!).with(issuer: strategy.options.issuer, client_id: @identifier, nonce: nonce).returns(true)
::OpenIDConnect::ResponseObject::IdToken.stubs(:decode).returns(id_token)
strategy.unstub(:user_info)
access_token = stub('OpenIDConnect::AccessToken')
access_token.stubs(:access_token)
access_token.stubs(:refresh_token)
access_token.stubs(:expires_in)
access_token.stubs(:scope)
access_token.stubs(:id_token).returns(File.read('test/fixtures/id_token.txt'))
client.expects(:access_token!).at_least_once.returns(access_token)
access_token.expects(:userinfo!).returns(user_info)
strategy.call!('rack.session' => { 'omniauth.state' => state, 'omniauth.nonce' => nonce })
strategy.callback_phase
end
# Happy-path callback with discovery: provider config (including jwks)
# comes from discovery instead of static options.
def test_callback_phase_with_discovery
code = SecureRandom.hex(16)
state = SecureRandom.hex(16)
nonce = SecureRandom.hex(16)
jwks = JSON::JWK::Set.new(JSON.parse(File.read('test/fixtures/jwks.json'))['keys'])
request.stubs(:params).returns('code' => code, 'state' => state)
request.stubs(:path_info).returns('')
strategy.options.client_options.host = 'example.com'
strategy.options.discovery = true
issuer = stub('OpenIDConnect::Discovery::Issuer')
issuer.stubs(:issuer).returns('https://example.com/')
::OpenIDConnect::Discovery::Provider.stubs(:discover!).returns(issuer)
config = stub('OpenIDConnect::Discovery::Provder::Config')
config.stubs(:authorization_endpoint).returns('https://example.com/authorization')
config.stubs(:token_endpoint).returns('https://example.com/token')
config.stubs(:userinfo_endpoint).returns('https://example.com/userinfo')
config.stubs(:jwks_uri).returns('https://example.com/jwks')
config.stubs(:jwks).returns(jwks)
::OpenIDConnect::Discovery::Provider::Config.stubs(:discover!).with('https://example.com/').returns(config)
id_token = stub('OpenIDConnect::ResponseObject::IdToken')
id_token.stubs(:verify!).with(issuer: 'https://example.com/', client_id: @identifier, nonce: nonce).returns(true)
::OpenIDConnect::ResponseObject::IdToken.stubs(:decode).returns(id_token)
strategy.unstub(:user_info)
access_token = stub('OpenIDConnect::AccessToken')
access_token.stubs(:access_token)
access_token.stubs(:refresh_token)
access_token.stubs(:expires_in)
access_token.stubs(:scope)
access_token.stubs(:id_token).returns(File.read('test/fixtures/id_token.txt'))
client.expects(:access_token!).at_least_once.returns(access_token)
access_token.expects(:userinfo!).returns(user_info)
strategy.call!('rack.session' => { 'omniauth.state' => state, 'omniauth.nonce' => nonce })
strategy.callback_phase
end
# A provider error response (no authorization code) must make the
# strategy fail the callback.
def test_callback_phase_with_error
  session_state = SecureRandom.hex(16)
  session_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('error' => 'invalid_request')
  strategy.call!('rack.session' => { 'omniauth.state' => session_state, 'omniauth.nonce' => session_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# A state parameter that does not match the session state must trigger
# fail! instead of completing the callback.
def test_callback_phase_with_invalid_state
  auth_code = SecureRandom.hex(16)
  stored_state = SecureRandom.hex(16)
  stored_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('code' => auth_code, 'state' => 'foobar')
  strategy.call!('rack.session' => { 'omniauth.state' => stored_state, 'omniauth.nonce' => stored_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# A callback that carries a valid state but no authorization code must fail.
def test_callback_phase_without_code
  stored_state = SecureRandom.hex(16)
  stored_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('state' => stored_state)
  strategy.call!('rack.session' => { 'omniauth.state' => stored_state, 'omniauth.nonce' => stored_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# A Timeout::Error raised during the token exchange must be converted
# into fail! rather than escaping the callback.
def test_callback_phase_with_timeout
  auth_code = SecureRandom.hex(16)
  session_state = SecureRandom.hex(16)
  session_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('code' => auth_code, 'state' => session_state)
  strategy.options.issuer = 'example.com'
  strategy.stubs(:access_token).raises(::Timeout::Error.new('error'))
  strategy.call!('rack.session' => { 'omniauth.state' => session_state, 'omniauth.nonce' => session_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# An Errno::ETIMEDOUT raised during the token exchange must be converted
# into fail! rather than escaping the callback.
def test_callback_phase_with_etimeout
  auth_code = SecureRandom.hex(16)
  session_state = SecureRandom.hex(16)
  session_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('code' => auth_code, 'state' => session_state)
  strategy.options.issuer = 'example.com'
  strategy.stubs(:access_token).raises(::Errno::ETIMEDOUT.new('error'))
  strategy.call!('rack.session' => { 'omniauth.state' => session_state, 'omniauth.nonce' => session_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# A SocketError raised during the token exchange must be converted into
# fail! rather than escaping the callback.
def test_callback_phase_with_socket_error
  auth_code = SecureRandom.hex(16)
  session_state = SecureRandom.hex(16)
  session_nonce = SecureRandom.hex(16)
  request.stubs(:path_info).returns('')
  request.stubs(:params).returns('code' => auth_code, 'state' => session_state)
  strategy.options.issuer = 'example.com'
  strategy.stubs(:access_token).raises(::SocketError.new('error'))
  strategy.call!('rack.session' => { 'omniauth.state' => session_state, 'omniauth.nonce' => session_nonce })
  strategy.expects(:fail!)
  strategy.callback_phase
end
# The OmniAuth info hash must mirror the OIDC userinfo response
# field-for-field.
def test_info
  mapped = strategy.info
  {
    name: user_info.name,
    email: user_info.email,
    nickname: user_info.preferred_username,
    first_name: user_info.given_name,
    last_name: user_info.family_name,
    gender: user_info.gender,
    image: user_info.picture,
    phone: user_info.phone_number
  }.each do |key, expected|
    assert_equal expected, mapped[key]
  end
  assert_equal({ website: user_info.website }, mapped[:urls])
end
# The extra hash exposes the raw userinfo payload under :raw_info.
def test_extra
  expected_extra = { raw_info: user_info.as_json }
  assert_equal expected_extra, strategy.extra
end
# Exercises #credentials: after a stubbed token exchange, the returned
# hash must expose the id_token, access/refresh tokens, expiry and scope
# exactly as provided by the access token object.
def test_credentials
strategy.options.issuer = 'example.com'
strategy.options.client_signing_alg = :RS256
strategy.options.client_jwk_signing_key = File.read('test/fixtures/jwks.json')
# ID-token verification is stubbed to always succeed.
id_token = stub('OpenIDConnect::ResponseObject::IdToken')
id_token.stubs(:verify!).returns(true)
::OpenIDConnect::ResponseObject::IdToken.stubs(:decode).returns(id_token)
access_token = stub('OpenIDConnect::AccessToken')
access_token.stubs(:access_token).returns(SecureRandom.hex(16))
access_token.stubs(:refresh_token).returns(SecureRandom.hex(16))
# NOTE(review): expires_in is stubbed with a Time rather than a numeric
# duration — the assertion below only checks pass-through, not the type.
access_token.stubs(:expires_in).returns(Time.now)
access_token.stubs(:scope).returns('openidconnect')
access_token.stubs(:id_token).returns(File.read('test/fixtures/id_token.txt'))
client.expects(:access_token!).returns(access_token)
# These expects pin that refresh_token/expires_in are actually read.
access_token.expects(:refresh_token).returns(access_token.refresh_token)
access_token.expects(:expires_in).returns(access_token.expires_in)
assert_equal(
{
id_token: access_token.id_token,
token: access_token.access_token,
refresh_token: access_token.refresh_token,
expires_in: access_token.expires_in,
scope: access_token.scope
},
strategy.credentials
)
end
# The authorize URI contains a nonce by default and omits it once
# send_nonce is disabled.
def test_option_send_nonce
  strategy.options.client_options[:host] = 'foobar.com'
  assert_match(/nonce=/, strategy.authorize_uri, 'URI must contain nonce')
  strategy.options.send_nonce = false
  refute_match(/nonce=/, strategy.authorize_uri, 'URI must not contain nonce')
end
# When no environment is configured to raise on failure, a provider
# error must produce a 302 redirect to /auth/failure.
def test_failure_endpoint_redirect
  OmniAuth.config.stubs(:failure_raise_out_environments).returns([])
  strategy.stubs(:env).returns({})
  request.stubs(:params).returns('error' => 'access denied')
  response = strategy.callback_phase
  assert(response.is_a?(Array))
  assert_equal(302, response[0], 'Redirect')
  assert_match(%r{/auth/failure}, response[1]["Location"])
end
# End-to-end check of the state option: a lambda-supplied state must be
# embedded in the authorize redirect, accepted on callback when it
# matches the session, and rejected (fail!) when it does not.
# NOTE(review): relies on the sibling helper test_callback_phase defined
# elsewhere in this class.
def test_state
strategy.options.state = lambda { 42 }
session = { "state" => 42 }
expected_redirect = /&state=/
strategy.options.issuer = 'example.com'
strategy.options.client_options.host = 'example.com'
strategy.expects(:redirect).with(regexp_matches(expected_redirect))
strategy.request_phase
# this should succeed as the correct state is passed with the request
test_callback_phase(session, { 'state' => 42 })
# the following should fail because the wrong state is passed to the callback
code = SecureRandom.hex(16)
request.stubs(:params).returns('code' => code, 'state' => 43)
request.stubs(:path_info).returns('')
strategy.call!('rack.session' => session)
strategy.expects(:fail!)
strategy.callback_phase
end
# With a non-:basic client_auth_method the token request must POST the
# client_id/client_secret in the form body (client_credentials style)
# instead of using HTTP basic auth.
def test_option_client_auth_method
state = SecureRandom.hex(16)
nonce = SecureRandom.hex(16)
opts = strategy.options.client_options
opts[:host] = 'foobar.com'
strategy.options.issuer = 'foobar.com'
strategy.options.client_auth_method = :not_basic
strategy.options.client_signing_alg = :RS256
strategy.options.client_jwk_signing_key = File.read('test/fixtures/jwks.json')
# Canned successful token-endpoint response.
json_response = {
access_token: 'test_access_token',
id_token: File.read('test/fixtures/id_token.txt'),
token_type: 'Bearer',
}.to_json
success = Struct.new(:status, :body).new(200, json_response)
request.stubs(:path_info).returns('')
strategy.call!('rack.session' => { 'omniauth.state' => state, 'omniauth.nonce' => nonce })
id_token = stub('OpenIDConnect::ResponseObject::IdToken')
id_token.stubs(:verify!).with(issuer: strategy.options.issuer, client_id: @identifier, nonce: nonce).returns(true)
::OpenIDConnect::ResponseObject::IdToken.stubs(:decode).returns(id_token)
# The credentials must appear in the POST body of the token request.
HTTPClient.any_instance.stubs(:post).with(
"#{ opts.scheme }://#{ opts.host }:#{ opts.port }#{ opts.token_endpoint }",
{ scope: 'openid', grant_type: :client_credentials, client_id: @identifier, client_secret: @secret },
{}
).returns(success)
assert(strategy.send :access_token)
end
# A full JWK set supplied as the signing key yields a JSON::JWK::Set.
def test_public_key_with_jwks
  strategy.options.client_signing_alg = :RS256
  strategy.options.client_jwk_signing_key = File.read('./test/fixtures/jwks.json')
  assert_instance_of JSON::JWK::Set, strategy.public_key
end
# A single JWK supplied as the signing key yields a JSON::JWK.
def test_public_key_with_jwk
  strategy.options.client_signing_alg = :RS256
  first_key = JSON.parse(File.read('./test/fixtures/jwks.json'))['keys'].first
  strategy.options.client_jwk_signing_key = first_key.to_json
  assert_instance_of JSON::JWK, strategy.public_key
end
# An X.509 certificate supplied as the signing key yields its RSA
# public key.
def test_public_key_with_x509
  strategy.options.client_signing_alg = :RS256
  strategy.options.client_x509_signing_key = File.read('./test/fixtures/test.crt')
  assert_instance_of OpenSSL::PKey::RSA, strategy.public_key
end
# With an HMAC algorithm the client secret itself is the key material.
def test_public_key_with_hmac
  shared_secret = 'secret'
  strategy.options.client_options.secret = shared_secret
  strategy.options.client_signing_alg = :HS256
  assert_equal shared_secret, strategy.public_key
end
end
end
end
| 43.388506 | 205 | 0.673413 |
d5fd539553031d8cd800c5a1b097ab98636b9838 | 27 | module CartBooksHelper
end
| 9 | 22 | 0.888889 |
18c78f316a1fbaa6084b89c68a7129e9cef59f0d | 140 | # http://www.codewars.com/kata/55c933c115a8c426ac000082
# --- iteration 1 ---
# Applies the operator named by v["operation"] to v["a"] with v["b"] as
# the argument, e.g. {"a" => 1, "operation" => "+", "b" => 2} #=> 3.
# NOTE: `send` can invoke private methods — only pass trusted operation
# names into this helper.
def eval_object(v)
  receiver, operation, argument = v.values_at("a", "operation", "b")
  receiver.send(operation, argument)
end
| 20 | 55 | 0.657143 |
084b613c1edcfcb6c2b6a3876f4d74d541ead19c | 156,990 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/rest_json.rb'
require 'aws-sdk-glacier/plugins/account_id.rb'
require 'aws-sdk-glacier/plugins/api_version.rb'
require 'aws-sdk-glacier/plugins/checksums.rb'
Aws::Plugins::GlobalConfiguration.add_identifier(:glacier)
module Aws::Glacier
# An API client for Glacier. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::Glacier::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs
@identifier = :glacier
set_api(ClientApi::API)
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::SignatureV4)
add_plugin(Aws::Plugins::Protocols::RestJson)
add_plugin(Aws::Glacier::Plugins::AccountId)
add_plugin(Aws::Glacier::Plugins::ApiVersion)
add_plugin(Aws::Glacier::Plugins::Checksums)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::SharedCredentials` - Used for loading credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2 IMDS instance profile - When used by default, the timeouts are
# very aggressive. Construct and pass an instance of
# `Aws::InstanceProfileCredentails` to enable retries and extended
# timeouts.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [String] :account_id ("-")
# The default Glacier AWS account ID to use for all glacier
# operations. The default value of `-` uses the account
# your `:credentials` belong to.
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficent client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
# `:ssl_ca_bundle` or `:ssl_ca_directory` the the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
# not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the
# system default will be used if available.
#
# Constructs the client. All option handling (credentials, region,
# retries, plus the Glacier-specific account-id/api-version/checksum
# plugins registered above) is performed by the plugin chain in
# Seahorse::Client::Base.
def initialize(*args)
super
end
# @!group API Operations
# This operation aborts a multipart upload identified by the upload ID.
#
# After the Abort Multipart Upload request succeeds, you cannot upload
# any more parts to the multipart upload or complete the multipart
# upload. Aborting a completed upload fails. However, aborting an
# already-aborted upload will succeed, for a short time. For more
# information about uploading a part and completing a multipart upload,
# see UploadMultipartPart and CompleteMultipartUpload.
#
# This operation is idempotent.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Working with
# Archives in Amazon S3 Glacier][2] and [Abort Multipart Upload][3] in
# the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-abort-upload.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :upload_id
# The upload ID of the multipart upload to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To abort a multipart upload identified by the upload ID
#
# # The example deletes an in-progress multipart upload to a vault named my-vault:
#
# resp = client.abort_multipart_upload({
# account_id: "-",
# upload_id: "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
# vault_name: "my-vault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.abort_multipart_upload({
# account_id: "string", # required
# vault_name: "string", # required
# upload_id: "string", # required
# })
#
# @overload abort_multipart_upload(params = {})
# @param [Hash] params ({})
# Builds and dispatches the abort_multipart_upload request.
#
# @param [Hash] params request parameters (documented above)
# @param [Hash] options request options
# @return [Seahorse::Client::Response]
def abort_multipart_upload(params = {}, options = {})
  build_request(:abort_multipart_upload, params).send_request(options)
end
# This operation aborts the vault locking process if the vault lock is
# not in the `Locked` state. If the vault lock is in the `Locked` state
# when this operation is requested, the operation returns an
# `AccessDeniedException` error. Aborting the vault locking process
# removes the vault lock policy from the specified vault.
#
# A vault lock is put into the `InProgress` state by calling
# InitiateVaultLock. A vault lock is put into the `Locked` state by
# calling CompleteVaultLock. You can get the state of a vault lock by
# calling GetVaultLock. For more information about the vault locking
# process, see [Amazon Glacier Vault Lock][1]. For more information
# about vault lock policies, see [Amazon Glacier Access Control with
# Vault Lock Policies][2].
#
# This operation is idempotent. You can successfully invoke this
# operation multiple times, if the vault lock is in the `InProgress`
# state or if there is no policy associated with the vault.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To abort a vault lock
#
# # The example aborts the vault locking process if the vault lock is not in the Locked state for the vault named
# # examplevault.
#
# resp = client.abort_vault_lock({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.abort_vault_lock({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @overload abort_vault_lock(params = {})
# @param [Hash] params ({})
# Builds and dispatches the abort_vault_lock request.
#
# @param [Hash] params request parameters (documented above)
# @param [Hash] options request options
# @return [Seahorse::Client::Response]
def abort_vault_lock(params = {}, options = {})
  build_request(:abort_vault_lock, params).send_request(options)
end
# This operation adds the specified tags to a vault. Each tag is
# composed of a key and a value. Each vault can have up to 10 tags. If
# your request would cause the tag limit for the vault to be exceeded,
# the operation throws the `LimitExceededException` error. If a tag
# already exists on the vault under a specified key, the existing key
# value will be overwritten. For more information about tags, see
# [Tagging Amazon S3 Glacier Resources][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Hash<String,String>] :tags
# The tags to add to the vault. Each tag is composed of a key and a
# value. The value can be an empty string.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To add tags to a vault
#
# # The example adds two tags to a my-vault.
#
# resp = client.add_tags_to_vault({
# tags: {
# "examplekey1" => "examplevalue1",
# "examplekey2" => "examplevalue2",
# },
# account_id: "-",
# vault_name: "my-vault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.add_tags_to_vault({
# account_id: "string", # required
# vault_name: "string", # required
# tags: {
# "TagKey" => "TagValue",
# },
# })
#
# @overload add_tags_to_vault(params = {})
# @param [Hash] params ({})
def add_tags_to_vault(params = {}, options = {})
req = build_request(:add_tags_to_vault, params)
req.send_request(options)
end
# You call this operation to inform Amazon S3 Glacier (Glacier) that all
# the archive parts have been uploaded and that Glacier can now assemble
# the archive from the uploaded parts. After assembling and saving the
# archive to the vault, Glacier returns the URI path of the newly
# created archive resource. Using the URI path, you can then access the
# archive. After you upload an archive, you should save the archive ID
# returned to retrieve the archive at a later point. You can also get
# the vault inventory to obtain a list of archive IDs in a vault. For
# more information, see InitiateJob.
#
# In the request, you must include the computed SHA256 tree hash of the
# entire archive you have uploaded. For information about computing a
# SHA256 tree hash, see [Computing Checksums][1]. On the server side,
# Glacier also constructs the SHA256 tree hash of the assembled archive.
# If the values match, Glacier saves the archive to the vault;
# otherwise, it returns an error, and the operation fails. The ListParts
# operation returns a list of parts uploaded for a specific multipart
# upload. It includes checksum information for each uploaded part that
# can be used to debug a bad checksum issue.
#
    # Additionally, Glacier also checks for any missing content ranges when
    # assembling the archive. If missing content ranges are found, Glacier
    # returns an error and the operation fails.
#
# Complete Multipart Upload is an idempotent operation. After your first
# successful complete multipart upload, if you call the operation again
# within a short period, the operation will succeed and return the same
# archive ID. This is useful in the event you experience a network issue
# that causes an aborted connection or receive a 500 server error, in
# which case you can repeat your Complete Multipart Upload request and
# get the same archive ID without creating duplicate archives. Note,
# however, that after the multipart upload completes, you cannot call
# the List Parts operation and the multipart upload will not appear in
# List Multipart Uploads response, even if idempotent complete is
# possible.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][2].
#
# For conceptual information and underlying REST API, see [Uploading
# Large Archives in Parts (Multipart Upload)][3] and [Complete Multipart
# Upload][4] in the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-complete-upload.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :upload_id
# The upload ID of the multipart upload.
#
# @option params [Integer] :archive_size
# The total size, in bytes, of the entire archive. This value should be
# the sum of all the sizes of the individual parts that you uploaded.
#
# @option params [String] :checksum
# The SHA256 tree hash of the entire archive. It is the tree hash of
# SHA256 tree hash of the individual parts. If the value you specify in
# the request does not match the SHA256 tree hash of the final assembled
# archive as computed by Amazon S3 Glacier (Glacier), Glacier returns an
# error and the request fails.
#
# @return [Types::ArchiveCreationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ArchiveCreationOutput#location #location} => String
# * {Types::ArchiveCreationOutput#checksum #checksum} => String
# * {Types::ArchiveCreationOutput#archive_id #archive_id} => String
#
#
# @example Example: To complete a multipart upload
#
# # The example completes a multipart upload for a 3 MiB archive.
#
# resp = client.complete_multipart_upload({
# account_id: "-",
# archive_size: 3145728,
# checksum: "9628195fcdbcbbe76cdde456d4646fa7de5f219fb39823836d81f0cc0e18aa67",
# upload_id: "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# archive_id: "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId",
# checksum: "9628195fcdbcbbe76cdde456d4646fa7de5f219fb39823836d81f0cc0e18aa67",
# location: "/111122223333/vaults/my-vault/archives/NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.complete_multipart_upload({
# account_id: "string", # required
# vault_name: "string", # required
# upload_id: "string", # required
# archive_size: 1,
# checksum: "string",
# })
#
# @example Response structure
#
# resp.location #=> String
# resp.checksum #=> String
# resp.archive_id #=> String
#
# @overload complete_multipart_upload(params = {})
# @param [Hash] params ({})
def complete_multipart_upload(params = {}, options = {})
req = build_request(:complete_multipart_upload, params)
req.send_request(options)
end
# This operation completes the vault locking process by transitioning
# the vault lock from the `InProgress` state to the `Locked` state,
# which causes the vault lock policy to become unchangeable. A vault
# lock is put into the `InProgress` state by calling InitiateVaultLock.
# You can obtain the state of the vault lock by calling GetVaultLock.
    # For more information about the vault locking process, see [Amazon
    # Glacier Vault Lock][1].
#
# This operation is idempotent. This request is always successful if the
# vault lock is in the `Locked` state and the provided lock ID matches
# the lock ID originally used to lock the vault.
#
# If an invalid lock ID is passed in the request when the vault lock is
# in the `Locked` state, the operation returns an
# `AccessDeniedException` error. If an invalid lock ID is passed in the
# request when the vault lock is in the `InProgress` state, the
# operation throws an `InvalidParameter` error.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :lock_id
# The `lockId` value is the lock ID obtained from a InitiateVaultLock
# request.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To complete a vault lock
#
# # The example completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked
# # state.
#
# resp = client.complete_vault_lock({
# account_id: "-",
# lock_id: "AE863rKkWZU53SLW5be4DUcW",
# vault_name: "example-vault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.complete_vault_lock({
# account_id: "string", # required
# vault_name: "string", # required
# lock_id: "string", # required
# })
#
# @overload complete_vault_lock(params = {})
# @param [Hash] params ({})
def complete_vault_lock(params = {}, options = {})
req = build_request(:complete_vault_lock, params)
req.send_request(options)
end
# This operation creates a new vault with the specified name. The name
# of the vault must be unique within a region for an AWS account. You
# can create up to 1,000 vaults per account. If you need to create more
# vaults, contact Amazon S3 Glacier.
#
# You must use the following guidelines when naming a vault.
#
# * Names can be between 1 and 255 characters long.
#
# * Allowed characters are a-z, A-Z, 0-9, '\_' (underscore), '-'
# (hyphen), and '.' (period).
#
# This operation is idempotent.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Creating a
# Vault in Amazon Glacier][2] and [Create Vault ][3] in the *Amazon
# Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/creating-vaults.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-put.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::CreateVaultOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateVaultOutput#location #location} => String
#
#
# @example Example: To create a new vault
#
# # The following example creates a new vault named my-vault.
#
# resp = client.create_vault({
# account_id: "-",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# location: "/111122223333/vaults/my-vault",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.create_vault({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.location #=> String
#
# @overload create_vault(params = {})
# @param [Hash] params ({})
def create_vault(params = {}, options = {})
req = build_request(:create_vault, params)
req.send_request(options)
end
# This operation deletes an archive from a vault. Subsequent requests to
# initiate a retrieval of this archive will fail. Archive retrievals
# that are in progress for this archive ID may or may not succeed
# according to the following scenarios:
#
# * If the archive retrieval job is actively preparing the data for
# download when Amazon S3 Glacier receives the delete archive request,
# the archival retrieval operation might fail.
#
# * If the archive retrieval job has successfully prepared the archive
# for download when Amazon S3 Glacier receives the delete archive
# request, you will be able to download the output.
#
# This operation is idempotent. Attempting to delete an already-deleted
# archive does not result in an error.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Deleting an
# Archive in Amazon Glacier][2] and [Delete Archive][3] in the *Amazon
# Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-an-archive.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :archive_id
# The ID of the archive to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To delete an archive
#
# # The example deletes the archive specified by the archive ID.
#
# resp = client.delete_archive({
# account_id: "-",
# archive_id: "NkbByEejwEggmBz2fTHgJrg0XBoDfjP4q6iu87-TjhqG6eGoOY9Z8i1_AUyUsuhPAdTqLHy8pTl5nfCFJmDl2yEZONi5L26Omw12vcs01MNGntHEQL8MBfGlqrEXAMPLEArchiveId",
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.delete_archive({
# account_id: "string", # required
# vault_name: "string", # required
# archive_id: "string", # required
# })
#
# @overload delete_archive(params = {})
# @param [Hash] params ({})
def delete_archive(params = {}, options = {})
req = build_request(:delete_archive, params)
req.send_request(options)
end
# This operation deletes a vault. Amazon S3 Glacier will delete a vault
# only if there are no archives in the vault as of the last inventory
# and there have been no writes to the vault since the last inventory.
# If either of these conditions is not satisfied, the vault deletion
# fails (that is, the vault is not removed) and Amazon S3 Glacier
# returns an error. You can use DescribeVault to return the number of
# archives in a vault, and you can use [Initiate a Job (POST jobs)][1]
# to initiate a new inventory retrieval for a vault. The inventory
# contains the archive IDs you use to delete archives using [Delete
# Archive (DELETE archive)][2].
#
# This operation is idempotent.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][3].
#
# For conceptual information and underlying REST API, see [Deleting a
# Vault in Amazon Glacier][4] and [Delete Vault ][5] in the *Amazon S3
# Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-delete.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/deleting-vaults.html
# [5]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-delete.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To delete a vault
#
# # The example deletes a vault named my-vault:
#
# resp = client.delete_vault({
# account_id: "-",
# vault_name: "my-vault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.delete_vault({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @overload delete_vault(params = {})
# @param [Hash] params ({})
def delete_vault(params = {}, options = {})
req = build_request(:delete_vault, params)
req.send_request(options)
end
# This operation deletes the access policy associated with the specified
# vault. The operation is eventually consistent; that is, it might take
# some time for Amazon S3 Glacier to completely remove the access
# policy, and you might still see the effect of the policy for a short
# time after you send the delete request.
#
# This operation is idempotent. You can invoke delete multiple times,
# even if there is no policy associated with the vault. For more
# information about vault access policies, see [Amazon Glacier Access
# Control with Vault Access Policies][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To delete the vault access policy
#
# # The example deletes the access policy associated with the vault named examplevault.
#
# resp = client.delete_vault_access_policy({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.delete_vault_access_policy({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @overload delete_vault_access_policy(params = {})
# @param [Hash] params ({})
def delete_vault_access_policy(params = {}, options = {})
req = build_request(:delete_vault_access_policy, params)
req.send_request(options)
end
# This operation deletes the notification configuration set for a vault.
# The operation is eventually consistent; that is, it might take some
# time for Amazon S3 Glacier to completely disable the notifications and
# you might still receive some notifications for a short time after you
# send the delete request.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Configuring
# Vault Notifications in Amazon S3 Glacier][2] and [Delete Vault
# Notification Configuration ][3] in the Amazon S3 Glacier Developer
# Guide.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-delete.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To delete the notification configuration set for a vault
#
# # The example deletes the notification configuration set for the vault named examplevault.
#
# resp = client.delete_vault_notifications({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.delete_vault_notifications({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @overload delete_vault_notifications(params = {})
# @param [Hash] params ({})
def delete_vault_notifications(params = {}, options = {})
req = build_request(:delete_vault_notifications, params)
req.send_request(options)
end
# This operation returns information about a job you previously
# initiated, including the job initiation date, the user who initiated
# the job, the job status code/message and the Amazon SNS topic to
# notify after Amazon S3 Glacier (Glacier) completes the job. For more
# information about initiating a job, see InitiateJob.
#
# <note markdown="1"> This operation enables you to check the status of your job. However,
# it is strongly recommended that you set up an Amazon SNS topic and
# specify it in your initiate job request so that Glacier can notify the
# topic after it completes the job.
#
# </note>
#
# A job ID will not expire for at least 24 hours after Glacier completes
# the job.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For more information about using this operation, see the documentation
# for the underlying REST API [Describe Job][2] in the *Amazon Glacier
# Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-describe-job-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :job_id
# The ID of the job to describe.
#
# @return [Types::GlacierJobDescription] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GlacierJobDescription#job_id #job_id} => String
# * {Types::GlacierJobDescription#job_description #job_description} => String
# * {Types::GlacierJobDescription#action #action} => String
# * {Types::GlacierJobDescription#archive_id #archive_id} => String
# * {Types::GlacierJobDescription#vault_arn #vault_arn} => String
# * {Types::GlacierJobDescription#creation_date #creation_date} => Time
# * {Types::GlacierJobDescription#completed #completed} => Boolean
# * {Types::GlacierJobDescription#status_code #status_code} => String
# * {Types::GlacierJobDescription#status_message #status_message} => String
# * {Types::GlacierJobDescription#archive_size_in_bytes #archive_size_in_bytes} => Integer
# * {Types::GlacierJobDescription#inventory_size_in_bytes #inventory_size_in_bytes} => Integer
# * {Types::GlacierJobDescription#sns_topic #sns_topic} => String
# * {Types::GlacierJobDescription#completion_date #completion_date} => Time
# * {Types::GlacierJobDescription#sha256_tree_hash #sha256_tree_hash} => String
# * {Types::GlacierJobDescription#archive_sha256_tree_hash #archive_sha256_tree_hash} => String
# * {Types::GlacierJobDescription#retrieval_byte_range #retrieval_byte_range} => String
# * {Types::GlacierJobDescription#tier #tier} => String
# * {Types::GlacierJobDescription#inventory_retrieval_parameters #inventory_retrieval_parameters} => Types::InventoryRetrievalJobDescription
# * {Types::GlacierJobDescription#job_output_path #job_output_path} => String
# * {Types::GlacierJobDescription#select_parameters #select_parameters} => Types::SelectParameters
# * {Types::GlacierJobDescription#output_location #output_location} => Types::OutputLocation
#
#
# @example Example: To get information about a previously initiated job
#
# # The example returns information about the previously initiated job specified by the job ID.
#
# resp = client.describe_job({
# account_id: "-",
# job_id: "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4Cn",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# action: "InventoryRetrieval",
# completed: false,
# creation_date: Time.parse("2015-07-17T20:23:41.616Z"),
# inventory_retrieval_parameters: {
# format: "JSON",
# },
# job_id: "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4CnMW",
# status_code: "InProgress",
# vault_arn: "arn:aws:glacier:us-west-2:0123456789012:vaults/my-vault",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.describe_job({
# account_id: "string", # required
# vault_name: "string", # required
# job_id: "string", # required
# })
#
# @example Response structure
#
# resp.job_id #=> String
# resp.job_description #=> String
# resp.action #=> String, one of "ArchiveRetrieval", "InventoryRetrieval", "Select"
# resp.archive_id #=> String
# resp.vault_arn #=> String
# resp.creation_date #=> Time
# resp.completed #=> Boolean
# resp.status_code #=> String, one of "InProgress", "Succeeded", "Failed"
# resp.status_message #=> String
# resp.archive_size_in_bytes #=> Integer
# resp.inventory_size_in_bytes #=> Integer
# resp.sns_topic #=> String
# resp.completion_date #=> Time
# resp.sha256_tree_hash #=> String
# resp.archive_sha256_tree_hash #=> String
# resp.retrieval_byte_range #=> String
# resp.tier #=> String
# resp.inventory_retrieval_parameters.format #=> String
# resp.inventory_retrieval_parameters.start_date #=> Time
# resp.inventory_retrieval_parameters.end_date #=> Time
# resp.inventory_retrieval_parameters.limit #=> String
# resp.inventory_retrieval_parameters.marker #=> String
# resp.job_output_path #=> String
# resp.select_parameters.input_serialization.csv.file_header_info #=> String, one of "USE", "IGNORE", "NONE"
# resp.select_parameters.input_serialization.csv.comments #=> String
# resp.select_parameters.input_serialization.csv.quote_escape_character #=> String
# resp.select_parameters.input_serialization.csv.record_delimiter #=> String
# resp.select_parameters.input_serialization.csv.field_delimiter #=> String
# resp.select_parameters.input_serialization.csv.quote_character #=> String
# resp.select_parameters.expression_type #=> String, one of "SQL"
# resp.select_parameters.expression #=> String
# resp.select_parameters.output_serialization.csv.quote_fields #=> String, one of "ALWAYS", "ASNEEDED"
# resp.select_parameters.output_serialization.csv.quote_escape_character #=> String
# resp.select_parameters.output_serialization.csv.record_delimiter #=> String
# resp.select_parameters.output_serialization.csv.field_delimiter #=> String
# resp.select_parameters.output_serialization.csv.quote_character #=> String
# resp.output_location.s3.bucket_name #=> String
# resp.output_location.s3.prefix #=> String
# resp.output_location.s3.encryption.encryption_type #=> String, one of "aws:kms", "AES256"
# resp.output_location.s3.encryption.kms_key_id #=> String
# resp.output_location.s3.encryption.kms_context #=> String
# resp.output_location.s3.canned_acl #=> String, one of "private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
# resp.output_location.s3.access_control_list #=> Array
# resp.output_location.s3.access_control_list[0].grantee.type #=> String, one of "AmazonCustomerByEmail", "CanonicalUser", "Group"
# resp.output_location.s3.access_control_list[0].grantee.display_name #=> String
# resp.output_location.s3.access_control_list[0].grantee.uri #=> String
# resp.output_location.s3.access_control_list[0].grantee.id #=> String
# resp.output_location.s3.access_control_list[0].grantee.email_address #=> String
# resp.output_location.s3.access_control_list[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP"
# resp.output_location.s3.tagging #=> Hash
# resp.output_location.s3.tagging["string"] #=> String
# resp.output_location.s3.user_metadata #=> Hash
# resp.output_location.s3.user_metadata["string"] #=> String
# resp.output_location.s3.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"
#
# @overload describe_job(params = {})
# @param [Hash] params ({})
def describe_job(params = {}, options = {})
req = build_request(:describe_job, params)
req.send_request(options)
end
# This operation returns information about a vault, including the
# vault's Amazon Resource Name (ARN), the date the vault was created,
# the number of archives it contains, and the total size of all the
# archives in the vault. The number of archives and their total size are
# as of the last inventory generation. This means that if you add or
# remove an archive from a vault, and then immediately use Describe
# Vault, the change in contents will not be immediately reflected. If
# you want to retrieve the latest inventory of the vault, use
# InitiateJob. Amazon S3 Glacier generates vault inventories
# approximately daily. For more information, see [Downloading a Vault
# Inventory in Amazon S3 Glacier][1].
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][2].
#
# For conceptual information and underlying REST API, see [Retrieving
# Vault Metadata in Amazon S3 Glacier][3] and [Describe Vault ][4] in
# the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::DescribeVaultOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeVaultOutput#vault_arn #vault_arn} => String
# * {Types::DescribeVaultOutput#vault_name #vault_name} => String
# * {Types::DescribeVaultOutput#creation_date #creation_date} => Time
# * {Types::DescribeVaultOutput#last_inventory_date #last_inventory_date} => Time
# * {Types::DescribeVaultOutput#number_of_archives #number_of_archives} => Integer
# * {Types::DescribeVaultOutput#size_in_bytes #size_in_bytes} => Integer
#
#
# @example Example: To retrieve information about a vault
#
# # The example retrieves data about a vault named my-vault.
#
# resp = client.describe_vault({
# account_id: "-",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# creation_date: Time.parse("2016-09-23T19:27:18.665Z"),
# number_of_archives: 0,
# size_in_bytes: 0,
# vault_arn: "arn:aws:glacier:us-west-2:111122223333:vaults/my-vault",
# vault_name: "my-vault",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.describe_vault({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.vault_arn #=> String
# resp.vault_name #=> String
# resp.creation_date #=> Time
# resp.last_inventory_date #=> Time
# resp.number_of_archives #=> Integer
# resp.size_in_bytes #=> Integer
#
#
# The following waiters are defined for this operation (see {Client#wait_until} for detailed usage):
#
# * vault_exists
# * vault_not_exists
#
# @overload describe_vault(params = {})
# @param [Hash] params ({})
def describe_vault(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:describe_vault, params).send_request(options)
end
# This operation returns the current data retrieval policy for the
# account and region specified in the GET request. For more information
# about data retrieval policies, see [Amazon Glacier Data Retrieval
# Policies][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @return [Types::GetDataRetrievalPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetDataRetrievalPolicyOutput#policy #policy} => Types::DataRetrievalPolicy
#
#
# @example Example: To get the current data retrieval policy for an account
#
# # The example returns the current data retrieval policy for the account.
#
# resp = client.get_data_retrieval_policy({
# account_id: "-",
# })
#
# resp.to_h outputs the following:
# {
# policy: {
# rules: [
# {
# bytes_per_hour: 10737418240,
# strategy: "BytesPerHour",
# },
# ],
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_data_retrieval_policy({
# account_id: "string", # required
# })
#
# @example Response structure
#
# resp.policy.rules #=> Array
# resp.policy.rules[0].strategy #=> String
# resp.policy.rules[0].bytes_per_hour #=> Integer
#
# @overload get_data_retrieval_policy(params = {})
# @param [Hash] params ({})
def get_data_retrieval_policy(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:get_data_retrieval_policy, params).send_request(options)
end
# This operation downloads the output of the job you initiated using
# InitiateJob. Depending on the job type you specified when you
# initiated the job, the output will be either the content of an archive
# or a vault inventory.
#
# You can download all the job output or download a portion of the
# output by specifying a byte range. In the case of an archive retrieval
# job, depending on the byte range you specify, Amazon S3 Glacier
# (Glacier) returns the checksum for the portion of the data. You can
# compute the checksum on the client and verify that the values match to
# ensure the portion you downloaded is the correct data.
#
# A job ID will not expire for at least 24 hours after Glacier completes
# the job. If you download only a portion of the output, specify a byte
# range. For both archive and inventory retrieval jobs, you should verify
# the downloaded size against the size returned
# in the headers from the **Get Job Output** response.
#
# For archive retrieval jobs, you should also verify that the size is
# what you expected. If you download a portion of the output, the
# expected size is based on the range of bytes you specified. For
# example, if you specify a range of `bytes=0-1048575`, you should
# verify your download size is 1,048,576 bytes. If you download an
# entire archive, the expected size is the size of the archive when you
# uploaded it to Amazon S3 Glacier The expected size is also returned in
# the headers from the **Get Job Output** response.
#
# In the case of an archive retrieval job, depending on the byte range
# you specify, Glacier returns the checksum for the portion of the data.
# To ensure the portion you downloaded is the correct data, compute the
# checksum on the client, verify that the values match, and verify that
# the size is what you expected.
#
# A job ID does not expire for at least 24 hours after Glacier completes
# the job. That is, you can download the job output within the 24 hours
# period after Amazon Glacier completes the job.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and the underlying REST API, see
# [Downloading a Vault Inventory][2], [Downloading an Archive][3], and
# [Get Job Output ][4]
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-inventory.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/downloading-an-archive.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-job-output-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :job_id
# The job ID whose data is downloaded.
#
# @option params [String] :range
# The range of bytes to retrieve from the output. For example, if you
# want to download the first 1,048,576 bytes, specify the range as
# `bytes=0-1048575`. By default, this operation downloads the entire
# output.
#
# If the job output is large, then you can use a range to retrieve a
# portion of the output. This allows you to download the entire output
# in smaller chunks of bytes. For example, suppose you have 1 GB of job
# output you want to download and you decide to download 128 MB chunks
# of data at a time, which is a total of eight Get Job Output requests.
# You use the following process to download the job output:
#
# 1. Download a 128 MB chunk of output by specifying the appropriate
# byte range. Verify that all 128 MB of data was received.
#
# 2. Along with the data, the response includes a SHA256 tree hash of
# the payload. You compute the checksum of the payload on the client
# and compare it with the checksum you received in the response to
# ensure you received all the expected data.
#
# 3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output
# data, each time specifying the appropriate byte range.
#
# 4. After downloading all the parts of the job output, you have a list
# of eight checksum values. Compute the tree hash of these values to
# find the checksum of the entire output. Using the DescribeJob API,
# obtain job information of the job that provided you the output.
# The response includes the checksum of the entire archive stored in
# Amazon S3 Glacier. You compare this value with the checksum you
# computed to ensure you have downloaded the entire archive content
# with no errors.
#
# @return [Types::GetJobOutputOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetJobOutputOutput#body #body} => IO
# * {Types::GetJobOutputOutput#checksum #checksum} => String
# * {Types::GetJobOutputOutput#status #status} => Integer
# * {Types::GetJobOutputOutput#content_range #content_range} => String
# * {Types::GetJobOutputOutput#accept_ranges #accept_ranges} => String
# * {Types::GetJobOutputOutput#content_type #content_type} => String
# * {Types::GetJobOutputOutput#archive_description #archive_description} => String
#
#
# @example Example: To get the output of a previously initiated job
#
# # The example downloads the output of a previously initiated inventory retrieval job that is identified by the job ID.
#
# resp = client.get_job_output({
# account_id: "-",
# job_id: "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4CnMW",
# range: "",
#     vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# accept_ranges: "bytes",
# body: "inventory-data",
# content_type: "application/json",
# status: 200,
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_job_output({
# account_id: "string", # required
# vault_name: "string", # required
# job_id: "string", # required
# range: "string",
# })
#
# @example Response structure
#
# resp.body #=> IO
# resp.checksum #=> String
# resp.status #=> Integer
# resp.content_range #=> String
# resp.accept_ranges #=> String
# resp.content_type #=> String
# resp.archive_description #=> String
#
# @overload get_job_output(params = {})
# @param [Hash] params ({})
def get_job_output(params = {}, options = {}, &block)
  # Forward the caller's block so the response body can be streamed to it.
  build_request(:get_job_output, params).send_request(options, &block)
end
# This operation retrieves the `access-policy` subresource set on the
# vault; for more information on setting this subresource, see [Set
# Vault Access Policy (PUT access-policy)][1]. If there is no access
# policy set on the vault, the operation returns a `404 Not found`
# error. For more information about vault access policies, see [Amazon
# Glacier Access Control with Vault Access Policies][2].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::GetVaultAccessPolicyOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetVaultAccessPolicyOutput#policy #policy} => Types::VaultAccessPolicy
#
#
# @example Example: To get the access-policy set on the vault
#
# # The example retrieves the access-policy set on the vault named example-vault.
#
# resp = client.get_vault_access_policy({
# account_id: "-",
# vault_name: "example-vault",
# })
#
# resp.to_h outputs the following:
# {
# policy: {
# policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-owner-access-rights\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\"}]}",
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_vault_access_policy({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.policy.policy #=> String
#
# @overload get_vault_access_policy(params = {})
# @param [Hash] params ({})
def get_vault_access_policy(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:get_vault_access_policy, params).send_request(options)
end
# This operation retrieves the following attributes from the
# `lock-policy` subresource set on the specified vault:
#
# * The vault lock policy set on the vault.
#
# * The state of the vault lock, which is either `InProgress` or
#   `Locked`.
#
# * When the lock ID expires. The lock ID is used to complete the vault
# locking process.
#
# * When the vault lock was initiated and put into the `InProgress`
# state.
#
# A vault lock is put into the `InProgress` state by calling
# InitiateVaultLock. A vault lock is put into the `Locked` state by
# calling CompleteVaultLock. You can abort the vault locking process by
# calling AbortVaultLock. For more information about the vault locking
# process, see [Amazon Glacier Vault Lock][1].
#
# If there is no vault lock policy set on the vault, the operation
# returns a `404 Not found` error. For more information about vault lock
# policies, see [Amazon Glacier Access Control with Vault Lock
# Policies][2].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::GetVaultLockOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetVaultLockOutput#policy #policy} => String
# * {Types::GetVaultLockOutput#state #state} => String
# * {Types::GetVaultLockOutput#expiration_date #expiration_date} => Time
# * {Types::GetVaultLockOutput#creation_date #creation_date} => Time
#
#
# @example Example: To retrieve vault lock-policy related attributes that are set on a vault
#
# # The example retrieves the attributes from the lock-policy subresource set on the vault named examplevault.
#
# resp = client.get_vault_lock({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# creation_date: Time.parse("exampledate"),
# expiration_date: Time.parse("exampledate"),
# policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-vault-lock\",\"Effect\":\"Deny\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\",\"Condition\":{\"NumericLessThanEquals\":{\"glacier:ArchiveAgeinDays\":\"365\"}}}]}",
# state: "InProgress",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_vault_lock({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.policy #=> String
# resp.state #=> String
# resp.expiration_date #=> Time
# resp.creation_date #=> Time
#
# @overload get_vault_lock(params = {})
# @param [Hash] params ({})
def get_vault_lock(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:get_vault_lock, params).send_request(options)
end
# This operation retrieves the `notification-configuration` subresource
# of the specified vault.
#
# For information about setting a notification configuration on a vault,
# see SetVaultNotifications. If a notification configuration for a vault
# is not set, the operation returns a `404 Not Found` error. For more
# information about vault notifications, see [Configuring Vault
# Notifications in Amazon S3 Glacier][1].
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][2].
#
# For conceptual information and underlying REST API, see [Configuring
# Vault Notifications in Amazon S3 Glacier][1] and [Get Vault
# Notification Configuration ][3] in the *Amazon Glacier Developer
# Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::GetVaultNotificationsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetVaultNotificationsOutput#vault_notification_config #vault_notification_config} => Types::VaultNotificationConfig
#
#
# @example Example: To get the notification-configuration for the specified vault
#
# # The example retrieves the notification-configuration for the vault named my-vault.
#
# resp = client.get_vault_notifications({
# account_id: "-",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# vault_notification_config: {
# events: [
# "InventoryRetrievalCompleted",
# "ArchiveRetrievalCompleted",
# ],
# sns_topic: "arn:aws:sns:us-west-2:0123456789012:my-vault",
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.get_vault_notifications({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.vault_notification_config.sns_topic #=> String
# resp.vault_notification_config.events #=> Array
# resp.vault_notification_config.events[0] #=> String
#
# @overload get_vault_notifications(params = {})
# @param [Hash] params ({})
def get_vault_notifications(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:get_vault_notifications, params).send_request(options)
end
# This operation initiates a job of the specified type, which can be a
# select, an archival retrieval, or a vault retrieval. For more
# information about using this operation, see the documentation for the
# underlying REST API [Initiate a Job][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Types::JobParameters] :job_parameters
# Provides options for specifying job information.
#
# @return [Types::InitiateJobOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::InitiateJobOutput#location #location} => String
# * {Types::InitiateJobOutput#job_id #job_id} => String
# * {Types::InitiateJobOutput#job_output_path #job_output_path} => String
#
#
# @example Example: To initiate an inventory-retrieval job
#
# # The example initiates an inventory-retrieval job for the vault named examplevault.
#
# resp = client.initiate_job({
# account_id: "-",
# job_parameters: {
# description: "My inventory job",
# format: "CSV",
# sns_topic: "arn:aws:sns:us-west-2:111111111111:Glacier-InventoryRetrieval-topic-Example",
# type: "inventory-retrieval",
# },
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# job_id: " HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP54ZShjoQzQVVh7vEXAMPLEjobID",
# location: "/111122223333/vaults/examplevault/jobs/HkF9p6o7yjhFx-K3CGl6fuSm6VzW9T7esGQfco8nUXVYwS0jlb5gq1JZ55yHgt5vP54ZShjoQzQVVh7vEXAMPLEjobID",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.initiate_job({
# account_id: "string", # required
# vault_name: "string", # required
# job_parameters: {
# format: "string",
# type: "string",
# archive_id: "string",
# description: "string",
# sns_topic: "string",
# retrieval_byte_range: "string",
# tier: "string",
# inventory_retrieval_parameters: {
# start_date: Time.now,
# end_date: Time.now,
# limit: "string",
# marker: "string",
# },
# select_parameters: {
# input_serialization: {
# csv: {
# file_header_info: "USE", # accepts USE, IGNORE, NONE
# comments: "string",
# quote_escape_character: "string",
# record_delimiter: "string",
# field_delimiter: "string",
# quote_character: "string",
# },
# },
# expression_type: "SQL", # accepts SQL
# expression: "string",
# output_serialization: {
# csv: {
# quote_fields: "ALWAYS", # accepts ALWAYS, ASNEEDED
# quote_escape_character: "string",
# record_delimiter: "string",
# field_delimiter: "string",
# quote_character: "string",
# },
# },
# },
# output_location: {
# s3: {
# bucket_name: "string",
# prefix: "string",
# encryption: {
# encryption_type: "aws:kms", # accepts aws:kms, AES256
# kms_key_id: "string",
# kms_context: "string",
# },
# canned_acl: "private", # accepts private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, bucket-owner-full-control
# access_control_list: [
# {
# grantee: {
# type: "AmazonCustomerByEmail", # required, accepts AmazonCustomerByEmail, CanonicalUser, Group
# display_name: "string",
# uri: "string",
# id: "string",
# email_address: "string",
# },
# permission: "FULL_CONTROL", # accepts FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP
# },
# ],
# tagging: {
# "string" => "string",
# },
# user_metadata: {
# "string" => "string",
# },
# storage_class: "STANDARD", # accepts STANDARD, REDUCED_REDUNDANCY, STANDARD_IA
# },
# },
# },
# })
#
# @example Response structure
#
# resp.location #=> String
# resp.job_id #=> String
# resp.job_output_path #=> String
#
# @overload initiate_job(params = {})
# @param [Hash] params ({})
def initiate_job(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:initiate_job, params).send_request(options)
end
# This operation initiates a multipart upload. Amazon S3 Glacier creates
# a multipart upload resource and returns its ID in the response. The
# multipart upload ID is used in subsequent requests to upload parts of
# an archive (see UploadMultipartPart).
#
# When you initiate a multipart upload, you specify the part size in
# number of bytes. The part size must be a megabyte (1024 KB) multiplied
# by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304
# (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is
# 1 MB, and the maximum is 4 GB.
#
# Every part you upload to this resource (see UploadMultipartPart),
# except the last one, must have the same size. The last one can be the
# same size or smaller. For example, suppose you want to upload a 16.2
# MB file. If you initiate the multipart upload with a part size of 4
# MB, you will upload four parts of 4 MB each and one part of 0.2 MB.
#
# <note markdown="1"> You don't need to know the size of the archive when you start a
# multipart upload because Amazon S3 Glacier does not require you to
# specify the overall archive size.
#
# </note>
#
# After you complete the multipart upload, Amazon S3 Glacier (Glacier)
# removes the multipart upload resource referenced by the ID. Glacier
# also removes the multipart upload resource if you cancel the multipart
# upload or it may be removed if there is no activity for a period of 24
# hours.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Uploading
# Large Archives in Parts (Multipart Upload)][2] and [Initiate Multipart
# Upload][3] in the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [String] :archive_description
# The archive description that you are uploading in parts.
#
# The part size must be a megabyte (1024 KB) multiplied by a power of 2,
# for example 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8
# MB), and so on. The minimum allowable part size is 1 MB, and the
# maximum is 4 GB (4096 MB).
#
# @option params [Integer] :part_size
# The size of each part except the last, in bytes. The last part can be
# smaller than this part size.
#
# @return [Types::InitiateMultipartUploadOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::InitiateMultipartUploadOutput#location #location} => String
# * {Types::InitiateMultipartUploadOutput#upload_id #upload_id} => String
#
#
# @example Example: To initiate a multipart upload
#
# # The example initiates a multipart upload to a vault named my-vault with a part size of 1 MiB (1024 x 1024 bytes) per
# # file.
#
# resp = client.initiate_multipart_upload({
# account_id: "-",
# part_size: 1048576,
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# location: "/111122223333/vaults/my-vault/multipart-uploads/19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
# upload_id: "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.initiate_multipart_upload({
# account_id: "string", # required
# vault_name: "string", # required
# archive_description: "string",
# part_size: 1,
# })
#
# @example Response structure
#
# resp.location #=> String
# resp.upload_id #=> String
#
# @overload initiate_multipart_upload(params = {})
# @param [Hash] params ({})
def initiate_multipart_upload(params = {}, options = {})
  # Build the operation request, then dispatch it with any transport options.
  build_request(:initiate_multipart_upload, params).send_request(options)
end
# This operation initiates the vault locking process by doing the
# following:
#
# * Installing a vault lock policy on the specified vault.
#
# * Setting the lock state of vault lock to `InProgress`.
#
# * Returning a lock ID, which is used to complete the vault locking
# process.
#
# You can set one vault lock policy for each vault and this policy can
# be up to 20 KB in size. For more information about vault lock
# policies, see [Amazon Glacier Access Control with Vault Lock
# Policies][1].
#
# You must complete the vault locking process within 24 hours after the
# vault lock enters the `InProgress` state. After the 24 hour window
# ends, the lock ID expires, the vault automatically exits the
# `InProgress` state, and the vault lock policy is removed from the
# vault. You call CompleteVaultLock to complete the vault locking
# process by setting the state of the vault lock to `Locked`.
#
# After a vault lock is in the `Locked` state, you cannot initiate a new
# vault lock for the vault.
#
# You can abort the vault locking process by calling AbortVaultLock. You
# can get the state of the vault lock by calling GetVaultLock. For more
# information about the vault locking process, see [Amazon Glacier Vault
# Lock][2].
#
# If this operation is called when the vault lock is in the `InProgress`
# state, the operation returns an `AccessDeniedException` error. When
# the vault lock is in the `InProgress` state you must call
# AbortVaultLock before you can initiate a new vault lock policy.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Types::VaultLockPolicy] :policy
# The vault lock policy as a JSON string, which uses "\\" as an escape
# character.
#
# @return [Types::InitiateVaultLockOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::InitiateVaultLockOutput#lock_id #lock_id} => String
#
#
# @example Example: To initiate the vault locking process
#
# # The example initiates the vault locking process for the vault named my-vault.
#
# resp = client.initiate_vault_lock({
# account_id: "-",
# policy: {
# policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-vault-lock\",\"Effect\":\"Deny\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\",\"Condition\":{\"NumericLessThanEquals\":{\"glacier:ArchiveAgeinDays\":\"365\"}}}]}",
# },
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# lock_id: "AE863rKkWZU53SLW5be4DUcW",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.initiate_vault_lock({
# account_id: "string", # required
# vault_name: "string", # required
# policy: {
# policy: "string",
# },
# })
#
# @example Response structure
#
# resp.lock_id #=> String
#
# @overload initiate_vault_lock(params = {})
# @param [Hash] params ({})
def initiate_vault_lock(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:initiate_vault_lock, params).send_request(options)
end
# This operation lists jobs for a vault, including jobs that are
# in-progress and jobs that have recently finished. The List Job
# operation returns a list of these jobs sorted by job initiation time.
#
# <note markdown="1"> Amazon Glacier retains recently completed jobs for a period before
# deleting them; however, it eventually removes completed jobs. The
# output of completed jobs can be retrieved. Retaining completed jobs
# for a period of time after they have completed enables you to get a
# job output in the event you miss the job completion notification or
# your first attempt to download it fails. For example, suppose you
# start an archive retrieval job to download an archive. After the job
# completes, you start to download the archive but encounter a network
# error. In this scenario, you can retry and download the archive while
# the job exists.
#
# </note>
#
# The List Jobs operation supports pagination. You should always check
# the response `Marker` field. If there are no more jobs to list, the
# `Marker` field is set to `null`. If there are more jobs to list, the
# `Marker` field is set to a non-null value, which you can use to
# continue the pagination of the list. To return a list of jobs that
# begins at a specific job, set the marker request parameter to the
# `Marker` value for that job that you obtained from a previous List
# Jobs request.
#
# You can set a maximum limit for the number of jobs returned in the
# response by specifying the `limit` parameter in the request. The
# default limit is 50. The number of jobs returned might be fewer than
# the limit, but the number of returned jobs never exceeds the limit.
#
# Additionally, you can filter the jobs list returned by specifying the
# optional `statuscode` parameter or `completed` parameter, or both.
# Using the `statuscode` parameter, you can specify to return only jobs
# that match either the `InProgress`, `Succeeded`, or `Failed` status.
# Using the `completed` parameter, you can specify to return only jobs
# that were completed (`true`) or jobs that were not completed
# (`false`).
#
# For more information about using this operation, see the documentation
# for the underlying REST API [List Jobs][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Integer] :limit
# The maximum number of jobs to be returned. The default limit is 50.
# The number of jobs returned might be fewer than the specified limit,
# but the number of returned jobs never exceeds the limit.
#
# @option params [String] :marker
# An opaque string used for pagination. This value specifies the job at
# which the listing of jobs should begin. Get the marker value from a
# previous List Jobs response. You only need to include the marker if
# you are continuing the pagination of results started in a previous
# List Jobs request.
#
# @option params [String] :statuscode
# The type of job status to return. You can specify the following
# values: `InProgress`, `Succeeded`, or `Failed`.
#
# @option params [String] :completed
# The state of the jobs to return. You can specify `true` or `false`.
#
# @return [Types::ListJobsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListJobsOutput#job_list #job_list} => Array<Types::GlacierJobDescription>
# * {Types::ListJobsOutput#marker #marker} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list jobs for a vault
#
# # The example lists jobs for the vault named my-vault.
#
# resp = client.list_jobs({
# account_id: "-",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# job_list: [
# {
# action: "ArchiveRetrieval",
# archive_id: "kKB7ymWJVpPSwhGP6ycSOAekp9ZYe_--zM_mw6k76ZFGEIWQX-ybtRDvc2VkPSDtfKmQrj0IRQLSGsNuDp-AJVlu2ccmDSyDUmZwKbwbpAdGATGDiB3hHO0bjbGehXTcApVud_wyDw",
# archive_sha256_tree_hash: "9628195fcdbcbbe76cdde932d4646fa7de5f219fb39823836d81f0cc0e18aa67",
# archive_size_in_bytes: 3145728,
# completed: false,
# creation_date: Time.parse("2015-07-17T21:16:13.840Z"),
# job_description: "Retrieve archive on 2015-07-17",
# job_id: "l7IL5-EkXyEY9Ws95fClzIbk2O5uLYaFdAYOi-azsX_Z8V6NH4yERHzars8wTKYQMX6nBDI9cMNHzyZJO59-8N9aHWav",
# retrieval_byte_range: "0-3145727",
# sha256_tree_hash: "9628195fcdbcbbe76cdde932d4646fa7de5f219fb39823836d81f0cc0e18aa67",
# sns_topic: "arn:aws:sns:us-west-2:0123456789012:my-vault",
# status_code: "InProgress",
# vault_arn: "arn:aws:glacier:us-west-2:0123456789012:vaults/my-vault",
# },
# {
# action: "InventoryRetrieval",
# completed: false,
# creation_date: Time.parse("2015-07-17T20:23:41.616Z"),
# inventory_retrieval_parameters: {
# format: "JSON",
# },
# job_id: "zbxcm3Z_3z5UkoroF7SuZKrxgGoDc3RloGduS7Eg-RO47Yc6FxsdGBgf_Q2DK5Ejh18CnTS5XW4_XqlNHS61dsO4CnMW",
# status_code: "InProgress",
# vault_arn: "arn:aws:glacier:us-west-2:0123456789012:vaults/my-vault",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_jobs({
# account_id: "string", # required
# vault_name: "string", # required
# limit: 1,
# marker: "string",
# statuscode: "string",
# completed: "string",
# })
#
# @example Response structure
#
# resp.job_list #=> Array
# resp.job_list[0].job_id #=> String
# resp.job_list[0].job_description #=> String
# resp.job_list[0].action #=> String, one of "ArchiveRetrieval", "InventoryRetrieval", "Select"
# resp.job_list[0].archive_id #=> String
# resp.job_list[0].vault_arn #=> String
# resp.job_list[0].creation_date #=> Time
# resp.job_list[0].completed #=> Boolean
# resp.job_list[0].status_code #=> String, one of "InProgress", "Succeeded", "Failed"
# resp.job_list[0].status_message #=> String
# resp.job_list[0].archive_size_in_bytes #=> Integer
# resp.job_list[0].inventory_size_in_bytes #=> Integer
# resp.job_list[0].sns_topic #=> String
# resp.job_list[0].completion_date #=> Time
# resp.job_list[0].sha256_tree_hash #=> String
# resp.job_list[0].archive_sha256_tree_hash #=> String
# resp.job_list[0].retrieval_byte_range #=> String
# resp.job_list[0].tier #=> String
# resp.job_list[0].inventory_retrieval_parameters.format #=> String
# resp.job_list[0].inventory_retrieval_parameters.start_date #=> Time
# resp.job_list[0].inventory_retrieval_parameters.end_date #=> Time
# resp.job_list[0].inventory_retrieval_parameters.limit #=> String
# resp.job_list[0].inventory_retrieval_parameters.marker #=> String
# resp.job_list[0].job_output_path #=> String
# resp.job_list[0].select_parameters.input_serialization.csv.file_header_info #=> String, one of "USE", "IGNORE", "NONE"
# resp.job_list[0].select_parameters.input_serialization.csv.comments #=> String
# resp.job_list[0].select_parameters.input_serialization.csv.quote_escape_character #=> String
# resp.job_list[0].select_parameters.input_serialization.csv.record_delimiter #=> String
# resp.job_list[0].select_parameters.input_serialization.csv.field_delimiter #=> String
# resp.job_list[0].select_parameters.input_serialization.csv.quote_character #=> String
# resp.job_list[0].select_parameters.expression_type #=> String, one of "SQL"
# resp.job_list[0].select_parameters.expression #=> String
# resp.job_list[0].select_parameters.output_serialization.csv.quote_fields #=> String, one of "ALWAYS", "ASNEEDED"
# resp.job_list[0].select_parameters.output_serialization.csv.quote_escape_character #=> String
# resp.job_list[0].select_parameters.output_serialization.csv.record_delimiter #=> String
# resp.job_list[0].select_parameters.output_serialization.csv.field_delimiter #=> String
# resp.job_list[0].select_parameters.output_serialization.csv.quote_character #=> String
# resp.job_list[0].output_location.s3.bucket_name #=> String
# resp.job_list[0].output_location.s3.prefix #=> String
# resp.job_list[0].output_location.s3.encryption.encryption_type #=> String, one of "aws:kms", "AES256"
# resp.job_list[0].output_location.s3.encryption.kms_key_id #=> String
# resp.job_list[0].output_location.s3.encryption.kms_context #=> String
# resp.job_list[0].output_location.s3.canned_acl #=> String, one of "private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
# resp.job_list[0].output_location.s3.access_control_list #=> Array
# resp.job_list[0].output_location.s3.access_control_list[0].grantee.type #=> String, one of "AmazonCustomerByEmail", "CanonicalUser", "Group"
# resp.job_list[0].output_location.s3.access_control_list[0].grantee.display_name #=> String
# resp.job_list[0].output_location.s3.access_control_list[0].grantee.uri #=> String
# resp.job_list[0].output_location.s3.access_control_list[0].grantee.id #=> String
# resp.job_list[0].output_location.s3.access_control_list[0].grantee.email_address #=> String
# resp.job_list[0].output_location.s3.access_control_list[0].permission #=> String, one of "FULL_CONTROL", "WRITE", "WRITE_ACP", "READ", "READ_ACP"
# resp.job_list[0].output_location.s3.tagging #=> Hash
# resp.job_list[0].output_location.s3.tagging["string"] #=> String
# resp.job_list[0].output_location.s3.user_metadata #=> Hash
# resp.job_list[0].output_location.s3.user_metadata["string"] #=> String
# resp.job_list[0].output_location.s3.storage_class #=> String, one of "STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"
# resp.marker #=> String
#
# @overload list_jobs(params = {})
# @param [Hash] params ({})
def list_jobs(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_jobs, params).send_request(options)
end
# This operation lists in-progress multipart uploads for the specified
# vault. An in-progress multipart upload is a multipart upload that has
# been initiated by an InitiateMultipartUpload request, but has not yet
# been completed or aborted. The list returned in the List Multipart
# Upload response has no guaranteed order.
#
# The List Multipart Uploads operation supports pagination. By default,
# this operation returns up to 50 multipart uploads in the response. You
# should always check the response for a `marker` at which to continue
# the list; if there are no more items the `marker` is `null`. To return
# a list of multipart uploads that begins at a specific upload, set the
# `marker` request parameter to the value you obtained from a previous
# List Multipart Upload request. You can also limit the number of
# uploads returned in the response by specifying the `limit` parameter
# in the request.
#
# Note the difference between this operation and listing parts
# (ListParts). The List Multipart Uploads operation lists all multipart
# uploads for a vault and does not require a multipart upload ID. The
# List Parts operation requires a multipart upload ID since parts are
# associated with a single upload.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and the underlying REST API, see [Working
# with Archives in Amazon S3 Glacier][2] and [List Multipart Uploads
# ][3] in the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-uploads.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [String] :marker
# An opaque string used for pagination. This value specifies the upload
# at which the listing of uploads should begin. Get the marker value
# from a previous List Uploads response. You need only include the
# marker if you are continuing the pagination of results started in a
# previous List Uploads request.
#
# @option params [Integer] :limit
# Specifies the maximum number of uploads returned in the response body.
# If this value is not specified, the List Uploads operation returns up
# to 50 uploads.
#
# @return [Types::ListMultipartUploadsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListMultipartUploadsOutput#uploads_list #uploads_list} => Array<Types::UploadListElement>
# * {Types::ListMultipartUploadsOutput#marker #marker} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list all the in-progress multipart uploads for a vault
#
# # The example lists all the in-progress multipart uploads for the vault named examplevault.
#
# resp = client.list_multipart_uploads({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# marker: "null",
# uploads_list: [
# {
# archive_description: "archive 1",
# creation_date: Time.parse("2012-03-19T23:20:59.130Z"),
# multipart_upload_id: "xsQdFIRsfJr20CW2AbZBKpRZAFTZSJIMtL2hYf8mvp8dM0m4RUzlaqoEye6g3h3ecqB_zqwB7zLDMeSWhwo65re4C4Ev",
# part_size_in_bytes: 4194304,
# vault_arn: "arn:aws:glacier:us-west-2:012345678901:vaults/examplevault",
# },
# {
# archive_description: "archive 2",
# creation_date: Time.parse("2012-04-01T15:00:00.000Z"),
# multipart_upload_id: "nPyGOnyFcx67qqX7E-0tSGiRi88hHMOwOxR-_jNyM6RjVMFfV29lFqZ3rNsSaWBugg6OP92pRtufeHdQH7ClIpSF6uJc",
# part_size_in_bytes: 4194304,
# vault_arn: "arn:aws:glacier:us-west-2:012345678901:vaults/examplevault",
# },
# {
# archive_description: "archive 3",
# creation_date: Time.parse("2012-03-20T17:03:43.221Z"),
# multipart_upload_id: "qt-RBst_7yO8gVIonIBsAxr2t-db0pE4s8MNeGjKjGdNpuU-cdSAcqG62guwV9r5jh5mLyFPzFEitTpNE7iQfHiu1XoV",
# part_size_in_bytes: 4194304,
# vault_arn: "arn:aws:glacier:us-west-2:012345678901:vaults/examplevault",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_multipart_uploads({
# account_id: "string", # required
# vault_name: "string", # required
# marker: "string",
# limit: 1,
# })
#
# @example Response structure
#
# resp.uploads_list #=> Array
# resp.uploads_list[0].multipart_upload_id #=> String
# resp.uploads_list[0].vault_arn #=> String
# resp.uploads_list[0].archive_description #=> String
# resp.uploads_list[0].part_size_in_bytes #=> Integer
# resp.uploads_list[0].creation_date #=> Time
# resp.marker #=> String
#
# @overload list_multipart_uploads(params = {})
# @param [Hash] params ({})
def list_multipart_uploads(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_multipart_uploads, params).send_request(options)
end
# This operation lists the parts of an archive that have been uploaded
# in a specific multipart upload. You can make this request at any time
# during an in-progress multipart upload before you complete the upload
# (see CompleteMultipartUpload. List Parts returns an error for
# completed uploads. The list returned in the List Parts response is
# sorted by part range.
#
# The List Parts operation supports pagination. By default, this
# operation returns up to 50 uploaded parts in the response. You should
# always check the response for a `marker` at which to continue the
# list; if there are no more items the `marker` is `null`. To return a
# list of parts that begins at a specific part, set the `marker` request
# parameter to the value you obtained from a previous List Parts
# request. You can also limit the number of parts returned in the
# response by specifying the `limit` parameter in the request.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and the underlying REST API, see [Working
# with Archives in Amazon S3 Glacier][2] and [List Parts][3] in the
# *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-archives.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-multipart-list-parts.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :upload_id
# The upload ID of the multipart upload.
#
# @option params [String] :marker
# An opaque string used for pagination. This value specifies the part at
# which the listing of parts should begin. Get the marker value from the
# response of a previous List Parts response. You need only include the
# marker if you are continuing the pagination of results started in a
# previous List Parts request.
#
# @option params [Integer] :limit
# The maximum number of parts to be returned. The default limit is 50.
# The number of parts returned might be fewer than the specified limit,
# but the number of returned parts never exceeds the limit.
#
# @return [Types::ListPartsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListPartsOutput#multipart_upload_id #multipart_upload_id} => String
# * {Types::ListPartsOutput#vault_arn #vault_arn} => String
# * {Types::ListPartsOutput#archive_description #archive_description} => String
# * {Types::ListPartsOutput#part_size_in_bytes #part_size_in_bytes} => Integer
# * {Types::ListPartsOutput#creation_date #creation_date} => Time
# * {Types::ListPartsOutput#parts #parts} => Array<Types::PartListElement>
# * {Types::ListPartsOutput#marker #marker} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list the parts of an archive that have been uploaded in a multipart upload
#
# # The example lists all the parts of a multipart upload.
#
# resp = client.list_parts({
# account_id: "-",
# upload_id: "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE",
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# archive_description: "archive description",
# creation_date: Time.parse("2012-03-20T17:03:43.221Z"),
# marker: "null",
# multipart_upload_id: "OW2fM5iVylEpFEMM9_HpKowRapC3vn5sSL39_396UW9zLFUWVrnRHaPjUJddQ5OxSHVXjYtrN47NBZ-khxOjyEXAMPLE",
# part_size_in_bytes: 4194304,
# parts: [
# {
# range_in_bytes: "0-4194303",
# sha256_tree_hash: "01d34dabf7be316472c93b1ef80721f5d4",
# },
# {
# range_in_bytes: "4194304-8388607",
# sha256_tree_hash: "0195875365afda349fc21c84c099987164",
# },
# ],
# vault_arn: "arn:aws:glacier:us-west-2:012345678901:vaults/demo1-vault",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_parts({
# account_id: "string", # required
# vault_name: "string", # required
# upload_id: "string", # required
# marker: "string",
# limit: 1,
# })
#
# @example Response structure
#
# resp.multipart_upload_id #=> String
# resp.vault_arn #=> String
# resp.archive_description #=> String
# resp.part_size_in_bytes #=> Integer
# resp.creation_date #=> Time
# resp.parts #=> Array
# resp.parts[0].range_in_bytes #=> String
# resp.parts[0].sha256_tree_hash #=> String
# resp.marker #=> String
#
# @overload list_parts(params = {})
# @param [Hash] params ({})
def list_parts(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_parts, params).send_request(options)
end
# This operation lists the provisioned capacity units for the specified
# AWS account.
#
# @option params [required, String] :account_id
# The AWS account ID of the account that owns the vault. You can either
# specify an AWS account ID or optionally a single '-' (hyphen), in
# which case Amazon S3 Glacier uses the AWS account ID associated with
# the credentials used to sign the request. If you use an account ID,
# don't include any hyphens ('-') in the ID.
#
# @return [Types::ListProvisionedCapacityOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListProvisionedCapacityOutput#provisioned_capacity_list #provisioned_capacity_list} => Array<Types::ProvisionedCapacityDescription>
#
#
# @example Example: To list the provisioned capacity units for an account
#
# # The example lists the provisioned capacity units for an account.
#
# resp = client.list_provisioned_capacity({
# account_id: "-",
# })
#
# resp.to_h outputs the following:
# {
# provisioned_capacity_list: [
# {
# capacity_id: "zSaq7NzHFQDANTfQkDen4V7z",
# expiration_date: Time.parse("2016-12-12T00:00:00.000Z"),
# start_date: Time.parse("2016-11-11T20:11:51.095Z"),
# },
# {
# capacity_id: "yXaq7NzHFQNADTfQkDen4V7z",
# expiration_date: Time.parse("2017-01-15T00:00:00.000Z"),
# start_date: Time.parse("2016-12-13T20:11:51.095Z"),
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_provisioned_capacity({
# account_id: "string", # required
# })
#
# @example Response structure
#
# resp.provisioned_capacity_list #=> Array
# resp.provisioned_capacity_list[0].capacity_id #=> String
# resp.provisioned_capacity_list[0].start_date #=> Time
# resp.provisioned_capacity_list[0].expiration_date #=> Time
#
# @overload list_provisioned_capacity(params = {})
# @param [Hash] params ({})
def list_provisioned_capacity(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_provisioned_capacity, params).send_request(options)
end
# This operation lists all the tags attached to a vault. The operation
# returns an empty map if there are no tags. For more information about
# tags, see [Tagging Amazon S3 Glacier Resources][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @return [Types::ListTagsForVaultOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListTagsForVaultOutput#tags #tags} => Hash<String,String>
#
#
# @example Example: To list the tags for a vault
#
# # The example lists all the tags attached to the vault examplevault.
#
# resp = client.list_tags_for_vault({
# account_id: "-",
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# tags: {
# "date" => "july2015",
# "id" => "1234",
# },
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_tags_for_vault({
# account_id: "string", # required
# vault_name: "string", # required
# })
#
# @example Response structure
#
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @overload list_tags_for_vault(params = {})
# @param [Hash] params ({})
def list_tags_for_vault(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_tags_for_vault, params).send_request(options)
end
# This operation lists all vaults owned by the calling user's account.
# The list returned in the response is ASCII-sorted by vault name.
#
# By default, this operation returns up to 10 items. If there are more
# vaults to list, the response `marker` field contains the vault Amazon
# Resource Name (ARN) at which to continue the list with a new List
# Vaults request; otherwise, the `marker` field is `null`. To return a
# list of vaults that begins at a specific vault, set the `marker`
# request parameter to the vault ARN you obtained from a previous List
# Vaults request. You can also limit the number of vaults returned in
# the response by specifying the `limit` parameter in the request.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Retrieving
# Vault Metadata in Amazon S3 Glacier][2] and [List Vaults ][3] in the
# *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/retrieving-vault-info.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [String] :marker
# A string used for pagination. The marker specifies the vault ARN after
# which the listing of vaults should begin.
#
# @option params [Integer] :limit
# The maximum number of vaults to be returned. The default limit is 10.
# The number of vaults returned might be fewer than the specified limit,
# but the number of returned vaults never exceeds the limit.
#
# @return [Types::ListVaultsOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListVaultsOutput#vault_list #vault_list} => Array<Types::DescribeVaultOutput>
# * {Types::ListVaultsOutput#marker #marker} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
#
# @example Example: To list all vaults owned by the calling user's account
#
# # The example lists all vaults owned by the specified AWS account.
#
# resp = client.list_vaults({
# account_id: "-",
#     limit: 1,
# marker: "",
# })
#
# resp.to_h outputs the following:
# {
# vault_list: [
# {
# creation_date: Time.parse("2015-04-06T21:23:45.708Z"),
# last_inventory_date: Time.parse("2015-04-07T00:26:19.028Z"),
# number_of_archives: 1,
# size_in_bytes: 3178496,
# vault_arn: "arn:aws:glacier:us-west-2:0123456789012:vaults/my-vault",
# vault_name: "my-vault",
# },
# ],
# }
#
# @example Request syntax with placeholder values
#
# resp = client.list_vaults({
# account_id: "string", # required
# marker: "string",
# limit: 1,
# })
#
# @example Response structure
#
# resp.vault_list #=> Array
# resp.vault_list[0].vault_arn #=> String
# resp.vault_list[0].vault_name #=> String
# resp.vault_list[0].creation_date #=> Time
# resp.vault_list[0].last_inventory_date #=> Time
# resp.vault_list[0].number_of_archives #=> Integer
# resp.vault_list[0].size_in_bytes #=> Integer
# resp.marker #=> String
#
# @overload list_vaults(params = {})
# @param [Hash] params ({})
def list_vaults(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:list_vaults, params).send_request(options)
end
# This operation purchases a provisioned capacity unit for an AWS
# account.
#
# @option params [required, String] :account_id
# The AWS account ID of the account that owns the vault. You can either
# specify an AWS account ID or optionally a single '-' (hyphen), in
# which case Amazon S3 Glacier uses the AWS account ID associated with
# the credentials used to sign the request. If you use an account ID,
# don't include any hyphens ('-') in the ID.
#
# @return [Types::PurchaseProvisionedCapacityOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::PurchaseProvisionedCapacityOutput#capacity_id #capacity_id} => String
#
#
# @example Example: To purchases a provisioned capacity unit for an AWS account
#
# # The example purchases provisioned capacity unit for an AWS account.
#
# resp = client.purchase_provisioned_capacity({
# account_id: "-",
# })
#
# resp.to_h outputs the following:
# {
# capacity_id: "zSaq7NzHFQDANTfQkDen4V7z",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.purchase_provisioned_capacity({
# account_id: "string", # required
# })
#
# @example Response structure
#
# resp.capacity_id #=> String
#
# @overload purchase_provisioned_capacity(params = {})
# @param [Hash] params ({})
def purchase_provisioned_capacity(params = {}, options = {})
  # Build the operation request and dispatch it in one expression.
  build_request(:purchase_provisioned_capacity, params).send_request(options)
end
# This operation removes one or more tags from the set of tags attached
# to a vault. For more information about tags, see [Tagging Amazon S3
# Glacier Resources][1]. This operation is idempotent. The operation
# will be successful, even if there are no tags attached to the vault.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/tagging.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Array<String>] :tag_keys
# A list of tag keys. Each corresponding tag is removed from the vault.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To remove tags from a vault
#
# # The example removes two tags from the vault named examplevault.
#
# resp = client.remove_tags_from_vault({
# tag_keys: [
# "examplekey1",
# "examplekey2",
# ],
# account_id: "-",
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.remove_tags_from_vault({
# account_id: "string", # required
# vault_name: "string", # required
# tag_keys: ["string"],
# })
#
# @overload remove_tags_from_vault(params = {})
# @param [Hash] params ({})
def remove_tags_from_vault(params = {}, options = {})
  # Build and immediately dispatch the RemoveTagsFromVault request.
  build_request(:remove_tags_from_vault, params).send_request(options)
end
# This operation sets and then enacts a data retrieval policy in the
# region specified in the PUT request. You can set one policy per region
# for an AWS account. The policy is enacted within a few minutes of a
# successful PUT operation.
#
# The set policy operation does not affect retrieval jobs that were in
# progress before the policy was enacted. For more information about
# data retrieval policies, see [Amazon Glacier Data Retrieval
# Policies][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/data-retrieval-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID. This value must match the
# AWS account ID associated with the credentials used to sign the
# request. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you specify your account ID, do not include any hyphens ('-') in
# the ID.
#
# @option params [Types::DataRetrievalPolicy] :policy
# The data retrieval policy in JSON format.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To set and then enact a data retrieval policy
#
# # The example sets and then enacts a data retrieval policy.
#
# resp = client.set_data_retrieval_policy({
# policy: {
# rules: [
# {
# bytes_per_hour: 10737418240,
# strategy: "BytesPerHour",
# },
# ],
# },
# account_id: "-",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.set_data_retrieval_policy({
# account_id: "string", # required
# policy: {
# rules: [
# {
# strategy: "string",
# bytes_per_hour: 1,
# },
# ],
# },
# })
#
# @overload set_data_retrieval_policy(params = {})
# @param [Hash] params ({})
def set_data_retrieval_policy(params = {}, options = {})
  # Build and immediately dispatch the SetDataRetrievalPolicy request.
  build_request(:set_data_retrieval_policy, params).send_request(options)
end
# This operation configures an access policy for a vault and will
# overwrite an existing policy. To configure a vault access policy, send
# a PUT request to the `access-policy` subresource of the vault. An
# access policy is specific to a vault and is also called a vault
# subresource. You can set one access policy per vault and the policy
# can be up to 20 KB in size. For more information about vault access
# policies, see [Amazon Glacier Access Control with Vault Access
# Policies][1].
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Types::VaultAccessPolicy] :policy
# The vault access policy as a JSON string.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To set the access-policy on a vault
#
# # The example configures an access policy for the vault named examplevault.
#
# resp = client.set_vault_access_policy({
# account_id: "-",
# policy: {
# policy: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Define-owner-access-rights\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999999999999:root\"},\"Action\":\"glacier:DeleteArchive\",\"Resource\":\"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\"}]}",
# },
# vault_name: "examplevault",
# })
#
# @example Request syntax with placeholder values
#
# resp = client.set_vault_access_policy({
# account_id: "string", # required
# vault_name: "string", # required
# policy: {
# policy: "string",
# },
# })
#
# @overload set_vault_access_policy(params = {})
# @param [Hash] params ({})
def set_vault_access_policy(params = {}, options = {})
  # Build and immediately dispatch the SetVaultAccessPolicy request.
  build_request(:set_vault_access_policy, params).send_request(options)
end
# This operation configures notifications that will be sent when
# specific events happen to a vault. By default, you don't get any
# notifications.
#
# To configure vault notifications, send a PUT request to the
# `notification-configuration` subresource of the vault. The request
# should include a JSON document that provides an Amazon SNS topic and
# specific events for which you want Amazon S3 Glacier to send
# notifications to the topic.
#
# Amazon SNS topics must grant permission to the vault to be allowed to
# publish notifications to the topic. You can configure a vault to
# publish a notification for the following vault events:
#
# * **ArchiveRetrievalCompleted** This event occurs when a job that was
# initiated for an archive retrieval is completed (InitiateJob). The
# status of the completed job can be "Succeeded" or "Failed". The
# notification sent to the SNS topic is the same output as returned
# from DescribeJob.
#
# * **InventoryRetrievalCompleted** This event occurs when a job that
# was initiated for an inventory retrieval is completed (InitiateJob).
# The status of the completed job can be "Succeeded" or "Failed".
# The notification sent to the SNS topic is the same output as
# returned from DescribeJob.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][1].
#
# For conceptual information and underlying REST API, see [Configuring
# Vault Notifications in Amazon S3 Glacier][2] and [Set Vault
# Notification Configuration ][3] in the *Amazon Glacier Developer
# Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/configuring-notifications.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vault-notifications-put.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [Types::VaultNotificationConfig] :vault_notification_config
# Provides options for specifying notification configuration.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
#
# @example Example: To configure a vault to post a message to an Amazon SNS topic when jobs complete
#
# # The example sets the examplevault notification configuration.
#
# resp = client.set_vault_notifications({
# account_id: "-",
# vault_name: "examplevault",
# vault_notification_config: {
# events: [
# "ArchiveRetrievalCompleted",
# "InventoryRetrievalCompleted",
# ],
# sns_topic: "arn:aws:sns:us-west-2:012345678901:mytopic",
# },
# })
#
# @example Request syntax with placeholder values
#
# resp = client.set_vault_notifications({
# account_id: "string", # required
# vault_name: "string", # required
# vault_notification_config: {
# sns_topic: "string",
# events: ["string"],
# },
# })
#
# @overload set_vault_notifications(params = {})
# @param [Hash] params ({})
def set_vault_notifications(params = {}, options = {})
  # Build and immediately dispatch the SetVaultNotifications request.
  build_request(:set_vault_notifications, params).send_request(options)
end
# This operation adds an archive to a vault. This is a synchronous
# operation, and for a successful upload, your data is durably
# persisted. Amazon S3 Glacier returns the archive ID in the
# `x-amz-archive-id` header of the response.
#
# You must use the archive ID to access your data in Amazon S3 Glacier.
# After you upload an archive, you should save the archive ID returned
# so that you can retrieve or delete the archive later. Besides saving
# the archive ID, you can also index it and give it a friendly name to
# allow for better searching. You can also use the optional archive
# description field to specify how the archive is referred to in an
# external index of archives, such as you might create in Amazon
# DynamoDB. You can also get the vault inventory to obtain a list of
# archive IDs in a vault. For more information, see InitiateJob.
#
# You must provide a SHA256 tree hash of the data you are uploading. For
# information about computing a SHA256 tree hash, see [Computing
# Checksums][1].
#
# You can optionally specify an archive description of up to 1,024
# printable ASCII characters. You can get the archive description when
# you either retrieve the archive or get the vault inventory. For more
# information, see InitiateJob. Amazon Glacier does not interpret the
# description in any way. An archive description does not need to be
# unique. You cannot use the description to retrieve or sort the archive
# list.
#
# Archives are immutable. After you upload an archive, you cannot edit
# the archive or its description.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][2].
#
# For conceptual information and underlying REST API, see [Uploading an
# Archive in Amazon Glacier][3] and [Upload Archive][4] in the *Amazon
# Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-an-archive.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-archive-post.html
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [String] :archive_description
# The optional description of the archive you are uploading.
#
# @option params [String] :checksum
# The SHA256 tree hash of the data being uploaded.
#
# @option params [String, IO] :body
# The data to upload.
#
# @return [Types::ArchiveCreationOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ArchiveCreationOutput#location #location} => String
# * {Types::ArchiveCreationOutput#checksum #checksum} => String
# * {Types::ArchiveCreationOutput#archive_id #archive_id} => String
#
#
# @example Example: To upload an archive
#
# # The example adds an archive to a vault.
#
# resp = client.upload_archive({
# account_id: "-",
# archive_description: "",
# body: "example-data-to-upload",
# checksum: "",
# vault_name: "my-vault",
# })
#
# resp.to_h outputs the following:
# {
# archive_id: "kKB7ymWJVpPSwhGP6ycSOAekp9ZYe_--zM_mw6k76ZFGEIWQX-ybtRDvc2VkPSDtfKmQrj0IRQLSGsNuDp-AJVlu2ccmDSyDUmZwKbwbpAdGATGDiB3hHO0bjbGehXTcApVud_wyDw",
# checksum: "969fb39823836d81f0cc028195fcdbcbbe76cdde932d4646fa7de5f21e18aa67",
# location: "/0123456789012/vaults/my-vault/archives/kKB7ymWJVpPSwhGP6ycSOAekp9ZYe_--zM_mw6k76ZFGEIWQX-ybtRDvc2VkPSDtfKmQrj0IRQLSGsNuDp-AJVlu2ccmDSyDUmZwKbwbpAdGATGDiB3hHO0bjbGehXTcApVud_wyDw",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.upload_archive({
# vault_name: "string", # required
# account_id: "string", # required
# archive_description: "string",
# checksum: "string",
# body: "data",
# })
#
# @example Response structure
#
# resp.location #=> String
# resp.checksum #=> String
# resp.archive_id #=> String
#
# @overload upload_archive(params = {})
# @param [Hash] params ({})
def upload_archive(params = {}, options = {})
  # Build and immediately dispatch the UploadArchive request.
  build_request(:upload_archive, params).send_request(options)
end
# This operation uploads a part of an archive. You can upload archive
# parts in any order. You can also upload them in parallel. You can
# upload up to 10,000 parts for a multipart upload.
#
# Amazon Glacier rejects your upload part request if any of the
# following conditions is true:
#
# * **SHA256 tree hash does not match**To ensure that part data is not
# corrupted in transmission, you compute a SHA256 tree hash of the
# part and include it in your request. Upon receiving the part data,
# Amazon S3 Glacier also computes a SHA256 tree hash. If these hash
# values don't match, the operation fails. For information about
# computing a SHA256 tree hash, see [Computing Checksums][1].
#
# * **Part size does not match**The size of each part except the last
# must match the size specified in the corresponding
# InitiateMultipartUpload request. The size of the last part must be
# the same size as, or smaller than, the specified size.
#
# <note markdown="1"> If you upload a part whose size is smaller than the part size you
# specified in your initiate multipart upload request and that part is
# not the last part, then the upload part request will succeed.
# However, the subsequent Complete Multipart Upload request will fail.
#
# </note>
#
# * **Range does not align**The byte range value in the request does not
# align with the part size specified in the corresponding initiate
# request. For example, if you specify a part size of 4194304 bytes (4
# MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to
# 8388607 (8 MB - 1) are valid part ranges. However, if you set a
# range value of 2 MB to 6 MB, the range does not align with the part
# size and the upload will fail.
#
# This operation is idempotent. If you upload the same part multiple
# times, the data included in the most recent request overwrites the
# previously uploaded data.
#
# An AWS account has full permission to perform all operations
# (actions). However, AWS Identity and Access Management (IAM) users
# don't have any permissions by default. You must grant them explicit
# permission to perform specific actions. For more information, see
# [Access Control Using AWS Identity and Access Management (IAM)][2].
#
# For conceptual information and underlying REST API, see [Uploading
# Large Archives in Parts (Multipart Upload)][3] and [Upload Part ][4]
# in the *Amazon Glacier Developer Guide*.
#
#
#
# [1]: https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
# [2]: https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html
# [3]: https://docs.aws.amazon.com/amazonglacier/latest/dev/uploading-archive-mpu.html
# [4]: https://docs.aws.amazon.com/amazonglacier/latest/dev/api-upload-part.html
#
# @option params [required, String] :account_id
# The `AccountId` value is the AWS account ID of the account that owns
# the vault. You can either specify an AWS account ID or optionally a
# single '`-`' (hyphen), in which case Amazon S3 Glacier uses the AWS
# account ID associated with the credentials used to sign the request.
# If you use an account ID, do not include any hyphens ('-') in the
# ID.
#
# @option params [required, String] :vault_name
# The name of the vault.
#
# @option params [required, String] :upload_id
# The upload ID of the multipart upload.
#
# @option params [String] :checksum
# The SHA256 tree hash of the data being uploaded.
#
# @option params [String] :range
# Identifies the range of bytes in the assembled archive that will be
# uploaded in this part. Amazon S3 Glacier uses this information to
# assemble the archive in the proper sequence. The format of this header
# follows RFC 2616. An example header is Content-Range:bytes
# 0-4194303/*.
#
# @option params [String, IO] :body
# The data to upload.
#
# @return [Types::UploadMultipartPartOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UploadMultipartPartOutput#checksum #checksum} => String
#
#
# @example Example: To upload the first part of an archive
#
# # The example uploads the first 1 MiB (1024 x 1024 bytes) part of an archive.
#
# resp = client.upload_multipart_part({
# account_id: "-",
# body: "part1",
# checksum: "c06f7cd4baacb087002a99a5f48bf953",
# range: "bytes 0-1048575/*",
# upload_id: "19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ",
# vault_name: "examplevault",
# })
#
# resp.to_h outputs the following:
# {
# checksum: "c06f7cd4baacb087002a99a5f48bf953",
# }
#
# @example Request syntax with placeholder values
#
# resp = client.upload_multipart_part({
# account_id: "string", # required
# vault_name: "string", # required
# upload_id: "string", # required
# checksum: "string",
# range: "string",
# body: "data",
# })
#
# @example Response structure
#
# resp.checksum #=> String
#
# @overload upload_multipart_part(params = {})
# @param [Hash] params ({})
def upload_multipart_part(params = {}, options = {})
  # Build and immediately dispatch the UploadMultipartPart request.
  build_request(:upload_multipart_part, params).send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
# Builds (but does not send) a request for the named operation.
#
# @param [Symbol] operation_name modeled API operation to invoke
# @param [Hash] params ({}) request parameters
# @return [Seahorse::Client::Request]
# @api private
def build_request(operation_name, params = {})
  # Resolve the handler stack registered for this specific operation.
  operation_handlers = @handlers.for(operation_name)

  # Assemble the per-request execution context; gem name/version are
  # attached so user-agent/metrics plugins can report this SDK build.
  request_context = Seahorse::Client::RequestContext.new(
    client: self,
    config: config,
    params: params,
    operation: config.api.operation(operation_name),
    operation_name: operation_name
  )
  request_context[:gem_name] = 'aws-sdk-glacier'
  request_context[:gem_version] = '1.32.0'

  Seahorse::Client::Request.new(operation_handlers, request_context)
end
# Polls an API operation until a resource enters a desired state.
#
# ## Basic Usage
#
# A waiter will call an API operation until:
#
# * It is successful
# * It enters a terminal state
# * It makes the maximum number of attempts
#
# In between attempts, the waiter will sleep.
#
# # polls in a loop, sleeping between attempts
# client.wait_until(waiter_name, params)
#
# ## Configuration
#
# You can configure the maximum number of polling attempts, and the
# delay (in seconds) between each polling attempt. You can pass
# configuration as the final arguments hash.
#
# # poll for ~25 seconds
# client.wait_until(waiter_name, params, {
# max_attempts: 5,
# delay: 5,
# })
#
# ## Callbacks
#
# You can be notified before each polling attempt and before each
# delay. If you throw `:success` or `:failure` from these callbacks,
# it will terminate the waiter.
#
# started_at = Time.now
# client.wait_until(waiter_name, params, {
#
# # disable max attempts
# max_attempts: nil,
#
# # poll for 1 hour, instead of a number of attempts
# before_wait: -> (attempts, response) do
# throw :failure if Time.now - started_at > 3600
# end
# })
#
# ## Handling Errors
#
# When a waiter is unsuccessful, it will raise an error.
# All of the failure errors extend from
# {Aws::Waiters::Errors::WaiterFailed}.
#
# begin
# client.wait_until(...)
# rescue Aws::Waiters::Errors::WaiterFailed
# # resource did not enter the desired state in time
# end
#
# ## Valid Waiters
#
# The following table lists the valid waiter names, the operations they call,
# and the default `:delay` and `:max_attempts` values.
#
# | waiter_name | params | :delay | :max_attempts |
# | ---------------- | ----------------------- | -------- | ------------- |
# | vault_exists | {Client#describe_vault} | 3 | 15 |
# | vault_not_exists | {Client#describe_vault} | 3 | 15 |
#
# @raise [Errors::FailureStateError] Raised when the waiter terminates
# because the waiter has entered a state that it will not transition
# out of, preventing success.
#
# @raise [Errors::TooManyAttemptsError] Raised when the configured
# maximum number of attempts have been made, and the waiter is not
# yet successful.
#
# @raise [Errors::UnexpectedError] Raised when an error is encounted
# while polling for a resource that is not expected.
#
# @raise [Errors::NoSuchWaiterError] Raised when you request to wait
# for an unknown state.
#
# @return [Boolean] Returns `true` if the waiter was successful.
# @param [Symbol] waiter_name
# @param [Hash] params ({})
# @param [Hash] options ({})
# @option options [Integer] :max_attempts
# @option options [Integer] :delay
# @option options [Proc] :before_attempt
# @option options [Proc] :before_wait
def wait_until(waiter_name, params = {}, options = {})
  poller = waiter(waiter_name, options)
  # Deprecated: yield the underlying waiter so callers can tweak it
  # before polling begins.
  yield(poller.waiter) if block_given?
  poller.wait(params)
end
# @api private
# @deprecated
def waiter_names
  # Deprecated: symbolic names of every registered waiter.
  waiters.map { |name, _waiter_class| name }
end
private
# @param [Symbol] waiter_name
# @param [Hash] options ({})
# Instantiates the waiter registered under +waiter_name+.
#
# @param [Symbol] waiter_name
# @param [Hash] options ({}) passed through to the waiter, plus client: self
# @raise [Aws::Waiters::Errors::NoSuchWaiterError] for unknown names
def waiter(waiter_name, options = {})
  klass = waiters[waiter_name]
  # Unknown names raise with the full list of valid waiters for discoverability.
  raise Aws::Waiters::Errors::NoSuchWaiterError.new(waiter_name, waiters.keys) unless klass

  klass.new(options.merge(client: self))
end
# Maps each public waiter name to the class implementing its polling
# logic. Consumed by #waiter and #waiter_names.
def waiters
  {
    vault_exists: Waiters::VaultExists,
    vault_not_exists: Waiters::VaultNotExists
  }
end
# Class-level helpers consumed by the SDK's plugin/client infrastructure.
class << self
  # @api private
  attr_reader :identifier

  # @api private
  # Module containing this service's modeled error classes.
  def errors_module
    Errors
  end
end
end
end
| 44.803082 | 373 | 0.656851 |
037906cd6c40c741cfd2c08364797c5412507d13 | 49 | module Africansms
VERSION = '0.2.2'.freeze
end
| 12.25 | 26 | 0.714286 |
33aa99daa5511ffbd5a30a56b35877d3ed022055 | 2,680 | #
#
# File a software grant (note: despite historical wording, this workflow
# handles grants, not ICLAs — it updates grants.txt and emails a
# "Software Grant from ..." confirmation):
# - add files to documents/grants
# - add entry to officers/grants.txt
# - respond to original email
#

# extract the message being processed from the mailbox
message = Mailbox.find(@message)

# extract file extension from the selected attachment; skipped when a
# detached signature accompanies the upload (the name is then used as-is)
fileext = File.extname(@selected).downcase if @signature.empty?

grant = "#@filename#{fileext}"

# verify that a grant under that name doesn't already exist
if grant =~ /^\w[-\w]*\.?\w*$/
  if ASF::GrantFiles.exist?(grant.untaint)
    _warn "documents/grants/#{grant} already exists"
  end
else
  # Should not be possible, as form checks for: '[a-zA-Z][-\w]+(\.[a-z]+)?'
  _warn "#{grant} is not a valid file name"
end

# extract/verify project
_extract_project

# obtain per-user information (name, email, from-address)
_personalize_email(env.user)

# initialize commit message / document title
@document = "Software Grant from #{@company}"

########################################################################
#             document/grants & officers/grants.txt                    #
########################################################################

# write attachment (+ signature, if present) to the documents/grants directory
task "svn commit documents/grants/#@filename#{fileext} and update grants.txt" do
  # construct the multi-line entry to be inserted into grants.txt
  @grantlines = "#{@company.strip}" +
    "\n file: #{@filename}#{fileext}" +
    "\n for: #{@description.strip.gsub(/\r?\n\s*/,"\n ")}"

  # preview form shown to the operator before committing
  form do
    _input value: @selected, name: 'selected'

    if @signature and not @signature.empty?
      _input value: @signature, name: 'signature'
    end

    _textarea @grantlines, name: 'grantlines',
      rows: @grantlines.split("\n").length
  end

  # on confirmation: commit attachment/signature and edit grants.txt in one pass
  complete do |dir|
    svn_multi('officers', 'grants.txt', 'grants', @selected, @signature, @filename, fileext, message, @document) do |input|
      # update grants.txt: insert the new entry just after the marker line
      marker = "\n# registering. documents on way to Secretary.\n"
      input.split(marker).insert(1, "\n#{@grantlines}\n", marker).join
    end
  end
end

########################################################################
#                          email submitter                             #
########################################################################

# send confirmation email
task "email #@email" do
  # build mail from template, copying the relevant PMC/podling lists
  mail = message.reply(
    subject: @document,
    from: @from,
    to: "#{@name.inspect} <#{@email}>",
    cc: [
      '[email protected]',
      ("private@#{@pmc.mail_list}.apache.org" if @pmc), # copy pmc
      (@podling.private_mail_list if @podling) # copy podling
    ],
    body: template('grant.erb')
  )

  # echo email for operator review
  form do
    _message mail.to_s
  end

  # deliver mail
  complete do
    mail.deliver!
  end
end
| 27.346939 | 123 | 0.564925 |
219ab1989ea125c46f96ae4c1f4ea1cdcbbe5a4c | 7,191 | require 'spec_helper'
# Unit specs for the Snippet model: mixed-in modules, associations,
# validations, reference generation, search scopes, visibility-based
# access filtering, and participant collection.
describe Snippet, models: true do
  describe 'modules' do
    subject { described_class }

    it { is_expected.to include_module(Gitlab::VisibilityLevel) }
    it { is_expected.to include_module(Linguist::BlobHelper) }
    it { is_expected.to include_module(Participable) }
    it { is_expected.to include_module(Referable) }
    it { is_expected.to include_module(Sortable) }
    it { is_expected.to include_module(Awardable) }
  end

  describe 'associations' do
    it { is_expected.to belong_to(:author).class_name('User') }
    it { is_expected.to belong_to(:project) }
    it { is_expected.to have_many(:notes).dependent(:destroy) }
    it { is_expected.to have_many(:award_emoji).dependent(:destroy) }
  end

  describe 'validation' do
    it { is_expected.to validate_presence_of(:author) }
    it { is_expected.to validate_presence_of(:title) }
    it { is_expected.to validate_length_of(:title).is_at_most(255) }
    it { is_expected.to validate_length_of(:file_name).is_at_most(255) }
    it { is_expected.to validate_presence_of(:content) }
    it { is_expected.to validate_inclusion_of(:visibility_level).in_array(Gitlab::VisibilityLevel.values) }
  end

  # Snippets are referenced as "$<id>", optionally prefixed with a
  # project path for cross-project references.
  describe '#to_reference' do
    context 'when snippet belongs to a project' do
      let(:project) { build(:empty_project, name: 'sample-project') }
      let(:snippet) { build(:snippet, id: 1, project: project) }

      it 'returns a String reference to the object' do
        expect(snippet.to_reference).to eq "$1"
      end

      it 'supports a cross-project reference' do
        another_project = build(:empty_project, name: 'another-project', namespace: project.namespace)
        expect(snippet.to_reference(another_project)).to eq "sample-project$1"
      end
    end

    context 'when snippet does not belong to a project' do
      let(:snippet) { build(:snippet, id: 1, project: nil) }

      it 'returns a String reference to the object' do
        expect(snippet.to_reference).to eq "$1"
      end

      it 'still returns shortest reference when project arg present' do
        another_project = build(:empty_project, name: 'another-project')
        expect(snippet.to_reference(another_project)).to eq "$1"
      end
    end
  end

  describe '#file_name' do
    let(:project) { create(:empty_project) }

    context 'file_name is nil' do
      let(:snippet) { create(:snippet, project: project, file_name: nil) }

      it 'returns an empty string' do
        expect(snippet.file_name).to eq ''
      end
    end

    context 'file_name is not nil' do
      let(:snippet) { create(:snippet, project: project, file_name: 'foo.txt') }

      it 'returns the file_name' do
        expect(snippet.file_name).to eq 'foo.txt'
      end
    end
  end

  describe "#content_html_invalidated?" do
    let(:snippet) { create(:snippet, content: "md", content_html: "html", file_name: "foo.md") }

    # Renaming the file can change syntax highlighting, so the cached
    # rendered HTML must be invalidated.
    it "invalidates the HTML cache of content when the filename changes" do
      expect { snippet.file_name = "foo.rb" }.to change { snippet.content_html_invalidated? }.from(false).to(true)
    end
  end

  # Title/file-name search is partial and case-insensitive.
  describe '.search' do
    let(:snippet) { create(:snippet) }

    it 'returns snippets with a matching title' do
      expect(described_class.search(snippet.title)).to eq([snippet])
    end

    it 'returns snippets with a partially matching title' do
      expect(described_class.search(snippet.title[0..2])).to eq([snippet])
    end

    it 'returns snippets with a matching title regardless of the casing' do
      expect(described_class.search(snippet.title.upcase)).to eq([snippet])
    end

    it 'returns snippets with a matching file name' do
      expect(described_class.search(snippet.file_name)).to eq([snippet])
    end

    it 'returns snippets with a partially matching file name' do
      expect(described_class.search(snippet.file_name[0..2])).to eq([snippet])
    end

    it 'returns snippets with a matching file name regardless of the casing' do
      expect(described_class.search(snippet.file_name.upcase)).to eq([snippet])
    end
  end

  # Content search mirrors the title search semantics.
  describe '.search_code' do
    let(:snippet) { create(:snippet, content: 'class Foo; end') }

    it 'returns snippets with matching content' do
      expect(described_class.search_code(snippet.content)).to eq([snippet])
    end

    it 'returns snippets with partially matching content' do
      expect(described_class.search_code('class')).to eq([snippet])
    end

    it 'returns snippets with matching content regardless of the casing' do
      expect(described_class.search_code('FOO')).to eq([snippet])
    end
  end

  # Visibility filtering: anonymous -> public only; signed-in -> public +
  # internal; project members also see project-private; authors see their
  # own private snippets; admins see everything.
  describe '.accessible_to' do
    let(:author) { create(:author) }
    let(:project) { create(:empty_project) }

    let!(:public_snippet)   { create(:snippet, :public) }
    let!(:internal_snippet) { create(:snippet, :internal) }
    let!(:private_snippet)  { create(:snippet, :private, author: author) }

    let!(:project_public_snippet)   { create(:snippet, :public, project: project) }
    let!(:project_internal_snippet) { create(:snippet, :internal, project: project) }
    let!(:project_private_snippet)  { create(:snippet, :private, project: project) }

    it 'returns only public snippets when user is blank' do
      expect(described_class.accessible_to(nil)).to match_array [public_snippet, project_public_snippet]
    end

    it 'returns only public, and internal snippets for regular users' do
      user = create(:user)

      expect(described_class.accessible_to(user)).to match_array [public_snippet, internal_snippet, project_public_snippet, project_internal_snippet]
    end

    it 'returns public, internal snippets and project private snippets for project members' do
      member = create(:user)
      project.team << [member, :developer]

      expect(described_class.accessible_to(member)).to match_array [public_snippet, internal_snippet, project_public_snippet, project_internal_snippet, project_private_snippet]
    end

    it 'returns private snippets where the user is the author' do
      expect(described_class.accessible_to(author)).to match_array [public_snippet, internal_snippet, private_snippet, project_public_snippet, project_internal_snippet]
    end

    it 'returns all snippets when for admins' do
      admin = create(:admin)

      expect(described_class.accessible_to(admin)).to match_array [public_snippet, internal_snippet, private_snippet, project_public_snippet, project_internal_snippet, project_private_snippet]
    end
  end

  # Participants are the author plus anyone who commented on the snippet.
  describe '#participants' do
    let(:project) { create(:empty_project, :public) }
    let(:snippet) { create(:snippet, content: 'foo', project: project) }

    let!(:note1) do
      create(:note_on_project_snippet,
             noteable: snippet,
             project: project,
             note: 'a')
    end

    let!(:note2) do
      create(:note_on_project_snippet,
             noteable: snippet,
             project: project,
             note: 'b')
    end

    it 'includes the snippet author' do
      expect(snippet.participants).to include(snippet.author)
    end

    it 'includes the note authors' do
      expect(snippet.participants).to include(note1.author, note2.author)
    end
  end
end
| 35.59901 | 192 | 0.695731 |
bf77d68a81a6840f52c8719a5c3b9eb17b2fb6b2 | 265 | class CreateRatings < ActiveRecord::Migration
# Creates the ratings table: a named, starred comment attached to a food.
def change
  create_table :ratings do |table|
    table.string :name
    table.text :comment
    table.integer :stars
    table.references :food, foreign_key: true, index: true
    table.timestamps null: false
  end
end
end
| 20.384615 | 56 | 0.656604 |
915dc0355ed7079b3539df39fa35408747f03482 | 174 | Pakyow::Realtime::MessageHandler.register :ping do |message, session, response|
  # Reply to a realtime :ping with a bare 200 "pong" response.
  response[:status] = 200
  response[:headers] = {}
  response[:body] = 'pong'
  # The handler's return value is the response delivered to the client.
  response
end
| 24.857143 | 79 | 0.706897 |
ab8773b7ede14c4f7cd9bc14eeb6116060c18180 | 6,847 | require 'spec_helper'
# Routable behaviour as mixed into Group: route record lifecycle, path-based
# lookup (with and without redirect routes), and full_path/full_name caching.
describe Group, 'Routable' do
  let!(:group) { create(:group, name: 'foo') }

  describe 'Validations' do
    it { is_expected.to validate_presence_of(:route) }
  end

  describe 'Associations' do
    it { is_expected.to have_one(:route).dependent(:destroy) }
    it { is_expected.to have_many(:redirect_routes).dependent(:destroy) }
  end

  describe 'GitLab read-only instance' do
    it 'does not save route if route is not present' do
      group.route.path = ''
      allow(Gitlab::Database).to receive(:read_only?).and_return(true)
      expect(group).to receive(:update_route).and_call_original
      # update_route is invoked but must not write a Route row in read-only mode.
      expect { group.full_path }.to change { Route.count }.by(0)
    end
  end

  describe 'Callbacks' do
    it 'creates route record on create' do
      expect(group.route.path).to eq(group.path)
      expect(group.route.name).to eq(group.name)
    end

    it 'updates route record on path change' do
      group.update_attributes(path: 'wow', name: 'much')
      expect(group.route.path).to eq('wow')
      expect(group.route.name).to eq('much')
    end

    it 'ensure route path uniqueness across different objects' do
      # A project under the group may not reuse a sibling group's path.
      create(:group, parent: group, path: 'xyz')
      duplicate = build(:project, namespace: group, path: 'xyz')
      expect { duplicate.save! }.to raise_error(ActiveRecord::RecordInvalid, 'Validation failed: Route path has already been taken, Route is invalid')
    end
  end

  describe '.find_by_full_path' do
    let!(:nested_group) { create(:group, parent: group) }

    context 'without any redirect routes' do
      it { expect(described_class.find_by_full_path(group.to_param)).to eq(group) }
      it { expect(described_class.find_by_full_path(group.to_param.upcase)).to eq(group) }
      it { expect(described_class.find_by_full_path(nested_group.to_param)).to eq(nested_group) }
      it { expect(described_class.find_by_full_path('unknown')).to eq(nil) }
    end

    context 'with redirect routes' do
      let!(:group_redirect_route) { group.redirect_routes.create!(path: 'bar') }
      let!(:nested_group_redirect_route) { nested_group.redirect_routes.create!(path: nested_group.path.sub('foo', 'bar')) }

      # Redirect routes are only honoured when follow_redirects: true is passed.
      context 'without follow_redirects option' do
        context 'with the given path not matching any route' do
          it { expect(described_class.find_by_full_path('unknown')).to eq(nil) }
        end

        context 'with the given path matching the canonical route' do
          it { expect(described_class.find_by_full_path(group.to_param)).to eq(group) }
          it { expect(described_class.find_by_full_path(group.to_param.upcase)).to eq(group) }
          it { expect(described_class.find_by_full_path(nested_group.to_param)).to eq(nested_group) }
        end

        context 'with the given path matching a redirect route' do
          it { expect(described_class.find_by_full_path(group_redirect_route.path)).to eq(nil) }
          it { expect(described_class.find_by_full_path(group_redirect_route.path.upcase)).to eq(nil) }
          it { expect(described_class.find_by_full_path(nested_group_redirect_route.path)).to eq(nil) }
        end
      end

      context 'with follow_redirects option set to true' do
        context 'with the given path not matching any route' do
          it { expect(described_class.find_by_full_path('unknown', follow_redirects: true)).to eq(nil) }
        end

        context 'with the given path matching the canonical route' do
          it { expect(described_class.find_by_full_path(group.to_param, follow_redirects: true)).to eq(group) }
          it { expect(described_class.find_by_full_path(group.to_param.upcase, follow_redirects: true)).to eq(group) }
          it { expect(described_class.find_by_full_path(nested_group.to_param, follow_redirects: true)).to eq(nested_group) }
        end

        context 'with the given path matching a redirect route' do
          it { expect(described_class.find_by_full_path(group_redirect_route.path, follow_redirects: true)).to eq(group) }
          it { expect(described_class.find_by_full_path(group_redirect_route.path.upcase, follow_redirects: true)).to eq(group) }
          it { expect(described_class.find_by_full_path(nested_group_redirect_route.path, follow_redirects: true)).to eq(nested_group) }
        end
      end
    end
  end

  describe '.where_full_path_in' do
    context 'without any paths' do
      it 'returns an empty relation' do
        expect(described_class.where_full_path_in([])).to eq([])
      end
    end

    context 'without any valid paths' do
      it 'returns an empty relation' do
        expect(described_class.where_full_path_in(%w[unknown])).to eq([])
      end
    end

    context 'with valid paths' do
      let!(:nested_group) { create(:group, parent: group) }

      it 'returns the projects matching the paths' do
        result = described_class.where_full_path_in([group.to_param, nested_group.to_param])
        expect(result).to contain_exactly(group, nested_group)
      end

      it 'returns projects regardless of the casing of paths' do
        result = described_class.where_full_path_in([group.to_param.upcase, nested_group.to_param.upcase])
        expect(result).to contain_exactly(group, nested_group)
      end
    end
  end

  describe '#full_path' do
    let(:group) { create(:group) }
    let(:nested_group) { create(:group, parent: group) }

    it { expect(group.full_path).to eq(group.path) }
    it { expect(nested_group.full_path).to eq("#{group.full_path}/#{nested_group.path}") }

    context 'with RequestStore active', :request_store do
      it 'does not load the route table more than once' do
        # Subsequent calls within one request must hit the RequestStore cache.
        expect(group).to receive(:uncached_full_path).once.and_call_original
        3.times { group.full_path }
        expect(group.full_path).to eq(group.path)
      end
    end
  end

  describe '#expires_full_path_cache' do
    context 'with RequestStore active', :request_store do
      it 'expires the full_path cache' do
        expect(group.full_path).to eq('foo')
        group.route.update(path: 'bar', name: 'bar')
        group.expires_full_path_cache
        expect(group.full_path).to eq('bar')
      end
    end
  end

  describe '#full_name' do
    let(:group) { create(:group) }
    let(:nested_group) { create(:group, parent: group) }

    it { expect(group.full_name).to eq(group.name) }
    it { expect(nested_group.full_name).to eq("#{group.name} / #{nested_group.name}") }
  end
end
# Routable behaviour as mixed into Project: paths/names are namespaced under
# the owning group or user namespace.
describe Project, 'Routable' do
  describe '#full_path' do
    let(:project) { build_stubbed(:project) }

    it { expect(project.full_path).to eq "#{project.namespace.full_path}/#{project.path}" }
  end

  describe '#full_name' do
    let(:project) { build_stubbed(:project) }

    it { expect(project.full_name).to eq "#{project.namespace.human_name} / #{project.name}" }
  end
end
| 38.038889 | 150 | 0.688477 |
871bad31839da38d005d792628eb6b9d9e8f0ef7 | 561 | # makes sure the dummy app is properly initialized:
# bundles required gems
# drops/creates the databases
# migrates the development database
# dumps db structure for reference
# removes old sql dumps in tmp dir
STDIN_STUB = 'STDIN_stub'
DUMMY_PATH = '../../../spec/dummy'

# Prepare the dummy Rails app: install gems, clear stale SQL dumps, then
# rebuild the databases from scratch (drop, create, migrate, dump, seed).
Dir.chdir(File.expand_path(DUMMY_PATH, __FILE__)) do
  system 'mkdir tmp'
  system 'bundle install'
  system 'rm -f tmp/*sql'

  rake_tasks = %w[
    db:drop:all
    db:create:all
    db:migrate
    db:structure:dump
    db:seed
  ]
  rake_tasks.each { |task| system "bundle exec rake #{task}" }
end
| 23.375 | 51 | 0.7041 |
38d9151146a5c8272178e198d1d5990e12b7c4aa | 374 | require 'rest-client'
# Fetches account test data over HTTP via rest-client.
class AccountDataManager
  # Performs an HTTP GET against +url+ and returns the RestClient response
  # (which behaves like the response body string).
  #
  # Raises a RuntimeError when the response status is not 200.
  # NOTE(review): RestClient.get already raises for most non-2xx statuses,
  # so this check is a defensive backstop — confirm before removing it.
  def create(url)
    response = RestClient.get(url)
    unless response.code == 200
      raise "GET failed. Response status code was: '#{response.code}'"
    end
    response
  end
end
bbdfc836b388c2b2c0510ad3712babcf6bc9210c | 7,418 | class MariadbAT102 < Formula
desc "Drop-in replacement for MySQL"
homepage "https://mariadb.org/"
url "https://downloads.mariadb.org/f/mariadb-10.2.39/source/mariadb-10.2.39.tar.gz"
sha256 "d0c81ddb5d554388952487258e4a7a10cd92504a305efbcc1fa94668f1e9315d"
license "GPL-2.0-only"

livecheck do
  url "https://downloads.mariadb.org/"
  regex(/Download v?(10\.2(?:\.\d+)+) Stable Now/i)
end

bottle do
  sha256 big_sur: "dc9b29897f57791b273addad7c173f8f60bf0b2b4e0449bbdd8d19cc7d9e18a6"
  sha256 catalina: "3e4307397c671b2003e61da701131ed2283c291eb075bc2c73f0839d0ef1eef8"
  sha256 mojave: "88f33bd9b8972f478889e53361772063a7151d624cb0e67a986c8dbf5e7c07b3"
  sha256 x86_64_linux: "78104cafa0fc3fb9dfaaadd92caa9b7bebcf4d96df847fc78fe067c7d6a34b47"
end

# Versioned formula: never linked into the default prefix.
keg_only :versioned_formula

# See: https://mariadb.com/kb/en/changes-improvements-in-mariadb-102/
deprecate! date: "2022-05-01", because: :unsupported

depends_on "bison" => :build
depends_on "cmake" => :build
depends_on "pkg-config" => :build
depends_on "groonga"
# NOTE(review): this dependency name was scrape-mangled to "[email protected]" in the
# retrieved copy; restored to the versioned OpenSSL formula MariaDB 10.2
# builds against — confirm against upstream homebrew-core history.
depends_on "openssl@1.1"
depends_on "pcre2"

on_macos do
  # Need patch to remove MYSQL_SOURCE_DIR from include path because it contains
  # file called VERSION
  # https://github.com/Homebrew/homebrew-core/pull/76887#issuecomment-840851149
  # Reported upstream at https://jira.mariadb.org/browse/MDEV-7209 - this fix can be
  # removed once that issue is closed and the fix has been merged into a stable release
  patch :DATA
end

on_linux do
  depends_on "gcc"
  depends_on "linux-pam"
end

fails_with gcc: "5"
# Configures, builds and installs MariaDB, then rewrites the installed
# support scripts/config so everything points at the Homebrew prefix.
# Steps are order-dependent: configure -> build -> install -> post-install fixups.
def install
  # Set basedir and ldata so that mysql_install_db can find the server
  # without needing an explicit path to be set. This can still
  # be overridden by calling --basedir= when calling.
  inreplace "scripts/mysql_install_db.sh" do |s|
    s.change_make_var! "basedir", "\"#{prefix}\""
    s.change_make_var! "ldata", "\"#{var}/mysql\""
  end

  # Use brew groonga
  rm_r "storage/mroonga/vendor/groonga"

  # -DINSTALL_* are relative to prefix
  args = %W[
    -DMYSQL_DATADIR=#{var}/mysql
    -DINSTALL_INCLUDEDIR=include/mysql
    -DINSTALL_MANDIR=share/man
    -DINSTALL_DOCDIR=share/doc/#{name}
    -DINSTALL_INFODIR=share/info
    -DINSTALL_MYSQLSHAREDIR=share/mysql
    -DWITH_READLINE=yes
    -DWITH_SSL=yes
    -DWITH_UNIT_TESTS=OFF
    -DDEFAULT_CHARSET=utf8mb4
    -DDEFAULT_COLLATION=utf8mb4_general_ci
    -DINSTALL_SYSCONFDIR=#{etc}
    -DCOMPILATION_COMMENT=Homebrew
  ]

  # disable TokuDB, which is currently not supported on macOS
  args << "-DPLUGIN_TOKUDB=NO"

  system "cmake", ".", *std_cmake_args, *args

  on_macos do
    # Need to rename files called version/VERSION to avoid build failure
    # https://github.com/Homebrew/homebrew-core/pull/76887#issuecomment-840851149
    # Reported upstream at https://jira.mariadb.org/browse/MDEV-7209 - this fix can be
    # removed once that issue is closed and the fix has been merged into a stable release.
    mv "storage/mroonga/version", "storage/mroonga/version.txt"
  end

  system "make"
  system "make", "install"

  # Fix my.cnf to point to #{etc} instead of /etc
  (etc/"my.cnf.d").mkpath
  inreplace "#{etc}/my.cnf", "!includedir /etc/my.cnf.d",
    "!includedir #{etc}/my.cnf.d"
  # Marker file so `brew cleanup` keeps the otherwise-empty directory.
  touch etc/"my.cnf.d/.homebrew_dont_prune_me"

  # Don't create databases inside of the prefix!
  # See: https://github.com/Homebrew/homebrew/issues/4975
  rm_rf prefix/"data"

  # Save space
  (prefix/"mysql-test").rmtree
  (prefix/"sql-bench").rmtree

  # Link the setup script into bin
  bin.install_symlink prefix/"scripts/mysql_install_db"
  # Fix up the control script and link into bin
  inreplace "#{prefix}/support-files/mysql.server", /^(PATH=".*)(")/, "\\1:#{HOMEBREW_PREFIX}/bin\\2"
  bin.install_symlink prefix/"support-files/mysql.server"

  # Move sourced non-executable out of bin into libexec
  libexec.install "#{bin}/wsrep_sst_common"
  # Fix up references to wsrep_sst_common
  %w[
    wsrep_sst_mysqldump
    wsrep_sst_rsync
    wsrep_sst_xtrabackup
    wsrep_sst_xtrabackup-v2
  ].each do |f|
    inreplace "#{bin}/#{f}", "$(dirname \"$0\")/wsrep_sst_common",
      "#{libexec}/wsrep_sst_common"
  end

  # Install my.cnf that binds to 127.0.0.1 by default
  (buildpath/"my.cnf").write <<~EOS
    # Default Homebrew MySQL server config
    [mysqld]
    # Only allow connections from localhost
    bind-address = 127.0.0.1
  EOS
  etc.install "my.cnf"
end
# Creates the data directory and, outside CI, initialises the database
# tree on first install only.
def post_install
  # Make sure the var/mysql directory exists
  (var/"mysql").mkpath

  # Don't initialize database, it clashes when testing other MySQL-like implementations.
  return if ENV["HOMEBREW_GITHUB_ACTIONS"]
  # Already initialised by a previous install.
  return if File.exist?("#{var}/mysql/mysql/user.frm")

  ENV["TMPDIR"] = nil
  system "#{bin}/mysql_install_db", "--verbose", "--user=#{ENV["USER"]}",
    "--basedir=#{prefix}", "--datadir=#{var}/mysql", "--tmpdir=/tmp"
end
# Post-install notice shown to the user by `brew info`/`brew install`.
def caveats
  <<~EOS
    A "/etc/my.cnf" from another install may interfere with a Homebrew-built
    server starting up correctly.
    MySQL is configured to only allow connections from localhost by default
    To connect:
    mysql -uroot
  EOS
end
# NOTE(review): the opt path below was scrape-mangled to "opt/[email protected]" in
# the retrieved copy; restored to this formula's own name ("mariadb@10.2",
# per class MariadbAT102) — confirm against upstream homebrew-core history.
plist_options manual: "#{HOMEBREW_PREFIX}/opt/mariadb@10.2/bin/mysql.server start"

# launchd property list that keeps mysqld_safe running with the Homebrew
# data directory.
def plist
  <<~EOS
    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    <plist version="1.0">
    <dict>
    <key>KeepAlive</key>
    <true/>
    <key>Label</key>
    <string>#{plist_name}</string>
    <key>ProgramArguments</key>
    <array>
    <string>#{opt_bin}/mysqld_safe</string>
    <string>--datadir=#{var}/mysql</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>WorkingDirectory</key>
    <string>#{var}</string>
    </dict>
    </plist>
  EOS
end
# Smoke test: initialise a throwaway datadir, boot mysqld on a free port,
# query it, then shut it down.
test do
  (testpath/"mysql").mkpath
  (testpath/"tmp").mkpath
  system bin/"mysql_install_db", "--no-defaults", "--user=#{ENV["USER"]}",
    "--basedir=#{prefix}", "--datadir=#{testpath}/mysql", "--tmpdir=#{testpath}/tmp",
    "--auth-root-authentication-method=normal"
  port = free_port
  # Run the server in a child process so the test can talk to it.
  fork do
    system "#{bin}/mysqld", "--no-defaults", "--user=#{ENV["USER"]}",
      "--datadir=#{testpath}/mysql", "--port=#{port}", "--tmpdir=#{testpath}/tmp"
  end
  # Give the server time to come up before querying it.
  sleep 5
  assert_match "information_schema",
    shell_output("#{bin}/mysql --port=#{port} --user=root --password= --execute='show databases;'")
  system "#{bin}/mysqladmin", "--port=#{port}", "--user=root", "--password=", "shutdown"
end
end
__END__
diff --git a/storage/mroonga/CMakeLists.txt b/storage/mroonga/CMakeLists.txt
index 555ab248751..cddb6f2f2a6 100644
--- a/storage/mroonga/CMakeLists.txt
+++ b/storage/mroonga/CMakeLists.txt
@@ -215,8 +215,7 @@ set(MYSQL_INCLUDE_DIRS
"${MYSQL_REGEX_INCLUDE_DIR}"
"${MYSQL_RAPIDJSON_INCLUDE_DIR}"
"${MYSQL_LIBBINLOGEVENTS_EXPORT_DIR}"
- "${MYSQL_LIBBINLOGEVENTS_INCLUDE_DIR}"
- "${MYSQL_SOURCE_DIR}")
+ "${MYSQL_LIBBINLOGEVENTS_INCLUDE_DIR}")
if(MRN_BUNDLED)
set(MYSQL_PLUGIN_DIR "${INSTALL_PLUGINDIR}")
| 33.264574 | 108 | 0.664195 |
1d3f0ccac8b209a361b31c584cbe5a1d8ba11376 | 399 | cask 'font-londrina-shadow' do
# Font cask: downloads a single TTF straight from the Google Fonts mirror.
version '1.001'
sha256 '8e8fcd9dfb9dccc934ac9930ebc83a452c3c2049ffbce351b68622fe8308b10a'
url 'https://googlefontdirectory.googlecode.com/hg-history/67342bc472599b4c32201ee4a002fe59a6447a42/ofl/londrinashadow/LondrinaShadow-Regular.ttf'
homepage 'http://www.google.com/fonts/specimen/Londrina%20Shadow'
license :ofl
font 'LondrinaShadow-Regular.ttf'
end
| 36.272727 | 148 | 0.824561 |
e23afc512aa97edcec2c80b2a81118d68286b6da | 1,047 | # Copyright (C) 2009-2014 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module BSON

  # Provides static helper methods around determining what environment is
  # running without polluting the global namespace.
  #
  # @since 2.0.0
  module Environment
    extend self

    # Determine if we are using JRuby or not.
    #
    # @example Are we running with JRuby?
    #   Environment.jruby?
    #
    # @return [ String, nil ] A truthy value ("constant", from defined?) when
    #   running on JRuby, nil otherwise. Callers should treat this as
    #   truthy/falsey rather than a strict boolean.
    #
    # @since 2.0.0
    def jruby?
      defined?(JRUBY_VERSION)
    end
  end
end
| 28.297297 | 74 | 0.706781 |
624ca2987ae11db101648641136e274615b41ebc | 682 | module BrDanfe
# Thin wrapper over a Nokogiri document for reading NF-e XML.
class XML
  # Delegates a CSS selector query to the underlying document.
  def css(xpath)
    @xml.css(xpath)
  end

  def initialize(xml)
    @xml = Nokogiri::XML(xml)
  end

  # Returns the concatenated text of the nodes matched by +xpath+.
  # NOTE(review): Nokogiri's css returns a NodeSet (truthy even when empty,
  # with text == ''), so the nil branch appears unreachable — confirm.
  def [](xpath)
    node = @xml.css(xpath)
    node ? node.text : ''
  end

  # Yields each //ns:tag element (falling back to //tag when the namespace
  # is not declared, which makes the first xpath raise) and collects the
  # block results.
  def collect(ns, tag)
    result = []
    # With namespace
    begin
      @xml.xpath("//#{ns}:#{tag}").each do |det|
        result << yield(det)
      end
    rescue StandardError
      # Without namespace
      @xml.xpath("//#{tag}").each do |det|
        result << yield(det)
      end
    end
    result
  end

  # True when the infNFe "versao" attribute is layout 3.10 or newer.
  def version_is_310_or_newer?
    @xml.css('infNFe').attr('versao').to_s.to_f >= 3.10
  end
end
end
| 18.432432 | 57 | 0.513196 |
9155c24e409b3d0305096b29b8f71a3a78d06eab | 629 | cask 'blue-jeans' do
# Blue Jeans videoconferencing client, distributed as an installer app.
version '2.15.0.237'
sha256 'ed72ee9e64a1601f636f49189e1eaa4dd2d3a1847820e11f1b2aa1d4eb2b6c11'
url "https://swdl.bluejeans.com/desktop-app/mac/#{version.major_minor_patch}/#{version}/BlueJeansInstaller.dmg"
appcast 'https://www.bluejeans.com/downloads'
name 'Blue Jeans videoconferencing'
homepage 'https://www.bluejeans.com/'
# The dmg ships an installer app rather than the final bundle.
installer manual: 'BlueJeansInstaller.app'
# Terminate running processes before removing the installed app.
uninstall signal: [
  ['TERM', 'com.bluejeansnet.Blue'],
  ['TERM', 'com.bluejeansnet.BlueMenulet'],
],
delete: '/Applications/Blue Jeans.app'
end
| 34.944444 | 113 | 0.666137 |
913c919752bbf9a3bbe799a049ad01b176cc25fc | 8,486 | #==============================================================================
# ** TestUI_CRectangle
#------------------------------------------------------------------------------
# Unit tests for the ResizableImage control
#==============================================================================
class TestUI_CRectangle < TestUI_Class
  #//////////////////////////////////////////////////////////////////////////
  # * Public Methods
  #//////////////////////////////////////////////////////////////////////////

  #--------------------------------------------------------------------------
  # * Setup tests
  #     Builds one drawing surface of each kind (window, sprite, raw bitmap)
  #     and starts the window -> sprite -> bitmap visibility rotation.
  #--------------------------------------------------------------------------
  def testFixtureSetup()
    # bitmap test
    @viewport_bitmap = Viewport.new(0, 0, 640, 450)
    @sprite_bitmap = Sprite.new(@viewport_bitmap)
    @bitmap = Bitmap.new(640, 450)
    @sprite_bitmap.bitmap = @bitmap
    # sprite test
    @viewport = Viewport.new(0, 0, 640, 450)
    @sprite = Sprite.new(@viewport)
    @sprite.bitmap = Bitmap.new(640, 450)
    # window test
    @window = Window_Base.new(0, 0, 640, 450)
    # Only the window is visible at first; update() rotates the surfaces
    # every 240 frames so each one can be inspected in turn.
    @window.visible = true
    @sprite.visible = false
    @sprite_bitmap.visible = false
    @window_duration = 240
    @sprite_duration = nil
    @bitmap_duration = nil
  end

  #--------------------------------------------------------------------------
  # * Tear down tests
  #     Disposes every bitmap/sprite/viewport/window created in setup.
  #--------------------------------------------------------------------------
  def testFixtureTearDown()
    @window.dispose
    @window = nil
    @sprite.bitmap.dispose
    @sprite.dispose
    @sprite = nil
    @viewport.dispose
    @viewport = nil
    @viewport_bitmap.dispose
    @viewport_bitmap = nil
    @sprite_bitmap.bitmap.dispose
    @sprite_bitmap.dispose
    @sprite_bitmap = nil
  end

  #--------------------------------------------------------------------------
  # * Frame update
  #     Counts down the active surface's timer, then hides it and shows the
  #     next surface in the window -> sprite -> bitmap -> window cycle.
  #--------------------------------------------------------------------------
  def update()
    @window.update
    @viewport.update
    @sprite.update
    @viewport_bitmap.update
    @sprite_bitmap.update
    if @window_duration != nil
      if @window_duration == 0
        @window.visible = false
        @sprite_duration = 240
        @sprite.visible = true
        @window_duration = nil
      elsif @window_duration > 0
        @window_duration -= 1
      end
    end
    if @sprite_duration != nil
      if @sprite_duration == 0
        @sprite.visible = false
        @bitmap_duration = 240
        @sprite_bitmap.visible = true
        @sprite_duration = nil
      elsif @sprite_duration > 0
        @sprite_duration -= 1
      end
    end
    if @bitmap_duration != nil
      if @bitmap_duration == 0
        @sprite_bitmap.visible = false
        @window_duration = 240
        @window.visible = true
        @bitmap_duration = nil
      elsif @bitmap_duration > 0
        @bitmap_duration -= 1
      end
    end
  end

  #//////////////////////////////////////////////////////////////////////////
  # * Tests
  #     Each test draws the same rectangle variant on all three containers.
  #     The repeated per-container loop of the original methods now lives in
  #     the private draw_on_containers helper.
  #//////////////////////////////////////////////////////////////////////////

  #--------------------------------------------------------------------------
  # * Default rectangle
  #--------------------------------------------------------------------------
  def test_default
    draw_on_containers(Rect.new(0, 0, 100, 50), [Color.hp_gauge_color1])
  end

  #--------------------------------------------------------------------------
  # * Rectangle which is inactive
  #--------------------------------------------------------------------------
  def test_inactive
    draw_on_containers(Rect.new(100, 0, 100, 50), [Color.hp_gauge_color1],
                       :inactive => true)
  end

  #--------------------------------------------------------------------------
  # * Rectangle which is invisible
  #--------------------------------------------------------------------------
  def test_invisible
    draw_on_containers(Rect.new(200, 0, 100, 50), [Color.hp_gauge_color1],
                       :invisible => true)
  end

  #--------------------------------------------------------------------------
  # * Rectangle with a gradient
  #--------------------------------------------------------------------------
  def test_gradient
    draw_on_containers(Rect.new(300, 0, 100, 50),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2])
  end

  #--------------------------------------------------------------------------
  # * Rectangle with a gradient which is inactive
  #--------------------------------------------------------------------------
  def test_inactiveGradient
    draw_on_containers(Rect.new(400, 0, 100, 50),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2],
                       :inactive => true)
  end

  #--------------------------------------------------------------------------
  # * Default vertical rectangle
  #--------------------------------------------------------------------------
  def test_defaultVertical
    draw_on_containers(Rect.new(0, 100, 50, 100), [Color.hp_gauge_color1])
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle which is inactive
  #--------------------------------------------------------------------------
  def test_inactiveVertical
    draw_on_containers(Rect.new(50, 100, 50, 100), [Color.hp_gauge_color1],
                       :inactive => true)
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle which is invisible
  #--------------------------------------------------------------------------
  def test_invisibleVertical
    draw_on_containers(Rect.new(100, 100, 50, 100), [Color.hp_gauge_color1],
                       :invisible => true)
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle with a gradient
  #--------------------------------------------------------------------------
  def test_gradientVertical
    draw_on_containers(Rect.new(150, 100, 50, 100),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2])
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle with a gradient which is inactive
  #--------------------------------------------------------------------------
  def test_inactiveGradientVertical
    draw_on_containers(Rect.new(200, 100, 50, 100),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2],
                       :inactive => true)
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle with a vertical gradient
  #--------------------------------------------------------------------------
  def test_verticalGradientVertical
    draw_on_containers(Rect.new(250, 100, 50, 100),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2, true])
  end

  #--------------------------------------------------------------------------
  # * Vertical Rectangle with a vertical gradient which is inactive
  #--------------------------------------------------------------------------
  def test_inactiveVerticalGradientVertical
    draw_on_containers(Rect.new(300, 100, 50, 100),
                       [Color.hp_gauge_color1, Color.hp_gauge_color2, true],
                       :inactive => true)
  end

  private

  #--------------------------------------------------------------------------
  # * Draws one CRectangle variant on each container (window/sprite/bitmap)
  #     rect    : area to draw
  #     args    : extra CRectangle.new arguments (colors, vertical flag)
  #     options : :inactive => true and/or :invisible => true; flags are only
  #               assigned when requested so CRectangle defaults are kept.
  #     Returns true, matching the original per-test return value.
  #--------------------------------------------------------------------------
  def draw_on_containers(rect, args, options = {})
    [@window, @sprite, @bitmap].each{|container|
      c = CRectangle.new(container, rect, *args)
      c.active = false if options[:inactive]
      c.visible = false if options[:invisible]
      c.draw()
    }
    return true
  end
end
| 34.080321 | 116 | 0.4214 |
e2e067a3cbbcc617a8be5d4273f11e5e2be26825 | 1,103 | # Encoding: utf-8
#
# This is auto-generated code, changes will be overwritten.
#
# Copyright:: Copyright 2015, Google Inc. All Rights Reserved.
# License:: Licensed under the Apache License, Version 2.0.
#
# Code generated by AdsCommon library 0.9.9 on 2015-06-30 09:24:53.
require 'ads_common/savon_service'
require 'adwords_api/v201506/shared_set_service_registry'
module AdwordsApi; module V201506; module SharedSetService
  # Savon-backed SOAP client for the AdWords SharedSetService (v201506).
  # This file is auto-generated (see header); manual changes will be
  # overwritten on regeneration.
  class SharedSetService < AdsCommon::SavonService
    def initialize(config, endpoint)
      namespace = 'https://adwords.google.com/api/adwords/cm/v201506'
      super(config, endpoint, namespace, :v201506)
    end

    # SharedSetService.get SOAP action.
    def get(*args, &block)
      return execute_action('get', args, &block)
    end

    # SharedSetService.mutate SOAP action.
    def mutate(*args, &block)
      return execute_action('mutate', args, &block)
    end

    # SharedSetService.query SOAP action.
    def query(*args, &block)
      return execute_action('query', args, &block)
    end

    private

    # Registry describing this service's SOAP methods/types for SavonService.
    def get_service_registry()
      return SharedSetServiceRegistry
    end

    def get_module()
      return AdwordsApi::V201506::SharedSetService
    end
  end
end; end; end
| 25.651163 | 69 | 0.708069 |
ab7b3eb34b75b08d56542784504c0fc6f9703c49 | 572 | class AddFeedbackFieldsToApplicationForOffering < ActiveRecord::Migration
# Adds the feedback-meeting tracking columns.
def self.up
  [[:feedback_meeting_date, :datetime],
   [:feedback_meeting_person_id, :integer],
   [:feedback_meeting_comments, :text]].each do |column, type|
    add_column :application_for_offerings, column, type
  end
end
# Removes the feedback-meeting tracking columns (reverse order of up).
def self.down
  [:feedback_meeting_comments,
   :feedback_meeting_person_id,
   :feedback_meeting_date].each do |column|
    remove_column :application_for_offerings, column
  end
end
end
| 40.857143 | 80 | 0.835664 |
08d1490b74853d514a6f54dcea422d2b91376569 | 315 | class RegistrationsController < Devise::RegistrationsController
private

# Parameters permitted during Devise sign-up.
def sign_up_params
  params.require(:user).permit(:name, :email, :password, :password_confirmation)
end

# Parameters permitted during Devise account updates.
# NOTE(review): :password added — the original permitted
# :password_confirmation and :current_password but not :password itself,
# which silently drops any attempted password change.
def account_update_params
  params.require(:user).permit(:name, :email, :password, :password_confirmation, :current_password)
end
end
| 26.25 | 90 | 0.774603 |
1162c2d8cf6df62085f1551f063825f0db2a2d0b | 3,858 | class VimAT74 < Formula
desc "Vi 'workalike' with many additional features"
homepage "https://www.vim.org/"
url "https://github.com/vim/vim/archive/v7.4.2367.tar.gz"
sha256 "a9ae4031ccd73cc60e771e8bf9b3c8b7f10f63a67efce7f61cd694cd8d7cda5c"
# Bottle rebuild counter for this unchanged upstream version.
revision 18

bottle do
  sha256 "18867439556347ed829fc7bef2865f957bd8785b0ac44629e687cdedb6b624ba" => :mojave
  sha256 "d6b2ef5343bb627902044c1b7308247313a01c4423e66218748bd33d836ceda0" => :high_sierra
  sha256 "4f7321b2f4a244b305c78666aefe4c45a3cb832f75b67d60b26f180f4e5391f6" => :sierra
  sha256 "cc6f1dbd75c1f44edc5bfdf801b5eea9fe8777637f5bebb588112ab1f3906204" => :x86_64_linux
end

# Versioned formula: never linked into the default prefix.
keg_only :versioned_formula

depends_on "lua"
depends_on "perl"
depends_on "python"
depends_on "ruby"

# Python 3.7 compat
# Equivalent to upstream commit 24 Mar 2018 "patch 8.0.1635: undefining
# _POSIX_THREADS causes problems with Python 3"
# See https://github.com/vim/vim/commit/16d7eced1a08565a9837db8067c7b9db5ed68854
patch :DATA
# Configures and builds vim with Lua/Perl/Ruby/Python3 interpreters enabled,
# then installs with stripping disabled.
def install
  ENV.prepend_path "PATH", Formula["python"].opt_libexec/"bin"

  # https://github.com/Homebrew/homebrew-core/pull/1046
  ENV.delete("SDKROOT")
  ENV["LUA_PREFIX"] = HOMEBREW_PREFIX if build.with?("lua") || build.with?("luajit")

  # vim doesn't require any Python package, unset PYTHONPATH.
  ENV.delete("PYTHONPATH")

  # We specify HOMEBREW_PREFIX as the prefix to make vim look in the
  # the right place (HOMEBREW_PREFIX/share/vim/{vimrc,vimfiles}) for
  # system vimscript files. We specify the normal installation prefix
  # when calling "make install".
  # Homebrew will use the first suitable Perl & Ruby in your PATH if you
  # build from source. Please don't attempt to hardcode either.
  system "./configure", "--prefix=#{HOMEBREW_PREFIX}",
    "--mandir=#{man}",
    "--enable-multibyte",
    "--with-tlib=ncurses",
    "--enable-cscope",
    "--with-compiledby=Homebrew",
    "--enable-perlinterp",
    "--enable-rubyinterp",
    "--enable-python3interp",
    "--enable-gui=no",
    "--without-x",
    "--enable-luainterp",
    "--with-lua-prefix=#{Formula["lua"].opt_prefix}"
  system "make"
  # Parallel install could miss some symlinks
  # https://github.com/vim/vim/issues/1031
  ENV.deparallelize
  # If stripping the binaries is enabled, vim will segfault with
  # statically-linked interpreters like ruby
  # https://github.com/vim/vim/issues/114
  system "make", "install", "prefix=#{prefix}", "STRIP=#{which "true"}"
  bin.install_symlink "vim" => "vi" if build.with? "override-system-vi"
end
# Smoke test: drive vim in ex mode with a scripted Python command and check
# the buffer it writes out.
test do
  # NOTE(review): this formula depends on "python" (python3) and declares no
  # python@2 option, so the python3 branch below is the one expected to run.
  if OS.mac? && build.with?("python@2")
    (testpath/"commands.vim").write <<~EOS
      :python import vim; vim.current.buffer[0] = 'hello world'
      :wq
    EOS
    system bin/"vim", "-T", "dumb", "-s", "commands.vim", "test.txt"
    assert_equal "hello world", File.read("test.txt").chomp
  elsif build.with? "python"
    (testpath/"commands.vim").write <<~EOS
      :python3 import vim; vim.current.buffer[0] = 'hello python3'
      :wq
    EOS
    system bin/"vim", "-T", "dumb", "-s", "commands.vim", "test.txt"
    assert_equal "hello python3", File.read("test.txt").chomp
  end
end
end
__END__
diff --git a/src/if_python3.c b/src/if_python3.c
index 02d913492c..59c115dd8d 100644
--- a/src/if_python3.c
+++ b/src/if_python3.c
@@ -34,11 +34,6 @@
#include <limits.h>
-/* Python.h defines _POSIX_THREADS itself (if needed) */
-#ifdef _POSIX_THREADS
-# undef _POSIX_THREADS
-#endif
-
#if defined(_WIN32) && defined(HAVE_FCNTL_H)
# undef HAVE_FCNTL_H
#endif
| 37.096154 | 94 | 0.644116 |
26b25196961f6270c38d69c33a743e5fa75ddaf7 | 2,662 | #encoding: utf-8
require_relative "../generator"
require_relative "../fields_table_generator"
require_relative "../options_example_generator"
require_relative "../options_table_generator"
require_relative "../sections_generator"
require_relative "component_generator"
module Docs
# Generates the markdown/GitBook documentation page for a single transform
# component: front-matter, a three-tab config example (example / schema /
# specification), an options table, and several optional trailing sections
# provided by ComponentGenerator helper methods.
class TransformGenerator < ComponentGenerator
# NOTE(review): not referenced within this class — presumably used by callers
# or templates to resolve repo-root-relative paths; confirm before removing.
ROOT_PATH = "../../../"
attr_reader :options_example_generator,
:options_table_generator,
:sections_generator,
:transform
# transform - component metadata object exposing #name, #options, #sections,
#             #input_types and #allow_you_to_description
# guides    - forwarded unchanged to ComponentGenerator's initializer
def initialize(transform, guides)
super(guides)
# Sort option values so the generated examples/tables have a deterministic,
# alphabetical layout regardless of hash ordering.
options = transform.options.to_h.values.sort
@options_example_generator = OptionsExampleGenerator.new(options)
@options_table_generator = OptionsTableGenerator.new(options, transform.sections)
@sections_generator = SectionsGenerator.new(transform.sections)
@transform = transform
end
# Returns the complete markdown document as a String. The squiggly heredoc
# (<<~) strips common leading indentation, so each interpolated generator
# keeps control of its own block formatting.
def generate
content = <<~EOF
---
description: #{remove_markdown_links(transform.allow_you_to_description)}
---
#{warning}
# #{transform.name} transform

#{beta(transform)}
The `#{transform.name}` transforms accepts #{event_type_links(transform.input_types).to_sentence} events and allows you to #{transform.allow_you_to_description}.
## Config File
{% code-tabs %}
{% code-tabs-item title="example" %}
```toml
#{options_example_generator.generate(
"transforms.my_#{transform.name}_transform_id",
:examples
)}
```
{% endcode-tabs-item %}
{% code-tabs-item title="schema" %}
```toml
#{options_example_generator.generate(
"transforms.<transform-id>",
:schema
)}
```
{% endcode-tabs-item %}
{% code-tabs-item title="specification" %}
```toml
#{options_example_generator.generate(
"transforms.#{transform.name}",
:spec
)}
```
{% endcode-tabs-item %}
{% endcode-tabs %}
## Options
#{options_table_generator.generate}
#{example_section(transform)}
#{guides_section(transform)}
#{how_it_works_section}
#{troubleshooting_section(transform)}
#{resources_section(transform)}
EOF
content
end
private
# Renders the "## How It Works" section, or an empty string when the
# transform declares no sections — so the heading is omitted entirely
# rather than left dangling over empty content.
def how_it_works_section
content = sections_generator.generate.strip
if content == ""
""
else
content =
<<~EOF
## How It Works
#{content}
EOF
content.strip
end
end
end
end | 24.648148 | 169 | 0.601803 |
bb7d7e8f7b30bded1f8950b4de263433441e4121 | 648 | # frozen_string_literal: true
module EasyOrderable
  # Applies an ORDER BY to a relation. The relation is first run through
  # Joiner (which appears to add the association joins the ordering needs —
  # see easy_orderable/joiner) and the raw arguments are rewritten by
  # OrderArgumentsTransformer before being handed to #order.
  class Assorter
    # relation                 - the base relation to sort
    # args                     - caller-supplied ordering arguments
    # custom_association_names - mapping consumed by Joiner
    def initialize(relation, args, custom_association_names)
      @relation = relation
      @args = args
      @custom_association_names = custom_association_names
    end

    # Returns the joined relation with the transformed ordering applied.
    def call
      joined = Joiner.new(@relation, @args, @custom_association_names).call
      joined.order(*OrderArgumentsTransformer.new(@args).call)
    end
  end
end
require 'easy_orderable/joiner'
require 'easy_orderable/order_arguments_transformer'
| 20.903226 | 63 | 0.737654 |
abc6a8bb2d2ea0cad8a5b56502b32635a8c1da2d | 411 | require 'rbconfig'
# Truthy (a match index) on native-Windows rubies, nil elsewhere.
# RbConfig host_os values are lowercase ("mswin32", "mingw32", ...), so the
# old /Windows|mswin/ pattern missed the common MinGW builds entirely and the
# capitalised "Windows" alternative could never match; use a case-insensitive
# pattern that covers both native build flavours.
WINDOWS = RbConfig::CONFIG['host_os'] =~ /mswin|mingw|windows/i

# Platform-appropriate null device used by #quiet to discard output.
@devnull = WINDOWS ? 'NUL:' : '/dev/null'
# Runs the given block with both STDOUT and STDERR redirected to the null
# device (@devnull), then restores the original streams — even if the block
# raises. Returns the block's value.
def quiet
  saved_out = STDOUT.dup
  saved_err = STDERR.dup
  STDOUT.reopen @devnull
  STDERR.reopen @devnull
  yield
ensure
  STDOUT.reopen saved_out
  STDERR.reopen saved_err
  # Re-point the global stream variables at the restored constants in case
  # anything rebound them while the block ran.
  $stdout = STDOUT
  $stderr = STDERR
end
# Demo: the first two writes happen while STDOUT is redirected to the null
# device, so only the final 'foo' reaches the terminal.
quiet { puts 'foo' }
quiet { puts 'foo' }
puts 'foo'
| 16.44 | 56 | 0.671533 |
7a13daa9a520f2e86bab12b667ec732c2a0b1c67 | 288 | class Flightgear < Cask
# Legacy homebrew-cask DSL body (the `class Flightgear < Cask` header sits on
# the preceding line, fused with dataset metadata).
# Where to download the disk image for this version.
url 'http://ftp.snt.utwente.nl/pub/software/flightgear/ftp/MacOSX/FlightGear-2.12.1.dmg'
homepage 'http://www.flightgear.org/'
version '2.12.1'
# SHA-256 checksum of the .dmg referenced by `url` above.
sha256 'd58f57bc22377bc4eaf4324e6f534d6cbf4e43a2830b587b91b1120169f9d53a'
# App bundle inside the image to symlink into the Applications folder.
link 'FlightGear.app'
end
| 36 | 95 | 0.753472 |
fff77c572ee454f3c6442ac8b18569d47af586ed | 336 | Deface::Override.new(
# Deface override arguments (the `Deface::Override.new(` opener and the closing
# `)` sit on the neighbouring dataset-metadata lines).
# Template whose rendered output is being modified.
:virtual_path => "layouts/erp/backend/_sidebar",
:name => "add_backend_contacts_link_to_sidebar_menu",
# Inject the partial immediately after the sidebar-menu hook element.
:insert_after => "[data-erp-hook='sidebar-menu']",
:partial => "overrides/backend_contacts_link_to_sidebar_menu",
:namespaced => true,
# Digest of the original markup; Deface warns if the source template changes.
:original => 'f5fe48b6dc6986328e0b873b3ffa1b228dd52a7c'
) | 42 | 65 | 0.738095 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.