hexsha (stringlengths 40..40) | size (int64 2..1.01M) | content (stringlengths 2..1.01M) | avg_line_length (float64 1.5..100) | max_line_length (int64 2..1k) | alphanum_fraction (float64 0.25..1) |
---|---|---|---|---|---|
4a72fca84db7c65a03ba6c2b9b0e0969a5a2b61b | 769 | require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../shared/new', __FILE__)
require 'iconv'
describe "Iconv.open" do
it_behaves_like :iconv_new, :open
it "with a block invokes the block exactly once" do
count = 0
Iconv.open "us-ascii", "us-ascii" do
count += 1
end
count.should == 1
end
it "with a block yields the converter" do
Iconv.open "us-ascii", "us-ascii" do |conv|
conv.should be_kind_of(Iconv)
end
end
it "with a block returns the result of the block" do
Iconv.open("us-ascii", "us-ascii") { "block return value" }.should == "block return value"
end
# not testable with the current API:
# it "with a block always closes the converter when exiting the block"
end
| 26.517241 | 94 | 0.671001 |
e842d4c66c5cf06f29cc39c5341094a7c208b055 | 832 | # frozen_string_literal: true
class Mutations::UpdateOrder < Mutations::BaseMutation
field :order, Types::OrderType, null: false
argument :id,
Integer,
deprecation_reason:
"IDs are transitioning to the ID type. For the moment, please use the transitionalId field until \
all id fields are replaced with ones of type ID.",
required: false
argument :transitional_id, ID, required: false, camelize: true
argument :order, Types::OrderInputType, required: true
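# A hedged example of invoking this mutation, assuming it is mounted
# as "updateOrder" in the schema (input values are illustrative):
#
#   mutation {
#     updateOrder(input: { transitionalId: "42", order: {} }) {
#       order { id }
#     }
#   }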
load_and_authorize_model_with_id Order, :id, :update
def resolve(**args)
order_attrs = args[:order].to_h
if order_attrs[:payment_amount]
order_attrs[:payment_amount] = MoneyHelper.coerce_money_input(order_attrs[:payment_amount])
end
order.update!(order_attrs)
{ order: order }
end
end
| 32 | 112 | 0.711538 |
61a368b63bc2586fc56331fed931b408c3570d2e | 137 | # Be sure to restart your server when you modify this file.
Rails.application.config.session_store :cookie_store, key: '_mayak_session'
| 34.25 | 75 | 0.80292 |
91b16ce7751f1a7a6c9e86fc3988d18688f4e45d | 192 | class AddMasterToBillingVersion < ActiveRecord::Migration
def change
add_column :billing_versions, :master_id, :integer
add_column :billing_versions, :master_type, :string
end
end
| 27.428571 | 57 | 0.786458 |
030058fb4193fee60482144c04cd69678217c542 | 51 | module AflierSurvey
module CalcsHelper
end
end
| 10.2 | 20 | 0.803922 |
1d598a9bd54c943324b05c8a21e6871a5be76793 | 16,951 | require "pathname"
require "securerandom"
require "set"
require "vagrant"
require "vagrant/config/v2/util"
require "vagrant/util/platform"
require File.expand_path("../vm_provisioner", __FILE__)
require File.expand_path("../vm_subvm", __FILE__)
module VagrantPlugins
module Kernel_V2
class VMConfig < Vagrant.plugin("2", :config)
DEFAULT_VM_NAME = :default
attr_accessor :base_mac
attr_accessor :boot_timeout
attr_accessor :box
attr_accessor :box_url
attr_accessor :box_download_insecure
attr_accessor :graceful_halt_timeout
attr_accessor :guest
attr_accessor :hostname
attr_accessor :usable_port_range
attr_reader :provisioners
def initialize
@boot_timeout = UNSET_VALUE
@box_download_insecure = UNSET_VALUE
@graceful_halt_timeout = UNSET_VALUE
@guest = UNSET_VALUE
@hostname = UNSET_VALUE
@provisioners = []
# Internal state
@__compiled_provider_configs = {}
@__defined_vm_keys = []
@__defined_vms = {}
@__finalized = false
@__networks = {}
@__providers = {}
@__provider_overrides = {}
@__synced_folders = {}
end
# This was from V1, but we just kept it here as an alias for hostname
# because too many people mess this up.
def host_name=(value)
@hostname = value
end
# Custom merge method since some keys here are merged differently.
def merge(other)
super.tap do |result|
other_networks = other.instance_variable_get(:@__networks)
result.instance_variable_set(:@__networks, @__networks.merge(other_networks))
result.instance_variable_set(:@provisioners, @provisioners + other.provisioners)
# Merge defined VMs by first merging the defined VM keys,
# preserving the order in which they were defined.
other_defined_vm_keys = other.instance_variable_get(:@__defined_vm_keys)
other_defined_vm_keys -= @__defined_vm_keys
new_defined_vm_keys = @__defined_vm_keys + other_defined_vm_keys
# Merge the actual defined VMs.
other_defined_vms = other.instance_variable_get(:@__defined_vms)
new_defined_vms = {}
@__defined_vms.each do |key, subvm|
new_defined_vms[key] = subvm.clone
end
other_defined_vms.each do |key, subvm|
if !new_defined_vms.has_key?(key)
new_defined_vms[key] = subvm.clone
else
new_defined_vms[key].config_procs.concat(subvm.config_procs)
new_defined_vms[key].options.merge!(subvm.options)
end
end
# Merge the providers by prepending any configuration blocks we
# have for providers onto the new configuration.
other_providers = other.instance_variable_get(:@__providers)
new_providers = @__providers.dup
other_providers.each do |key, blocks|
new_providers[key] ||= []
new_providers[key] += blocks
end
# Merge the provider overrides by appending them...
other_overrides = other.instance_variable_get(:@__provider_overrides)
new_overrides = @__provider_overrides.dup
other_overrides.each do |key, blocks|
new_overrides[key] ||= []
new_overrides[key] += blocks
end
# Merge synced folders.
other_folders = other.instance_variable_get(:@__synced_folders)
new_folders = {}
@__synced_folders.each do |key, value|
new_folders[key] = value.dup
end
other_folders.each do |id, options|
new_folders[id] ||= {}
new_folders[id].merge!(options)
end
result.instance_variable_set(:@__defined_vm_keys, new_defined_vm_keys)
result.instance_variable_set(:@__defined_vms, new_defined_vms)
result.instance_variable_set(:@__providers, new_providers)
result.instance_variable_set(:@__provider_overrides, new_overrides)
result.instance_variable_set(:@__synced_folders, new_folders)
end
end
# Defines a synced folder pair. This pair of folders will be synced
# to/from the machine. Note that if the machine you're using doesn't
# support multi-directional syncing (perhaps an rsync backed synced
# folder) then the host is always synced to the guest but guest data
# may not be synced back to the host.
#
# @param [String] hostpath Path to the host folder to share. If this
# is a relative path, it is relative to the location of the
# Vagrantfile.
# @param [String] guestpath Path on the guest to mount the shared
# folder.
# @param [Hash] options Additional options.
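#
# A short usage sketch (paths and options are illustrative, not taken
# from this file):
#
#   config.vm.synced_folder "./data", "/vagrant_data", create: true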
def synced_folder(hostpath, guestpath, options=nil)
if Vagrant::Util::Platform.windows?
# On Windows, Ruby just uses normal '/' for path seps, so
# just replace normal Windows style seps with Unix ones.
hostpath = hostpath.to_s.gsub("\\", "/")
end
options ||= {}
options = options.dup
options[:guestpath] = guestpath.to_s.gsub(/\/$/, '')
options[:hostpath] = hostpath
@__synced_folders[options[:guestpath]] = options
end
# Define a way to access the machine via a network. This exposes a
# high-level abstraction for networking that may not directly map
# 1-to-1 for every provider. For example, AWS has no equivalent to
# "port forwarding." But most providers will attempt to implement this
# in a way that behaves similarly.
#
# `type` can be one of:
#
# * `:forwarded_port` - A port that is accessible via localhost
# that forwards into the machine.
# * `:private_network` - The machine gets an IP that is not directly
# publicly accessible, but ideally accessible from this machine.
# * `:public_network` - The machine gets an IP on a shared network.
#
# @param [Symbol] type Type of network
# @param [Hash] options Options for the network.
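#
# For example (ports and addresses are illustrative):
#
#   config.vm.network :forwarded_port, guest: 80, host: 8080
#   config.vm.network :private_network, ip: "192.168.33.10"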
def network(type, options=nil)
options ||= {}
options = options.dup
options[:protocol] ||= "tcp"
if !options[:id]
default_id = nil
if type == :forwarded_port
# For forwarded ports, set the default ID to the
# host port so that host ports overwrite each other.
default_id = "#{options[:protocol]}#{options[:host]}"
end
options[:id] = default_id || SecureRandom.uuid
end
# Scope the ID by type so that different types can share IDs
id = options[:id]
id = "#{type}-#{id}"
# Merge in the previous settings if we have them.
if @__networks.has_key?(id)
options = @__networks[id][1].merge(options)
end
# Merge in the latest settings and set the internal state
@__networks[id] = [type.to_sym, options]
end
# Configures a provider for this VM.
#
# @param [Symbol] name The name of the provider.
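#
# A minimal sketch, assuming the VirtualBox provider is installed
# (the memory setting is illustrative):
#
#   config.vm.provider :virtualbox do |vb|
#     vb.memory = 1024
#   end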
def provider(name, &block)
name = name.to_sym
@__providers[name] ||= []
@__provider_overrides[name] ||= []
if block_given?
@__providers[name] << block
# If this block takes two arguments, then we curry it and store
# the configuration override for use later.
if block.arity == 2
@__provider_overrides[name] << block.curry[Vagrant::Config::V2::DummyConfig.new]
end
end
end
def provision(name, options=nil, &block)
@provisioners << VagrantConfigProvisioner.new(name.to_sym, options, &block)
end
def defined_vms
@__defined_vms
end
# This returns the keys of the sub-vms in the order they were
# defined.
def defined_vm_keys
@__defined_vm_keys
end
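# Defines a sub-VM for multi-machine setups. A hedged sketch of the
# Vagrantfile-level usage (machine name and box are illustrative):
#
#   config.vm.define :web do |web|
#     web.vm.box = "hashicorp/precise64"
#   end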
def define(name, options=nil, &block)
name = name.to_sym
options ||= {}
options = options.dup
options[:config_version] ||= "2"
# Add the name to the array of VM keys. This array is used to
# preserve the order in which VMs are defined.
@__defined_vm_keys << name if !@__defined_vm_keys.include?(name)
# Add the SubVM to the hash of defined VMs
if !@__defined_vms[name]
@__defined_vms[name] = VagrantConfigSubVM.new
end
@__defined_vms[name].options.merge!(options)
@__defined_vms[name].config_procs << [options[:config_version], block] if block
end
#-------------------------------------------------------------------
# Internal methods, don't call these.
#-------------------------------------------------------------------
def finalize!
# Defaults
@boot_timeout = 300 if @boot_timeout == UNSET_VALUE
@box_download_insecure = false if @box_download_insecure == UNSET_VALUE
@graceful_halt_timeout = 300 if @graceful_halt_timeout == UNSET_VALUE
@guest = nil if @guest == UNSET_VALUE
@hostname = nil if @hostname == UNSET_VALUE
@hostname = @hostname.to_s if @hostname
# Set the guest properly
@guest = @guest.to_sym if @guest
# If we haven't defined a single VM, then we need to define a
# default VM which just inherits the rest of the configuration.
define(DEFAULT_VM_NAME) if defined_vm_keys.empty?
# Compile all the provider configurations
@__providers.each do |name, blocks|
# If we don't have any configuration blocks, then ignore it
next if blocks.empty?
# Find the configuration class for this provider
config_class = Vagrant.plugin("2").manager.provider_configs[name]
config_class ||= Vagrant::Config::V2::DummyConfig
# Load it up
config = config_class.new
blocks.each do |b|
b.call(config, Vagrant::Config::V2::DummyConfig.new)
end
config.finalize!
# Store it for retrieval later
@__compiled_provider_configs[name] = config
end
@__synced_folders.each do |id, options|
# Ignore NFS on Windows
if options[:nfs] && Vagrant::Util::Platform.windows?
options[:nfs] = false
end
end
# Flag that we finalized
@__finalized = true
end
# This returns the compiled provider-specific configuration for the
# given provider.
#
# @param [Symbol] name Name of the provider.
def get_provider_config(name)
raise "Must finalize first." if !@__finalized
result = @__compiled_provider_configs[name]
# If no compiled configuration was found, then we try to just
# use the default configuration from the plugin.
if !result
config_class = Vagrant.plugin("2").manager.provider_configs[name]
if config_class
result = config_class.new
result.finalize!
end
end
return result
end
# This returns a list of VM configurations that are overrides
# for this provider.
#
# @param [Symbol] name Name of the provider
# @return [Array<Proc>]
def get_provider_overrides(name)
(@__provider_overrides[name] || []).map do |p|
["2", p]
end
end
# This returns the list of networks configured.
def networks
@__networks.values
end
# This returns the list of synced folders
def synced_folders
@__synced_folders
end
def validate(machine)
errors = _detected_errors
errors << I18n.t("vagrant.config.vm.box_missing") if !box
errors << I18n.t("vagrant.config.vm.box_not_found", :name => box) if \
box && !box_url && !machine.box
errors << I18n.t("vagrant.config.vm.hostname_invalid_characters") if \
@hostname && @hostname !~ /^[-.a-z0-9]+$/i
has_nfs = false
used_guest_paths = Set.new
@__synced_folders.each do |id, options|
# If the shared folder is disabled then don't worry about validating it
next if options[:disabled]
guestpath = Pathname.new(options[:guestpath])
hostpath = Pathname.new(options[:hostpath]).expand_path(machine.env.root_path)
if guestpath.relative? && guestpath.to_s !~ /^\w+:/
errors << I18n.t("vagrant.config.vm.shared_folder_guestpath_relative",
:path => options[:guestpath])
else
if used_guest_paths.include?(options[:guestpath])
errors << I18n.t("vagrant.config.vm.shared_folder_guestpath_duplicate",
:path => options[:guestpath])
end
used_guest_paths.add(options[:guestpath])
end
if !hostpath.directory? && !options[:create]
errors << I18n.t("vagrant.config.vm.shared_folder_hostpath_missing",
:path => options[:hostpath])
end
if options[:nfs]
has_nfs = true
if options[:owner] || options[:group]
# Owner/group don't work with NFS
errors << I18n.t("vagrant.config.vm.shared_folder_nfs_owner_group",
:path => options[:hostpath])
end
end
if options[:mount_options] && !options[:mount_options].is_a?(Array)
errors << I18n.t("vagrant.config.vm.shared_folder_mount_options_array")
end
# One day remove this probably.
if options[:extra]
errors << "The 'extra' flag on synced folders is now 'mount_options'"
end
end
if has_nfs
if !machine.env.host
errors << I18n.t("vagrant.config.vm.nfs_requires_host")
else
errors << I18n.t("vagrant.config.vm.nfs_not_supported") if \
!machine.env.host.nfs?
end
end
# Validate networks
has_fp_port_error = false
fp_used = Set.new
valid_network_types = [:forwarded_port, :private_network, :public_network]
networks.each do |type, options|
if !valid_network_types.include?(type)
errors << I18n.t("vagrant.config.vm.network_type_invalid",
:type => type.to_s)
end
if type == :forwarded_port
if !has_fp_port_error && (!options[:guest] || !options[:host])
errors << I18n.t("vagrant.config.vm.network_fp_requires_ports")
has_fp_port_error = true
end
if options[:host]
key = "#{options[:protocol]}#{options[:host]}"
if fp_used.include?(key)
errors << I18n.t("vagrant.config.vm.network_fp_host_not_unique",
:host => options[:host].to_s,
:protocol => options[:protocol].to_s)
end
fp_used.add(key)
end
end
if type == :private_network
if options[:type] != :dhcp
if !options[:ip]
errors << I18n.t("vagrant.config.vm.network_ip_required")
end
end
if options[:ip] && options[:ip].end_with?(".1")
errors << I18n.t("vagrant.config.vm.network_ip_ends_in_one")
end
end
end
# We're done with VM level errors so prepare the section
errors = { "vm" => errors }
# Validate only the _active_ provider
if machine.provider_config
provider_errors = machine.provider_config.validate(machine)
if provider_errors
errors = Vagrant::Config::V2::Util.merge_errors(errors, provider_errors)
end
end
# Validate provisioners
@provisioners.each do |vm_provisioner|
if vm_provisioner.invalid?
errors["vm"] << I18n.t("vagrant.config.vm.provisioner_not_found",
:name => vm_provisioner.name)
next
end
if vm_provisioner.config
provisioner_errors = vm_provisioner.config.validate(machine)
if provisioner_errors
errors = Vagrant::Config::V2::Util.merge_errors(errors, provisioner_errors)
end
end
end
errors
end
end
end
end
| 35.388309 | 92 | 0.586691 |
03ca19f07737cb93cc7ed5aa1d883a1487bfc236 | 16,862 | require_relative "util/ssh"
require "digest/md5"
require "thread"
require "log4r"
module Vagrant
# This represents a machine that Vagrant manages. This provides a singular
# API for querying the state and making state changes to the machine, which
# is backed by any sort of provider (VirtualBox, VMware, etc.).
class Machine
# The box that is backing this machine.
#
# @return [Box]
attr_accessor :box
# Configuration for the machine.
#
# @return [Object]
attr_accessor :config
# Directory where machine-specific data can be stored.
#
# @return [Pathname]
attr_reader :data_dir
# The environment that this machine is a part of.
#
# @return [Environment]
attr_reader :env
# ID of the machine. This ID comes from the provider and is not
# guaranteed to be of any particular format except that it is
# a string.
#
# @return [String]
attr_reader :id
# Name of the machine. This is assigned by the Vagrantfile.
#
# @return [Symbol]
attr_reader :name
# The provider backing this machine.
#
# @return [Object]
attr_reader :provider
# The provider-specific configuration for this machine.
#
# @return [Object]
attr_accessor :provider_config
# The name of the provider.
#
# @return [Symbol]
attr_reader :provider_name
# The options given to the provider when registering the plugin.
#
# @return [Hash]
attr_reader :provider_options
# The UI for outputting in the scope of this machine.
#
# @return [UI]
attr_reader :ui
# The Vagrantfile that this machine is attached to.
#
# @return [Vagrantfile]
attr_reader :vagrantfile
# Initialize a new machine.
#
# @param [String] name Name of the virtual machine.
# @param [Class] provider The provider backing this machine. This is
# currently expected to be a V1 `provider` plugin.
# @param [Object] provider_config The provider-specific configuration for
# this machine.
# @param [Hash] provider_options The provider-specific options from the
# plugin definition.
# @param [Object] config The configuration for this machine.
# @param [Pathname] data_dir The directory where machine-specific data
# can be stored. This directory is ensured to exist.
# @param [Box] box The box that is backing this virtual machine.
# @param [Environment] env The environment that this machine is a
# part of.
def initialize(name, provider_name, provider_cls, provider_config, provider_options, config, data_dir, box, env, vagrantfile, base=false)
@logger = Log4r::Logger.new("vagrant::machine")
@logger.info("Initializing machine: #{name}")
@logger.info(" - Provider: #{provider_cls}")
@logger.info(" - Box: #{box}")
@logger.info(" - Data dir: #{data_dir}")
@box = box
@config = config
@data_dir = data_dir
@env = env
@vagrantfile = vagrantfile
@guest = Guest.new(
self,
Vagrant.plugin("2").manager.guests,
Vagrant.plugin("2").manager.guest_capabilities)
@name = name
@provider_config = provider_config
@provider_name = provider_name
@provider_options = provider_options
@ui = Vagrant::UI::Prefixed.new(@env.ui, @name)
@ui_mutex = Mutex.new
# Read the ID, which is usually in local storage
@id = nil
# XXX: This is temporary. This will be removed very soon.
if base
@id = name
else
reload
end
# Keep track of where our UUID should be placed
@index_uuid_file = nil
@index_uuid_file = @data_dir.join("index_uuid") if @data_dir
# Initializes the provider last so that it has access to all the
# state we setup on this machine.
@provider = provider_cls.new(self)
@provider._initialize(@provider_name, self)
# If we're using WinRM, we eager load the plugin because of
# GH-3390
if @config.vm.communicator == :winrm
@logger.debug("Eager loading WinRM communicator to avoid GH-3390")
communicate
end
# If the ID is the special not created ID, then set our ID to
# nil so that we destroy all our data.
if state.id == MachineState::NOT_CREATED_ID
self.id = nil
end
end
# This calls an action on the provider. The provider may or may not
# actually implement the action.
#
# @param [Symbol] name Name of the action to run.
# @param [Hash] opts Options for the action. The :lock key controls
#   whether the machine is locked for the duration of the action; all
#   remaining keys are passed into the action runner as extra data set
#   on the environment hash for the middleware runner.
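#
# For example (the action name and option are illustrative):
#
#   machine.action(:up, lock: false)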
def action(name, opts=nil)
@logger.info("Calling action: #{name} on provider #{@provider}")
opts ||= {}
# Determine whether we lock or not
lock = true
lock = opts.delete(:lock) if opts.key?(:lock)
# Extra env keys are the remaining opts
extra_env = opts.dup
# Create a deterministic ID for this machine
vf = nil
vf = @env.vagrantfile_name[0] if @env.vagrantfile_name
id = Digest::MD5.hexdigest(
"#{@env.root_path}#{vf}#{@name}")
# We only lock if we're not executing an SSH action. In the future
# we will want to do more fine-grained unlocking in actions themselves
# but for a 1.6.2 release this will work.
locker = Proc.new { |*args, &block| block.call }
locker = @env.method(:lock) if lock && !name.to_s.start_with?("ssh")
# Lock this machine for the duration of this action
locker.call("machine-action-#{id}") do
# Get the callable from the provider.
callable = @provider.action(name)
# If this action doesn't exist on the provider, then an exception
# must be raised.
if callable.nil?
raise Errors::UnimplementedProviderAction,
action: name,
provider: @provider.to_s
end
# Call the action
action_raw(name, callable, extra_env)
end
rescue Errors::EnvironmentLockedError
raise Errors::MachineActionLockedError,
action: name,
name: @name
end
# This calls a raw callable in the proper context of the machine using
# the middleware stack.
#
# @param [Symbol] name Name of the action
# @param [Proc] callable
# @param [Hash] extra_env Extra env for the action env.
# @return [Hash] The resulting env
def action_raw(name, callable, extra_env=nil)
# Run the action with the action runner on the environment
env = {
action_name: "machine_action_#{name}".to_sym,
machine: self,
machine_action: name,
ui: @ui,
}.merge(extra_env || {})
@env.action_runner.run(callable, env)
end
# Returns a communication object for executing commands on the remote
# machine. Note that the _exact_ semantics of this are up to the
# communication provider itself. Despite this, the semantics are expected
# to be consistent across operating systems. For example, all linux-based
# systems should have similar communication (usually a shell). All
# Windows systems should have similar communication as well. Therefore,
# prior to communicating with the machine, users of this method are
# expected to check the guest OS to determine their behavior.
#
# This method will _always_ return some valid communication object.
# The `ready?` API can be used on the object to check if communication
# is actually ready.
#
# @return [Object]
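#
# A brief usage sketch (+execute+ belongs to the communicator plugin
# API; the command is illustrative):
#
#   machine.communicate.execute("hostname") if machine.communicate.ready?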
def communicate
if !@communicator
requested = @config.vm.communicator
requested ||= :ssh
klass = Vagrant.plugin("2").manager.communicators[requested]
raise Errors::CommunicatorNotFound, comm: requested.to_s if !klass
@communicator = klass.new(self)
end
@communicator
end
# Returns a guest implementation for this machine. The guest implementation
# knows how to do guest-OS specific tasks, such as configuring networks,
# mounting folders, etc.
#
# @return [Guest]
def guest
raise Errors::MachineGuestNotReady if !communicate.ready?
@guest.detect! if [email protected]?
@guest
end
# This sets the unique ID associated with this machine. This will
# persist this ID so that in the future Vagrant will be able to find
# this machine again. The unique ID must be absolutely unique to the
# virtual machine, and can be used by providers for finding the
# actual machine associated with this instance.
#
# **WARNING:** Only providers should ever use this method.
#
# @param [String] value The ID.
def id=(value)
@logger.info("New machine ID: #{value.inspect}")
id_file = nil
if @data_dir
# The file that will store the id if we have one. This allows the
# ID to persist across Vagrant runs. Also, store the UUID for the
# machine index.
id_file = @data_dir.join("id")
end
if value
if id_file
# Write the "id" file with the id given.
id_file.open("w+") do |f|
f.write(value)
end
end
# If we don't have a UUID, then create one
if index_uuid.nil?
# Create the index entry and save it
entry = MachineIndex::Entry.new
entry.local_data_path = @env.local_data_path
entry.name = @name.to_s
entry.provider = @provider_name.to_s
entry.state = "preparing"
entry.vagrantfile_path = @env.root_path
entry.vagrantfile_name = @env.vagrantfile_name
if @box
entry.extra_data["box"] = {
"name" => @box.name,
"provider" => @box.provider.to_s,
"version" => @box.version.to_s,
}
end
entry = @env.machine_index.set(entry)
@env.machine_index.release(entry)
# Store our UUID so we can access it later
if @index_uuid_file
@index_uuid_file.open("w+") do |f|
f.write(entry.id)
end
end
end
else
# Delete the file, since the machine is now destroyed
id_file.delete if id_file && id_file.file?
# If we have a UUID associated with the index, remove it
uuid = index_uuid
if uuid
entry = @env.machine_index.get(uuid)
@env.machine_index.delete(entry) if entry
end
if @data_dir
# Delete the entire data directory contents since all state
# associated with the VM is now gone.
@data_dir.children.each do |child|
begin
child.rmtree
rescue Errno::EACCES
@logger.info("EACCESS deleting file: #{child}")
end
end
end
end
# Store the ID locally
@id = value.nil? ? nil : value.to_s
# Notify the provider that the ID changed in case it needs to do
# any accounting from it.
@provider.machine_id_changed
end
# Returns the UUID associated with this machine in the machine
# index. We only have a UUID if an ID has been set.
#
# @return [String] UUID or nil if we don't have one yet.
def index_uuid
return nil if !@index_uuid_file
return @index_uuid_file.read.chomp if @index_uuid_file.file?
return nil
end
# This returns a clean inspect value so that printing the value via
# a pretty print (`p`) results in a readable value.
#
# @return [String]
def inspect
"#<#{self.class}: #{@name} (#{@provider.class})>"
end
# This reloads the ID of the underlying machine.
def reload
old_id = @id
@id = nil
if @data_dir
# Read the id file from the data directory if it exists as the
# ID for the pre-existing physical representation of this machine.
id_file = @data_dir.join("id")
@id = id_file.read.chomp if id_file.file?
end
if @id != old_id && @provider
# It changed, notify the provider
@provider.machine_id_changed
end
@id
end
# This returns the SSH info for accessing this machine. This SSH info
# is queried from the underlying provider. This method returns `nil` if
# the machine is not ready for SSH communication.
#
# The structure of the resulting hash is guaranteed to contain the
# following structure, although it may return other keys as well
# not documented here:
#
# {
# host: "1.2.3.4",
# port: "22",
# username: "mitchellh",
# private_key_path: "/path/to/my/key"
# }
#
# Note that Vagrant makes no guarantee that this info works or is
# correct. This is simply the data that the provider gives us or that
# is configured via a Vagrantfile. It is still possible after this
# point when attempting to connect via SSH to get authentication
# errors.
#
# @return [Hash] SSH information.
def ssh_info
# First, ask the provider for their information. If the provider
# returns nil, then the machine is simply not ready for SSH, and
# we return nil as well.
info = @provider.ssh_info
return nil if info.nil?
# Delete out the nil entries.
info.dup.each do |key, value|
info.delete(key) if value.nil?
end
# We set the defaults
info[:host] ||= @config.ssh.default.host
info[:port] ||= @config.ssh.default.port
info[:private_key_path] ||= @config.ssh.default.private_key_path
info[:username] ||= @config.ssh.default.username
# We set overrides if they are set. These take precedence over
# provider-returned data.
info[:host] = @config.ssh.host if @config.ssh.host
info[:port] = @config.ssh.port if @config.ssh.port
info[:username] = @config.ssh.username if @config.ssh.username
info[:password] = @config.ssh.password if @config.ssh.password
# We also set some fields that are purely controlled by Vagrant
info[:forward_agent] = @config.ssh.forward_agent
info[:forward_x11] = @config.ssh.forward_x11
# Add in provided proxy command config
info[:proxy_command] = @config.ssh.proxy_command if @config.ssh.proxy_command
# Set the private key path. If a specific private key is given in
# the Vagrantfile we set that. Otherwise, we use the default (insecure)
# private key, but only if the provider didn't give us one.
if !info[:private_key_path] && !info[:password]
if @config.ssh.private_key_path
info[:private_key_path] = @config.ssh.private_key_path
else
info[:private_key_path] = @env.default_private_key_path
end
end
# If we have a private key in our data dir, then use that
if @data_dir
data_private_key = @data_dir.join("private_key")
if data_private_key.file?
info[:private_key_path] = [data_private_key.to_s]
end
end
# Setup the keys
info[:private_key_path] ||= []
info[:private_key_path] = Array(info[:private_key_path])
# Expand the private key path relative to the root path
info[:private_key_path].map! do |path|
File.expand_path(path, @env.root_path)
end
# Check that the private key permissions are valid
info[:private_key_path].each do |path|
key_path = Pathname.new(path)
if key_path.exist?
Vagrant::Util::SSH.check_key_permissions(key_path)
end
end
# Return the final compiled SSH info data
info
end
# Returns the state of this machine. The state is queried from the
# backing provider, so it can be any arbitrary symbol.
#
# @return [MachineState]
def state
result = @provider.state
raise Errors::MachineStateInvalid if !result.is_a?(MachineState)
# Update our state cache if we have a UUID and an entry in the
# master index.
uuid = index_uuid
if uuid
entry = @env.machine_index.get(uuid)
if entry
entry.state = result.short_description
@env.machine_index.set(entry)
@env.machine_index.release(entry)
end
end
result
end
# Temporarily changes the machine UI. This is useful if you want
# to execute an {#action} with a different UI.
def with_ui(ui)
@ui_mutex.synchronize do
begin
old_ui = @ui
@ui = ui
yield
ensure
@ui = old_ui
end
end
end
end
end
| 32.869396 | 141 | 0.627268 |
26bc87228689e321bfbaaa356abb012e4b2c1e83 | 4,772 | module UnpackStrategy
module Magic
# length of the longest regex (currently Tar)
MAX_MAGIC_NUMBER_LENGTH = 262
refine Pathname do
def magic_number
@magic_number ||= if directory?
""
else
binread(MAX_MAGIC_NUMBER_LENGTH) || ""
end
end
def file_type
@file_type ||= system_command("file", args: ["-b", self], print_stderr: false)
.stdout.chomp
end
def zipinfo
@zipinfo ||= system_command("zipinfo", args: ["-1", self], print_stderr: false)
.stdout
.encode(Encoding::UTF_8, invalid: :replace)
.split("\n")
end
end
end
private_constant :Magic
def self.strategies
@strategies ||= [
Tar, # needs to be before Bzip2/Gzip/Xz/Lzma
Pax,
Gzip,
Lzma,
Xz,
Lzip,
Air, # needs to be before Zip
Jar, # needs to be before Zip
LuaRock, # needs to be before Zip
MicrosoftOfficeXml, # needs to be before Zip
Zip,
Pkg, # needs to be before Xar
Xar,
Ttf,
Otf,
Git,
Mercurial,
Subversion,
Cvs,
SelfExtractingExecutable, # needs to be before Cab
Cab,
Executable,
Dmg, # needs to be before Bzip2
Bzip2,
Fossil,
Bazaar,
Compress,
P7Zip,
Sit,
Rar,
Lha,
].freeze
end
private_class_method :strategies
def self.from_type(type)
type = {
naked: :uncompressed,
nounzip: :uncompressed,
seven_zip: :p7zip,
}.fetch(type, type)
begin
const_get(type.to_s.split("_").map(&:capitalize).join.gsub(/\d+[a-z]/, &:upcase))
rescue NameError
nil
end
end
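# Strategies with longer extensions are preferred so that, for example,
# a ".tar.gz" match wins over a bare ".gz" one.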
def self.from_extension(extension)
strategies.sort_by { |s| s.extensions.map(&:length).max || 0 }
.reverse
.find { |s| s.extensions.any? { |ext| extension.end_with?(ext) } }
end
def self.from_magic(path)
strategies.find { |s| s.can_extract?(path) }
end
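# Picks a strategy for +path+: an explicit +type+ wins, then (by
# default) magic-number detection, then the file extension, falling
# back to Uncompressed. A hedged usage sketch (the path is
# illustrative):
#
#   UnpackStrategy.detect(Pathname("archive.tar.gz")).extract(to: "/tmp/out")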
def self.detect(path, extension_only: false, type: nil, ref_type: nil, ref: nil)
strategy = from_type(type) if type
if extension_only
strategy ||= from_extension(path.extname)
strategy ||= strategies.select { |s| s < Directory || s == Fossil }
.find { |s| s.can_extract?(path) }
else
strategy ||= from_magic(path)
strategy ||= from_extension(path.extname)
end
strategy ||= Uncompressed
strategy.new(path, ref_type: ref_type, ref: ref)
end
attr_reader :path
def initialize(path, ref_type: nil, ref: nil)
@path = Pathname(path).expand_path
@ref_type = ref_type
@ref = ref
end
def extract(to: nil, basename: nil, verbose: false)
basename ||= path.basename
unpack_dir = Pathname(to || Dir.pwd).expand_path
unpack_dir.mkpath
extract_to_dir(unpack_dir, basename: basename, verbose: verbose)
end
def extract_nestedly(to: nil, basename: nil, verbose: false, extension_only: false)
Dir.mktmpdir do |tmp_unpack_dir|
tmp_unpack_dir = Pathname(tmp_unpack_dir)
extract(to: tmp_unpack_dir, basename: basename, verbose: verbose)
children = tmp_unpack_dir.children
if children.count == 1 && !children.first.directory?
s = UnpackStrategy.detect(children.first, extension_only: extension_only)
s.extract_nestedly(to: to, verbose: verbose, extension_only: extension_only)
next
end
Directory.new(tmp_unpack_dir).extract(to: to, verbose: verbose)
end
end
def dependencies
[]
end
end
require "unpack_strategy/air"
require "unpack_strategy/bazaar"
require "unpack_strategy/bzip2"
require "unpack_strategy/cab"
require "unpack_strategy/compress"
require "unpack_strategy/cvs"
require "unpack_strategy/directory"
require "unpack_strategy/dmg"
require "unpack_strategy/executable"
require "unpack_strategy/fossil"
require "unpack_strategy/generic_unar"
require "unpack_strategy/git"
require "unpack_strategy/gzip"
require "unpack_strategy/jar"
require "unpack_strategy/lha"
require "unpack_strategy/lua_rock"
require "unpack_strategy/lzip"
require "unpack_strategy/lzma"
require "unpack_strategy/mercurial"
require "unpack_strategy/microsoft_office_xml"
require "unpack_strategy/otf"
require "unpack_strategy/p7zip"
require "unpack_strategy/pax"
require "unpack_strategy/pkg"
require "unpack_strategy/rar"
require "unpack_strategy/self_extracting_executable"
require "unpack_strategy/sit"
require "unpack_strategy/subversion"
require "unpack_strategy/tar"
require "unpack_strategy/ttf"
require "unpack_strategy/uncompressed"
require "unpack_strategy/xar"
require "unpack_strategy/xz"
require "unpack_strategy/zip"
| 26.364641 | 87 | 0.66052 |
1c161c15933feb307b4f8577eda77005b082c780 | 1,755 | # Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2016-2020 Datadog, Inc.
# This software definition doesn't build anything; it's the place where we create
# files outside the omnibus installation directory, so that we can add them to
# the package manifest using `extra_package_file` in the project definition.
require './lib/ostools.rb'
name "datadog-cf-finalize"
description "steps required to finalize the CF build"
default_version "1.0.0"
skip_transitive_dependency_licensing true
build do
# TODO too many things done here, should be split
block do
# Conf files
if windows?
## this section creates the parallel `bin` directory structure for the Windows
## CF build pack. None of the files created here will end up in the binary
## (MSI) distribution.
cf_bin_root = "#{Omnibus::Config.source_dir()}/cf-root"
cf_bin_root_bin = "#{cf_bin_root}/bin"
cf_source_root = "#{Omnibus::Config.source_dir()}/datadog-agent/src/github.com/DataDog/datadog-agent/bin"
mkdir cf_bin_root_bin
mkdir "#{cf_bin_root_bin}/agent"
copy "#{cf_source_root}/agent/agent.exe", "#{cf_bin_root_bin}"
copy "#{cf_source_root}/agent/libdatadog-agent-three.dll", "#{cf_bin_root_bin}"
copy "#{cf_source_root}/agent/install-cmd.exe", "#{cf_bin_root_bin}/agent"
copy "#{cf_source_root}/agent/process-agent.exe", "#{cf_bin_root_bin}/agent"
copy "#{cf_source_root}/agent/trace-agent.exe", "#{cf_bin_root_bin}/agent"
end
end
end
| 46.184211 | 117 | 0.688319 |
ed95eb9046a66be9130aefdd42d1678ffb361dae | 843 | # Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
module OneviewCookbook
module API3000
module C7000
# Server Profile Template API3000 C7000 provider
class ServerProfileTemplateProvider < OneviewCookbook::API2800::C7000::ServerProfileTemplateProvider
end
end
end
end
| 40.142857 | 106 | 0.775801 |
62b2ec58af4ff83314d1cd754afb65ffd0ee768c | 698 | module ApplicationHelper
def fa(icon_key, additional_class = nil)
content_tag :i, "", class: "fa fa-#{icon_key.to_s} #{additional_class}"
end
def checkmark(text = nil, state = false)
out = state ? fa("check-circle", "success fa-lg") : fa(:minus, "muted fa-lg")
out += content_tag(:span, text, class: "hidden-lg checkmark-label") if text
out
end
def alert_class_for(flash_type)
case flash_type.to_s
when "success"
"alert-success" # Green
when "error"
"alert-danger" # Red
when "alert"
"alert-warning" # Yellow
when "notice"
"alert-info" # Blue
else
flash_type.to_s
end
end
end
| 24.068966 | 81 | 0.600287 |
ab37ad431f498e2e22e15e991f4215e74b112d8a | 2,136 | require 'model_stubbing/extensions'
require 'model_stubbing/definition'
require 'model_stubbing/model'
require 'model_stubbing/stub'
module ModelStubbing
extend self
# Gets a hash of all current definitions.
def self.definitions() @definitions ||= {} end
# stores {stub => record_id} so that identical stubs keep the same ID
def self.record_ids() @record_ids ||= {} end
# stores {record_id => instantiated stubs}. reset after each spec
def self.records() @records ||= {} end
# Creates a new ModelStubbing::Definition. If called from within a class,
# it is automatically setup (See Definition#setup_on).
#
# Creates or updates a definition going by the given name as a key. If
# no name is given, it defaults to the current class or :default. Multiple
# #define_models calls with the same name will modify the definition.
#
# Options:
# * :copy - set to false if you don't want this definition to be a dup of
# the :default definition
# * :insert - set to false if you don't want to insert this definition
# into the database.
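#
# A hedged example of the DSL (model and attribute names are
# illustrative):
#
#   define_models do
#     model Post do
#       stub :title => "first post"
#     end
#   end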
def define_models(name = nil, options = {}, &block)
if name.is_a? Hash
options = name
name = nil
end
name ||= is_a?(Class) ? self : :default
base_name = options[:copy] || :default
base = name == base_name ? nil : ModelStubbing.definitions[base_name]
defn = ModelStubbing.definitions[name] ||= (base && options[:copy] != false) ? base.dup : ModelStubbing::Definition.new
defn.setup_on self, options, &block
end
protected
@@mock_framework = nil
def self.stub_current_time_with(time)
guess_mock_framework!
case @@mock_framework
when :rspec then Time.stub!(:now).and_return(time)
when :mocha then Time.stubs(:now).returns(time)
end
end
def self.guess_mock_framework!
if @@mock_framework.nil?
@@mock_framework =
if Time.respond_to?(:stub!)
:rspec
elsif Time.respond_to?(:stubs)
:mocha
else
raise "Unknown mock framework."
end
end
end
end
Test::Unit::TestCase.extend ModelStubbing
| 33.904762 | 128 | 0.669944 |
e827e424cc5c9cac17b8f219dfa27c6c3319c56a | 160 | class Api::V1::LicensesController < Api::ApiController
def index
@licenses = License.includes(:license_options)
respond_with_json(@licenses)
end
end
| 26.666667 | 54 | 0.75625 |
b9ef8cafa473c19940ad5ae32ce588f01c402ce9 | 5,163 | ENV["RAILS_ENV"] ||= 'test'
require File.expand_path("../../config/environment", __FILE__)
require 'rspec/rails'
#require 'rspec'
#require 'rspec/expectations'
#require 'shoulda/matchers'
#require 'database_cleaner'
# require "bundler/setup"
# Bundler.setup
# require "rails"
require 'reactive-record'
# This file was generated by the `rails generate rspec:install` command. Conventionally, all
# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
# The generated `.rspec` file contains `--require spec_helper` which will cause this
# file to always be loaded, without a need to explicitly require it in any files.
#
# Given that it is always loaded, you are encouraged to keep this file as
# light-weight as possible. Requiring heavyweight dependencies from this file
# will add to the boot time of your test suite on EVERY test run, even for an
# individual file that may not need all of that loaded. Instead, consider making
# a separate helper file that requires the additional dependencies and performs
# the additional setup, and require it from the spec files that actually need it.
#
# The `.rspec` file also contains a few flags that are not defaults but that
# users commonly want.
#
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
RSpec.configure do |config|
# rspec-expectations config goes here. You can use an alternate
# assertion/expectation library such as wrong or the stdlib/minitest
# assertions if you prefer.
config.expect_with :rspec do |expectations|
# Enable only the newer, non-monkey-patching expect syntax.
# For more details, see:
# - http://myronmars.to/n/dev-blog/2012/06/rspecs-new-expectation-syntax
expectations.syntax = [:should, :expect]
end
# rspec-mocks config goes here. You can use an alternate test double
# library (such as bogus or mocha) by changing the `mock_with` option here.
config.mock_with :rspec do |mocks|
# Enable only the newer, non-monkey-patching expect syntax.
# For more details, see:
# - http://teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
mocks.syntax = :expect
# Prevents you from mocking or stubbing a method that does not exist on
# a real object. This is generally recommended.
mocks.verify_partial_doubles = true
end
#config.use_transactional_fixtures = false
# DatabaseCleaner.strategy = :truncation
# config.before(:suite) do
# begin
# DatabaseCleaner.clean
# DatabaseCleaner.start
# FactoryGirl.lint
# ensure
# DatabaseCleaner.clean
# end
# end
# config.after(:suite) do
# DatabaseCleaner.clean
# end
# config.before(:suite) do
# DatabaseCleaner.clean_with(:truncation)
# end
#
# config.before(:each) do
# DatabaseCleaner.strategy = :transaction
# end
#
# config.before(:each, :js => true) do
# DatabaseCleaner.strategy = :truncation
# end
#
# config.before(:each) do
# DatabaseCleaner.start
# end
#
# config.after(:each) do
# DatabaseCleaner.clean
# end
# The settings below are suggested to provide a good initial experience
# with RSpec, but feel free to customize to your heart's content.
# =begin
# # These two settings work together to allow you to limit a spec run
# # to individual examples or groups you care about by tagging them with
# # `:focus` metadata. When nothing is tagged with `:focus`, all examples
# # get run.
# config.filter_run :focus
# config.run_all_when_everything_filtered = true
#
# # Limits the available syntax to the non-monkey patched syntax that is recommended.
# # For more details, see:
# # - http://myronmars.to/n/dev-blog/2012/06/rspecs-new-expectation-syntax
# # - http://teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
# # - http://myronmars.to/n/dev-blog/2014/05/notable-changes-in-rspec-3#new__config_option_to_disable_rspeccore_monkey_patching
# config.disable_monkey_patching!
#
# # Many RSpec users commonly either run the entire suite or an individual
# # file, and it's useful to allow more verbose output when running an
# # individual spec file.
# if config.files_to_run.one?
# # Use the documentation formatter for detailed output,
# # unless a formatter has already been configured
# # (e.g. via a command-line flag).
# config.default_formatter = 'doc'
# end
#
# # Print the 10 slowest examples and example groups at the
# # end of the spec run, to help surface which specs are running
# # particularly slow.
# config.profile_examples = 10
#
# # Run specs in random order to surface order dependencies. If you find an
# # order dependency and want to debug it, you can fix the order by providing
# # the seed, which is printed after each run.
# # --seed 1234
# config.order = :random
#
# # Seed global randomization in this process using the `--seed` CLI option.
# # Setting this allows you to use `--seed` to deterministically reproduce
# # test failures related to randomization by passing the same `--seed` value
# # as the one that triggered the failure.
# Kernel.srand config.seed
# =end
end
| 37.413043 | 131 | 0.71993 |
7ad061be405755eccec764e79619b460cda1c0ad | 3,995 | module TopologicalInventory
module Amazon
class Collector
module ServiceCatalog
def service_offerings(scope)
service_catalog_connection(scope).client.search_products_as_admin.product_view_details
rescue => e
logger.error("Couldn't fetch 'search_products_as_admin' of service catalog with #{scope}, message: #{e.message}")
[]
end
def service_instances(scope)
func = lambda do |&blk|
service_catalog_connection(scope).client.scan_provisioned_products.provisioned_products.each do |service_instance|
blk.call(:service_instance => service_instance, :described_record => describe_record(service_instance.last_record_id, scope))
end
end
Iterator.new(func, "Couldn't fetch 'provisioned_products' of service catalog with scope #{scope}.")
end
def service_plans(scope)
func = lambda do |&blk|
service_offerings(scope).each do |service_offering|
# TODO(lsmola) too many API calls, we need to do it in multiple threads
product_id = service_offering.product_view_summary.product_id
# Taking provisioning_artifacts of described product returns only active artifacts, doing list_provisioning_artifacts
# we are not able to recognize the active ones. Same with describe_product_as_admin, status is missing. Status is
# in the describe_provisioning_artifact, but it is wrong (always ACTIVE)
artifacts = describe_product(product_id, scope)
launch_paths = list_launch_paths(product_id, scope)
launch_paths.each do |launch_path|
artifacts.each do |artifact|
plan = {
:artifact => artifact,
:launch_path => launch_path,
:service_offering => service_offering
}
plan[:provisioning_parameters] = describe_provisioning_parameters(product_id, artifact.id, launch_path.id, scope)
blk.call(plan)
end
end
end
end
Iterator.new(func, "Couldn't fetch 'describe_provisioning_parameters' of service catalog with scope #{scope}.")
end
private
def describe_provisioning_parameters(product_id, artifact_id, launch_path_id, scope)
service_catalog_connection(scope).client.describe_provisioning_parameters(
:product_id => product_id,
:provisioning_artifact_id => artifact_id,
:path_id => launch_path_id
)
rescue => e
ident = {:product_id => product_id, :artifact_id => artifact_id, :launch_path_id => launch_path_id}
logger.warn("Couldn't fetch 'describe_provisioning_parameters' of service catalog for #{ident} with scope #{scope}, message: #{e.message}")
nil
end
def describe_product(product_id, scope)
service_catalog_connection(scope).client.describe_product(:id => product_id).provisioning_artifacts
rescue => e
logger.warn("Couldn't fetch 'describe_product' of service catalog with scope #{scope}, message: #{e.message}")
[]
end
def list_launch_paths(product_id, scope)
service_catalog_connection(scope).client.list_launch_paths(:product_id => product_id).launch_path_summaries
rescue => e
logger.warn("Couldn't fetch 'list_launch_paths' of service catalog with scope #{scope}, message: #{e.message}")
[]
end
def describe_record(record_id, scope)
service_catalog_connection(scope).client.describe_record(:id => record_id)
rescue => e
logger.warn("Couldn't fetch 'describe_record' of service catalog with scope #{scope}, message: #{e.message}")
nil
end
end
end
end
end
| 45.397727 | 149 | 0.635544 |
336ab08ca29cfc9a8273a62a18db5d99dab0c19b | 1,216 | # frozen_string_literal: true
require 'thor'
module Churned
# Handle the application command line parsing
# and the dispatch to various command objects
#
# @api public
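#
# Assuming the gem ships a +churned+ executable, typical invocations
# would look like:
#
#   $ churned version
#   $ churned console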
class CLI < Thor
# Error raised by this runner
Error = Class.new(StandardError)
desc 'version', 'churned version'
def version
require_relative 'version'
puts "v#{Churned::VERSION}"
end
map %w(--version -v) => :version
desc 'console', 'Open a irb session to read the database'
method_option :help, aliases: '-h', type: :boolean,
desc: 'Display usage information'
def console(*)
if options[:help]
invoke :help, ['console']
else
require_relative 'commands/console'
Churned::Commands::Console.new(options).execute
end
end
desc 'install', 'Creates the \'.churned\' working directory'
method_option :help, aliases: '-h', type: :boolean,
desc: 'Display usage information'
def install(*)
if options[:help]
invoke :help, ['install']
else
require_relative 'commands/install'
Churned::Commands::Install.new(options).execute
end
end
end
end
| 26.434783 | 64 | 0.618421 |
f89355418fbcecde32d6339747f3dffea5553ecd | 1,454 | # frozen_string_literal: true
require 'rails_helper'
RSpec.describe 'Healthinfos', type: :request do
let(:user) { create(:user_with_healthinfo) }
let(:user_id) { user.id }
let(:healthinfo) { user.healthinfo }
let(:headers) { valid_headers }
describe 'GET /healthinfos' do
context 'when healthinfo exists' do
before { get '/healthinfos', params: {}, headers: headers }
it 'returns status code 200' do
expect(response).to have_http_status(200)
end
it 'returns heathinfo information' do
expect(json[0]).to include('age', 'gender', 'family', 'personal', 'user_id')
end
end
context 'when healthinfo do no exists' do
let(:user) { create(:user) }
before { get '/healthinfos', params: {}, headers: headers }
it 'returns status code 200' do
expect(response).to have_http_status(200)
end
it 'returns empty healthinfo information' do
# puts json
expect(json).to be_empty
end
end
end
describe 'POST /healthinfos' do
context 'when healthinfo is valid' do
let(:user) { create(:user) }
let(:valid_params) { { age: '23', gender: 'male', family: 'fadf', personal: 'personal', user_id: user.id }.to_json }
before { post '/healthinfos', params: valid_params, headers: headers }
it 'returns status code :created(201)' do
expect(response).to have_http_status(:created)
end
end
end
end
| 32.311111 | 122 | 0.639615 |
ff1168ee904ebcaba06fb7d05da459f53026469a | 285 | # frozen_string_literal: true
class ReasonsHelperTest < ActionView::TestCase
test 'should check for inactive reasons' do
ReasonsHelper.check_for_inactive_reasons
end
test 'should calculate weights for flagging' do
ReasonsHelper.calculate_weights_for_flagging
end
end
| 23.75 | 49 | 0.807018 |
87a5ef770f332914bbd4a9d983cbe145eb898f4d | 10,497 | # frozen_string_literal: true
#--
# Copyright 2006 by Chad Fowler, Rich Kilmer, Jim Weirich and others.
# All rights reserved.
# See LICENSE.txt for permissions.
#++
require 'fileutils'
require 'rubygems'
require 'rubygems/installer_uninstaller_utils'
require 'rubygems/dependency_list'
require 'rubygems/rdoc'
require 'rubygems/user_interaction'
##
# An Uninstaller.
#
# The uninstaller fires pre and post uninstall hooks. Hooks can be added
# either through a rubygems_plugin.rb file in an installed gem or via a
# rubygems/defaults/#{RUBY_ENGINE}.rb or rubygems/defaults/operating_system.rb
# file. See Gem.pre_uninstall and Gem.post_uninstall for details.
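# For example, a plugin might register a hook like this (the body is
# illustrative):
#
#   Gem.pre_uninstall do |uninstaller|
#     puts "about to uninstall #{uninstaller.spec.full_name}"
#   end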
class Gem::Uninstaller
include Gem::UserInteraction
include Gem::InstallerUninstallerUtils
##
# The directory a gem's executables will be installed into
attr_reader :bin_dir
##
# The gem repository the gem will be installed into
attr_reader :gem_home
##
# The Gem::Specification for the gem being uninstalled, only set during
# #uninstall_gem
attr_reader :spec
##
# Constructs an uninstaller that will uninstall +gem+
def initialize(gem, options = {})
# TODO document the valid options
@gem = gem
@version = options[:version] || Gem::Requirement.default
@gem_home = File.realpath(options[:install_dir] || Gem.dir)
@plugins_dir = Gem.plugindir(@gem_home)
@force_executables = options[:executables]
@force_all = options[:all]
@force_ignore = options[:ignore]
@bin_dir = options[:bin_dir]
@format_executable = options[:format_executable]
@abort_on_dependent = options[:abort_on_dependent]
# Indicate if development dependencies should be checked when
# uninstalling. (default: false)
#
@check_dev = options[:check_dev]
if options[:force]
@force_all = true
@force_ignore = true
end
# only add user directory if install_dir is not set
@user_install = false
@user_install = options[:user_install] unless options[:install_dir]
# Optimization: populated during #uninstall
@default_specs_matching_uninstall_params = []
end
##
# Performs the uninstall of the gem. This removes the spec, the Gem
# directory, and the cached .gem file.
def uninstall
dependency = Gem::Dependency.new @gem, @version
list = []
dirs =
Gem::Specification.dirs +
[Gem.default_specifications_dir]
Gem::Specification.each_spec dirs do |spec|
next unless dependency.matches_spec? spec
list << spec
end
if list.empty?
raise Gem::InstallError, "gem #{@gem.inspect} is not installed"
end
default_specs, list = list.partition do |spec|
spec.default_gem?
end
warn_cannot_uninstall_default_gems(default_specs - list)
@default_specs_matching_uninstall_params = default_specs
list, other_repo_specs = list.partition do |spec|
@gem_home == spec.base_dir or
(@user_install and spec.base_dir == Gem.user_dir)
end
list.sort!
if list.empty?
return unless other_repo_specs.any?
other_repos = other_repo_specs.map {|spec| spec.base_dir }.uniq
message = ["#{@gem} is not installed in GEM_HOME, try:"]
message.concat other_repos.map {|repo|
"\tgem uninstall -i #{repo} #{@gem}"
}
raise Gem::InstallError, message.join("\n")
elsif @force_all
remove_all list
elsif list.size > 1
gem_names = list.map {|gem| gem.full_name }
gem_names << "All versions"
say
_, index = choose_from_list "Select gem to uninstall:", gem_names
if index == list.size
remove_all list
elsif index >= 0 && index < list.size
uninstall_gem list[index]
else
say "Error: must enter a number [1-#{list.size + 1}]"
end
else
uninstall_gem list.first
end
end
##
# Uninstalls gem +spec+
def uninstall_gem(spec)
@spec = spec
unless dependencies_ok? spec
if abort_on_dependent? || !ask_if_ok(spec)
raise Gem::DependencyRemovalException,
"Uninstallation aborted due to dependent gem(s)"
end
end
Gem.pre_uninstall_hooks.each do |hook|
hook.call self
end
remove_executables @spec
remove_plugins @spec
remove @spec
regenerate_plugins
Gem.post_uninstall_hooks.each do |hook|
hook.call self
end
@spec = nil
end
##
# Removes installed executables and batch files (windows only) for +spec+.
def remove_executables(spec)
return if spec.executables.empty?
executables = spec.executables.clone
# Leave any executables created by other installed versions
# of this gem installed.
list = Gem::Specification.find_all do |s|
s.name == spec.name && s.version != spec.version
end
list.each do |s|
s.executables.each do |exe_name|
executables.delete exe_name
end
end
return if executables.empty?
executables = executables.map {|exec| formatted_program_filename exec }
remove = if @force_executables.nil?
ask_yes_no("Remove executables:\n" +
"\t#{executables.join ', '}\n\n" +
"in addition to the gem?",
true)
else
@force_executables
end
if remove
bin_dir = @bin_dir || Gem.bindir(spec.base_dir)
raise Gem::FilePermissionError, bin_dir unless File.writable? bin_dir
executables.each do |exe_name|
say "Removing #{exe_name}"
exe_file = File.join bin_dir, exe_name
safe_delete { FileUtils.rm exe_file }
safe_delete { FileUtils.rm "#{exe_file}.bat" }
end
else
say "Executables and scripts will remain installed."
end
end
##
# Removes all gems in +list+.
#
# NOTE: removes uninstalled gems from +list+.
def remove_all(list)
list.each {|spec| uninstall_gem spec }
end
##
# spec:: the spec of the gem to be uninstalled
def remove(spec)
unless path_ok?(@gem_home, spec) or
(@user_install and path_ok?(Gem.user_dir, spec))
e = Gem::GemNotInHomeException.new \
"Gem '#{spec.full_name}' is not installed in directory #{@gem_home}"
e.spec = spec
raise e
end
raise Gem::FilePermissionError, spec.base_dir unless
File.writable?(spec.base_dir)
safe_delete { FileUtils.rm_r spec.full_gem_path }
safe_delete { FileUtils.rm_r spec.extension_dir }
old_platform_name = spec.original_name
gem = spec.cache_file
gem = File.join(spec.cache_dir, "#{old_platform_name}.gem") unless
File.exist? gem
safe_delete { FileUtils.rm_r gem }
begin
Gem::RDoc.new(spec).remove
rescue NameError
end
gemspec = spec.spec_file
unless File.exist? gemspec
gemspec = File.join(File.dirname(gemspec), "#{old_platform_name}.gemspec")
end
safe_delete { FileUtils.rm_r gemspec }
announce_deletion_of(spec)
Gem::Specification.reset
end
##
# Remove any plugin wrappers for +spec+.
def remove_plugins(spec) # :nodoc:
return if spec.plugins.empty?
remove_plugins_for(spec, @plugins_dir)
end
##
# Regenerates plugin wrappers after removal.
def regenerate_plugins
latest = Gem::Specification.latest_spec_for(@spec.name)
return if latest.nil?
regenerate_plugins_for(latest, @plugins_dir)
end
##
# Is +spec+ in +gem_dir+?
def path_ok?(gem_dir, spec)
full_path = File.join gem_dir, 'gems', spec.full_name
original_path = File.join gem_dir, 'gems', spec.original_name
full_path == spec.full_gem_path || original_path == spec.full_gem_path
end
##
# Returns true if it is OK to remove +spec+ or this is a forced
# uninstallation.
def dependencies_ok?(spec) # :nodoc:
return true if @force_ignore
deplist = Gem::DependencyList.from_specs
deplist.ok_to_remove?(spec.full_name, @check_dev)
end
##
# Should the uninstallation abort if a dependency will go unsatisfied?
#
# See ::new.
def abort_on_dependent? # :nodoc:
@abort_on_dependent
end
##
# Asks if it is OK to remove +spec+. Returns true if it is OK.
def ask_if_ok(spec) # :nodoc:
msg = ['']
msg << 'You have requested to uninstall the gem:'
msg << "\t#{spec.full_name}"
msg << ''
siblings = Gem::Specification.select do |s|
s.name == spec.name && s.full_name != spec.full_name
end
spec.dependent_gems(@check_dev).each do |dep_spec, dep, satlist|
unless siblings.any? {|s| s.satisfies_requirement? dep }
msg << "#{dep_spec.name}-#{dep_spec.version} depends on #{dep}"
end
end
msg << 'If you remove this gem, these dependencies will not be met.'
msg << 'Continue with Uninstall?'
return ask_yes_no(msg.join("\n"), false)
end
##
# Returns the formatted version of the executable +filename+
def formatted_program_filename(filename) # :nodoc:
# TODO perhaps the installer should leave a small manifest
# of what it did for us to find rather than trying to recreate
# it again.
if @format_executable
require 'rubygems/installer'
Gem::Installer.exec_format % File.basename(filename)
else
filename
end
end
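# Illustrative sketch (not part of the original source): with
# Gem::Installer.exec_format set to a hypothetical "%s2.7",
# formatted_program_filename("rake") would return "rake2.7"; with the
# default "%s" format the filename passes through unchanged.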
def safe_delete(&block)
block.call
rescue Errno::ENOENT
nil
rescue Errno::EPERM
e = Gem::UninstallError.new
e.spec = @spec
raise e
end
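# Usage sketch (mirroring the calls above): safe_delete swallows
# Errno::ENOENT, so deleting an already-removed file is a no-op, while
# Errno::EPERM is converted into a Gem::UninstallError tagged with the
# spec being uninstalled:
#
#   safe_delete { FileUtils.rm_r spec.full_gem_path }  # quiet if gone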
private
def announce_deletion_of(spec)
name = spec.full_name
say "Successfully uninstalled #{name}"
if default_spec_matches?(spec)
say(
"There was both a regular copy and a default copy of #{name}. The " \
"regular copy was successfully uninstalled, but the default copy " \
"was left around because default gems can't be removed."
)
end
end
# @return true if the specs of any default gems are `==` to the given `spec`.
def default_spec_matches?(spec)
!default_specs_that_match(spec).empty?
end
# @return [Array] specs of default gems that are `==` to the given `spec`.
def default_specs_that_match(spec)
@default_specs_matching_uninstall_params.select {|default_spec| spec == default_spec }
end
def warn_cannot_uninstall_default_gems(specs)
specs.each do |spec|
say "Gem #{spec.full_name} cannot be uninstalled because it is a default gem"
end
end
end
| 25.540146 | 90 | 0.661332 |
33304dc8b90f7ad82b45b531eb5ff9b7b509fab1 | 70,956 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module StorageV1
# A bucket.
class Bucket
include Google::Apis::Core::Hashable
# Access controls on the bucket.
# Corresponds to the JSON property `acl`
# @return [Array<Google::Apis::StorageV1::BucketAccessControl>]
attr_accessor :acl
# The bucket's billing configuration.
# Corresponds to the JSON property `billing`
# @return [Google::Apis::StorageV1::Bucket::Billing]
attr_accessor :billing
# The bucket's Cross-Origin Resource Sharing (CORS) configuration.
# Corresponds to the JSON property `cors`
# @return [Array<Google::Apis::StorageV1::Bucket::CorsConfiguration>]
attr_accessor :cors_configurations
# Defines the default value for Event-Based hold on newly created objects in
# this bucket. Event-Based hold is a way to retain objects indefinitely until an
# event occurs, signified by the hold's release. After being released, such
# objects will be subject to bucket-level retention (if any). One sample use
# case of this flag is for banks to hold loan documents for at least 3 years
# after loan is paid in full. Here bucket-level retention is 3 years and the
# event is loan being paid in full. In this example these objects will be held
# intact for any number of years until the event has occurred (hold is released)
# and then 3 more years after that. Objects under Event-Based hold cannot be
# deleted, overwritten or archived until the hold is removed.
# Corresponds to the JSON property `defaultEventBasedHold`
# @return [Boolean]
attr_accessor :default_event_based_hold
alias_method :default_event_based_hold?, :default_event_based_hold
# Default access controls to apply to new objects when no ACL is provided.
# Corresponds to the JSON property `defaultObjectAcl`
# @return [Array<Google::Apis::StorageV1::ObjectAccessControl>]
attr_accessor :default_object_acl
# Encryption configuration used by default for newly inserted objects, when no
# encryption config is specified.
# Corresponds to the JSON property `encryption`
# @return [Google::Apis::StorageV1::Bucket::Encryption]
attr_accessor :encryption
# HTTP 1.1 Entity tag for the bucket.
# Corresponds to the JSON property `etag`
# @return [String]
attr_accessor :etag
# The ID of the bucket. For buckets, the id and name properties are the same.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# The kind of item this is. For buckets, this is always storage#bucket.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# User-provided labels, in key/value pairs.
# Corresponds to the JSON property `labels`
# @return [Hash<String,String>]
attr_accessor :labels
# The bucket's lifecycle configuration. See lifecycle management for more
# information.
# Corresponds to the JSON property `lifecycle`
# @return [Google::Apis::StorageV1::Bucket::Lifecycle]
attr_accessor :lifecycle
# The location of the bucket. Object data for objects in the bucket resides in
# physical storage within this region. Defaults to US. See the developer's guide
# for the authoritative list.
# Corresponds to the JSON property `location`
# @return [String]
attr_accessor :location
# The bucket's logging configuration, which defines the destination bucket and
# optional name prefix for the current bucket's logs.
# Corresponds to the JSON property `logging`
# @return [Google::Apis::StorageV1::Bucket::Logging]
attr_accessor :logging
# The metadata generation of this bucket.
# Corresponds to the JSON property `metageneration`
# @return [Fixnum]
attr_accessor :metageneration
# The name of the bucket.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# The owner of the bucket. This is always the project team's owner group.
# Corresponds to the JSON property `owner`
# @return [Google::Apis::StorageV1::Bucket::Owner]
attr_accessor :owner
# The project number of the project the bucket belongs to.
# Corresponds to the JSON property `projectNumber`
# @return [Fixnum]
attr_accessor :project_number
# Defines the retention policy for a bucket. The Retention policy enforces a
# minimum retention time for all objects contained in the bucket, based on their
# creation time. Any attempt to overwrite or delete objects younger than the
# retention period will result in a PERMISSION_DENIED error. An unlocked
# retention policy can be modified or removed from the bucket via the
# UpdateBucketMetadata RPC. A locked retention policy cannot be removed or
# shortened in duration for the lifetime of the bucket. Attempting to remove or
# decrease period of a locked retention policy will result in a
# PERMISSION_DENIED error.
# Corresponds to the JSON property `retentionPolicy`
# @return [Google::Apis::StorageV1::Bucket::RetentionPolicy]
attr_accessor :retention_policy
# The URI of this bucket.
# Corresponds to the JSON property `selfLink`
# @return [String]
attr_accessor :self_link
# The bucket's default storage class, used whenever no storageClass is specified
# for a newly-created object. This defines how objects in the bucket are stored
# and determines the SLA and the cost of storage. Values include MULTI_REGIONAL,
# REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If
# this value is not specified when the bucket is created, it will default to
# STANDARD. For more information, see storage classes.
# Corresponds to the JSON property `storageClass`
# @return [String]
attr_accessor :storage_class
# The creation time of the bucket in RFC 3339 format.
# Corresponds to the JSON property `timeCreated`
# @return [DateTime]
attr_accessor :time_created
# The modification time of the bucket in RFC 3339 format.
# Corresponds to the JSON property `updated`
# @return [DateTime]
attr_accessor :updated
# The bucket's versioning configuration.
# Corresponds to the JSON property `versioning`
# @return [Google::Apis::StorageV1::Bucket::Versioning]
attr_accessor :versioning
# The bucket's website configuration, controlling how the service behaves when
# accessing bucket contents as a web site. See the Static Website Examples for
# more information.
# Corresponds to the JSON property `website`
# @return [Google::Apis::StorageV1::Bucket::Website]
attr_accessor :website
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@acl = args[:acl] if args.key?(:acl)
@billing = args[:billing] if args.key?(:billing)
@cors_configurations = args[:cors_configurations] if args.key?(:cors_configurations)
@default_event_based_hold = args[:default_event_based_hold] if args.key?(:default_event_based_hold)
@default_object_acl = args[:default_object_acl] if args.key?(:default_object_acl)
@encryption = args[:encryption] if args.key?(:encryption)
@etag = args[:etag] if args.key?(:etag)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@labels = args[:labels] if args.key?(:labels)
@lifecycle = args[:lifecycle] if args.key?(:lifecycle)
@location = args[:location] if args.key?(:location)
@logging = args[:logging] if args.key?(:logging)
@metageneration = args[:metageneration] if args.key?(:metageneration)
@name = args[:name] if args.key?(:name)
@owner = args[:owner] if args.key?(:owner)
@project_number = args[:project_number] if args.key?(:project_number)
@retention_policy = args[:retention_policy] if args.key?(:retention_policy)
@self_link = args[:self_link] if args.key?(:self_link)
@storage_class = args[:storage_class] if args.key?(:storage_class)
@time_created = args[:time_created] if args.key?(:time_created)
@updated = args[:updated] if args.key?(:updated)
@versioning = args[:versioning] if args.key?(:versioning)
@website = args[:website] if args.key?(:website)
end
# The bucket's billing configuration.
class Billing
include Google::Apis::Core::Hashable
# When set to true, Requester Pays is enabled for this bucket.
# Corresponds to the JSON property `requesterPays`
# @return [Boolean]
attr_accessor :requester_pays
alias_method :requester_pays?, :requester_pays
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@requester_pays = args[:requester_pays] if args.key?(:requester_pays)
end
end
#
class CorsConfiguration
include Google::Apis::Core::Hashable
# The value, in seconds, to return in the Access-Control-Max-Age header used in
# preflight responses.
# Corresponds to the JSON property `maxAgeSeconds`
# @return [Fixnum]
attr_accessor :max_age_seconds
# The list of HTTP methods on which to include CORS response headers (GET,
# OPTIONS, POST, etc.). Note: "*" is permitted in the list of methods, and
# means "any method".
# Corresponds to the JSON property `method`
# @return [Array<String>]
attr_accessor :http_method
# The list of Origins eligible to receive CORS response headers. Note: "*" is
# permitted in the list of origins, and means "any Origin".
# Corresponds to the JSON property `origin`
# @return [Array<String>]
attr_accessor :origin
# The list of HTTP headers other than the simple response headers to give
# permission for the user-agent to share across domains.
# Corresponds to the JSON property `responseHeader`
# @return [Array<String>]
attr_accessor :response_header
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@max_age_seconds = args[:max_age_seconds] if args.key?(:max_age_seconds)
@http_method = args[:http_method] if args.key?(:http_method)
@origin = args[:origin] if args.key?(:origin)
@response_header = args[:response_header] if args.key?(:response_header)
end
end
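# A minimal construction sketch (not from the generated source); the
# keyword arguments map onto the accessors defined above, and the origin
# value is hypothetical:
#
#   cors = Google::Apis::StorageV1::Bucket::CorsConfiguration.new(
#     origin: ['https://example.com'],
#     http_method: ['GET', 'HEAD'],
#     response_header: ['Content-Type'],
#     max_age_seconds: 3600
#   )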
# Encryption configuration used by default for newly inserted objects, when no
# encryption config is specified.
class Encryption
include Google::Apis::Core::Hashable
# A Cloud KMS key that will be used to encrypt objects inserted into this bucket,
# if no encryption method is specified. Limited availability; usable only by
# enabled projects.
# Corresponds to the JSON property `defaultKmsKeyName`
# @return [String]
attr_accessor :default_kms_key_name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@default_kms_key_name = args[:default_kms_key_name] if args.key?(:default_kms_key_name)
end
end
# The bucket's lifecycle configuration. See lifecycle management for more
# information.
class Lifecycle
include Google::Apis::Core::Hashable
# A lifecycle management rule, which is made of an action to take and the
# condition(s) under which the action will be taken.
# Corresponds to the JSON property `rule`
# @return [Array<Google::Apis::StorageV1::Bucket::Lifecycle::Rule>]
attr_accessor :rule
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@rule = args[:rule] if args.key?(:rule)
end
#
class Rule
include Google::Apis::Core::Hashable
# The action to take.
# Corresponds to the JSON property `action`
# @return [Google::Apis::StorageV1::Bucket::Lifecycle::Rule::Action]
attr_accessor :action
# The condition(s) under which the action will be taken.
# Corresponds to the JSON property `condition`
# @return [Google::Apis::StorageV1::Bucket::Lifecycle::Rule::Condition]
attr_accessor :condition
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@action = args[:action] if args.key?(:action)
@condition = args[:condition] if args.key?(:condition)
end
# The action to take.
class Action
include Google::Apis::Core::Hashable
# Target storage class. Required iff the type of the action is SetStorageClass.
# Corresponds to the JSON property `storageClass`
# @return [String]
attr_accessor :storage_class
# Type of the action. Currently, only Delete and SetStorageClass are supported.
# Corresponds to the JSON property `type`
# @return [String]
attr_accessor :type
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@storage_class = args[:storage_class] if args.key?(:storage_class)
@type = args[:type] if args.key?(:type)
end
end
# The condition(s) under which the action will be taken.
class Condition
include Google::Apis::Core::Hashable
# Age of an object (in days). This condition is satisfied when an object reaches
# the specified age.
# Corresponds to the JSON property `age`
# @return [Fixnum]
attr_accessor :age
# A date in RFC 3339 format with only the date part (for instance, "2013-01-15").
# This condition is satisfied when an object is created before midnight of the
# specified date in UTC.
# Corresponds to the JSON property `createdBefore`
# @return [Date]
attr_accessor :created_before
# Relevant only for versioned objects. If the value is true, this condition
# matches live objects; if the value is false, it matches archived objects.
# Corresponds to the JSON property `isLive`
# @return [Boolean]
attr_accessor :is_live
alias_method :is_live?, :is_live
# Objects having any of the storage classes specified by this condition will be
# matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD,
# and DURABLE_REDUCED_AVAILABILITY.
# Corresponds to the JSON property `matchesStorageClass`
# @return [Array<String>]
attr_accessor :matches_storage_class
# Relevant only for versioned objects. If the value is N, this condition is
# satisfied when there are at least N versions (including the live version)
# newer than this version of the object.
# Corresponds to the JSON property `numNewerVersions`
# @return [Fixnum]
attr_accessor :num_newer_versions
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@age = args[:age] if args.key?(:age)
@created_before = args[:created_before] if args.key?(:created_before)
@is_live = args[:is_live] if args.key?(:is_live)
@matches_storage_class = args[:matches_storage_class] if args.key?(:matches_storage_class)
@num_newer_versions = args[:num_newer_versions] if args.key?(:num_newer_versions)
end
end
end
end
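# Construction sketch (an assumption, using only the classes defined
# above): a rule that deletes objects once they are 30 days old.
#
#   rule = Google::Apis::StorageV1::Bucket::Lifecycle::Rule.new(
#     action: Google::Apis::StorageV1::Bucket::Lifecycle::Rule::Action.new(type: 'Delete'),
#     condition: Google::Apis::StorageV1::Bucket::Lifecycle::Rule::Condition.new(age: 30)
#   )
#   lifecycle = Google::Apis::StorageV1::Bucket::Lifecycle.new(rule: [rule])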
# The bucket's logging configuration, which defines the destination bucket and
# optional name prefix for the current bucket's logs.
class Logging
include Google::Apis::Core::Hashable
# The destination bucket where the current bucket's logs should be placed.
# Corresponds to the JSON property `logBucket`
# @return [String]
attr_accessor :log_bucket
# A prefix for log object names.
# Corresponds to the JSON property `logObjectPrefix`
# @return [String]
attr_accessor :log_object_prefix
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@log_bucket = args[:log_bucket] if args.key?(:log_bucket)
@log_object_prefix = args[:log_object_prefix] if args.key?(:log_object_prefix)
end
end
# The owner of the bucket. This is always the project team's owner group.
class Owner
include Google::Apis::Core::Hashable
# The entity, in the form project-owner-projectId.
# Corresponds to the JSON property `entity`
# @return [String]
attr_accessor :entity
# The ID for the entity.
# Corresponds to the JSON property `entityId`
# @return [String]
attr_accessor :entity_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@entity = args[:entity] if args.key?(:entity)
@entity_id = args[:entity_id] if args.key?(:entity_id)
end
end
# Defines the retention policy for a bucket. The Retention policy enforces a
# minimum retention time for all objects contained in the bucket, based on their
# creation time. Any attempt to overwrite or delete objects younger than the
# retention period will result in a PERMISSION_DENIED error. An unlocked
# retention policy can be modified or removed from the bucket via the
# UpdateBucketMetadata RPC. A locked retention policy cannot be removed or
# shortened in duration for the lifetime of the bucket. Attempting to remove or
# decrease period of a locked retention policy will result in a
# PERMISSION_DENIED error.
class RetentionPolicy
include Google::Apis::Core::Hashable
# The time from which policy was enforced and effective. RFC 3339 format.
# Corresponds to the JSON property `effectiveTime`
# @return [DateTime]
attr_accessor :effective_time
# Once locked, an object retention policy cannot be modified.
# Corresponds to the JSON property `isLocked`
# @return [Boolean]
attr_accessor :is_locked
alias_method :is_locked?, :is_locked
# Specifies the duration that objects need to be retained. Retention duration
# must be greater than zero and less than 100 years. Note that enforcement of
# retention periods less than a day is not guaranteed. Such periods should only
# be used for testing purposes.
# Corresponds to the JSON property `retentionPeriod`
# @return [Fixnum]
attr_accessor :retention_period
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@effective_time = args[:effective_time] if args.key?(:effective_time)
@is_locked = args[:is_locked] if args.key?(:is_locked)
@retention_period = args[:retention_period] if args.key?(:retention_period)
end
end
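# Sketch (assuming retention_period is expressed in seconds, as in the
# JSON API): a three-year policy would be constructed roughly as:
#
#   policy = Google::Apis::StorageV1::Bucket::RetentionPolicy.new(
#     retention_period: 3 * 365 * 24 * 60 * 60
#   )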
# The bucket's versioning configuration.
class Versioning
include Google::Apis::Core::Hashable
# While set to true, versioning is fully enabled for this bucket.
# Corresponds to the JSON property `enabled`
# @return [Boolean]
attr_accessor :enabled
alias_method :enabled?, :enabled
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@enabled = args[:enabled] if args.key?(:enabled)
end
end
# The bucket's website configuration, controlling how the service behaves when
# accessing bucket contents as a web site. See the Static Website Examples for
# more information.
class Website
include Google::Apis::Core::Hashable
# If the requested object path is missing, the service will ensure the path has
# a trailing '/', append this suffix, and attempt to retrieve the resulting
# object. This allows the creation of index.html objects to represent directory
# pages.
# Corresponds to the JSON property `mainPageSuffix`
# @return [String]
attr_accessor :main_page_suffix
# If the requested object path is missing, and any mainPageSuffix object is
# missing, if applicable, the service will return the named object from this
# bucket as the content for a 404 Not Found result.
# Corresponds to the JSON property `notFoundPage`
# @return [String]
attr_accessor :not_found_page
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@main_page_suffix = args[:main_page_suffix] if args.key?(:main_page_suffix)
@not_found_page = args[:not_found_page] if args.key?(:not_found_page)
end
end
end
# An access-control entry.
class BucketAccessControl
include Google::Apis::Core::Hashable
# The name of the bucket.
# Corresponds to the JSON property `bucket`
# @return [String]
attr_accessor :bucket
# The domain associated with the entity, if any.
# Corresponds to the JSON property `domain`
# @return [String]
attr_accessor :domain
# The email address associated with the entity, if any.
# Corresponds to the JSON property `email`
# @return [String]
attr_accessor :email
# The entity holding the permission, in one of the following forms:
# - user-userId
# - user-email
# - group-groupId
# - group-email
# - domain-domain
# - project-team-projectId
# - allUsers
# - allAuthenticatedUsers Examples:
# - The user [email protected] would be [email protected].
# - The group [email protected] would be [email protected].
# - To refer to all members of the Google Apps for Business domain example.com,
# the entity would be domain-example.com.
# Corresponds to the JSON property `entity`
# @return [String]
attr_accessor :entity
# The ID for the entity, if any.
# Corresponds to the JSON property `entityId`
# @return [String]
attr_accessor :entity_id
# HTTP 1.1 Entity tag for the access-control entry.
# Corresponds to the JSON property `etag`
# @return [String]
attr_accessor :etag
# The ID of the access-control entry.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# The kind of item this is. For bucket access control entries, this is always
# storage#bucketAccessControl.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The project team associated with the entity, if any.
# Corresponds to the JSON property `projectTeam`
# @return [Google::Apis::StorageV1::BucketAccessControl::ProjectTeam]
attr_accessor :project_team
# The access permission for the entity.
# Corresponds to the JSON property `role`
# @return [String]
attr_accessor :role
# The link to this access-control entry.
# Corresponds to the JSON property `selfLink`
# @return [String]
attr_accessor :self_link
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@bucket = args[:bucket] if args.key?(:bucket)
@domain = args[:domain] if args.key?(:domain)
@email = args[:email] if args.key?(:email)
@entity = args[:entity] if args.key?(:entity)
@entity_id = args[:entity_id] if args.key?(:entity_id)
@etag = args[:etag] if args.key?(:etag)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@project_team = args[:project_team] if args.key?(:project_team)
@role = args[:role] if args.key?(:role)
@self_link = args[:self_link] if args.key?(:self_link)
end
# The project team associated with the entity, if any.
class ProjectTeam
include Google::Apis::Core::Hashable
# The project number.
# Corresponds to the JSON property `projectNumber`
# @return [String]
attr_accessor :project_number
# The team.
# Corresponds to the JSON property `team`
# @return [String]
attr_accessor :team
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@project_number = args[:project_number] if args.key?(:project_number)
@team = args[:team] if args.key?(:team)
end
end
end
# An access-control list.
class BucketAccessControls
include Google::Apis::Core::Hashable
# The list of items.
# Corresponds to the JSON property `items`
# @return [Array<Google::Apis::StorageV1::BucketAccessControl>]
attr_accessor :items
# The kind of item this is. For lists of bucket access control entries, this is
# always storage#bucketAccessControls.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@items = args[:items] if args.key?(:items)
@kind = args[:kind] if args.key?(:kind)
end
end
# A list of buckets.
class Buckets
include Google::Apis::Core::Hashable
# The list of items.
# Corresponds to the JSON property `items`
# @return [Array<Google::Apis::StorageV1::Bucket>]
attr_accessor :items
# The kind of item this is. For lists of buckets, this is always storage#buckets.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The continuation token, used to page through large result sets. Provide this
# value in a subsequent request to return the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@items = args[:items] if args.key?(:items)
@kind = args[:kind] if args.key?(:kind)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# A notification channel used to watch for resource changes.
class Channel
include Google::Apis::Core::Hashable
# The address where notifications are delivered for this channel.
# Corresponds to the JSON property `address`
# @return [String]
attr_accessor :address
# Date and time of notification channel expiration, expressed as a Unix
# timestamp, in milliseconds. Optional.
# Corresponds to the JSON property `expiration`
# @return [Fixnum]
attr_accessor :expiration
# A UUID or similar unique string that identifies this channel.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Identifies this as a notification channel used to watch for changes to a
# resource. Value: the fixed string "api#channel".
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# Additional parameters controlling delivery channel behavior. Optional.
# Corresponds to the JSON property `params`
# @return [Hash<String,String>]
attr_accessor :params
# A Boolean value to indicate whether payload is wanted. Optional.
# Corresponds to the JSON property `payload`
# @return [Boolean]
attr_accessor :payload
alias_method :payload?, :payload
# An opaque ID that identifies the resource being watched on this channel.
# Stable across different API versions.
# Corresponds to the JSON property `resourceId`
# @return [String]
attr_accessor :resource_id
# A version-specific identifier for the watched resource.
# Corresponds to the JSON property `resourceUri`
# @return [String]
attr_accessor :resource_uri
# An arbitrary string delivered to the target address with each notification
# delivered over this channel. Optional.
# Corresponds to the JSON property `token`
# @return [String]
attr_accessor :token
# The type of delivery mechanism used for this channel.
# Corresponds to the JSON property `type`
# @return [String]
attr_accessor :type
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@address = args[:address] if args.key?(:address)
@expiration = args[:expiration] if args.key?(:expiration)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@params = args[:params] if args.key?(:params)
@payload = args[:payload] if args.key?(:payload)
@resource_id = args[:resource_id] if args.key?(:resource_id)
@resource_uri = args[:resource_uri] if args.key?(:resource_uri)
@token = args[:token] if args.key?(:token)
@type = args[:type] if args.key?(:type)
end
end
# A Compose request.
class ComposeRequest
include Google::Apis::Core::Hashable
# An object.
# Corresponds to the JSON property `destination`
# @return [Google::Apis::StorageV1::Object]
attr_accessor :destination
# The kind of item this is.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The list of source objects that will be concatenated into a single object.
# Corresponds to the JSON property `sourceObjects`
# @return [Array<Google::Apis::StorageV1::ComposeRequest::SourceObject>]
attr_accessor :source_objects
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@destination = args[:destination] if args.key?(:destination)
@kind = args[:kind] if args.key?(:kind)
@source_objects = args[:source_objects] if args.key?(:source_objects)
end
#
class SourceObject
include Google::Apis::Core::Hashable
# The generation of this object to use as the source.
# Corresponds to the JSON property `generation`
# @return [Fixnum]
attr_accessor :generation
# The source object's name. The source object's bucket is implicitly the
# destination bucket.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Conditions that must be met for this operation to execute.
# Corresponds to the JSON property `objectPreconditions`
# @return [Google::Apis::StorageV1::ComposeRequest::SourceObject::ObjectPreconditions]
attr_accessor :object_preconditions
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@generation = args[:generation] if args.key?(:generation)
@name = args[:name] if args.key?(:name)
@object_preconditions = args[:object_preconditions] if args.key?(:object_preconditions)
end
# Conditions that must be met for this operation to execute.
class ObjectPreconditions
include Google::Apis::Core::Hashable
# Only perform the composition if the generation of the source object that would
# be used matches this value. If this value and a generation are both specified,
# they must be the same value or the call will fail.
# Corresponds to the JSON property `ifGenerationMatch`
# @return [Fixnum]
attr_accessor :if_generation_match
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@if_generation_match = args[:if_generation_match] if args.key?(:if_generation_match)
end
end
end
end
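# Assembly sketch (not from the generated source): composing two
# hypothetical source objects into a destination object; the source
# objects' bucket is implicitly the destination bucket.
#
#   request = Google::Apis::StorageV1::ComposeRequest.new(
#     destination: Google::Apis::StorageV1::Object.new(content_type: 'text/plain'),
#     source_objects: [
#       Google::Apis::StorageV1::ComposeRequest::SourceObject.new(name: 'part-1'),
#       Google::Apis::StorageV1::ComposeRequest::SourceObject.new(name: 'part-2')
#     ]
#   )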
# A subscription to receive Google PubSub notifications.
class Notification
include Google::Apis::Core::Hashable
# An optional list of additional attributes to attach to each Cloud PubSub
# message published for this notification subscription.
# Corresponds to the JSON property `custom_attributes`
# @return [Hash<String,String>]
attr_accessor :custom_attributes
# HTTP 1.1 Entity tag for this subscription notification.
# Corresponds to the JSON property `etag`
# @return [String]
attr_accessor :etag
# If present, only send notifications about listed event types. If empty, send
# notifications for all event types.
# Corresponds to the JSON property `event_types`
# @return [Array<String>]
attr_accessor :event_types
# The ID of the notification.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# The kind of item this is. For notifications, this is always storage#
# notification.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# If present, only apply this notification configuration to object names that
# begin with this prefix.
# Corresponds to the JSON property `object_name_prefix`
# @return [String]
attr_accessor :object_name_prefix
# The desired content of the Payload.
# Corresponds to the JSON property `payload_format`
# @return [String]
attr_accessor :payload_format
# The canonical URL of this notification.
# Corresponds to the JSON property `selfLink`
# @return [String]
attr_accessor :self_link
# The Cloud PubSub topic to which this subscription publishes. Formatted as: '//
# pubsub.googleapis.com/projects/`project-identifier`/topics/`my-topic`'
# Corresponds to the JSON property `topic`
# @return [String]
attr_accessor :topic
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@custom_attributes = args[:custom_attributes] if args.key?(:custom_attributes)
@etag = args[:etag] if args.key?(:etag)
@event_types = args[:event_types] if args.key?(:event_types)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@object_name_prefix = args[:object_name_prefix] if args.key?(:object_name_prefix)
@payload_format = args[:payload_format] if args.key?(:payload_format)
@self_link = args[:self_link] if args.key?(:self_link)
@topic = args[:topic] if args.key?(:topic)
end
end
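# Construction sketch (an assumption, following the topic format documented
# above; project and topic names are hypothetical):
#
#   notification = Google::Apis::StorageV1::Notification.new(
#     topic: '//pubsub.googleapis.com/projects/my-project/topics/my-topic',
#     payload_format: 'JSON_API_V1',
#     event_types: ['OBJECT_FINALIZE']
#   )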
# A list of notification subscriptions.
class Notifications
include Google::Apis::Core::Hashable
# The list of items.
# Corresponds to the JSON property `items`
# @return [Array<Google::Apis::StorageV1::Notification>]
attr_accessor :items
# The kind of item this is. For lists of notifications, this is always storage#
# notifications.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@items = args[:items] if args.key?(:items)
@kind = args[:kind] if args.key?(:kind)
end
end
# An object.
class Object
include Google::Apis::Core::Hashable
# Access controls on the object.
# Corresponds to the JSON property `acl`
# @return [Array<Google::Apis::StorageV1::ObjectAccessControl>]
attr_accessor :acl
# The name of the bucket containing this object.
# Corresponds to the JSON property `bucket`
# @return [String]
attr_accessor :bucket
# Cache-Control directive for the object data. If omitted, and the object is
# accessible to all anonymous users, the default will be public, max-age=3600.
# Corresponds to the JSON property `cacheControl`
# @return [String]
attr_accessor :cache_control
# Number of underlying components that make up this object. Components are
# accumulated by compose operations.
# Corresponds to the JSON property `componentCount`
# @return [Fixnum]
attr_accessor :component_count
# Content-Disposition of the object data.
# Corresponds to the JSON property `contentDisposition`
# @return [String]
attr_accessor :content_disposition
# Content-Encoding of the object data.
# Corresponds to the JSON property `contentEncoding`
# @return [String]
attr_accessor :content_encoding
# Content-Language of the object data.
# Corresponds to the JSON property `contentLanguage`
# @return [String]
attr_accessor :content_language
# Content-Type of the object data. If an object is stored without a Content-Type,
# it is served as application/octet-stream.
# Corresponds to the JSON property `contentType`
# @return [String]
attr_accessor :content_type
# CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in
# big-endian byte order. For more information about using the CRC32c checksum,
# see Hashes and ETags: Best Practices.
# Corresponds to the JSON property `crc32c`
# @return [String]
attr_accessor :crc32c
# Metadata of customer-supplied encryption key, if the object is encrypted by
# such a key.
# Corresponds to the JSON property `customerEncryption`
# @return [Google::Apis::StorageV1::Object::CustomerEncryption]
attr_accessor :customer_encryption
# HTTP 1.1 Entity tag for the object.
# Corresponds to the JSON property `etag`
# @return [String]
attr_accessor :etag
# Defines the Event-Based hold for an object. Event-Based hold is a way to
# retain objects indefinitely until an event occurs, signified by the hold's
# release. After being released, such objects will be subject to bucket-level
# retention (if any). One sample use case of this flag is for banks to hold loan
# documents for at least 3 years after loan is paid in full. Here bucket-level
# retention is 3 years and the event is loan being paid in full. In this example
# these objects will be held intact for any number of years until the event has
# occurred (hold is released) and then 3 more years after that.
# Corresponds to the JSON property `eventBasedHold`
# @return [Boolean]
attr_accessor :event_based_hold
alias_method :event_based_hold?, :event_based_hold
# The content generation of this object. Used for object versioning.
# Corresponds to the JSON property `generation`
# @return [Fixnum]
attr_accessor :generation
# The ID of the object, including the bucket name, object name, and generation
# number.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# The kind of item this is. For objects, this is always storage#object.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# Cloud KMS Key used to encrypt this object, if the object is encrypted by such
# a key. Limited availability; usable only by enabled projects.
# Corresponds to the JSON property `kmsKeyName`
# @return [String]
attr_accessor :kms_key_name
# MD5 hash of the data; encoded using base64. For more information about using
# the MD5 hash, see Hashes and ETags: Best Practices.
# Corresponds to the JSON property `md5Hash`
# @return [String]
attr_accessor :md5_hash
# Media download link.
# Corresponds to the JSON property `mediaLink`
# @return [String]
attr_accessor :media_link
# User-provided metadata, in key/value pairs.
# Corresponds to the JSON property `metadata`
# @return [Hash<String,String>]
attr_accessor :metadata
# The version of the metadata for this object at this generation. Used for
# preconditions and for detecting changes in metadata. A metageneration number
# is only meaningful in the context of a particular generation of a particular
# object.
# Corresponds to the JSON property `metageneration`
# @return [Fixnum]
attr_accessor :metageneration
# The name of the object. Required if not specified by URL parameter.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# The owner of the object. This will always be the uploader of the object.
# Corresponds to the JSON property `owner`
# @return [Google::Apis::StorageV1::Object::Owner]
attr_accessor :owner
# Specifies the earliest time that the object's retention period expires. This
# value is server-determined and is in RFC 3339 format. Note 1: This field is
# not provided for objects with an active Event-Based hold, since retention
# expiration is unknown until the hold is removed. Note 2: This value can be
# provided even when TemporaryHold is set (so that the user can reason about
# policy without having to first unset the TemporaryHold).
# Corresponds to the JSON property `retentionExpirationTime`
# @return [DateTime]
attr_accessor :retention_expiration_time
# The link to this object.
# Corresponds to the JSON property `selfLink`
# @return [String]
attr_accessor :self_link
# Content-Length of the data in bytes.
# Corresponds to the JSON property `size`
# @return [Fixnum]
attr_accessor :size
# Storage class of the object.
# Corresponds to the JSON property `storageClass`
# @return [String]
attr_accessor :storage_class
# Defines the temporary hold for an object. This flag is used to enforce a
# temporary hold on an object. While it is set to true, the object is protected
# against deletion and overwrites. A common use case of this flag is regulatory
# investigations where objects need to be retained while the investigation is
# ongoing.
# Corresponds to the JSON property `temporaryHold`
# @return [Boolean]
attr_accessor :temporary_hold
alias_method :temporary_hold?, :temporary_hold
# The creation time of the object in RFC 3339 format.
# Corresponds to the JSON property `timeCreated`
# @return [DateTime]
attr_accessor :time_created
# The deletion time of the object in RFC 3339 format. Will be returned if and
# only if this version of the object has been deleted.
# Corresponds to the JSON property `timeDeleted`
# @return [DateTime]
attr_accessor :time_deleted
# The time at which the object's storage class was last changed. When the object
# is initially created, it will be set to timeCreated.
# Corresponds to the JSON property `timeStorageClassUpdated`
# @return [DateTime]
attr_accessor :time_storage_class_updated
# The modification time of the object metadata in RFC 3339 format.
# Corresponds to the JSON property `updated`
# @return [DateTime]
attr_accessor :updated
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@acl = args[:acl] if args.key?(:acl)
@bucket = args[:bucket] if args.key?(:bucket)
@cache_control = args[:cache_control] if args.key?(:cache_control)
@component_count = args[:component_count] if args.key?(:component_count)
@content_disposition = args[:content_disposition] if args.key?(:content_disposition)
@content_encoding = args[:content_encoding] if args.key?(:content_encoding)
@content_language = args[:content_language] if args.key?(:content_language)
@content_type = args[:content_type] if args.key?(:content_type)
@crc32c = args[:crc32c] if args.key?(:crc32c)
@customer_encryption = args[:customer_encryption] if args.key?(:customer_encryption)
@etag = args[:etag] if args.key?(:etag)
@event_based_hold = args[:event_based_hold] if args.key?(:event_based_hold)
@generation = args[:generation] if args.key?(:generation)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@kms_key_name = args[:kms_key_name] if args.key?(:kms_key_name)
@md5_hash = args[:md5_hash] if args.key?(:md5_hash)
@media_link = args[:media_link] if args.key?(:media_link)
@metadata = args[:metadata] if args.key?(:metadata)
@metageneration = args[:metageneration] if args.key?(:metageneration)
@name = args[:name] if args.key?(:name)
@owner = args[:owner] if args.key?(:owner)
@retention_expiration_time = args[:retention_expiration_time] if args.key?(:retention_expiration_time)
@self_link = args[:self_link] if args.key?(:self_link)
@size = args[:size] if args.key?(:size)
@storage_class = args[:storage_class] if args.key?(:storage_class)
@temporary_hold = args[:temporary_hold] if args.key?(:temporary_hold)
@time_created = args[:time_created] if args.key?(:time_created)
@time_deleted = args[:time_deleted] if args.key?(:time_deleted)
@time_storage_class_updated = args[:time_storage_class_updated] if args.key?(:time_storage_class_updated)
@updated = args[:updated] if args.key?(:updated)
end
# Metadata of customer-supplied encryption key, if the object is encrypted by
# such a key.
class CustomerEncryption
include Google::Apis::Core::Hashable
# The encryption algorithm.
# Corresponds to the JSON property `encryptionAlgorithm`
# @return [String]
attr_accessor :encryption_algorithm
# SHA256 hash value of the encryption key.
# Corresponds to the JSON property `keySha256`
# @return [String]
attr_accessor :key_sha256
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@encryption_algorithm = args[:encryption_algorithm] if args.key?(:encryption_algorithm)
@key_sha256 = args[:key_sha256] if args.key?(:key_sha256)
end
end
# The owner of the object. This will always be the uploader of the object.
class Owner
include Google::Apis::Core::Hashable
# The entity, in the form user-userId.
# Corresponds to the JSON property `entity`
# @return [String]
attr_accessor :entity
# The ID for the entity.
# Corresponds to the JSON property `entityId`
# @return [String]
attr_accessor :entity_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@entity = args[:entity] if args.key?(:entity)
@entity_id = args[:entity_id] if args.key?(:entity_id)
end
end
end
# An access-control entry.
class ObjectAccessControl
include Google::Apis::Core::Hashable
# The name of the bucket.
# Corresponds to the JSON property `bucket`
# @return [String]
attr_accessor :bucket
# The domain associated with the entity, if any.
# Corresponds to the JSON property `domain`
# @return [String]
attr_accessor :domain
# The email address associated with the entity, if any.
# Corresponds to the JSON property `email`
# @return [String]
attr_accessor :email
# The entity holding the permission, in one of the following forms:
# - user-userId
# - user-email
# - group-groupId
# - group-email
# - domain-domain
# - project-team-projectId
# - allUsers
# - allAuthenticatedUsers Examples:
# - The user [email protected] would be [email protected].
# - The group [email protected] would be [email protected].
# - To refer to all members of the Google Apps for Business domain example.com,
# the entity would be domain-example.com.
# Corresponds to the JSON property `entity`
# @return [String]
attr_accessor :entity
# The ID for the entity, if any.
# Corresponds to the JSON property `entityId`
# @return [String]
attr_accessor :entity_id
# HTTP 1.1 Entity tag for the access-control entry.
# Corresponds to the JSON property `etag`
# @return [String]
attr_accessor :etag
# The content generation of the object, if applied to an object.
# Corresponds to the JSON property `generation`
# @return [Fixnum]
attr_accessor :generation
# The ID of the access-control entry.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# The kind of item this is. For object access control entries, this is always
# storage#objectAccessControl.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The name of the object, if applied to an object.
# Corresponds to the JSON property `object`
# @return [String]
attr_accessor :object
# The project team associated with the entity, if any.
# Corresponds to the JSON property `projectTeam`
# @return [Google::Apis::StorageV1::ObjectAccessControl::ProjectTeam]
attr_accessor :project_team
# The access permission for the entity.
# Corresponds to the JSON property `role`
# @return [String]
attr_accessor :role
# The link to this access-control entry.
# Corresponds to the JSON property `selfLink`
# @return [String]
attr_accessor :self_link
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@bucket = args[:bucket] if args.key?(:bucket)
@domain = args[:domain] if args.key?(:domain)
@email = args[:email] if args.key?(:email)
@entity = args[:entity] if args.key?(:entity)
@entity_id = args[:entity_id] if args.key?(:entity_id)
@etag = args[:etag] if args.key?(:etag)
@generation = args[:generation] if args.key?(:generation)
@id = args[:id] if args.key?(:id)
@kind = args[:kind] if args.key?(:kind)
@object = args[:object] if args.key?(:object)
@project_team = args[:project_team] if args.key?(:project_team)
@role = args[:role] if args.key?(:role)
@self_link = args[:self_link] if args.key?(:self_link)
end
# The project team associated with the entity, if any.
class ProjectTeam
include Google::Apis::Core::Hashable
# The project number.
# Corresponds to the JSON property `projectNumber`
# @return [String]
attr_accessor :project_number
# The team.
# Corresponds to the JSON property `team`
# @return [String]
attr_accessor :team
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@project_number = args[:project_number] if args.key?(:project_number)
@team = args[:team] if args.key?(:team)
end
end
end
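# Entity string sketch (hypothetical values, using the entity forms
# documented above):
#
#   acl = Google::Apis::StorageV1::ObjectAccessControl.new(
#     entity: '[email protected]',
#     role: 'READER'
#   )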
# An access-control list.
class ObjectAccessControls
include Google::Apis::Core::Hashable
# The list of items.
# Corresponds to the JSON property `items`
# @return [Array<Google::Apis::StorageV1::ObjectAccessControl>]
attr_accessor :items
# The kind of item this is. For lists of object access control entries, this is
# always storage#objectAccessControls.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@items = args[:items] if args.key?(:items)
@kind = args[:kind] if args.key?(:kind)
end
end
# A list of objects.
class Objects
include Google::Apis::Core::Hashable
# The list of items.
# Corresponds to the JSON property `items`
# @return [Array<Google::Apis::StorageV1::Object>]
attr_accessor :items
# The kind of item this is. For lists of objects, this is always storage#objects.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The continuation token, used to page through large result sets. Provide this
# value in a subsequent request to return the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
# The list of prefixes of objects matching-but-not-listed up to and including
# the requested delimiter.
# Corresponds to the JSON property `prefixes`
# @return [Array<String>]
attr_accessor :prefixes
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@items = args[:items] if args.key?(:items)
@kind = args[:kind] if args.key?(:kind)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
@prefixes = args[:prefixes] if args.key?(:prefixes)
end
end
# A bucket/object IAM policy.
class Policy
include Google::Apis::Core::Hashable
# An association between a role, which comes with a set of permissions, and
# members who may assume that role.
# Corresponds to the JSON property `bindings`
# @return [Array<Google::Apis::StorageV1::Policy::Binding>]
attr_accessor :bindings
# HTTP 1.1 Entity tag for the policy.
# Corresponds to the JSON property `etag`
# NOTE: Values are automatically base64 encoded/decoded in the client library.
# @return [String]
attr_accessor :etag
# The kind of item this is. For policies, this is always storage#policy. This
# field is ignored on input.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The ID of the resource to which this policy belongs. Will be of the form
# projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/
# object for objects. A specific generation may be specified by appending #
# generationNumber to the end of the object name, e.g. projects/_/buckets/my-
# bucket/objects/data.txt#17. The current generation can be denoted with #0.
# This field is ignored on input.
# Corresponds to the JSON property `resourceId`
# @return [String]
attr_accessor :resource_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@bindings = args[:bindings] if args.key?(:bindings)
@etag = args[:etag] if args.key?(:etag)
@kind = args[:kind] if args.key?(:kind)
@resource_id = args[:resource_id] if args.key?(:resource_id)
end
#
class Binding
include Google::Apis::Core::Hashable
#
# Corresponds to the JSON property `condition`
# @return [Object]
attr_accessor :condition
# A collection of identifiers for members who may assume the provided role.
# Recognized identifiers are as follows:
# - allUsers — A special identifier that represents anyone on the internet; with
# or without a Google account.
# - allAuthenticatedUsers — A special identifier that represents anyone who is
# authenticated with a Google account or a service account.
# - user:emailid — An email address that represents a specific account. For
# example, user:[email protected] or user:[email protected].
# - serviceAccount:emailid — An email address that represents a service account.
# For example, serviceAccount:[email protected] .
# - group:emailid — An email address that represents a Google group. For example,
# group:[email protected].
# - domain:domain — A Google Apps domain name that represents all the users of
# that domain. For example, domain:google.com or domain:example.com.
# - projectOwner:projectid — Owners of the given project. For example,
# projectOwner:my-example-project
# - projectEditor:projectid — Editors of the given project. For example,
# projectEditor:my-example-project
# - projectViewer:projectid — Viewers of the given project. For example,
# projectViewer:my-example-project
# Corresponds to the JSON property `members`
# @return [Array<String>]
attr_accessor :members
# The role to which members belong. Two types of roles are supported: new IAM
# roles, which grant permissions that do not map directly to those provided by
# ACLs, and legacy IAM roles, which do map directly to ACL permissions. All
# roles are of the format roles/storage.specificRole.
# The new IAM roles are:
# - roles/storage.admin — Full control of Google Cloud Storage resources.
# - roles/storage.objectViewer — Read-Only access to Google Cloud Storage
# objects.
# - roles/storage.objectCreator — Access to create objects in Google Cloud
# Storage.
# - roles/storage.objectAdmin — Full control of Google Cloud Storage objects.
# The legacy IAM roles are:
# - roles/storage.legacyObjectReader — Read-only access to objects without
# listing. Equivalent to an ACL entry on an object with the READER role.
# - roles/storage.legacyObjectOwner — Read/write access to existing objects
# without listing. Equivalent to an ACL entry on an object with the OWNER role.
# - roles/storage.legacyBucketReader — Read access to buckets with object
# listing. Equivalent to an ACL entry on a bucket with the READER role.
# - roles/storage.legacyBucketWriter — Read access to buckets with object
# listing/creation/deletion. Equivalent to an ACL entry on a bucket with the
# WRITER role.
# - roles/storage.legacyBucketOwner — Read and write access to existing buckets
# with object listing/creation/deletion. Equivalent to an ACL entry on a bucket
# with the OWNER role.
# Corresponds to the JSON property `role`
# @return [String]
attr_accessor :role
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@condition = args[:condition] if args.key?(:condition)
@members = args[:members] if args.key?(:members)
@role = args[:role] if args.key?(:role)
end
end
end
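# Binding sketch (an assumption): granting read access to a hypothetical
# service account, using one of the roles listed above.
#
#   binding = Google::Apis::StorageV1::Policy::Binding.new(
#     role: 'roles/storage.objectViewer',
#     members: ['serviceAccount:[email protected]']
#   )
#   policy = Google::Apis::StorageV1::Policy.new(bindings: [binding])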
# A rewrite response.
class RewriteResponse
include Google::Apis::Core::Hashable
# true if the copy is finished; false if the copy is still in progress. This
# property is always present in the response.
# Corresponds to the JSON property `done`
# @return [Boolean]
attr_accessor :done
alias_method :done?, :done
# The kind of item this is.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The total size of the object being copied in bytes. This property is always
# present in the response.
# Corresponds to the JSON property `objectSize`
# @return [Fixnum]
attr_accessor :object_size
# An object.
# Corresponds to the JSON property `resource`
# @return [Google::Apis::StorageV1::Object]
attr_accessor :resource
# A token to use in subsequent requests to continue copying data. This token is
# present in the response only when there is more data to copy.
# Corresponds to the JSON property `rewriteToken`
# @return [String]
attr_accessor :rewrite_token
# The total bytes written so far, which can be used to provide a waiting user
# with a progress indicator. This property is always present in the response.
# Corresponds to the JSON property `totalBytesRewritten`
# @return [Fixnum]
attr_accessor :total_bytes_rewritten
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@done = args[:done] if args.key?(:done)
@kind = args[:kind] if args.key?(:kind)
@object_size = args[:object_size] if args.key?(:object_size)
@resource = args[:resource] if args.key?(:resource)
@rewrite_token = args[:rewrite_token] if args.key?(:rewrite_token)
@total_bytes_rewritten = args[:total_bytes_rewritten] if args.key?(:total_bytes_rewritten)
end
end
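# Polling sketch (an assumption; `service` stands for a hypothetical
# Google::Apis::StorageV1::StorageService instance): the rewrite call is
# re-issued with the returned rewrite_token until done is true.
#
#   token = nil
#   loop do
#     resp = service.rewrite_object('src-bucket', 'src', 'dst-bucket', 'dst',
#                                   rewrite_token: token)
#     break if resp.done?
#     token = resp.rewrite_token  # present only while more data remains
#   end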
# A Cloud Storage service account, used when the service acts on behalf of a
# given project (for example, when publishing Pub/Sub notifications).
class ServiceAccount
include Google::Apis::Core::Hashable
# The email address of the service account.
# Corresponds to the JSON property `email_address`
# @return [String]
attr_accessor :email_address
# The kind of item this is. For service accounts, this is always storage#
# serviceAccount.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@email_address = args[:email_address] if args.key?(:email_address)
@kind = args[:kind] if args.key?(:kind)
end
end
# A storage.(buckets|objects).testIamPermissions response.
class TestIamPermissionsResponse
include Google::Apis::Core::Hashable
# The kind of item this is.
# Corresponds to the JSON property `kind`
# @return [String]
attr_accessor :kind
# The permissions held by the caller. Permissions are always of the format
# storage.resource.capability, where resource is one of buckets or objects. The
# supported permissions are as follows:
# - storage.buckets.delete — Delete bucket.
# - storage.buckets.get — Read bucket metadata.
# - storage.buckets.getIamPolicy — Read bucket IAM policy.
# - storage.buckets.create — Create bucket.
# - storage.buckets.list — List buckets.
# - storage.buckets.setIamPolicy — Update bucket IAM policy.
# - storage.buckets.update — Update bucket metadata.
# - storage.objects.delete — Delete object.
# - storage.objects.get — Read object data and metadata.
# - storage.objects.getIamPolicy — Read object IAM policy.
# - storage.objects.create — Create object.
# - storage.objects.list — List objects.
# - storage.objects.setIamPolicy — Update object IAM policy.
# - storage.objects.update — Update object metadata.
# Corresponds to the JSON property `permissions`
# @return [Array<String>]
attr_accessor :permissions
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@kind = args[:kind] if args.key?(:kind)
@permissions = args[:permissions] if args.key?(:permissions)
end
end
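# Illustrative sketch: callers usually probe a subset of the permissions
# listed above and diff the result. The method name is assumed from this
# gem's conventions; the bucket name is a placeholder.
#
#   wanted = ['storage.buckets.get', 'storage.objects.list']
#   result = service.test_bucket_iam_permissions('my-bucket', wanted)
#   missing = wanted - (result.permissions || [])
#   missing.empty? # => true when the caller holds everything requested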
end
end
end
| 41.325568 | 115 | 0.605347 |
614c6117f18920a4e292cc5b4cb3ed33519b758b | 1,382 | # coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'jortt/client/version'
Gem::Specification.new do |spec|
spec.name = 'jortt'
spec.version = Jortt::Client::VERSION
spec.authors = [
'Bob Forma',
'Michael Franken',
'Lars Vonk',
'Stephan van Diepen',
]
spec.email = [
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
]
spec.summary = 'jortt.nl REST API client'
spec.homepage = 'https://app.jortt.nl/api-documentatie'
spec.license = 'MIT'
spec.files = `git ls-files -z`.split("\x0")
spec.executables = spec.files.grep(%r{^exec/}) { |f| File.basename(f) }
spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
spec.require_paths = ['lib']
spec.add_runtime_dependency 'rest-client', ['>= 2.0', '< 2.2']
spec.add_runtime_dependency 'oauth2', '~> 1.4.4'
spec.add_development_dependency 'bundler', '~> 2.0'
spec.add_development_dependency 'codecov', '~> 0.1'
spec.add_development_dependency 'rake', '~> 13.0'
spec.add_development_dependency 'rspec', '~> 3.7'
spec.add_development_dependency 'rspec-its', '~> 1.2'
spec.add_development_dependency 'webmock', '~> 3.3'
spec.add_development_dependency 'vcr', '~> 6.0'
end
| 33.707317 | 75 | 0.643271 |
2662051e55f3349263cf99c4181f8ee01e847723 | 119 | #\ -s thin -o 127.0.0.1 -E production -p 7007 -P muck-worker.pid
require '../routes/muck-worker.rb'
run MuckWorkerWeb
| 29.75 | 65 | 0.697479 |
bf34e29d65f4109839d9b17a79185e8ea08365fa | 128 | require 'test_helper'
class PlanetReligionTest < ActiveSupport::TestCase
# test "the truth" do
# assert true
# end
end
| 16 | 50 | 0.71875 |
ed38632ab957124761608f57d14ee621ef7af1f1 | 666 | class TaskSerializer < ActiveModel::Serializer
attributes :id, :name, :user, :project, :created_at, :updated_at, :completed_at, :body, :completed, :due_on, :note
#:assigned_to, :project_id,
# def assigned_to
# {
# user_id: self.object.user.id,
# user_name: self.object.user.name,
# user_workspace_id: self.object.user.workspace_id
# }
# end
# def project
# {
# project_id: self.object.project.id,
# project_name: self.object.project.name
# }
# end
def user
{
user_id: self.object.user.id,
user_name: self.object.user.name,
user_created_at: self.object.user.created_at
}
end
end
| 24.666667 | 116 | 0.645646 |
031e51fa2e0fea8cbb6c76fb8a72fbfe510a7e5b | 5,805 | require 'rubyXL/objects/ooxml_object'
require 'rubyXL/objects/simple_types'
require 'rubyXL/objects/text'
require 'rubyXL/objects/formula'
require 'rubyXL/cell'
module RubyXL
# http://msdn.microsoft.com/en-us/library/documentformat.openxml.spreadsheet.cellvalues(v=office.14).aspx
module DataType
SHARED_STRING = 's'
RAW_STRING = 'str'
INLINE_STRING = 'inlineStr'
ERROR = 'e'
BOOLEAN = 'b'
NUMBER = 'n'
DATE = 'd' # Only available in Office2010.
end
# http://www.datypic.com/sc/ooxml/e-ssml_v-1.html
class CellValue < OOXMLObject
define_attribute(:_, :string, :accessor => :value)
# define_attribute(:'xml:space', %w{ preserve })
define_element_name 'v'
def before_write_xml
preserve_whitespace
true
end
end
# http://www.datypic.com/sc/ooxml/e-ssml_c-2.html
class Cell < OOXMLObject
NUMBER_REGEXP = /\A-?\d+((?:\.\d+)?(?:e[+-]?\d+)?)?\Z/i
define_attribute(:r, :ref)
define_attribute(:s, :int, :default => 0, :accessor => :style_index)
define_attribute(:t, RubyXL::ST_CellType, :accessor => :datatype, :default => 'n' )
define_attribute(:cm, :int, :default => 0)
define_attribute(:vm, :int, :default => 0)
define_attribute(:ph, :bool, :default => false)
define_child_node(RubyXL::Formula, :accessor => :formula)
define_child_node(RubyXL::CellValue, :accessor => :value_container)
define_child_node(RubyXL::RichText) # is
define_element_name 'c'
attr_accessor :worksheet
def index_in_collection
r.col_range.begin
end
def row
r && r.first_row
end
def row=(v)
self.r = RubyXL::Reference.new(v, column || 0)
end
def column
r && r.first_col
end
def column=(v)
self.r = RubyXL::Reference.new(row || 0, v)
end
def raw_value
value_container && value_container.value
end
def raw_value=(v)
self.value_container ||= RubyXL::CellValue.new
value_container.value = v
end
def get_cell_xf
workbook.stylesheet.cell_xfs[self.style_index || 0]
end
def get_cell_font
workbook.stylesheet.fonts[get_cell_xf.font_id]
end
def get_cell_border
workbook.stylesheet.borders[get_cell_xf.border_id]
end
def number_format
workbook.stylesheet.get_number_format_by_id(get_cell_xf.num_fmt_id)
end
def is_date?
return false unless raw_value =~ NUMBER_REGEXP # Only fully numeric values can be dates
num_fmt = self.number_format
num_fmt && num_fmt.is_date_format?
end
# Gets massaged value of the cell, converting datatypes to those known to Ruby (that includes
# stripping any special formatting from RichText).
def value(args = {})
r = self.raw_value
case datatype
when RubyXL::DataType::SHARED_STRING then workbook.shared_strings_container[r.to_i].to_s
when RubyXL::DataType::INLINE_STRING then is.to_s
when RubyXL::DataType::RAW_STRING then raw_value
else
if is_date? then workbook.num_to_date(r.to_f)
elsif r.is_a?(String) && (r =~ NUMBER_REGEXP) then # Numeric
if $1 != '' then r.to_f
else r.to_i
end
else r
end
end
end
def inspect
str = "#<#{self.class}(#{row},#{column}): #{raw_value.inspect}"
str += " =#{self.formula.expression}" if self.formula
str += ", datatype=#{self.datatype.inspect}, style_index=#{self.style_index.inspect}>"
return str
end
include LegacyCell
end
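# Usage sketch (not part of the original source): Cell#value is the
# high-level accessor resolving shared strings and date formats, while
# raw_value returns what is stored in the sheet. The file name is a
# placeholder.
#
#   workbook = RubyXL::Parser.parse('report.xlsx')
#   cell = workbook[0][0][0]  # first worksheet, first row, first cell
#   cell.raw_value            # e.g. "42736" for a date-formatted number
#   cell.value                # e.g. 2017-01-01 00:00:00 after conversion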
# TODO: <row r="1" spans="1:1" x14ac:dyDescent="0.25">
# http://www.datypic.com/sc/ooxml/e-ssml_row-1.html
class Row < OOXMLObject
define_attribute(:r, :int)
define_attribute(:spans, :string)
define_attribute(:s, :int, :default => 0, :accessor => :style_index)
# define_attribute(:customFormat, :bool, :default => false)
define_attribute(:ht, :double)
define_attribute(:hidden, :bool, :default => false)
define_attribute(:customHeight, :bool, :default => false)
define_attribute(:outlineLevel, :int, :default => 0)
define_attribute(:collapsed, :bool, :default => false)
define_attribute(:thickTop, :bool, :default => false)
define_attribute(:thickBot, :bool, :default => false)
define_attribute(:ph, :bool, :default => false)
define_child_node(RubyXL::Cell, :collection => true, :accessor => :cells)
define_element_name 'row'
attr_accessor :worksheet
def before_write_xml
!(cells.nil? || cells.empty?)
end
def index_in_collection
r - 1
end
def [](ind)
cells[ind]
end
def size
cells.size
end
def insert_cell_shift_right(c, col_index)
cells.insert(col_index, c)
update_cell_coords(col_index)
end
def delete_cell_shift_left(col_index)
cells.delete_at(col_index)
update_cell_coords(col_index)
end
def update_cell_coords(start_from_index)
cells.drop(start_from_index).each_with_index { |cell, i|
next if cell.nil?
cell.column = start_from_index + i
}
end
private :update_cell_coords
def xf
@worksheet.workbook.cell_xfs[self.style_index || 0]
end
def get_fill_color
@worksheet.workbook.get_fill_color(xf)
end
def get_font
@worksheet.workbook.fonts[xf.font_id]
end
DEFAULT_HEIGHT = 13
end
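# Usage sketch (not part of the original source): both shift helpers keep
# the `r` references of trailing cells consistent via update_cell_coords.
# A nil slot is valid here, since update_cell_coords skips nil cells.
#
#   row = worksheet.sheet_data[0]
#   row.insert_cell_shift_right(nil, 2) # cells at index >= 2 move right
#   row.delete_cell_shift_left(2)       # and shift back again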
# http://www.datypic.com/sc/ooxml/e-ssml_sheetData-1.html
class SheetData < OOXMLObject
define_child_node(RubyXL::Row, :collection => true, :accessor => :rows)
define_element_name 'sheetData'
def [](ind)
rows[ind]
end
def size
rows.size
end
end
end
| 26.875 | 107 | 0.641344 |
112bc6db551b9a9de35f4204a80630bfe1d6581c | 956 | require 'jdbc_common'
require 'db/postgres'
class CreateSchema < ActiveRecord::Migration
def self.up
execute "CREATE SCHEMA test"
execute "CREATE TABLE test.people (id serial, name text)"
execute "INSERT INTO test.people (name) VALUES ('Alex')"
execute "CREATE TABLE public.people (id serial, wrongname text)"
end
def self.down
execute "DROP SCHEMA test CASCADE"
execute "DROP TABLE people"
end
end
class Person < ActiveRecord::Base
establish_connection POSTGRES_CONFIG.merge(:schema_search_path => 'test')
end
class PostgresSchemaSearchPathTest < Test::Unit::TestCase
def setup
CreateSchema.up
end
def teardown
CreateSchema.down
end
def test_columns
assert_equal(%w{id name}, Person.column_names)
end
def test_find_right
assert_not_nil Person.find_by_name("Alex")
end
def test_find_wrong
assert_raise NoMethodError do
Person.find_by_wrongname("Alex")
end
end
end
| 21.244444 | 75 | 0.732218 |
03489f673d4d29eec331aa0535ea9101707639e5 | 246 | class AddShowableandRemoveManagementFromChemicalSubstances < ActiveRecord::Migration
def change
add_column :chemical_substances, :showable, :boolean, :default => true
remove_column :chemical_substances, :management, :boolean
end
end
| 30.75 | 84 | 0.800813 |
18503422bc718735f94c766be62b180f79b7d0a9 | 24,122 | # Two variables, one recipe.
caname = 'docker_service_default'
caroot = "/ca/#{caname}"
################
# action :create
################
# create a container without starting it
docker_container 'hello-world' do
command '/hello'
action :create
end
#############
# action :run
#############
# This command will exit successfully. This will happen on every
# chef-client run.
docker_container 'busybox_ls' do
repo 'busybox'
command 'ls -la /'
not_if "[ ! -z `docker ps -qaf 'name=busybox_ls$'` ]"
action :run
end
# The :run_if_missing action will only run once. It is the default
# action.
docker_container 'alpine_ls' do
repo 'alpine'
tag '3.1'
command 'ls -la /'
action :run_if_missing
end
###############
# port property
###############
# This process remains running between chef-client runs, so :run will do
# nothing on subsequent converges.
docker_container 'an_echo_server' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 7 -e /bin/cat'
port '7:7'
action :run
end
# let docker pick the host port
docker_container 'another_echo_server' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 7 -e /bin/cat'
port '7'
action :run
end
# specify the udp protocol
docker_container 'an_udp_echo_server' do
repo 'alpine'
tag '3.1'
command 'nc -ul -p 7 -e /bin/cat'
port '5007:7/udp'
action :run
end
# multiple ips
docker_container 'multi_ip_port' do
repo 'alpine'
tag '3.1'
command 'nc -ul -p 7 -e /bin/cat'
port ['8301', '8301:8301/udp', '127.0.0.1:8500:8500', '127.0.1.1:8500:8500']
action :run
end
# port range
docker_container 'port_range' do
repo 'alpine'
tag '3.1'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port ['2000-2001', '2000-2001/udp', '3000-3001/tcp', '7000-7002:8000-8002']
action :run
end
##############
# action :kill
##############
# start a container to be killed
execute 'bill' do
command 'docker run --name bill -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=bill$'` ]"
action :run
end
docker_container 'bill' do
action :kill
end
##############
# action :stop
##############
# start a container to be stopped
execute 'hammer_time' do
command 'docker run --name hammer_time -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=hammer_time$'` ]"
action :run
end
docker_container 'hammer_time' do
action :stop
end
###############
# action :pause
###############
# clean up any leftover container after a service restart
execute 'rm stale red_light' do
command 'docker rm -f red_light'
only_if 'docker ps -a | grep red_light | grep Exited'
action :run
end
# start a container to be paused
execute 'red_light' do
command 'docker run --name red_light -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=red_light$'` ]"
action :run
end
docker_container 'red_light' do
action :pause
end
#################
# action :unpause
#################
# start and pause a container to be unpaused
bash 'green_light' do
code <<-EOF
docker run --name green_light -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"
docker pause green_light
EOF
not_if "[ ! -z `docker ps -qaf 'name=green_light$'` ]"
action :run
end
docker_container 'green_light' do
action :unpause
end
#################
# action :restart
#################
# create and stop a container to be restarted
bash 'quitter' do
code <<-EOF
docker run --name quitter -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"
docker kill quitter
EOF
not_if "[ ! -z `docker ps -qaf 'name=quitter$'` ]"
action :run
end
docker_container 'quitter' do
not_if { ::File.exist?('/marker_container_quitter_restarter') }
action :restart
end
file '/marker_container_quitter_restarter' do
action :create
end
# start a container to be restarted
execute 'restarter' do
command 'docker run --name restarter -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=restarter$'` ]"
action :run
end
docker_container 'restarter' do
not_if { ::File.exist?('/marker_container_restarter') }
action :restart
end
file '/marker_container_restarter' do
action :create
end
################
# action :delete
################
# create a container to be deleted
execute 'deleteme' do
command 'docker run --name deleteme -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if { ::File.exist?('/marker_container_deleteme') }
action :run
end
file '/marker_container_deleteme' do
action :create
end
docker_container 'deleteme' do
action :delete
end
##################
# action :redeploy
##################
docker_container 'redeployer' do
repo 'alpine'
tag '3.1'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port '7'
action :run
end
docker_container 'unstarted_redeployer' do
repo 'alpine'
tag '3.1'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port '7'
action :create
end
execute 'redeploy redeployers' do
command 'touch /marker_container_redeployer'
creates '/marker_container_redeployer'
notifies :redeploy, 'docker_container[redeployer]', :immediately
notifies :redeploy, 'docker_container[unstarted_redeployer]', :immediately
action :run
end
#############
# bind mounts
#############
directory '/hostbits' do
owner 'root'
group 'root'
mode '0755'
action :create
end
file '/hostbits/hello.txt' do
content 'hello there\n'
owner 'root'
group 'root'
mode '0644'
action :create
end
directory '/more-hostbits' do
owner 'root'
group 'root'
mode '0755'
action :create
end
file '/more-hostbits/hello.txt' do
content 'hello there\n'
owner 'root'
group 'root'
mode '0644'
action :create
end
# docker inspect -f "{{ .HostConfig.Binds }}"
docker_container 'bind_mounter' do
repo 'busybox'
command 'ls -la /bits /more-bits'
volumes ['/hostbits:/bits', '/more-hostbits:/more-bits', '/snow', '/winter:/spring:ro', '/summer']
action :run_if_missing
end
docker_container 'binds_alias' do
repo 'busybox'
command 'ls -la /bits /more-bits'
binds ['/fall:/sun', '/snow', '/winter:/spring:ro', '/summer']
action :run_if_missing
end
##############
# volumes_from
##############
# build a chef container
directory '/chefbuilder' do
owner 'root'
group 'root'
action :create
end
execute 'copy chef to chefbuilder' do
command 'tar cf - /opt/chef | tar xf - -C /chefbuilder'
creates '/chefbuilder/opt'
action :run
end
file '/chefbuilder/Dockerfile' do
content <<-EOF
FROM scratch
ADD opt /opt
EOF
action :create
end
docker_image 'chef_container' do
tag 'latest'
source '/chefbuilder'
action :build_if_missing
end
# create a volume container
docker_container 'chef_container' do
command 'true'
volumes '/opt/chef'
action :create
end
# Inspect the docker logs with test-kitchen bussers
docker_container 'ohai_debian' do
command '/opt/chef/embedded/bin/ohai platform'
repo 'debian'
volumes_from 'chef_container'
action :run_if_missing
end
#####
# env
#####
file '/env_file1' do
content <<-EOF
GOODBYE=TOMPETTY
1950=2017
EOF
action :create
end
file '/env_file2' do
content <<-EOF
HELLO=WORLD
EOF
action :create
end
docker_container 'env' do
repo 'debian'
env ['PATH=/usr/bin', 'FOO=bar']
env_file lazy { '/env_file1' }
command 'env'
action :run_if_missing
end
docker_container 'env_files' do
repo 'debian'
env_file lazy { ['/env_file1', '/env_file2'] }
command 'env'
action :run_if_missing
end
############
# entrypoint
############
# Inspect container logs with test-kitchen bussers
docker_container 'ohai_again' do
repo 'debian'
volumes_from 'chef_container'
entrypoint '/opt/chef/embedded/bin/ohai'
action :run_if_missing
end
docker_container 'ohai_again_debian' do
repo 'debian'
volumes_from 'chef_container'
entrypoint '/opt/chef/embedded/bin/ohai'
command 'platform'
action :run_if_missing
end
##########
# cmd_test
##########
directory '/cmd_test' do
action :create
end
file '/cmd_test/Dockerfile' do
content <<-EOF
FROM alpine
CMD [ "/bin/ls", "-la", "/" ]
EOF
action :create
end
docker_image 'cmd_test' do
tag 'latest'
source '/cmd_test'
action :build_if_missing
end
docker_container 'cmd_test' do
action :run_if_missing
end
#############
# :autoremove
#############
# Inspect volume container with test-kitchen bussers
docker_container 'sean_was_here' do
command "touch /opt/chef/sean_was_here-#{Time.new.strftime('%Y%m%d%H%M')}" #
repo 'debian'
volumes_from 'chef_container'
autoremove true
not_if { ::File.exist?('/marker_container_sean_was_here') }
action :run
end
# marker to prevent :run on subsequent converges.
file '/marker_container_sean_was_here' do
action :create
end
#########
# cap_add
#########
# Inspect system with test-kitchen bussers
docker_container 'cap_add_net_admin' do
repo 'debian'
command 'bash -c "ip addr add 10.9.8.7/24 brd + dev eth0 label eth0:0 ; ip addr list"'
cap_add 'NET_ADMIN'
action :run_if_missing
end
docker_container 'cap_add_net_admin_error' do
repo 'debian'
command 'bash -c "ip addr add 10.9.8.7/24 brd + dev eth0 label eth0:0 ; ip addr list"'
action :run_if_missing
end
##########
# cap_drop
##########
# Inspect container logs with test-kitchen bussers
docker_container 'cap_drop_mknod' do
repo 'debian'
command 'bash -c "mknod -m 444 /dev/urandom2 c 1 9 ; ls -la /dev/urandom2"'
cap_drop 'MKNOD'
action :run_if_missing
end
docker_container 'cap_drop_mknod_error' do
repo 'debian'
command 'bash -c "mknod -m 444 /dev/urandom2 c 1 9 ; ls -la /dev/urandom2"'
action :run_if_missing
end
###########################
# hostname and domain_name
###########################
# Inspect container logs with test-kitchen bussers
docker_container 'fqdn' do
repo 'debian'
command 'hostname -f'
hostname 'computers'
domain_name 'biz'
action :run_if_missing
end
#####
# dns
#####
# Inspect container logs with test-kitchen bussers
docker_container 'dns' do
repo 'debian'
command 'cat /etc/resolv.conf'
hostname 'computers'
dns ['4.3.2.1', '1.2.3.4']
dns_search ['computers.biz', 'chef.io']
action :run_if_missing
end
#############
# extra_hosts
#############
# Inspect container logs with test-kitchen bussers
docker_container 'extra_hosts' do
repo 'debian'
command 'cat /etc/hosts'
extra_hosts ['east:4.3.2.1', 'west:1.2.3.4']
action :run_if_missing
end
############
# cpu_shares
############
# docker inspect -f '{{ .HostConfig.CpuShares }}' cpu_shares
docker_container 'cpu_shares' do
repo 'alpine'
tag '3.1'
command 'ls -la'
cpu_shares 512
action :run_if_missing
end
#############
# cpuset_cpus
#############
# docker inspect cpu_shares | grep '"CpusetCpus": "0,1"'
docker_container 'cpuset_cpus' do
repo 'alpine'
tag '3.1'
command 'ls -la'
cpuset_cpus '0,1'
action :run_if_missing
end
################
# restart_policy
################
# docker inspect restart_policy | grep 'RestartPolicy'
docker_container 'try_try_again' do
repo 'alpine'
tag '3.1'
command 'grep asdasdasd /etc/passwd'
restart_policy 'on-failure'
restart_maximum_retry_count 2
action :run_if_missing
end
docker_container 'reboot_survivor' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 123 -e /bin/cat'
port '123'
restart_policy 'always'
action :run_if_missing
end
docker_container 'reboot_survivor_retry' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 123 -e /bin/cat'
port '123'
restart_maximum_retry_count 2
action :run_if_missing
end
#######
# links
#######
# docker inspect -f "{{ .Config.Env }}" link_source
# docker inspect -f "{{ .NetworkSettings.IPAddress }}" link_source
docker_container 'link_source' do
repo 'alpine'
tag '3.1'
env ['FOO=bar', 'BIZ=baz']
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port '321'
action :run
end
docker_container 'link_source_2' do
repo 'alpine'
tag '3.1'
env ['FOO=few', 'BIZ=buzz']
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port '322'
kill_after 1
action :run
end
# docker inspect -f "{{ .HostConfig.Links }}" link_target_1
# docker inspect -f "{{ .Config.Env }}" link_target_1
docker_container 'link_target_1' do
repo 'alpine'
tag '3.1'
env ['ASD=asd']
command 'ping -c 1 hello'
links 'link_source:hello'
subscribes :run, 'docker_container[link_source]'
action :run_if_missing
end
# docker logs linker_target_2
docker_container 'link_target_2' do
repo 'alpine'
tag '3.1'
command 'env'
links ['link_source:hello']
subscribes :run, 'docker_container[link_source]'
action :run_if_missing
end
# docker logs linker_target_3
docker_container 'link_target_3' do
repo 'alpine'
tag '3.1'
env ['ASD=asd']
command 'ping -c 1 hello_again'
links ['link_source:hello', 'link_source_2:hello_again']
subscribes :run, 'docker_container[link_source]'
subscribes :run, 'docker_container[link_source_2]'
action :run_if_missing
end
# docker logs linker_target_4
docker_container 'link_target_4' do
repo 'alpine'
tag '3.1'
command 'env'
links ['link_source:hello', 'link_source_2:hello_again']
subscribes :run, 'docker_container[link_source]'
subscribes :run, 'docker_container[link_source_2]'
action :run_if_missing
end
# When we deploy the link_source container links are broken and we
# have to redeploy the linked containers to fix them.
execute 'redeploy_link_source' do
command 'touch /marker_container_redeploy_link_source'
creates '/marker_container_redeploy_link_source'
notifies :redeploy, 'docker_container[link_source_2]'
notifies :redeploy, 'docker_container[link_target_1]'
notifies :redeploy, 'docker_container[link_target_2]'
notifies :redeploy, 'docker_container[link_target_3]'
notifies :redeploy, 'docker_container[link_target_4]'
action :run
end
##############
# link removal
##############
# docker inspect -f "{{ .Volumes }}" another_link_source
# docker inspect -f "{{ .HostConfig.Links }}" another_link_source
docker_container 'another_link_source' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 456 -e /bin/cat'
port '456'
action :run_if_missing
end
# docker inspect -f "{{ .HostConfig.Links }}" another_link_target
docker_container 'another_link_target' do
repo 'alpine'
tag '3.1'
command 'ping -c 1 hello'
links ['another_link_source:derp']
action :run_if_missing
end
################
# volume removal
################
directory '/dangler' do
owner 'root'
group 'root'
mode '0755'
action :create
end
file '/dangler/Dockerfile' do
content <<-EOF
FROM busybox
RUN mkdir /stuff
VOLUME /stuff
EOF
action :create
end
docker_image 'dangler' do
tag 'latest'
source '/dangler'
action :build_if_missing
end
# create a volume container
docker_container 'dangler' do
command 'true'
not_if { ::File.exist?('/marker_container_dangler') }
action :create
end
file '/marker_container_dangler' do
action :create
end
docker_container 'dangler_volume_remover' do
container_name 'dangler'
remove_volumes true
action :delete
end
#########
# mutator
#########
docker_tag 'mutator_from_busybox' do
target_repo 'busybox'
target_tag 'latest'
to_repo 'someara/mutator'
to_tag 'latest'
end
docker_container 'mutator' do
repo 'someara/mutator'
tag 'latest'
command "sh -c 'touch /mutator-`date +\"%Y-%m-%d_%H-%M-%S\"`'"
outfile '/mutator.tar'
force true
action :run_if_missing
end
execute 'commit mutator' do
command 'touch /marker_container_mutator'
creates '/marker_container_mutator'
notifies :commit, 'docker_container[mutator]', :immediately
notifies :export, 'docker_container[mutator]', :immediately
notifies :redeploy, 'docker_container[mutator]', :immediately
action :run
end
##############
# network_mode
##############
docker_container 'network_mode' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 776 -e /bin/cat'
port '776:776'
network_mode 'host'
action :run
end
#####################
# change_network_mode
#####################
execute 'change_network_mode' do
command 'docker run --name change_network_mode -d alpine:3.1 sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=change_network_mode$'` ]"
action :run
end
docker_container 'change_network_mode' do
repo 'alpine'
tag '3.1'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
network_mode 'host'
action :run
end
#########
# ulimits
#########
docker_container 'ulimits' do
repo 'alpine'
tag '3.1'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
port '778:778'
cap_add 'SYS_RESOURCE'
ulimits [
{ 'Name' => 'nofile', 'Soft' => 40_960, 'Hard' => 40_960 },
{ 'Name' => 'core', 'Soft' => 100_000_000, 'Hard' => 100_000_000 },
{ 'Name' => 'memlock', 'Soft' => 100_000_000, 'Hard' => 100_000_000 },
]
action :run
end
##############
# api_timeouts
##############
docker_container 'api_timeouts' do
command 'nc -ll -p 779 -e /bin/cat'
repo 'alpine'
tag '3.1'
read_timeout 60
write_timeout 60
action :run_if_missing
end
##############
# uber_options
##############
# start a container to be modified
execute 'uber_options' do
command 'docker run --name uber_options -d busybox sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
not_if "[ ! -z `docker ps -qaf 'name=uber_options$'` ]"
action :run
end
docker_container 'uber_options' do
repo 'alpine'
tag '3.1'
hostname 'www'
domainname 'computers.biz'
env ['FOO=foo', 'BAR=bar']
mac_address '00:00:DE:AD:BE:EF'
network_disabled false
tty true
volumes ['/root', '/hostbits:/bits', '/more-hostbits:/more-bits']
working_dir '/'
cap_add %w(NET_ADMIN SYS_RESOURCE)
cap_drop 'MKNOD'
cpu_shares 512
cpuset_cpus '0,1'
dns ['8.8.8.8', '8.8.4.4']
dns_search ['computers.biz']
extra_hosts ['east:4.3.2.1', 'west:1.2.3.4']
links ['link_source:hello']
port '1234:1234'
volumes_from 'chef_container'
user 'operator'
command '"trap exit 0 SIGTERM; while :; do sleep 5; done"'
entrypoint '/bin/sh -c'
ulimits [
'nofile=40960:40960',
'core=100000000:100000000',
'memlock=100000000:100000000',
]
labels ['foo:bar', 'hello:world']
action :run
end
###########
# overrides
###########
# build an image with baked-in defaults for the containers below to override
directory '/overrides' do
owner 'root'
group 'root'
action :create
end
file '/overrides/Dockerfile' do
content <<-EOF
FROM busybox
RUN adduser -D bob
CMD trap exit 0 SIGTERM; while :; do sleep 1; done
USER bob
ENV FOO foo
ENV BAR bar
ENV BIZ=biz BAZ=baz
ENV BUZZ buzz
VOLUME /home
WORKDIR /var
EXPOSE 4321
EOF
notifies :build, 'docker_image[overrides]'
action :create
end
docker_image 'overrides' do
tag 'latest'
source '/overrides'
force true
action :build_if_missing
notifies :redeploy, 'docker_container[overrides-1]'
notifies :redeploy, 'docker_container[overrides-2]'
end
docker_container 'overrides-1' do
repo 'overrides'
action :run
end
docker_container 'overrides-2' do
repo 'overrides'
user 'operator'
command 'sh -c "trap exit 0 SIGTERM; while :; do sleep 1; done"'
env ['FOO=biz']
volumes '/var/log'
workdir '/tmp'
port ['9988:9988', '8877:8877']
action :run
end
#################
# host override
#################
docker_container 'host_override' do
repo 'alpine'
host 'tcp://127.0.0.1:2376'
command 'ls -la /'
tls_verify true
tls_ca_cert "#{caroot}/ca.pem"
tls_client_cert "#{caroot}/cert.pem"
tls_client_key "#{caroot}/key.pem"
action :create
end
#################
# logging drivers
#################
docker_container 'syslogger' do
command 'nc -ll -p 780 -e /bin/cat'
repo 'alpine'
tag '3.1'
log_driver 'syslog'
log_opts 'tag=container-syslogger'
action :run_if_missing
end
############
# kill_after
############
# start a container that can't be stopped and relies on kill_after
directory '/kill_after' do
owner 'root'
group 'root'
action :create
end
file '/kill_after/loop.sh' do
content <<-EOF
#!/bin/sh
trap 'exit 0' SIGTERM
while true; do :; done
EOF
notifies :build, 'docker_image[kill_after]'
action :create
end
file '/kill_after/Dockerfile' do
content <<-EOF
FROM busybox
ADD loop.sh /
RUN chmod +x /loop.sh
CMD "/loop.sh"
EOF
notifies :build, 'docker_image[kill_after]'
action :create
end
docker_image 'kill_after' do
tag 'latest'
source '/kill_after'
force true
action :build_if_missing
end
execute 'kill_after' do
command 'docker run --name kill_after -d kill_after'
not_if "[ ! -z `docker ps -qaf 'name=kill_after$'` ]"
action :run
end
docker_container 'kill_after' do
repo 'kill_after'
kill_after 1
action :stop
end
######
# oom_kill_disable
######
docker_container 'oom_kill_disable' do
repo 'alpine'
tag '3.1'
command 'ls -la'
oom_kill_disable true
timeout 40
action :run_if_missing
end
######
# oom_score_adj
######
docker_container 'oom_score_adj' do
repo 'alpine'
tag '3.1'
command 'ls -la'
oom_score_adj 600
timeout 40
action :run_if_missing
end
##########
# pid_mode
##########
docker_container 'pid_mode' do
repo 'alpine'
tag '3.1'
command 'ps -ef'
pid_mode 'host'
timeout 40
action :run_if_missing
end
######
# init
######
# docker inspect init | grep '"Init": true'
docker_container 'init' do
repo 'alpine'
tag '3.1'
command 'ls -la'
init true
timeout 40
action :run_if_missing
end
##########
# ipc_mode
##########
docker_container 'ipc_mode' do
repo 'alpine'
tag '3.1'
command 'ps -ef'
ipc_mode 'host'
timeout 40
action :run_if_missing
end
##########
# uts_mode
##########
docker_container 'uts_mode' do
repo 'alpine'
tag '3.1'
command 'ps -ef'
uts_mode 'host'
timeout 40
action :run_if_missing
end
##################
# read-only rootfs
##################
docker_container 'ro_rootfs' do
repo 'alpine'
tag '3.1'
command 'ps -ef'
ro_rootfs true
timeout 40
action :run_if_missing
end
##################
# sysctl settings
##################
docker_container 'sysctls' do
repo 'alpine'
tag '3.1'
command '/sbin/sysctl -a'
sysctls 'net.core.somaxconn' => '65535',
'net.core.xfrm_acq_expires' => '42'
timeout 40
action :run_if_missing
end
########################
# Dockerfile CMD changes
########################
directory '/usr/local/src/cmd_change_one' do
action :create
end
file '/usr/local/src/cmd_change_one/Dockerfile' do
content <<EOF
FROM alpine:3.1
CMD [ "nc", "-ll", "-p", "6", "-e", "/bin/cat" ]
EOF
action :create
end
directory '/usr/local/src/cmd_change_two' do
action :create
end
file '/usr/local/src/cmd_change_two/Dockerfile' do
content <<EOF
FROM alpine:3.1
CMD [ "nc", "-ll", "-p", "9", "-e", "/bin/cat" ]
EOF
action :create
end
execute 'build initial cmd_change image' do
command 'docker build -t cmd_change /usr/local/src/cmd_change_one'
not_if 'docker images | grep cmd_change'
action :run
end
execute 'run cmd_change' do
command 'docker run --name cmd_change -d --network=bridge cmd_change'
not_if 'docker ps -a | grep cmd_change$'
action :run
end
docker_container 'cmd_change' do
repo 'cmd_change'
action :run
end
docker_image 'cmd_change' do
tag 'latest'
action :build
source '/usr/local/src/cmd_change_two'
not_if { ::File.exist?('/marker_cmd_change') }
notifies :redeploy, 'docker_container[cmd_change]'
end
file '/marker_cmd_change' do
action :create
end
##############
# security_opt
##############
docker_container 'security_opt' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 70 -e /bin/cat'
port '70:70'
security_opt ['no-new-privileges', 'label=type:DERP']
action :run
end
########
# memory
########
docker_container 'memory' do
repo 'alpine'
tag '3.1'
command 'nc -ll -p 70 -e /bin/cat'
port '71:71'
kernel_memory '10m'
memory '5m'
memory_swap '5M'
memory_swappiness 50
memory_reservation '5m'
action :run
end
| 20.459712 | 118 | 0.666653 |
21a638bb9b9555a184d186cf9938ce1d71993eca | 6,451 | module Admin
module RelationshipsHelper
def setup_relationship(field)
@field = field
@model_to_relate = @resource.reflect_on_association(field.to_sym).class_name.constantize
@model_to_relate_as_resource = @model_to_relate.to_resource
@reflection = @resource.reflect_on_association(field.to_sym)
@association = @reflection.macro
end
def typus_form_has_many(field)
setup_relationship(field)
unless @reflection.through_reflection
foreign_key = @reflection.primary_key_name
end
@items_to_relate = @model_to_relate.all
if set_condition && !@items_to_relate.empty?
form = build_relate_form
end
options = { foreign_key => @item.id }
build_pagination
render "admin/templates/has_n",
:model_to_relate => @model_to_relate,
:model_to_relate_as_resource => @model_to_relate_as_resource,
:foreign_key => foreign_key,
:add_new => raw(build_add_new(options)),
:form => form,
:table => build_relationship_table
end
def typus_form_has_and_belongs_to_many(field)
setup_relationship(field)
@items_to_relate = (@model_to_relate.all - @item.send(field))
if set_condition && !@items_to_relate.empty?
form = build_relate_form
end
build_pagination
render "admin/templates/has_n",
:model_to_relate => @model_to_relate,
:model_to_relate_as_resource => @model_to_relate_as_resource,
:add_new => raw(build_add_new),
:form => form,
:table => build_relationship_table
end
def build_pagination
options = { :order => @model_to_relate.typus_order_by, :conditions => set_conditions }
items_per_page = @model_to_relate.typus_options_for(:per_page)
data = @resource.unscoped.find(params[:id]).send(@field).all(options)
@items = data.paginate(:per_page => items_per_page, :page => params[:page])
end
def build_relate_form
render "admin/templates/relate_form",
:model_to_relate => @model_to_relate,
:items_to_relate => @items_to_relate
end
def build_relationship_table
build_list(@model_to_relate,
@model_to_relate.typus_fields_for(:relationship),
@items,
@model_to_relate_as_resource,
{},
@association)
end
def build_add_new(options = {})
default_options = { :controller => "/admin/#{@field}", :action => "new",
:resource => @resource.name, :resource_id => @item.id,
:back_to => @back_to }
return unless set_condition && current_user.can?("create", @model_to_relate)
link_to _t("Add new"), default_options.merge(options)
end
def set_condition
if @resource.typus_user_id? && current_user.is_not_root?
@item.owned_by?(current_user)
else
true
end
end
def set_conditions
if @model_to_relate.typus_options_for(:only_user_items) && current_user.is_not_root?
{ Typus.user_fk => current_user }
end
end
#--
# TODO: Move html code to partial.
#++
def typus_form_has_one(field)
html = ""
model_to_relate = @resource.reflect_on_association(field.to_sym).class_name.constantize
model_to_relate_as_resource = model_to_relate.to_resource
reflection = @resource.reflect_on_association(field.to_sym)
association = reflection.macro
html << <<-HTML
<a name="#{field}"></a>
<div class="box_relationships" id="#{model_to_relate_as_resource}">
<h2>
#{link_to model_to_relate.model_name.human, :controller => "/admin/#{model_to_relate_as_resource}"}
</h2>
HTML
items = Array.new
items << @resource.find(params[:id]).send(field) unless @resource.find(params[:id]).send(field).nil?
unless items.empty?
options = { :back_to => @back_to, :resource => @resource.to_resource, :resource_id => @item.id }
html << build_list(model_to_relate,
model_to_relate.typus_fields_for(:relationship),
items,
model_to_relate_as_resource,
options,
association)
else
message = _t("There are no %{records}.",
:records => model_to_relate.model_name.human.downcase)
html << <<-HTML
<div id="flash" class="notice"><p>#{message}</p></div>
HTML
end
html << <<-HTML
</div>
HTML
return html
end
def typus_belongs_to_field(attribute, form)
##
# We can only pass parameters to 'new' and 'edit', so this hack does
# the work of replacing the current action.
#
params[:action] = (params[:action] == 'create') ? 'new' : params[:action]
back_to = url_for(:controller => params[:controller], :action => params[:action], :id => params[:id])
related = @resource.reflect_on_association(attribute.to_sym).class_name.constantize
related_fk = @resource.reflect_on_association(attribute.to_sym).primary_key_name
confirm = [ _t("Are you sure you want to leave this page?"),
_t("If you have made any changes to the fields without clicking the Save/Update entry button, your changes will be lost."),
_t("Click OK to continue, or click Cancel to stay on this page.") ]
message = link_to _t("Add"), { :controller => "/admin/#{related.to_resource}",
:action => 'new',
:back_to => back_to,
:selected => related_fk },
:confirm => confirm.join("\n\n") if current_user.can?('create', related)
render "admin/templates/belongs_to",
:resource => @resource,
:form => form,
:related_fk => related_fk,
:message => message,
:label_text => @resource.human_attribute_name(attribute),
:values => related.all(:order => related.typus_order_by).collect { |p| [p.to_label, p.id] },
# :html_options => { :disabled => attribute_disabled?(attribute) },
:html_options => {},
:options => { :include_blank => true }
end
end
end
| 34.132275 | 141 | 0.603782 |
01c40e119ef0d2217916dd0f96062d42c0a32026 | 210 | atom_feed language: 'en-GB', root_url: root_url do |feed|
feed.title 'Statistics on GOV.UK'
feed.author do |author|
author.name 'HM Government'
end
documents_as_feed_entries(@statistics, feed)
end
| 23.333333 | 57 | 0.733333 |
bf0f72a91953dff5e21efc3f1cc3e191fc27a1b2 | 566 | class UpdateCodeReviewCommentsToUseStorageAppId < ActiveRecord::Migration[5.2]
def change
add_column :code_review_comments, :storage_app_id, :integer, null: false, after: :id
add_index :code_review_comments, [:storage_app_id, :project_version],
name: 'index_code_review_comments_on_storage_app_id_and_version'
remove_index :code_review_comments,
name: 'index_code_review_comments_on_project_id_and_version',
column: [:channel_token_id, :project_version]
remove_column :code_review_comments, :channel_token_id, :integer
end
end
| 43.538462 | 88 | 0.795053 |
e88f73f0bbdd03cbf2240ac715e71480c6593470 | 154 | # frozen_string_literal: true
Rails.application.config.content_security_policy do |policy|
policy.default_src :self
policy.frame_ancestors :none
end
| 22 | 60 | 0.824675 |
08bf5fbed76a92e222db9e07451d37802a95b0f8 | 1,583 | module Filemaker
module Model
class Field
attr_reader :name, :type, :default_value, :fm_name
def initialize(name, type, options = {})
@name = name
@type = type
@default_value = serialize_for_update(options.fetch(:default) { nil })
# We need to downcase because Filemaker::Record is
# HashWithIndifferentAndCaseInsensitiveAccess
@fm_name = (options.fetch(:fm_name) { name }).to_s.downcase.freeze
end
# Will delegate to the underlying @type for casting
# From raw input to Ruby type
def cast(value)
return value if value.nil?
@type.__filemaker_cast_to_ruby_object(value)
rescue StandardError => e
warn "[#{e.message}] Could not cast: #{name}=#{value}"
value
end
# Convert to Ruby type suitable for making FileMaker update
# For attr_writer
def serialize_for_update(value)
return value if value.nil?
@type.__filemaker_serialize_for_update(value)
rescue StandardError => e
warn "[#{e.message}] Could not serialize for update: #{name}=#{value}"
value
end
# Convert to Ruby type suitable for making FileMaker query
def serialize_for_query(value)
return value if value.nil?
return value if value =~ /^==|=\*/
return value if value =~ /(\.\.\.)/
@type.__filemaker_serialize_for_query(value)
rescue StandardError => e
warn "[#{e.message}] Could not serialize for query: #{name}=#{value}"
value
end
end
end
end
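# Usage sketch (not part of the original source): Field only needs its type
# to respond to the three __filemaker_* hooks used above, so a coercion type
# can be any plain object. The module and field names here are made up.
#
#   module UpcasedString
#     def self.__filemaker_cast_to_ruby_object(value); value.to_s; end
#     def self.__filemaker_serialize_for_update(value); value.to_s.upcase; end
#     def self.__filemaker_serialize_for_query(value); value.to_s.upcase; end
#   end
#
#   field = Filemaker::Model::Field.new(:name, UpcasedString, fm_name: 'FullName')
#   field.serialize_for_query('bob') # => "BOB"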
| 30.442308 | 78 | 0.624131 |
395cc76bdde8bdfe9e634cdc465e28a61c58d2c0 | 2,143 | # frozen_string_literal: true
require 'spec_helper'
describe 'solidus_virtual_gift_card:send_current_emails' do
subject { task.invoke }
let(:task) { Rake::Task['solidus_virtual_gift_card:send_current_emails'] }
let(:purchaser) { create(:user) }
before do
Rails.application.load_tasks
task.reenable
end
context 'with gift card sent today' do
it 'sends emails to be sent today' do
gift_card = Spree::VirtualGiftCard.create!(amount: 50, send_email_at: Date.today, redeemable: true, purchaser: purchaser)
expect(Spree::GiftCardMailer).to receive(:gift_card_email).with(gift_card).and_return(double(deliver_later: true))
subject
end
it 'does not send unredeemable giftcards' do
gift_card = Spree::VirtualGiftCard.create!(amount: 50, send_email_at: Date.today)
expect(Spree::GiftCardMailer).not_to receive(:gift_card_email).with(gift_card)
subject
end
end
context 'with gift card already sent today' do
it 'does not resend emails that were already sent' do
Spree::VirtualGiftCard.create!(amount: 50, send_email_at: Date.today, sent_at: DateTime.now, redeemable: true, purchaser: purchaser)
expect(Spree::GiftCardMailer).not_to receive(:gift_card_email)
subject
end
end
context 'with gift cards sent in the future' do
it 'does not sends emails' do
Spree::VirtualGiftCard.create!(amount: 50, send_email_at: 10.days.from_now.to_date, redeemable: true, purchaser: purchaser)
expect(Spree::GiftCardMailer).not_to receive(:gift_card_email)
subject
end
end
context 'with gift cards sent in the past' do
it 'does not sends emails' do
Spree::VirtualGiftCard.create!(amount: 50, send_email_at: 1.day.ago, sent_at: 1.day.ago.to_date, redeemable: true, purchaser: purchaser)
expect(Spree::GiftCardMailer).not_to receive(:gift_card_email)
subject
end
end
context 'with gift cards not specified' do
it 'does not sends emails' do
Spree::VirtualGiftCard.create!(amount: 50, send_email_at: nil)
expect(Spree::GiftCardMailer).not_to receive(:gift_card_email)
subject
end
end
end
| 34.564516 | 142 | 0.723752 |
7a8d30329db52492869df1c2d28cfed1ab1ed7b4 | 4,886 | # frozen_string_literal: true
# This file is copied to spec/ when you run 'rails generate rspec:install'
require 'spec_helper'
ENV['RAILS_ENV'] ||= 'test'
require File.expand_path('../../config/environment', __FILE__)
# Prevent database truncation if the environment is production
abort('The Rails environment is running in production mode!') if Rails.env.production?
require 'rspec/rails'
# Add additional requires below this line. Rails is not loaded until this point!
require 'active_fedora/cleaner'
require 'ffaker'
require 'sidekiq/api'
ENV['IMPORT_FILE_PATH'] = "#{::Rails.root}/spec/fixtures"
ENV['IMPORT_PATH'] = "#{::Rails.root}/spec/fixtures"
ENV['UPLOAD_PATH'] = "#{::Rails.root}/tmp/uploads"
ENV['CACHE_PATH'] = "#{::Rails.root}/tmp/uploads/cache"
# Allow Hyrax to upload files from the fixtures directory. Needed for testing
# file attachment.
Hyrax.config.whitelisted_ingest_dirs << "#{::Rails.root}/spec/fixtures"
# Requires supporting ruby files with custom matchers and macros, etc, in
# spec/support/ and its subdirectories. Files matching `spec/**/*_spec.rb` are
# run as spec files by default. This means that files in spec/support that end
# in _spec.rb will both be required and run as specs, causing the specs to be
# run twice. It is recommended that you do not name files matching this glob to
# end with _spec.rb. You can configure this pattern with the --pattern
# option on the command line or in ~/.rspec, .rspec or `.rspec-local`.
#
# The following line is provided for convenience purposes. It has the downside
# of increasing the boot-up time by auto-requiring all files in the support
# directory. Alternatively, in the individual `*_spec.rb` files, manually
# require only the support files necessary.
#
Dir[Rails.root.join('spec', 'support', '**', '*.rb')].each { |f| require f }
# Checks for pending migrations and applies them before tests are run.
# If you are not using ActiveRecord, you can remove this line.
ActiveRecord::Migration.maintain_test_schema!
Capybara.register_driver :googlebot do |app|
browser_options = ::Selenium::WebDriver::Chrome::Options.new
browser_options.args << '--user-agent Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
Capybara::Selenium::Driver.new(app, browser: :chrome, options: browser_options)
end
Capybara.register_driver :selenium_chrome_headless_sandboxless do |app|
browser_options = ::Selenium::WebDriver::Chrome::Options.new
browser_options.args << '--headless'
browser_options.args << '--disable-gpu'
browser_options.args << '--no-sandbox'
Capybara::Selenium::Driver.new(app, browser: :chrome, options: browser_options)
end
Capybara.default_driver = :rack_test # This is a faster driver
Capybara.javascript_driver = :selenium_chrome_headless_sandboxless # This is slower
RSpec.configure do |config|
config.before do
ActiveJob::Base.queue_adapter.enqueued_jobs = []
end
config.before(:each, type: :system, js: true) do
driven_by :selenium_chrome_headless_sandboxless
end
config.before(:each, type: :system, js: false) do
driven_by :rack_test
end
config.before(:suite) do
ActiveJob::Base.queue_adapter = :test
ActiveFedora::Cleaner.clean!
end
config.before(clean: true) do
ActiveFedora::Cleaner.clean!
end
config.before(inline_jobs: true) do
ActiveJob::Base.queue_adapter = :inline
end
config.after(inline_jobs: true) do
ActiveJob::Base.queue_adapter = :test
end
config.before perform_jobs: true do
ActiveJob::Base.queue_adapter.perform_enqueued_jobs = true
end
config.after perform_jobs: true do
ActiveJob::Base.queue_adapter.filter = nil
ActiveJob::Base.queue_adapter.perform_enqueued_jobs = false
end
# Remove this line if you're not using ActiveRecord or ActiveRecord fixtures
config.fixture_path = "#{::Rails.root}/spec/fixtures"
# If you're not using ActiveRecord, or you'd prefer not to run each of your
# examples within a transaction, remove the following line or assign false
# instead of true.
config.use_transactional_fixtures = true
# RSpec Rails can automatically mix in different behaviours to your tests
# based on their file location, for example enabling you to call `get` and
# `post` in specs under `spec/controllers`.
#
# You can disable this behaviour by removing the line below, and instead
# explicitly tag your specs with their type, e.g.:
#
# RSpec.describe UsersController, :type => :controller do
# # ...
# end
#
# The different available types are documented in the features, such as in
# https://relishapp.com/rspec/rspec-rails/docs
config.infer_spec_type_from_file_location!
# Filter lines from Rails gems in backtraces.
config.filter_rails_from_backtrace!
# arbitrary gems may also be filtered via:
# config.filter_gems_from_backtrace("gem name")
config.include Devise::Test::ControllerHelpers, type: :controller
end
| 38.472441 | 113 | 0.744576 |
2830899af8c409c797cb81fd56a4d8599d126fd8 | 1,199 | require 'puppet/confine/feature'
require 'puppet/util/feature'
# Confine Puppet providers based on libs (Gems)
#
# This implementation is functionally a super-set of Puppet::Config::Feature.
# However, Puppet::Confine.inherited prevents sub-classing
# Puppet::Confine::Feature so the entire class was essentially cut'n'pasted.
class Puppet::Confine::Libs < Puppet::Confine
# Create a Puppet::Confine instance that requires a list of libs (Gems). A
# separate Puppet::Util::Feature is created for each lib. This is similar to
# Puppet::Confine::Feature for lib dependencies except that a feature does
# not need to be manually declared.
#
# @return [Puppet::Confine::Libs]
# @api public
def initialize(values)
values = [values] unless values.is_a?(Array)
values.each do |lib|
Puppet.features.add(lib.to_sym, :libs => lib)
end
super(values)
end
def self.summarize(confines)
confines.collect { |c| c.values }.flatten.uniq.find_all { |value| ! confines[0].pass?(value) }
end
# Is the named feature available?
def pass?(value)
Puppet.features.send("#{value.to_s}?".to_sym)
end
def message(value)
"Lib #{value} is missing"
end
end
| 29.975 | 98 | 0.707256 |
03f1e87bf8bd304d3139291129cc1a5e5ca24f14 | 1,680 | Dummy::Application.configure do
# Settings specified here will take precedence over those in config/application.rb
# The test environment is used exclusively to run your application's
# test suite. You never need to work with it otherwise. Remember that
# your test database is "scratch space" for the test suite and is wiped
# and recreated between test runs. Don't rely on the data there!
config.cache_classes = true
# Configure static asset server for tests with Cache-Control for performance
config.serve_static_assets = true
config.static_cache_control = "public, max-age=3600"
# Do not eager load code on boot. This avoids loading your whole application
# just for the purpose of running a single test.
config.eager_load = false
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# Raise exceptions instead of rendering exception templates
config.action_dispatch.show_exceptions = false
# Disable request forgery protection in test environment
config.action_controller.allow_forgery_protection = false
# Tell Action Mailer not to deliver emails to the real world.
# The :test delivery method accumulates sent emails in the
# ActionMailer::Base.deliveries array.
config.action_mailer.delivery_method = :test
# Use SQL instead of Active Record's schema dumper when creating the test database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Print deprecation notices to the stderr
config.active_support.deprecation = :stderr
end
| 42 | 85 | 0.778571 |
bb9f3519327d1fab022ba485e7c37d8687eae7ce | 2,037 | # This component exists only to remove all the files that are not needed in the final package.
component 'cleanup' do |pkg, settings, _platform|
# This component must depend on all other C++ components in order to be
# executed last, after they all finish building.
pkg.build_requires 'puppet-runtime'
pkg.build_requires 'cpp-pcp-client'
pkg.build_requires 'cpp-hocon'
pkg.build_requires 'leatherman'
pkg.build_requires 'pxp-agent'
rm = platform.is_windows? ? '/usr/bin/rm' : 'rm'
cleanup_steps = []
cleanup_steps << "#{rm} -rf #{settings[:includedir]}"
cleanup_steps << "#{rm} -rf #{settings[:prefix]}/share"
cleanup_steps << "#{rm} -rf #{settings[:prefix]}/ssl"
cleanup_steps << "#{rm} -rf #{settings[:prefix]}/usr/local"
cleanup_steps << "#{rm} -rf #{settings[:prefix]}/CMake"
if platform.is_windows?
%w[
erb gem httpclient irb libcrypto libcurl thor
liggcc libconv libssl rake x64-msvcrt-ruby rdoc ri rubyw
curl c_rehash libconv openssl libgcc_s_sjlj yaml-cpp
msvcrt-ruby libmsvcrt-ruby libeay32 ssleay32 ruby
].each do |component|
cleanup_steps << "#{rm} -rf #{settings[:bindir]}/#{component}*"
end
%w[
cmake engines pkgconfig ruby libx64-msvcrt yaml-cpp libssl
libcrypto libcurl libssl libx64-msvcrt yaml-cpp
].each do |component|
cleanup_steps << "#{rm} -rf #{settings[:libdir]}/#{component}*"
end
else
bins = "-name '*pxp-agent*' -o -name '*execution_wrapper*' -o -name '*apply_ruby_shim.rb*'"
cleanup_steps << "#{platform.find} #{settings[:bindir]} -type f ! \\( #{bins} \\) -exec #{rm} -rf {} +"
cleanup_steps << "#{platform.find} #{settings[:libdir]} -type d ! -name 'lib' -exec #{rm} -rf {} +"
libs = "-name '*leatherman*' -o -name '*libpxp*' -o -name '*libcpp*'"
libs += " -o -name '*libstdc*' -o -name '*libgcc_s*'" if platform.is_aix?
cleanup_steps << "#{platform.find} #{settings[:libdir]} ! -name 'lib' ! \\( #{libs} \\) -exec #{rm} -rf {} +"
end
pkg.install { cleanup_steps }
end
| 43.340426 | 114 | 0.648012 |
e206a2f2418f3dca368df28bc7358a788293bdb8 | 1,030 | class SiwappHooks
def invoice_generation(inv)
unless Settings.event_invoice_generation_url.blank?
begin
invoice_json = InvoiceSerializer.new(inv).to_json
response = HTTP.post(
Settings.event_invoice_generation_url,
:json => JSON.parse(invoice_json)
)
if response.code / 100 == 2
WebhookLog.create level: 'info', message: "Invoice #{inv} successfully posted", event: :invoice_generation
else
WebhookLog.create level: 'error', message: "Invoice #{inv} couldn't be posted. Error #{response.code}", event: :invoice_generation
end
rescue ActiveRecord::RecordNotFound
WebhookLog.create level: 'error', message: "Invoice #{inv} not found", event: :invoice_generation
rescue HTTP::Error => error
WebhookLog.create level: 'error', message: "Error posting #{inv}: #{error}", event: :invoice_generation
end
end
end
end
Wisper.subscribe(SiwappHooks.new, async: true)
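# Usage sketch (not part of the original source): Wisper dispatches broadcast
# events to subscriber methods by name, so the invoice_generation hook above
# fires when a publisher broadcasts that event. The model below is made up.
#
#   class Invoice < ActiveRecord::Base
#     include Wisper::Publisher
#     after_commit(on: :create) { broadcast(:invoice_generation, self) }
#   end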
| 39.615385 | 140 | 0.649515 |
bb7c4311f8d50b86430aa58a2f53737dec8d2be6 | 151 | require 'test_helper'
class HeartbeatTest < ActiveSupport::TestCase
# Replace this with your real tests.
def test_truth
assert true
end
end
| 16.777778 | 45 | 0.754967 |
6142af1982028fc25bdd6b9f7f9dac9d4f39c16a | 3,075 | require "gen_server/version"
require "gen_server/signal_handler"
module GenServer
attr_reader :pid
def initialize(*args)
init(*args)
start_child_process
parent_setup_mailbox
end
def init(*args)
end
def start_child_process
@pid = Process.fork do
@pid = Process.pid # Set our own pid.
child_start_watchdog
child_signal_handler
child_setup_mailbox
child_loop
end
end
def call(*message)
system("mkfifo #{reply_file_name}") unless File.exists?(reply_file_name)
parent_write_message [:call, Process.pid, message]
parent_read_reply
end
def cast(*message)
parent_write_message [:cast, message]
nil
end
def wait
Process.wait(@pid)
end
def inspect
"#PID<#{@pid}>"
end
private
def mailbox_file
"#{@pid}.mailbox"
end
def reply_file_name
"#{Process.pid}.reply"
end
def child_start_watchdog
Thread.new do
while true
# If our parent pid magically becomes 1, then we know our
# parent died... :(
if Process.ppid == 1
exit(1)
end
sleep(1)
end
end
end
def child_signal_handler
load "gen_server/signal_handler.rb"
end
def child_setup_mailbox
system("mkfifo #{@pid}.mailbox") or raise "cannot create fifo pipes"
@mailbox = File.open(mailbox_file, "r")
end
def child_loop
while true
message = child_read_message
case message[0]
when :call
_, from, message = message
reply = handle_call(from, *message)
child_write_reply(from, reply)
when :cast
_, message = message
handle_cast(*message)
else
raise ArgumentError, "unexpected message type: #{type.inspect}"
end
end
rescue EOFError
retry
end
def child_read_message
Marshal.load(@mailbox)
rescue EOFError
retry
end
def child_write_reply(reply_pid, reply)
@reply_files ||= {}
@reply_files[reply_pid] ||= File.open("#{reply_pid}.reply", "w+")
reply_file = @reply_files[reply_pid]
Marshal.dump(reply, reply_file)
reply_file.flush
end
def parent_setup_mailbox
retry_count = 0
while !File.exists?(mailbox_file)
raise "cannot find named pipes" if retry_count > 10
sleep(0.05)
retry_count += 1
end
@mailbox = File.open(mailbox_file, "w+")
end
def parent_write_message(message)
Marshal.dump(message, @mailbox)
@mailbox.flush
end
def parent_read_reply
# Pipe semantics are such that opening a pipe on one end will block until
# something opens it on the other end. Thus we have to fire off a message
# that will be replied to *before* we try to open the pipe.
# This will NOT work:
# open_pipe
# send_msg
# recv_msg
# This will work:
# send_msg
# open_pipe
# recv_msg
@reply_file ||= File.open(reply_file_name, "r")
Marshal.load(@reply_file)
end
def marshal_dump
@pid
end
def marshal_load(pid)
@pid = pid
parent_setup_mailbox
end
end
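# Minimal demo (not part of the original file): a server mixes in GenServer
# and defines init/handle_call/handle_cast. The Counter class and its
# messages are made up; note that the fifo files are created in the current
# directory, and the child exits via its watchdog once the parent terminates.
if __FILE__ == $PROGRAM_NAME
class Counter
include GenServer
def init(start)
@count = start
end
def handle_call(_from, op)
op == :value ? @count : nil
end
def handle_cast(op)
@count += 1 if op == :incr
end
end
counter = Counter.new(0)
counter.cast(:incr)
puts counter.call(:value) # => 1
end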
| 19.711538 | 77 | 0.646504 |
bb0b0876eca2e9cc17e5542b477dea378f5eb62a | 608 | class NoteObserver < BaseObserver
def after_create(note)
notification.new_note(note)
# Skip system notes, like status changes and cross-references.
unless note.system
event_service.leave_note(note, note.author)
# Create a cross-reference note if this Note contains GFM that names an
# issue, merge request, or commit.
note.references.each do |mentioned|
Note.create_cross_reference_note(mentioned, note.noteable, note.author, note.project)
end
end
end
def after_update(note)
note.notice_added_references(note.project, note.author)
end
end
| 28.952381 | 93 | 0.725329 |
e81566f5538a376d2ec94f188e7fc555771c1e0e | 362 | # frozen_string_literal: true
# Copyright The OpenTelemetry Authors
#
# SPDX-License-Identifier: Apache-2.0
require 'opentelemetry'
module OpenTelemetry
module Instrumentation
# Contains the OpenTelemetry instrumentation for the Koala gem
module Koala
end
end
end
require_relative './koala/instrumentation'
require_relative './koala/version'
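# Usage sketch (assumption: the standard opentelemetry-ruby contrib pattern
# applies to this instrumentation as to the others):
#
#   require 'opentelemetry/sdk'
#   OpenTelemetry::SDK.configure do |c|
#     c.use 'OpenTelemetry::Instrumentation::Koala'
#   end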
| 19.052632 | 66 | 0.781768 |
ace4a87a43d16a4402d9a5b4e9f45ef018262012 | 1,104 | cask '[email protected]' do
version '5.5.3p2,f15b2772e4d0'
sha256 :no_check
url "https://download.unity3d.com/download_unity/f15b2772e4d0/MacEditorTargetInstaller/UnitySetup-Windows-Support-for-Editor-5.5.3p2.pkg"
name 'Windows Build Support'
homepage 'https://unity3d.com/unity/'
pkg 'UnitySetup-Windows-Support-for-Editor-5.5.3p2.pkg'
depends_on cask: '[email protected]'
preflight do
if File.exist? "/Applications/Unity"
FileUtils.move "/Applications/Unity", "/Applications/Unity.temp"
end
if File.exist? "/Applications/Unity-5.5.3p2"
FileUtils.move "/Applications/Unity-5.5.3p2", '/Applications/Unity'
end
end
postflight do
if File.exist? '/Applications/Unity'
FileUtils.move '/Applications/Unity', "/Applications/Unity-5.5.3p2"
end
if File.exist? '/Applications/Unity.temp'
FileUtils.move '/Applications/Unity.temp', '/Applications/Unity'
end
end
uninstall quit: 'com.unity3d.UnityEditor5.x',
delete: '/Applications/Unity-5.5.3p2/PlaybackEngines/WindowsStandaloneSupport'
end
| 30.666667 | 139 | 0.712862 |
33186cf50d5d81ee41a8171a8fb6c512a96c7176 | 3,823 | require 'spec_helper'
describe MembersHelper do
describe '#action_member_permission' do
let(:project_member) { build(:project_member) }
let(:group_member) { build(:group_member) }
it { expect(action_member_permission(:admin, project_member)).to eq :admin_project_member }
it { expect(action_member_permission(:admin, group_member)).to eq :admin_group_member }
end
describe '#remove_member_message' do
let(:requester) { create(:user) }
let(:project) { create(:project, :public, :access_requestable) }
let(:project_member) { build(:project_member, project: project) }
let(:project_member_invite) { build(:project_member, project: project).tap { |m| m.generate_invite_token! } }
let(:project_member_request) { project.request_access(requester) }
let(:group) { create(:group, :access_requestable) }
let(:group_member) { build(:group_member, group: group) }
let(:group_member_invite) { build(:group_member, group: group).tap { |m| m.generate_invite_token! } }
let(:group_member_request) { group.request_access(requester) }
it { expect(remove_member_message(project_member)).to eq "Are you sure you want to remove #{project_member.user.name} from the #{project.name_with_namespace} project?" }
it { expect(remove_member_message(project_member_invite)).to eq "Are you sure you want to revoke the invitation for #{project_member_invite.invite_email} to join the #{project.name_with_namespace} project?" }
it { expect(remove_member_message(project_member_request)).to eq "Are you sure you want to deny #{requester.name}'s request to join the #{project.name_with_namespace} project?" }
it { expect(remove_member_message(project_member_request, user: requester)).to eq "Are you sure you want to withdraw your access request for the #{project.name_with_namespace} project?" }
it { expect(remove_member_message(group_member)).to eq "Are you sure you want to remove #{group_member.user.name} from the #{group.name} group?" }
it { expect(remove_member_message(group_member_invite)).to eq "Are you sure you want to revoke the invitation for #{group_member_invite.invite_email} to join the #{group.name} group?" }
it { expect(remove_member_message(group_member_request)).to eq "Are you sure you want to deny #{requester.name}'s request to join the #{group.name} group?" }
it { expect(remove_member_message(group_member_request, user: requester)).to eq "Are you sure you want to withdraw your access request for the #{group.name} group?" }
end
describe '#remove_member_title' do
let(:requester) { create(:user) }
let(:project) { create(:project, :public, :access_requestable) }
let(:project_member) { build(:project_member, project: project) }
let(:project_member_request) { project.request_access(requester) }
let(:group) { create(:group, :access_requestable) }
let(:group_member) { build(:group_member, group: group) }
let(:group_member_request) { group.request_access(requester) }
it { expect(remove_member_title(project_member)).to eq 'Remove user from project' }
it { expect(remove_member_title(project_member_request)).to eq 'Deny access request from project' }
it { expect(remove_member_title(group_member)).to eq 'Remove user from group' }
it { expect(remove_member_title(group_member_request)).to eq 'Deny access request from group' }
end
describe '#leave_confirmation_message' do
let(:project) { build_stubbed(:project) }
let(:group) { build_stubbed(:group) }
let(:user) { build_stubbed(:user) }
it { expect(leave_confirmation_message(project)).to eq "Are you sure you want to leave the \"#{project.name_with_namespace}\" project?" }
it { expect(leave_confirmation_message(group)).to eq "Are you sure you want to leave the \"#{group.name}\" group?" }
end
end
| 67.070175 | 212 | 0.738948 |
79cd99adfffdd96775dbe3695908d78e82e754ac | 1,082 | #!/usr/bin/env ruby
require 'natto'
# Locations of the source documents and the generated files
DOCS_DIR = 'docs'
DEST_DIR = 'generated'
# Part-of-speech IDs (MeCab posids are dictionary-specific; these ranges
# assume the default IPA dictionary)
NOUNS_POSID = (36..67).to_a
VERBS_POSID = (31..33).to_a
TARGET_POSID = NOUNS_POSID + VERBS_POSID
# Extract only the nouns and verbs from a text
def pick_words(text)
words = []
natto = Natto::MeCab.new
natto.parse(text) do |n|
    if !n.is_eos? && TARGET_POSID.include?(n.posid)
words << n.surface.to_s
end
end
words
end
# Build the list of files grouped by category directory
files = {}
Dir.entries(DOCS_DIR).each do |category|
path = File.join(DOCS_DIR, category)
if File.directory?(path) && !%w(. ..).include?(category)
files[category] = []
Dir.entries(path).each do |name|
filename = File.join(path, name)
if File.file?(filename) && !/^\./.match(name)
files[category] << filename
end
end
end
end
# For each category, write a .lst file listing its words
# (fastText supervised format: "__label__<category>, word word ...")
files.each do |category, filenames|
  puts "Parse: #{category}"
  File.open(File.join(DEST_DIR, "#{category}.lst"), 'w') do |f|
    filenames.each do |filename|
      words = pick_words(IO.read(filename).gsub(',', ' '))
      f.puts "__label__#{category}, #{words.join(' ')}"
    end
  end
end
| 22.541667 | 66 | 0.641405 |
f86c490f90f5822efc262505d35a60f2b78213be | 581 | # frozen_string_literal: true
module Remocon
class ParameterFileDumper
def initialize(parameters)
@parameters = parameters.with_indifferent_access
end
def dump
@parameters.each_with_object({}) do |(key, body), hash|
hash[key] = body[:defaultValue]
hash[key][:description] = body[:description] if body[:description]
next unless body[:conditionalValues]
hash[key][:conditions] = body[:conditionalValues].each_with_object({}) do |(key2, body2), hash2|
hash2[key2] = body2
end
end
end
end
end
| 25.26087 | 104 | 0.654045 |
ac5e3fcc75d64fe18c035f2f2c64ec2e2b5a8ab0 | 905 | # frozen_string_literal: true
module Masterfiles
module Locations
module LocationType
class New
def self.call(form_values: nil, form_errors: nil, remote: true)
ui_rule = UiRules::Compiler.new(:location_type, :new, form_values: form_values)
rules = ui_rule.compile
layout = Crossbeams::Layout::Page.build(rules) do |page|
page.form_object ui_rule.form_object
page.form_values form_values
page.form_errors form_errors
page.form do |form|
form.action '/masterfiles/locations/location_types'
form.remote! if remote
form.add_field :location_type_code
form.add_field :short_code
form.add_field :can_be_moved
form.add_field :hierarchical
end
end
layout
end
end
end
end
end
| 29.193548 | 89 | 0.605525 |
d5f728700e0a03dbcdfad11144b93c3143bd4889 | 159 | Fabricator(:reviewer, from: :user) do
username { Faker::Name.unique.name }
email { "#{Faker::Lorem.word}@email.com" }
password { Faker::Lorem.word }
end
| 26.5 | 44 | 0.672956 |
1d9db81d3daf6f8db38c07a7e617aa5de2558c92 | 210 | # frozen_string_literal: true
module Refinery
module Testimonials
# Controller for Testimonials
class TestimonialsController < ::ApplicationController
respond_to :json, :html
end
end
end
| 19.090909 | 58 | 0.747619 |
08ed0e5915d52f775481cdb2ae2142e250bcc019 | 59 | class AccountUser
def role
Role.new
end
end | 11.8 | 17 | 0.610169 |
26d32fff801225851ac0edf0851030e5817a2b9e | 1,286 | require_relative 'boot'
require "rails"
# Pick the frameworks you want:
require "active_model/railtie"
require "active_job/railtie"
require "active_record/railtie"
require "action_controller/railtie"
require "action_mailer/railtie"
require "action_view/railtie"
require "action_cable/engine"
# require "sprockets/railtie"
# require "rails/test_unit/railtie"
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
module Snake_io
class Application < Rails::Application
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# Only loads a smaller set of middleware suitable for API only apps.
# Middleware like session, flash, cookies can be added back manually.
# Skip views, helpers and assets when generating a new resource.
config.api_only = true
paths['public'] = File.join 'client', 'dist'
end
end
Rails.application.config.middleware.insert_after(
ActionDispatch::Static,
ActionDispatch::Static,
Rails.root.join("public").to_s,
# Rails.application.config.static_cache_control
)
| 32.15 | 82 | 0.764386 |
798ff55807508dd2788beb1c9a996888ac5fa728 | 457 | # frozen_string_literal: true
require 'schema_dot_org'
# Model the Schema.org `Thing > CreativeWork > WebPage`. See https://schema.org/FAQPage
#
module SchemaDotOrg
class FAQPage < WebPage
    attr_writer :item_list_element # the reader below supplies a default
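
    # Merges the parent WebPage fields with a "mainEntity" array. Each entry
    # in item_list_element is expected to respond to #to_json_struct (for an
    # FAQPage that would typically be Question objects, per schema.org).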
def _to_json_struct
super.merge({
"mainEntity" => item_list_element.map(&:to_json_struct)
})
end
def item_list_element
@item_list_element || []
end
end
end
| 20.772727 | 88 | 0.654267 |
6203d9140f8e3ad719d5d33fa17ed4a417cc111d | 1,622 | module HelpfulComments
class ModelSchemas < Base
# takes a descendant of ActiveRecord::Base
def initialize(klass)
unless klass < ActiveRecord::Base
raise ArgumentError, 'klass must descend from ActiveRecord::Base'
end
@klass = klass
end
# builds the lines to be put into the top of the file
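    # For a model with columns id:int(11) and name:varchar(255) (an
    # illustrative example), the generated block looks like:
    #
    #   # +-------+--------------+------+---------+
    #   # | Field | Type         | Null | Default |
    #   # +-------+--------------+------+---------+
    #   # | id    | int(11)      | NO   |         |
    #   # | name  | varchar(255) | YES  |         |
    #   # +-------+--------------+------+---------+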
def build
results = [%w[Field Type Null Default]]
@klass.columns_hash.each do |name, column|
default = case column.default
when NilClass then ''
when TrueClass then '1'
when FalseClass then '0'
else column.default
end
results << [name, column.sql_type, column.null ? 'YES' : 'NO', default]
end
      lengths = results.transpose.map { |list| list.map { |segment| segment.to_s.length }.max + 1 }
definition = results.map do |line|
'# ' + line.map.with_index { |segment, index| "| %-#{lengths[index]}s" % segment }.join + "|\n"
end
header_line = '# ' + lengths.map { |length| '+' + '-' * (length + 1) }.join + "+\n"
[header_line, definition.first, header_line] + definition[1..-1] + [header_line]
end
# puts the comments into the file
def load
@comments = self.build
max_size = @comments.size
written = false
load_comments(Rails.root.join('app', 'models').to_s, max_size) do |file, line|
        if !written && line.lstrip.start_with?('class') && !file.trailing?(@comments)
file.write(@comments.join)
written = true
end
file.write(line)
end
end
end
end
| 32.44 | 105 | 0.593711 |
bf6562eda599187914259d9d361abbe9bfb60118 | 831 | cask 'forticlient' do
version '6.4'
sha256 'a673cc372f4caf90476e6cda4e51c8450ac58e09a8c5218bba537e86792e5d23'
# filestore.fortinet.com/forticlient/ was verified as official when first introduced to the cask
url "https://filestore.fortinet.com/forticlient/downloads/FortiClientVPNOnlineInstaller_#{version}.dmg"
name 'FortiClient'
homepage 'https://forticlient.com/'
installer manual: 'FortiClientUpdate.app'
uninstall script: {
executable: '/Applications/FortiClientUninstaller.app/Contents/Resources/uninstall_helper',
sudo: true,
}
zap trash: [
'/Library/Application Support/Fortinet',
'~/Library/Application Support/Fortinet',
'~/Library/Application Support/FortiClient',
]
end
| 36.130435 | 113 | 0.670277 |
5d55085c6edce73072a5c1565d56f869fbb37795 | 487 | module Zype
class LoginClient < Zype::Client
def initialize(_ = '')
@headers = { 'Content-Type' => 'application/json' }
self.class.base_uri Zype.configuration.login_host
end
def execute(method:, path:, params: {})
resp = send(method, path: path, params: params)
if resp.success?
resp['response'].nil? ? resp.parsed_response : resp['response']
else
error!(code: resp.code, message: resp['message'])
end
end
end
end
| 27.055556 | 71 | 0.616016 |
26fa081d20fcdaf87ae01eb0e1d6236998a2eebc | 379 | require 'formula'
class Tmpwatch < Formula
homepage 'https://fedorahosted.org/tmpwatch/'
url 'https://fedorahosted.org/releases/t/m/tmpwatch/tmpwatch-2.11.tar.bz2'
sha1 'c578dd98e5ea64ad987a95ae55926685a0df0659'
def install
system "./configure", "--disable-dependency-tracking",
"--prefix=#{prefix}"
system "make", "install"
end
end
| 27.071429 | 76 | 0.6781 |
ed03b8dcf79750f53f15c9fc2d03de8a914eb3cb | 352 | class NomenclaturalRank::Icn::GenusGroup::Section < NomenclaturalRank::Icn::GenusGroup
def self.parent_rank
NomenclaturalRank::Icn::GenusGroup::Subgenus
end
def self.valid_parents
[NomenclaturalRank::Icn::GenusGroup::Genus.to_s] + [NomenclaturalRank::Icn::GenusGroup::Subgenus.to_s]
end
def self.abbreviation
'sect.'
end
end
| 23.466667 | 106 | 0.75 |
1da712cf8a5f694018044dbb8a246d78f7b81a98 | 659 | # Be sure to restart your server when you modify this file.
# Your secret key is used for verifying the integrity of signed cookies.
# If you change this key, all old signed cookies will become invalid!
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
# You can use `rake secret` to generate a secure secret key.
# Make sure your secret_key_base is kept private
# if you're sharing your code publicly.
Dummy::Application.config.secret_key_base = '29da0120ec149038516076ae013e10f95e30cf0e4bc033c74cbc6e648e218175330abe79b6b2340c3aea0b0c16dfbb1e395e6c92b58e16e32da62b22c37393f0'
| 50.692308 | 174 | 0.814871 |
ab1cf729b702c1180005832cf739f6e9bec33ed3 | 5,230 | class Visp < Formula
desc "Visual Servoing Platform library"
homepage "https://visp.inria.fr/"
url "https://visp-doc.inria.fr/download/releases/visp-3.4.0.tar.gz"
sha256 "6c12bab1c1ae467c75f9e5831e01a1f8912ab7eae64249faf49d3a0b84334a77"
license "GPL-2.0-or-later"
livecheck do
url "https://visp.inria.fr/download/"
regex(/href=.*?visp[._-]v?(\d+(?:\.\d+)+)\.t/i)
end
bottle do
sha256 arm64_big_sur: "68aedb3fee225ffd2c5535160c0623f4b4c7b321b3a16873bac54e9e73f5f56f"
sha256 big_sur: "41cc69085b948858a06c3cd51617ded0573b38da55ff9caabc97f5ab44aefe85"
sha256 catalina: "945a1209edbecc168156366c4bb6f4e8b81c1d8972c705f057ac291ccdef480c"
sha256 mojave: "14f87a2273c27b126dd46d6df1d23783d0bb09ccec6dc6ae7f095a2cbc0d2b32"
end
depends_on "cmake" => :build
depends_on "pkg-config" => :build
depends_on "eigen"
depends_on "gsl"
depends_on "jpeg"
depends_on "libdc1394"
depends_on "libpng"
depends_on "opencv"
depends_on "pcl"
depends_on "zbar"
uses_from_macos "libxml2"
uses_from_macos "zlib"
# Fix Apple Silicon build
patch :DATA
def install
ENV.cxx11
# Avoid superenv shim references
inreplace "CMakeLists.txt" do |s|
s.sub!(/CMake build tool:"\s+\${CMAKE_BUILD_TOOL}/,
"CMake build tool: gmake\"")
s.sub!(/C\+\+ Compiler:"\s+\${VISP_COMPILER_STR}/,
"C++ Compiler: clang++\"")
s.sub!(/C Compiler:"\s+\${CMAKE_C_COMPILER}/,
"C Compiler: clang\"")
end
system "cmake", ".", "-DBUILD_DEMOS=OFF",
"-DBUILD_EXAMPLES=OFF",
"-DBUILD_TESTS=OFF",
"-DBUILD_TUTORIALS=OFF",
"-DUSE_DC1394=ON",
"-DDC1394_INCLUDE_DIR=#{Formula["libdc1394"].opt_include}",
"-DDC1394_LIBRARY=#{Formula["libdc1394"].opt_lib}/libdc1394.dylib",
"-DUSE_EIGEN3=ON",
"-DEigen3_DIR=#{Formula["eigen"].opt_share}/eigen3/cmake",
"-DUSE_GSL=ON",
"-DGSL_INCLUDE_DIR=#{Formula["gsl"].opt_include}",
"-DGSL_cblas_LIBRARY=#{Formula["gsl"].opt_lib}/libgslcblas.dylib",
"-DGSL_gsl_LIBRARY=#{Formula["gsl"].opt_lib}/libgsl.dylib",
"-DUSE_JPEG=ON",
"-DJPEG_INCLUDE_DIR=#{Formula["jpeg"].opt_include}",
"-DJPEG_LIBRARY=#{Formula["jpeg"].opt_lib}/libjpeg.dylib",
"-DUSE_LAPACK=ON",
"-DUSE_LIBUSB_1=OFF",
"-DUSE_OPENCV=ON",
"-DOpenCV_DIR=#{Formula["opencv"].opt_share}/OpenCV",
"-DUSE_PCL=ON",
"-DUSE_PNG=ON",
"-DPNG_PNG_INCLUDE_DIR=#{Formula["libpng"].opt_include}",
"-DPNG_LIBRARY_RELEASE=#{Formula["libpng"].opt_lib}/libpng.dylib",
"-DUSE_PTHREAD=ON",
"-DUSE_PYLON=OFF",
"-DUSE_REALSENSE=OFF",
"-DUSE_REALSENSE2=OFF",
"-DUSE_X11=OFF",
"-DUSE_XML2=ON",
"-DUSE_ZBAR=ON",
"-DZBAR_INCLUDE_DIRS=#{Formula["zbar"].opt_include}",
"-DZBAR_LIBRARIES=#{Formula["zbar"].opt_lib}/libzbar.dylib",
"-DUSE_ZLIB=ON",
*std_cmake_args
system "make", "install"
end
test do
(testpath/"test.cpp").write <<~EOS
#include <visp3/core/vpConfig.h>
#include <iostream>
int main()
{
std::cout << VISP_VERSION_MAJOR << "." << VISP_VERSION_MINOR <<
"." << VISP_VERSION_PATCH << std::endl;
return 0;
}
EOS
system ENV.cxx, "test.cpp", "-I#{include}", "-L#{lib}", "-o", "test"
assert_equal version.to_s, shell_output("./test").chomp
end
end
__END__
diff --git a/3rdparty/simdlib/Simd/SimdEnable.h b/3rdparty/simdlib/Simd/SimdEnable.h
index a5ca71702..6c79eb0d9 100644
--- a/3rdparty/simdlib/Simd/SimdEnable.h
+++ b/3rdparty/simdlib/Simd/SimdEnable.h
@@ -44,8 +44,8 @@
#include <TargetConditionals.h> // To detect OSX or IOS using TARGET_OS_IPHONE or TARGET_OS_IOS macro
#endif
-// The following includes <sys/auxv.h> and <asm/hwcap.h> are not available for iOS.
-#if (TARGET_OS_IOS == 0) // not iOS
+// The following includes <sys/auxv.h> and <asm/hwcap.h> are not available for macOS, iOS.
+#if !defined(__APPLE__) // not macOS, iOS
#if defined(SIMD_PPC_ENABLE) || defined(SIMD_PPC64_ENABLE) || defined(SIMD_ARM_ENABLE) || defined(SIMD_ARM64_ENABLE)
#include <unistd.h>
#include <fcntl.h>
@@ -124,7 +124,7 @@ namespace Simd
}
#endif//defined(SIMD_X86_ENABLE) || defined(SIMD_X64_ENABLE)
-#if (TARGET_OS_IOS == 0) // not iOS
+#if !defined(__APPLE__) // not macOS, iOS
#if defined(__GNUC__) && (defined(SIMD_PPC_ENABLE) || defined(SIMD_PPC64_ENABLE) || defined(SIMD_ARM_ENABLE) || defined(SIMD_ARM64_ENABLE))
namespace CpuInfo
{
| 40.542636 | 140 | 0.572467 |
6283c37f40b158d31130cc9edc3203a0076dc365 | 131 | %w[
.ruby-version
.rbenv-vars
config/cities.yml
tmp/restart.txt
tmp/caching-dev.txt
].each { |path| Spring.watch(path) }
| 16.375 | 36 | 0.671756 |
61ca9f379b4d074f65bbd6d7a415ccfe55d03af7 | 1,891 | #-- encoding: UTF-8
#-- copyright
# OpenProject is an open source project management software.
# Copyright (C) 2012-2020 the OpenProject GmbH
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2017 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See docs/COPYRIGHT.rdoc for more details.
#++
module OAuth
class PersistApplicationService
include Contracted
attr_reader :application, :current_user
def initialize(model, user:)
@application = model
@current_user = user
self.contract_class = OAuth::ApplicationContract
end
def call(attributes)
set_defaults
application.attributes = attributes
result, errors = validate_and_save(application, current_user)
ServiceResult.new success: result, errors: errors, result: application
end
def set_defaults
return if application.owner_id
application.owner = current_user
application.owner_type = 'User'
end
end
end
| 31.516667 | 91 | 0.741407 |
ff8b8d8ec54136f0e7a62973ed8fed95898dafd9 | 12,488 | # frozen_string_literal: true
module EE
module ProjectsHelper
extend ::Gitlab::Utils::Override
override :sidebar_operations_paths
def sidebar_operations_paths
super + %w[
oncall_schedules
]
end
override :project_permissions_settings
def project_permissions_settings(project)
super.merge({
requirementsAccessLevel: project.requirements_access_level,
cveIdRequestEnabled: (project.public? && project.project_setting.cve_id_request_enabled?)
})
end
override :project_permissions_panel_data
def project_permissions_panel_data(project)
super.merge({
requirementsAvailable: project.feature_available?(:requirements),
requestCveAvailable: ::Gitlab.com?,
cveIdRequestHelpPath: help_page_path('user/application_security/cve_id_request')
})
end
override :default_url_to_repo
def default_url_to_repo(project = @project)
case default_clone_protocol
when 'krb5'
project.kerberos_url_to_repo
else
super
end
end
override :extra_default_clone_protocol
def extra_default_clone_protocol
if alternative_kerberos_url? && current_user
"krb5"
else
super
end
end
override :remove_project_message
def remove_project_message(project)
return super unless project.adjourned_deletion?
date = permanent_deletion_date(Time.now.utc)
_("Deleting a project places it into a read-only state until %{date}, at which point the project will be permanently deleted. Are you ABSOLUTELY sure?") %
{ date: date }
end
def approvals_app_data(project = @project)
{
project_id: project.id,
can_edit: can_modify_approvers.to_s,
can_modify_author_settings: can_modify_author_settings.to_s,
can_modify_commiter_settings: can_modify_commiter_settings.to_s,
project_path: expose_path(api_v4_projects_path(id: project.id)),
settings_path: expose_path(api_v4_projects_approval_settings_path(id: project.id)),
approvals_path: expose_path(api_v4_projects_approvals_path(id: project.id)),
rules_path: expose_path(api_v4_projects_approval_settings_rules_path(id: project.id)),
allow_multi_rule: project.multiple_approval_rules_available?.to_s,
eligible_approvers_docs_path: help_page_path('user/project/merge_requests/approvals/rules', anchor: 'eligible-approvers'),
security_approvals_help_page_path: help_page_path('user/application_security/index', anchor: 'security-approvals-in-merge-requests'),
security_configuration_path: project_security_configuration_path(project),
vulnerability_check_help_page_path: help_page_path('user/application_security/index', anchor: 'security-approvals-in-merge-requests'),
license_check_help_page_path: help_page_path('user/application_security/index', anchor: 'enabling-license-approvals-within-a-project'),
coverage_check_help_page_path: help_page_path('ci/pipelines/settings', anchor: 'coverage-check-approval-rule')
}.tap do |data|
if ::Feature.enabled?(:group_merge_request_approval_settings_feature_flag, project.root_ancestor, default_enabled: :yaml)
data[:approvals_path] = expose_path(api_v4_projects_merge_request_approval_setting_path(id: project.id))
data[:group_name] = project.root_ancestor.name
end
end
end
def status_checks_app_data(project)
{
data: {
project_id: project.id,
status_checks_path: expose_path(api_v4_projects_external_status_checks_path(id: project.id))
}
}
end
def can_modify_approvers(project = @project)
can?(current_user, :modify_approvers_rules, project)
end
def can_modify_author_settings(project = @project)
can?(current_user, :modify_merge_request_author_setting, project)
end
def can_modify_commiter_settings(project = @project)
can?(current_user, :modify_merge_request_committer_setting, project)
end
def permanent_delete_message(project)
message = _('This action will %{strongOpen}permanently delete%{strongClose} %{codeOpen}%{project}%{codeClose} %{strongOpen}immediately%{strongClose}, including its repositories and all related resources, including issues and merge requests.')
html_escape(message) % remove_message_data(project)
end
def marked_for_removal_message(project)
date = permanent_deletion_date(Time.now.utc)
message = _('This action will %{strongOpen}permanently delete%{strongClose} %{codeOpen}%{project}%{codeClose} %{strongOpen}on %{date}%{strongClose}, including its repositories and all related resources, including issues and merge requests.')
html_escape(message) % remove_message_data(project).merge(date: date)
end
def permanent_deletion_date(date)
(date + ::Gitlab::CurrentSettings.deletion_adjourned_period.days).strftime('%F')
end
# Given the current GitLab configuration, check whether the GitLab URL for Kerberos is going to be different than the HTTP URL
def alternative_kerberos_url?
::Gitlab.config.alternative_gitlab_kerberos_url?
end
def can_change_push_rule?(push_rule, rule, context)
return true if push_rule.global?
can?(current_user, :"change_#{rule}", context)
end
def ci_cd_projects_available?
::License.feature_available?(:ci_cd_projects) && import_sources_enabled?
end
def merge_pipelines_available?
return false unless @project.builds_enabled?
@project.feature_available?(:merge_pipelines)
end
def merge_trains_available?
return false unless @project.builds_enabled?
@project.feature_available?(:merge_trains)
end
    def size_limit_message(project)
      lfs_note = project.lfs_enabled? ? ' including LFS files' : ''
      "Max size of this project's repository#{lfs_note}. For no limit, enter 0. To inherit the group/global value, leave blank."
    end
override :membership_locked?
def membership_locked?
group = @project.group
return false unless group
group.membership_lock? || ::Gitlab::CurrentSettings.lock_memberships_to_ldap?
end
def group_project_templates_count(group_id)
allowed_subgroups = current_user.available_subgroups_with_custom_project_templates(group_id)
::Project.in_namespace(allowed_subgroups).not_aimed_for_deletion.count
end
def project_security_dashboard_config(project)
if project.vulnerabilities.none?
{
has_vulnerabilities: 'false',
has_jira_vulnerabilities_integration_enabled: project.configured_to_create_issues_from_vulnerabilities?.to_s,
empty_state_svg_path: image_path('illustrations/security-dashboard_empty.svg'),
operational_configuration_path: new_project_security_policy_path(@project),
operational_empty_state_svg_path: image_path('illustrations/security-dashboard_empty.svg'),
operational_help_path: help_page_path('user/application_security/policies/index'),
survey_request_svg_path: image_path('illustrations/security-dashboard_empty.svg'),
security_dashboard_help_path: help_page_path('user/application_security/security_dashboard/index'),
no_vulnerabilities_svg_path: image_path('illustrations/issues.svg'),
project_full_path: project.full_path,
security_configuration_path: project_security_configuration_path(@project)
}.merge!(security_dashboard_pipeline_data(project))
else
{
has_vulnerabilities: 'true',
has_jira_vulnerabilities_integration_enabled: project.configured_to_create_issues_from_vulnerabilities?.to_s,
project: { id: project.id, name: project.name },
project_full_path: project.full_path,
vulnerabilities_export_endpoint: api_v4_security_projects_vulnerability_exports_path(id: project.id),
empty_state_svg_path: image_path('illustrations/security-dashboard-empty-state.svg'),
survey_request_svg_path: image_path('illustrations/security-dashboard_empty.svg'),
no_vulnerabilities_svg_path: image_path('illustrations/issues.svg'),
dashboard_documentation: help_page_path('user/application_security/security_dashboard/index'),
not_enabled_scanners_help_path: help_page_path('user/application_security/index', anchor: 'quick-start'),
no_pipeline_run_scanners_help_path: new_project_pipeline_path(project),
operational_configuration_path: new_project_security_policy_path(@project),
operational_empty_state_svg_path: image_path('illustrations/security-dashboard_empty.svg'),
operational_help_path: help_page_path('user/application_security/policies/index'),
security_dashboard_help_path: help_page_path('user/application_security/security_dashboard/index'),
auto_fix_documentation: help_page_path('user/application_security/index', anchor: 'auto-fix-merge-requests'),
auto_fix_mrs_path: project_merge_requests_path(@project, label_name: 'GitLab-auto-fix'),
scanners: VulnerabilityScanners::ListService.new(project).execute.to_json,
can_admin_vulnerability: can?(current_user, :admin_vulnerability, project).to_s,
false_positive_doc_url: help_page_path('user/application_security/vulnerabilities/index'),
can_view_false_positive: can_view_false_positive?
}.merge!(security_dashboard_pipeline_data(project))
end
end
def can_view_false_positive?
project.licensed_feature_available?(:sast_fp_reduction).to_s
end
def can_update_security_orchestration_policy_project?(project)
can?(current_user, :update_security_orchestration_policy_project, project)
end
def can_create_feedback?(project, feedback_type)
feedback = Vulnerabilities::Feedback.new(project: project, feedback_type: feedback_type)
can?(current_user, :create_vulnerability_feedback, feedback)
end
def create_vulnerability_feedback_issue_path(project)
if can_create_feedback?(project, :issue)
project_vulnerability_feedback_index_path(project)
end
end
def create_vulnerability_feedback_merge_request_path(project)
if can_create_feedback?(project, :merge_request)
project_vulnerability_feedback_index_path(project)
end
end
def create_vulnerability_feedback_dismissal_path(project)
if can_create_feedback?(project, :dismissal)
project_vulnerability_feedback_index_path(project)
end
end
def show_discover_project_security?(project)
!!current_user &&
::Gitlab.com? &&
!project.feature_available?(:security_dashboard) &&
can?(current_user, :admin_namespace, project.root_ancestor)
end
def show_compliance_framework_badge?(project)
project&.licensed_feature_available?(:custom_compliance_frameworks) && project&.compliance_framework_setting&.compliance_management_framework.present?
end
def scheduled_for_deletion?(project)
project.marked_for_deletion_at.present?
end
def enable_sast_entry_points_experiment?(project)
can?(current_user, :admin_project, project) &&
!project.empty_repo? &&
!OnboardingProgress.completed?(project.root_ancestor, :security_scan_enabled)
end
def sast_entry_points_experiment_enabled?(project)
enable_sast_entry_points_experiment?(project) &&
experiment(:sast_entry_points, namespace: project.root_ancestor).variant.group == :experiment
end
private
def remove_message_data(project)
{
project: project.path,
strongOpen: '<strong>'.html_safe,
strongClose: '</strong>'.html_safe,
codeOpen: '<code>'.html_safe,
codeClose: '</code>'.html_safe
}
end
def security_dashboard_pipeline_data(project)
pipeline = project.latest_pipeline_with_security_reports
return {} unless pipeline
{
pipeline: {
id: pipeline.id,
path: pipeline_path(pipeline),
created_at: pipeline.created_at.to_s(:iso8601),
security_builds: {
failed: {
count: pipeline.latest_failed_security_builds.count,
path: failures_project_pipeline_path(pipeline.project, pipeline)
}
}
}
}
end
end
end
| 41.765886 | 248 | 0.735586 |
f77bd8de75de825e1130cbd6f64eb620ac08acd9 | 1,655 | #
# Be sure to run `pod lib lint CHFoundation.podspec' to ensure this is a
# valid spec before submitting.
#
# Any lines starting with a # are optional, but their use is encouraged
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
s.name = 'CHFoundation'
s.version = '0.1.6'
s.summary = 'CHFoundation'
# This description is used to generate tags and improve search results.
# * Think: What does it do? Why did you write it? What is the focus?
# * Try to keep it short, snappy and to the point.
# * Write the description between the DESC delimiters below.
# * Finally, don't worry about the indent, CocoaPods strips it!
s.description = <<-DESC
CHFoundation for iOS in CooHua Inc. Include NSString, NSArray, NSData etc categories.
DESC
s.homepage = 'https://gitlab.coohua.com/zhoucheng/CHFoundation'
# s.screenshots = 'www.example.com/screenshots_1', 'www.example.com/screenshots_2'
s.license = { :type => 'MIT', :file => 'LICENSE' }
s.author = { 'zhoucheng' => '[email protected]' }
s.source = { :git => 'https://gitlab.coohua.com/zhoucheng/CHFoundation.git', :tag => s.version.to_s }
# s.social_media_url = 'https://twitter.com/<TWITTER_USERNAME>'
s.ios.deployment_target = '8.0'
s.source_files = 'CHFoundation/Classes/**/*'
s.public_header_files = 'CHFoundation/Classes/**/*.h'
# s.resource_bundles = {
# 'CHFoundation' => ['CHFoundation/Assets/*.png']
# }
s.frameworks = 'Foundation', 'Security'
# s.dependency 'AFNetworking', '~> 2.3'
end
| 38.488372 | 113 | 0.652568 |
f7af8050c81fb14894844cf847c2cfb2f9ad41af | 57 | # utility classes
require 'aws-sdk-sns/message_verifier'
| 19 | 38 | 0.807018 |
b94195dc64807c8621526882b0636c8f7145fcb6 | 584 | worker_processes Integer(ENV['UNICORN_WORKERS'] || 4)
timeout 30
preload_app true
listen(ENV['PORT'] || 3000, :backlog => Integer(ENV['UNICORN_BACKLOG'] || 200))
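
# Platforms that stop processes with SIGTERM (Heroku, for example) would
# otherwise trigger unicorn's immediate shutdown; trapping TERM and sending
# QUIT instead gives the workers a graceful stop.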
before_fork do |server, worker|
Signal.trap 'TERM' do
puts 'Unicorn master intercepting TERM and sending myself QUIT instead'
Process.kill 'QUIT', Process.pid
end
if defined?(Sequel::Model)
Sequel::DATABASES.each{ |db| db.disconnect }
end
end
after_fork do |server, worker|
Signal.trap 'TERM' do
puts 'Unicorn worker intercepting TERM and doing nothing. Wait for master to sent QUIT'
end
end | 27.809524 | 91 | 0.724315 |
e95d50f8cf4ca68aa9a284fb32f7aa9564520679 | 3,461 | # frozen_string_literal: true
require 'optparse'
require 'rubycritic/browser'
module RubyCritic
module Cli
class Options
def initialize(argv)
@argv = argv
self.parser = OptionParser.new
end
# rubocop:disable Metrics/MethodLength
def parse
parser.new do |opts|
opts.banner = 'Usage: rubycritic [options] [paths]'
opts.on('-p', '--path [PATH]', 'Set path where report will be saved (tmp/rubycritic by default)') do |path|
@root = path
end
opts.on('-b', '--branch BRANCH', 'Set branch to compare') do |branch|
self.base_branch = String(branch)
set_current_branch
self.mode = :compare_branches
end
opts.on('-t', '--maximum-decrease [MAX_DECREASE]',
'Set a threshold for score difference between two branches (works only with -b)') do |threshold_score|
self.threshold_score = Integer(threshold_score)
end
opts.on(
'-f', '--format [FORMAT]',
%i[html json console lint],
'Report smells in the given format:',
' html (default; will open in a browser)',
' json',
' console',
' lint'
) do |format|
self.format = format
end
opts.on('-s', '--minimum-score [MIN_SCORE]', 'Set a minimum score') do |min_score|
self.minimum_score = Float(min_score)
end
opts.on('-m', '--mode-ci [BASE_BRANCH]',
'Use CI mode (faster, analyses diffs w.r.t base_branch (default: master))') do |branch|
self.base_branch = branch || 'master'
set_current_branch
self.mode = :ci
end
opts.on('--deduplicate-symlinks', 'De-duplicate symlinks based on their final target') do
self.deduplicate_symlinks = true
end
opts.on('--suppress-ratings', 'Suppress letter ratings') do
self.suppress_ratings = true
end
opts.on('--no-browser', 'Do not open html report with browser') do
self.no_browser = true
end
opts.on_tail('-v', '--version', "Show gem's version") do
self.mode = :version
end
opts.on_tail('-h', '--help', 'Show this message') do
self.mode = :help
end
end.parse!(@argv)
self
end
def to_h
{
mode: mode,
root: root,
format: format,
deduplicate_symlinks: deduplicate_symlinks,
paths: paths,
suppress_ratings: suppress_ratings,
help_text: parser.help,
minimum_score: minimum_score || 0,
no_browser: no_browser,
base_branch: base_branch,
feature_branch: feature_branch,
threshold_score: threshold_score || 0
}
end
# rubocop:enable Metrics/MethodLength
private
attr_accessor :mode, :root, :format, :deduplicate_symlinks,
:suppress_ratings, :minimum_score, :no_browser,
:parser, :base_branch, :feature_branch, :threshold_score
def paths
if @argv.empty?
['.']
else
@argv
end
end
def set_current_branch
self.feature_branch = SourceControlSystem::Git.current_branch
end
end
end
end
| 29.581197 | 120 | 0.54753 |
bfc007219ffeccb2ef91b47484f031a110012a14 | 853 | # frozen_string_literal: true
# It truncates the DB. Use this always on profiles.
def clean_db!
ActiveRecord::Base.establish_connection
ActiveRecord::Base.connection.execute("SET FOREIGN_KEY_CHECKS = 0")
ActiveRecord::Base.connection.tables.each do |table|
next if table == "schema_migrations"
ActiveRecord::Base.connection.execute("TRUNCATE #{table}")
end
ActiveRecord::Base.connection.execute("SET FOREIGN_KEY_CHECKS = 1")
end
# Creates a registry that works with the current setup, and it creates the
# Portus special user.
def create_registry!
# Hostname configurable so some tests can check wrong hostnames.
hostname = ENV["PORTUS_INTEGRATION_HOSTNAME"] || "172.17.0.1:5000"
Registry.create!(name: "registry", hostname: hostname, use_ssl: false)
ENV["PORTUS_INTEGRATION_HOSTNAME"] = nil
User.create_portus_user!
end
| 34.12 | 74 | 0.764361 |
91f7215e520cd3d80566bce1800c7ec5d320f46a | 4,124 | module AgbHandicap
SCORING_SCHEMES = {
'METRIC' => 'Standard WA target face 10 -1',
'IMPERIAL' => 'Standard WA target face 9 - 1',
'INNER_TEN' => 'Standard WA target face 10-1 with x-ring counting as 10, eg compound scoring',
'TRIPLE' => 'Standard 3-spot 5-zone WA target face, eg WA18m round',
'TRIPLE_INNER_TEN' => 'Standard 3-spot 5-zone WA target face with x-ring counting as 10, eg compound WA18m round',
'ONE_TO_FIVE' => '5-zone scoring, eg Worcester, NFAA Indoor',
'SIX_ZONE' => '6-zone WA target face, eg compound 50m'
}
class << self
# Calculate AGB score handicap as per David Lane's original algorithm
#
# Example:
# >> distances = [
# {'range_in_meters' => 91.44, 'total_shots' => 72, 'target_diameter_cm' => 122, 'scoring_scheme' => 'IMPERIAL'},
# {'range_in_meters' => 73.152, 'total_shots' => 48, 'target_diameter_cm' => 122, 'scoring_scheme' => 'IMPERIAL'},
# {'range_in_meters' => 54.864, 'total_shots' => 24, 'target_diameter_cm' => 122, 'scoring_scheme' => 'IMPERIAL'}
# ]
# >> score = 1105
#
# >> result = AgbHandicap.calculate(score, distances)
#
def calculate(score, distances, rounded = true)
result = agbhandicap(score, distances)
rounded ? result.ceil.to_i : result
end
private
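
    # Binary search over the handicap scale: start at 50 with a step of 32 and
    # halve the step each pass, moving toward the handicap whose predicted
    # score matches the score achieved.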
def agbhandicap(score, distances)
rtrange = 32.0
hc = 50.0
while (rtrange > 0.01)
nextscore = agbscore(hc, distances)
if (score < nextscore)
hc = hc + rtrange
end
if (score > nextscore)
hc = hc - rtrange
end
rtrange = rtrange / 2
end
hc = 0 if (hc < 0)
hc = 100 if (hc > 100)
return hc.to_f.round(1)
end
def agbscore(h, distances)
score = 0.0
distances.each do | d |
score = score + calculate_distance_score(d, h)
end
score
end
def calculate_distance_score(distance, h)
range = distance['range_in_meters'].to_f
shots = distance['total_shots'].to_f
diameter = distance['target_diameter_cm'].to_f
scoring = distance['scoring_scheme']
score = 0
sr = score_range(h.to_f, range).to_f
case scoring
when 'METRIC'
score = 10
(1..10).each do | n |
score = score - solution((n.to_f * diameter / 20.0 + 0.357), sr)
end
when 'IMPERIAL'
score = 9
(1..4).each do | n |
score = score - (2.0 * solution((n.to_f * diameter / 10.0 + 0.357), sr))
end
score = score - solution((diameter / 2 + 0.357), sr)
when 'ONE_TO_FIVE' # could be worcester or NFAA round etc
score = 5
(1..5).each do | n |
score = score - solution((n.to_f * diameter / 10.0 + 0.357), sr)
end
when 'INNER_TEN'
score = 10
score = score - solution((diameter / 40 + 0.357), sr)
(2..10).each do | n |
score = score - solution((n.to_f * diameter / 20 + 0.357), sr)
end
when 'TRIPLE'
score = 10
(1..4).each do | n |
score = score - solution((n.to_f * diameter / 20 + 0.357), sr)
end
score = score - (6 * solution((5 * diameter / 20 + 0.357), sr))
when 'TRIPLE_INNER_TEN'
score = 10
score = score - solution((diameter / 40 + 0.357), sr)
(2..4).each do | n |
score = score - solution((n.to_f * diameter / 20 + 0.357), sr)
end
score = score - (6 * solution((5 * diameter / 20 + 0.357), sr))
when 'SIX_ZONE'
score = 10
(1..5).each do | n |
score = score - solution((n.to_f * diameter / 20 + 0.357), sr)
end
score = score - (5 * solution((6 * diameter / 20 + 0.357), sr))
end
(score.to_f * shots).to_f
end
def score_range(h, range)
100 * range * (1.036 ** (h + 12.9)) * 5e-4 * (1 + 1.429e-6 * (1.07 ** (h + 4.3)) * (range * range))
end
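
    # Gaussian term exp(-(r / sigma)^2): the modelled fraction of arrows
    # falling outside radius `operator` for group size `score_range`; each
    # scoring band crossed subtracts one such term in the sums above.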
def solution(operator, score_range)
return Math.exp( -(operator / score_range) ** 2 )
end
end # end class
end
| 31.242424 | 125 | 0.546799 |
03b7a4226b34a690570a23bd08074679f75d4810 | 505 | class UserPolicy < ApplicationPolicy
def initialize(user, record_user)
@user = user
@record_user = record_user
end
def show?
user.admin? || user == record_user
end
def update?
user.admin? || user == record_user
end
def destroy?
user.admin? || user == record_user
end
  def permitted_attributes
    base_attrs = [:name, :email]
    base_attrs += [:admin] if user.admin?
    base_attrs
  end
private
attr_reader :user, :record_user
end
| 15.78125 | 38 | 0.641584 |
8765580cadbc54abb921389b43dcea0869860581 | 56 | class Test2Job < Marty::CronJob
def perform
end
end
| 11.2 | 31 | 0.732143 |
39d8ec4f6981fe21b312a9dbb1513d29c471fd38 | 1,329 | class Tfsec < Formula
desc "Static analysis powered security scanner for your terraform code"
homepage "https://github.com/tfsec/tfsec"
url "https://github.com/tfsec/tfsec/archive/v0.37.3.tar.gz"
sha256 "1f8ea759cee78f5ca41b7cff04aacbe34d0b93cf82053f0a17e0542e18b47dd9"
license "MIT"
livecheck do
url :stable
strategy :github_latest
end
bottle do
sha256 cellar: :any_skip_relocation, arm64_big_sur: "521862e32e40c4f607593837b8d1c70f24f30ec735ebc0e5c63de158827503af"
sha256 cellar: :any_skip_relocation, big_sur: "862ca3dbd15be8d7d491ace960ab763f8d7efc57dfd60e19dd753f063a26e617"
sha256 cellar: :any_skip_relocation, catalina: "9b55abc68caf9a6098ed3d6b1ebc74bdd769984aef446ca6d120b8180cae75e6"
sha256 cellar: :any_skip_relocation, mojave: "cc7488e7ec2dc021a2621d91094f6c8cccb0ef50c9db73bc7b98d540db7942c7"
end
depends_on "go" => :build
resource "testfile" do
url "https://raw.githubusercontent.com/tfsec/tfsec/2d9b76a/example/brew-validate.tf"
sha256 "3ef5c46e81e9f0b42578fd8ddce959145cd043f87fd621a12140e99681f1128a"
end
def install
system "scripts/install.sh", "v#{version}"
bin.install "tfsec"
end
test do
resource("testfile").stage do
assert_match "No problems detected!", shell_output("#{bin}/tfsec .")
end
end
end
| 34.973684 | 122 | 0.771257 |
ab4677398bd6c3fee3863c2154ef182ae9258cda | 39 | module ArRedis
VERSION = '0.1.0'
end
| 9.75 | 19 | 0.666667 |
01d9c502ce80b82b2efb6dc524d4a2f9c040fc4a | 1,143 | #
# Cookbook Name:: fail2ban
# Resource:: fail2ban_jail
#
# Copyright 2015, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
actions :create, :delete
default_action :create
attribute :name, :kind_of => String, :name_attribute => true
attribute :filter, :kind_of => String
attribute :logpath, :kind_of => String
attribute :protocol, :kind_of => String
attribute :ports, :kind_of => Array, :default => []
attribute :maxretry, :kind_of => Integer
def after_created
if node[:lsb][:release].to_f >= 14.04
notifies :reload, "service[fail2ban]"
else
notifies :create, "template[/etc/fail2ban/jail.local]"
end
end
| 30.891892 | 74 | 0.741907 |
1d868806f1baca43293520a4f06c3fb13bb97214 | 172 | begin
require 'syck'
rescue LoadError
# do nothing
end
require 'yaml'
$test_file = tmp("yaml_test_file")
$test_parse_file = File.dirname(__FILE__) + "/test_yaml.yml"
| 15.636364 | 60 | 0.732558 |
79d68837e35acc45142c727687597a5404a38dc9 | 618 | # This file was auto-generated by lib/tasks/web.rake
module Slack
module Web
module Api
module Endpoints
module TeamProfile
#
# Retrieve a team's profile.
#
# @option options [Object] :visibility
# Filter by visibility.
# @see https://api.slack.com/methods/team.profile.get
# @see https://github.com/slack-ruby/slack-api-ref/blob/master/methods/team.profile/team.profile.get.json
def team_profile_get(options = {})
post('team.profile.get', options)
end
end
end
end
end
end
| 26.869565 | 115 | 0.585761 |
5d97362b55dab4b01edb1b5909c47e8dbd96399d | 749 | # typed: true
module Kuby
module CertManager
module DSL
module CertManager
module V1
class ClusterIssuerSpecAcmeSolversDns01AcmeDNSAccountSecretRef < ::KubeDSL::DSLObject
value_field :name
value_field :key
validates :name, field: { format: :string }, presence: true
validates :key, field: { format: :string }, presence: false
def serialize
{}.tap do |result|
result[:name] = name
result[:key] = key
end
end
def kind_sym
:cluster_issuer_spec_acme_solvers_dns01_acme_dns_account_secret_ref
end
end
end
end
end
end
end | 24.966667 | 95 | 0.551402 |
1c226a3fb80e0f0f284fcbb5d71a6118fa1e6b3e | 16,259 | require 'recursive-open-struct'
RSpec.describe MiqServer::WorkerManagement::Monitor::Kubernetes do
let(:server) { EvmSpecHelper.create_guid_miq_server_zone.second }
let(:orchestrator) { double("ContainerOrchestrator") }
let(:deployment_name) { '1-generic-79bb8b8bb5-8ggbg' }
let(:pod_label) { '1-generic' }
before do
# MiqWorkerType.seed
allow(server).to receive(:orchestrator).and_return(orchestrator)
end
after do
server.current_pods.clear
server.current_deployments.clear
end
it "#current_pods initialized" do
expect(server.current_pods).to_not be_nil
end
it ".current_pods initialized" do
expect(server.class.current_pods).to_not be_nil
end
it ".current_pods and #current_pods share the same hash" do
expect(server.class.current_pods.object_id).to eql(server.current_pods.object_id)
server.current_pods[:a] = :b
expect(server.class.current_pods[:a]).to eql(:b)
end
context "#ensure_kube_monitors_started" do
it "calls start_kube_monitor if nil monitor thread" do
expect(server).to receive(:start_kube_monitor).once.with(:deployments)
expect(server).to receive(:start_kube_monitor).once.with(:pods)
server.deployments_monitor_thread = nil
server.pods_monitor_thread = nil
server.send(:ensure_kube_monitors_started)
end
it "calls start_kube_monitor if monitor thread terminated normally" do
expect(server).to receive(:start_kube_monitor).twice
thread = double(:alive? => false, :status => false)
expect(thread).to receive(:join).never
server.deployments_monitor_thread = thread
server.pods_monitor_thread = thread
server.send(:ensure_kube_monitors_started)
end
it "joins a dead thread with an exception before calling start_kube_monitor" do
expect(server).to receive(:start_kube_monitor).twice
thread = double
expect(thread).to receive(:alive?).twice.and_return(false)
expect(thread).to receive(:status).twice.and_return(nil)
expect(thread).to receive(:join).twice
server.deployments_monitor_thread = thread
server.pods_monitor_thread = thread
server.send(:ensure_kube_monitors_started)
end
end
context "#delete_failed_deployments" do
let(:current_pods) do
stats = Concurrent::Hash.new
stats[:last_state_terminated] = false
stats[:container_restarts] = 0
stats[:label_name] = pod_label
h = Concurrent::Hash.new
h['1-generic-79bb8b8bb5-8ggbg'] = stats
h
end
before do
allow(server).to receive(:ensure_kube_monitors_started)
end
context "with no deployments" do
it "doesn't call delete_deployment" do
allow(server).to receive(:current_pods).and_return(Concurrent::Hash.new)
expect(orchestrator).to receive(:delete_deployment).never
server.cleanup_failed_deployments
end
end
context "with 1 running deployment" do
it "doesn't call delete_deployment" do
allow(server).to receive(:current_pods).and_return(current_pods)
expect(orchestrator).to receive(:delete_deployment).never
server.cleanup_failed_deployments
end
end
context "with a failed deployment" do
it "calls delete_deployment with pod name" do
current_pods[deployment_name][:last_state_terminated] = true
current_pods[deployment_name][:container_restarts] = 100
allow(server).to receive(:current_pods).and_return(current_pods)
expect(orchestrator).to receive(:delete_deployment).with(pod_label)
server.cleanup_failed_deployments
end
end
end
context "#save_deployment(private)" do
let(:pod_name) { "1-generic" }
let(:fake_deployment_data) do
RecursiveOpenStruct.new(
:metadata => {
:name => pod_name
},
:spec => {
:replicas => 2,
:template => {
:spec => {
:containers => [{:name => pod_name}]
}
}
},
:status => {
:readyReplicas => 2
}
)
end
it "saves replicas" do
server.send(:save_deployment, fake_deployment_data)
expect(server.current_deployments[pod_name].fetch_path(:spec, :replicas)).to eq(2)
end
it "saves containers" do
server.send(:save_deployment, fake_deployment_data)
expect(server.current_deployments[pod_name].fetch_path(:spec, :template, :spec, :containers).first[:name]).to eq(pod_name)
end
it "discards other keys" do
server.send(:save_deployment, fake_deployment_data)
expect(server.current_deployments[pod_name].keys).to eq([:spec])
end
it "updates existing saved deployment" do
server.send(:save_deployment, fake_deployment_data)
fake_deployment_data.spec.replicas = 5
server.send(:save_deployment, fake_deployment_data)
expect(server.current_deployments[pod_name].fetch_path(:spec, :replicas)).to eq(5)
end
end
context "#collect_initial(private)" do
let(:resource_version) { "21943006" }
let(:started_at) { "2020-07-22T18:47:08Z" }
let(:pods) do
metadata = double(:name => deployment_name, :labels => double(:name => pod_label))
state = double(:running => double(:startedAt => started_at))
last_state = double(:terminated => nil)
status = double(:containerStatuses => [double(:state => state, :lastState => last_state, :restartCount => 0)])
pods = [double(:metadata => metadata, :status => status)]
allow(pods).to receive(:resourceVersion).and_return(resource_version)
pods
end
before do
allow(orchestrator).to receive(:get_pods).and_return(pods)
end
it "collects deployments optionally" do
deploy = double
deployments = [deploy]
allow(deployments).to receive(:resourceVersion).and_return(resource_version)
allow(orchestrator).to receive(:get_deployments).and_return(deployments)
expect(server).to receive(:save_deployment).with(deploy)
server.send(:collect_initial, :deployments)
end
it "calls save_pod for running pod" do
server.send(:collect_initial)
expect(server.current_pods[deployment_name][:label_name]).to eq(pod_label)
expect(server.current_pods[deployment_name][:last_state_terminated]).to eq(false)
expect(server.current_pods[deployment_name][:container_restarts]).to eq(0)
end
it "calls save_pod to update a known running pod" do
pod_hash = Concurrent::Hash.new
pod_hash[:label_name] = pod_label
pod_hash[:last_state_terminated] = true
server.current_pods[deployment_name] = pod_hash
expect(server.current_pods[deployment_name][:last_state_terminated]).to eq(true)
server.send(:collect_initial)
expect(server.current_pods[deployment_name][:last_state_terminated]).to eq(false)
end
it "calls save_pod for terminated pod" do
allow(pods.first.status.containerStatuses.first.lastState).to receive(:terminated).and_return(double(:exitCode => 1, :reason => "Error"))
allow(pods.first.status.containerStatuses.first.state).to receive(:running).and_return(nil)
allow(pods.first.status.containerStatuses.first).to receive(:restartCount).and_return(10)
server.send(:collect_initial)
expect(server.current_pods[deployment_name][:label_name]).to eq(pod_label)
expect(server.current_pods[deployment_name][:last_state_terminated]).to eq(true)
expect(server.current_pods[deployment_name][:container_restarts]).to eq(10)
end
it "returns resource_version" do
expect(server.send(:collect_initial)).to eq(resource_version)
end
end
context "#watch_for_events(private)" do
let(:event_object) { double }
let(:watch_event) do
double(:object => event_object)
end
context "processes event" do
before do
allow(orchestrator).to receive(:watch_pods).and_return([watch_event])
end
it "ADDED calls save_pod with event object" do
allow(watch_event).to receive(:type).and_return("ADDED")
expect(server).to receive(:save_pod).with(event_object)
server.send(:watch_for_events, :pods, nil)
end
it "MODIFIED calls save_pod with event object" do
allow(watch_event).to receive(:type).and_return("MODIFIED")
expect(server).to receive(:save_pod).with(event_object)
server.send(:watch_for_events, :pods, nil)
end
it "DELETED calls delete_pod with event object" do
allow(watch_event).to receive(:type).and_return("DELETED")
expect(server).to receive(:delete_pod).with(event_object)
server.send(:watch_for_events, :pods, nil)
end
it "UNKNOWN type isn't saved or deleted" do
allow(watch_event).to receive(:type).and_return("UNKNOWN")
expect(server).to receive(:save_pod).never
expect(server).to receive(:delete_pod).never
server.send(:watch_for_events, :pods, nil)
end
it "ERROR logs warning and breaks" do
expected_code = 410
expected_message = "too old resource version: 199900 (27177196)"
expected_reason = "Gone"
allow(watch_event).to receive(:type).and_return("ERROR")
allow(event_object).to receive(:code).and_return(expected_code)
allow(event_object).to receive(:message).and_return(expected_message)
allow(event_object).to receive(:reason).and_return(expected_reason)
allow(server).to receive(:log_pod_error_event) do |code, message, reason|
expect(code).to eq(expected_code)
expect(message).to eq(expected_message)
expect(reason).to eq(expected_reason)
end
server.send(:watch_for_events, :pods, nil)
end
end
end
context "#sync_deployment_settings" do
let(:worker1) { FactoryBot.create(:miq_generic_worker, :miq_server => server) }
let(:worker2) { FactoryBot.create(:miq_generic_worker, :miq_server => server) }
let(:worker3) { FactoryBot.create(:miq_priority_worker, :miq_server => server) }
before do
allow(server).to receive(:podified?).and_return(true)
end
it "calls patch_deployment when changed" do
allow(server).to receive(:podified_miq_workers).and_return([worker1])
allow(server).to receive(:deployment_resource_constraints_changed?).with(worker1).and_return(true)
expect(worker1).to receive(:patch_deployment)
server.sync_deployment_settings
end
it "doesn't call patch_deployment when unchanged" do
allow(server).to receive(:podified_miq_workers).and_return([worker1])
allow(server).to receive(:deployment_resource_constraints_changed?).with(worker1).and_return(false)
expect(worker1).to receive(:patch_deployment).never
server.sync_deployment_settings
end
it "calls patch_deployment when changed once per worker class" do
allow(server).to receive(:podified_miq_workers).and_return([worker1, worker2, worker3])
allow(server).to receive(:deployment_resource_constraints_changed?).with(worker1).and_return(true)
allow(server).to receive(:deployment_resource_constraints_changed?).with(worker3).and_return(true)
expect(worker1).to receive(:patch_deployment)
expect(worker2).to receive(:patch_deployment).never
expect(worker3).to receive(:patch_deployment)
server.sync_deployment_settings
end
end
context "deployment_resource_constraints_changed?" do
let(:constraint_one) { {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}} }
let(:deployment) do
{
:spec => {
:template => {
:spec => {
:containers => [:resources => constraint_one]
}
}
}
}
end
let(:worker) { FactoryBot.create(:miq_generic_worker, :miq_server => server) }
it "empty current_deployments" do
stub_settings(:server => {:worker_monitor => {:enforce_resource_constraints => true}})
server.current_deployments[worker.worker_deployment_name] = nil
allow(worker).to receive(:resource_constraints).and_return(constraint_one)
expect(server).to receive(:constraints_changed?).with({}, constraint_one)
server.deployment_resource_constraints_changed?(worker)
end
it "normal" do
stub_settings(:server => {:worker_monitor => {:enforce_resource_constraints => true}})
server.current_deployments[worker.worker_deployment_name] = deployment
allow(worker).to receive(:resource_constraints).and_return(constraint_one)
expect(server).to receive(:constraints_changed?).with(constraint_one, constraint_one)
server.deployment_resource_constraints_changed?(worker)
end
it "detects no changes if not enforced" do
stub_settings(:server => {:worker_monitor => {:enforce_resource_constraints => false}})
expect(server).to receive(:constraints_changed?).never
expect(server.deployment_resource_constraints_changed?(worker)).to eq(false)
end
end
context "constraints_changed?" do
let(:empty) { {} }
let(:constraint_one) { {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}} }
let(:constraint_two) { {:limits => {:cpu => "888m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}} }
it "No current, no desired constraints" do
expect(server.constraints_changed?(empty, empty)).to eq(false)
end
it "No current, new desired constraints" do
expect(server.constraints_changed?(empty, constraint_one)).to eq(true)
end
it "Current equals desired" do
expect(server.constraints_changed?(constraint_one, constraint_one)).to eq(false)
end
it "Current does not equal desired" do
expect(server.constraints_changed?(constraint_one, constraint_two)).to eq(true)
end
it "Detects 1024Mi memory == 1Gi" do
new_value = {:limits => {:memory => "1024Mi"}}
expect(server.constraints_changed?(constraint_one, constraint_one.deep_merge(new_value))).to eq(false)
end
it "Detects 0.15 == 150m" do
# From: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits
# A request with a decimal point, like 0.1, is converted to 100m by the API, and precision finer than 1m is not allowed. For this reason, the form 100m might be preferred.
new_value = {:requests => {:cpu => "0.15"}}
expect(server.constraints_changed?(constraint_one, constraint_one.deep_merge(new_value))).to eq(false)
end
it "Current missing cpu limit" do
current = {:limits => {:memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
desired = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
expect(server.constraints_changed?(current, desired)).to eq(true)
end
it "Desired missing cpu limit" do
current = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
desired = {:limits => {:memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
expect(server.constraints_changed?(current, desired)).to eq(true)
end
it "Current missing memory request" do
current = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m"}}
desired = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
expect(server.constraints_changed?(current, desired)).to eq(true)
end
it "Desired missing memory request" do
current = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m", :memory => "500Mi"}}
desired = {:limits => {:cpu => "999m", :memory => "1Gi"}, :requests => {:cpu => "150m"}}
expect(server.constraints_changed?(current, desired)).to eq(true)
end
it "checks millicores" do
current = constraint_one.deep_merge(:limits => {:cpu => "1"})
desired = constraint_one.deep_merge(:limits => {:cpu => "1000m"})
expect(server.constraints_changed?(current, desired)).to eq(false)
end
end
end
| 39.656098 | 177 | 0.678701 |
bb49c8d5c5008d5d9983d66b07174a18684f3f98 | 8,256 | require "cases/helper"
require "models/author"
require "models/binary"
require "models/cake_designer"
require "models/chef"
require "models/comment"
require "models/edge"
require "models/essay"
require "models/post"
require "models/price_estimate"
require "models/topic"
require "models/treasure"
require "models/vertex"
module ActiveRecord
class WhereTest < ActiveRecord::TestCase
fixtures :posts, :edges, :authors, :binaries, :essays
def test_where_copies_bind_params
author = authors(:david)
posts = author.posts.where('posts.id != 1')
joined = Post.where(id: posts)
assert_operator joined.length, :>, 0
joined.each { |post|
assert_equal author, post.author
assert_not_equal 1, post.id
}
end
def test_where_copies_arel_bind_params
chef = Chef.create!
CakeDesigner.create!(chef: chef)
cake_designers = CakeDesigner.joins(:chef).where(chefs: { id: chef.id })
chefs = Chef.where(employable: cake_designers)
assert_equal [chef], chefs.to_a
end
def test_rewhere_on_root
assert_equal posts(:welcome), Post.rewhere(title: 'Welcome to the weblog').first
end
def test_belongs_to_shallow_where
author = Author.new
author.id = 1
assert_equal Post.where(author_id: 1).to_sql, Post.where(author: author).to_sql
end
def test_belongs_to_nil_where
assert_equal Post.where(author_id: nil).to_sql, Post.where(author: nil).to_sql
end
def test_belongs_to_array_value_where
assert_equal Post.where(author_id: [1,2]).to_sql, Post.where(author: [1,2]).to_sql
end
def test_belongs_to_nested_relation_where
expected = Post.where(author_id: Author.where(id: [1,2])).to_sql
actual = Post.where(author: Author.where(id: [1,2])).to_sql
assert_equal expected, actual
end
def test_belongs_to_nested_where
parent = Comment.new
parent.id = 1
expected = Post.where(comments: { parent_id: 1 }).joins(:comments)
actual = Post.where(comments: { parent: parent }).joins(:comments)
assert_equal expected.to_sql, actual.to_sql
end
def test_belongs_to_nested_where_with_relation
author = authors(:david)
expected = Author.where(id: author ).joins(:posts)
actual = Author.where(posts: { author_id: Author.where(id: author.id) }).joins(:posts)
assert_equal expected.to_a, actual.to_a
end
def test_polymorphic_shallow_where
treasure = Treasure.new
treasure.id = 1
expected = PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: 1)
actual = PriceEstimate.where(estimate_of: treasure)
assert_equal expected.to_sql, actual.to_sql
end
def test_polymorphic_nested_array_where
treasure = Treasure.new
treasure.id = 1
hidden = HiddenTreasure.new
hidden.id = 2
expected = PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: [treasure, hidden])
actual = PriceEstimate.where(estimate_of: [treasure, hidden])
assert_equal expected.to_sql, actual.to_sql
end
def test_polymorphic_nested_relation_where
expected = PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: Treasure.where(id: [1,2]))
actual = PriceEstimate.where(estimate_of: Treasure.where(id: [1,2]))
assert_equal expected.to_sql, actual.to_sql
end
def test_polymorphic_sti_shallow_where
treasure = HiddenTreasure.new
treasure.id = 1
expected = PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: 1)
actual = PriceEstimate.where(estimate_of: treasure)
assert_equal expected.to_sql, actual.to_sql
end
def test_polymorphic_nested_where
thing = Post.new
thing.id = 1
expected = Treasure.where(price_estimates: { thing_type: 'Post', thing_id: 1 }).joins(:price_estimates)
actual = Treasure.where(price_estimates: { thing: thing }).joins(:price_estimates)
assert_equal expected.to_sql, actual.to_sql
end
def test_polymorphic_sti_nested_where
treasure = HiddenTreasure.new
treasure.id = 1
expected = Treasure.where(price_estimates: { estimate_of_type: 'Treasure', estimate_of_id: 1 }).joins(:price_estimates)
actual = Treasure.where(price_estimates: { estimate_of: treasure }).joins(:price_estimates)
assert_equal expected.to_sql, actual.to_sql
end
def test_decorated_polymorphic_where
treasure_decorator = Struct.new(:model) do
def self.method_missing(method, *args, &block)
Treasure.send(method, *args, &block)
end
def is_a?(klass)
model.is_a?(klass)
end
def method_missing(method, *args, &block)
model.send(method, *args, &block)
end
end
treasure = Treasure.new
treasure.id = 1
decorated_treasure = treasure_decorator.new(treasure)
expected = PriceEstimate.where(estimate_of_type: 'Treasure', estimate_of_id: 1)
actual = PriceEstimate.where(estimate_of: decorated_treasure)
assert_equal expected.to_sql, actual.to_sql
end
def test_aliased_attribute
expected = Topic.where(heading: 'The First Topic')
actual = Topic.where(title: 'The First Topic')
assert_equal expected.to_sql, actual.to_sql
end
def test_where_error
assert_raises(ActiveRecord::StatementInvalid) do
Post.where(:id => { 'posts.author_id' => 10 }).first
end
end
def test_where_with_table_name
post = Post.first
assert_equal post, Post.where(:posts => { 'id' => post.id }).first
end
def test_where_with_table_name_and_empty_hash
assert_equal 0, Post.where(:posts => {}).count
end
def test_where_with_table_name_and_empty_array
assert_equal 0, Post.where(:id => []).count
end
def test_where_with_table_name_and_nested_empty_array
assert_deprecated do
assert_equal [], Post.where(:id => [[]]).to_a
end
end
def test_where_with_empty_hash_and_no_foreign_key
assert_equal 0, Edge.where(:sink => {}).count
end
def test_where_with_blank_conditions
[[], {}, nil, ""].each do |blank|
assert_equal 4, Edge.where(blank).order("sink_id").to_a.size
end
end
def test_where_with_integer_for_string_column
count = Post.where(:title => 0).count
assert_equal 0, count
end
def test_where_with_float_for_string_column
count = Post.where(:title => 0.0).count
assert_equal 0, count
end
def test_where_with_boolean_for_string_column
count = Post.where(:title => false).count
assert_equal 0, count
end
def test_where_with_decimal_for_string_column
count = Post.where(:title => BigDecimal.new(0)).count
assert_equal 0, count
end
def test_where_with_duration_for_string_column
count = Post.where(:title => 0.seconds).count
assert_equal 0, count
end
def test_where_with_integer_for_binary_column
count = Binary.where(:data => 0).count
assert_equal 0, count
end
def test_where_on_association_with_custom_primary_key
author = authors(:david)
essay = Essay.where(writer: author).first
assert_equal essays(:david_modest_proposal), essay
end
def test_where_on_association_with_custom_primary_key_with_relation
author = authors(:david)
essay = Essay.where(writer: Author.where(id: author.id)).first
assert_equal essays(:david_modest_proposal), essay
end
def test_where_on_association_with_relation_performs_subselect_not_two_queries
author = authors(:david)
assert_queries(1) do
Essay.where(writer: Author.where(id: author.id)).to_a
end
end
def test_where_on_association_with_custom_primary_key_with_array_of_base
author = authors(:david)
essay = Essay.where(writer: [author]).first
assert_equal essays(:david_modest_proposal), essay
end
def test_where_on_association_with_custom_primary_key_with_array_of_ids
essay = Essay.where(writer: ["David"]).first
assert_equal essays(:david_modest_proposal), essay
end
end
end
| 29.591398 | 125 | 0.694646 |
6252e4e85d8e866a1326c167a77f48bbd625e914 | 856 | Puppet::Type.newtype(:compellent_server) do
@doc = "Manage Compellent Server creation and deletion."
apply_to_device
ensurable
newparam(:name) do
desc "The server name. Valid characters are a-z, 1-9 & underscore."
isnamevar
validate do |value|
unless value =~ /^[\w\s\-]+$/
raise ArgumentError, "%s is not a valid initial server name." % value
end
end
end
newparam(:operatingsystem) do
desc "The Server operatingSystem."
end
newparam(:notes) do
desc "The description for the server."
end
newparam(:serverfolder) do
desc "The server folder name."
validate do |value|
unless value =~ /^[\w\s\-]*$/
raise ArgumentError, "%s is not a valid initial server folder name." % value
end
end
end
newparam(:wwn) do
desc "The WWN to map Server."
end
end
| 20.878049 | 84 | 0.641355 |
261c353eb50090efc049d24ef2ff335530e964a0 | 4,145 | require 'spec_helper'
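# Minimal in-memory stand-in for a Rails-style cache (read/write/delete/clear)
# so the Concurrency mixin can be exercised without a real cache backend.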
class TestConcurrencyCache
@@cache = {}
def self.read(key)
@@cache[key]
end
def self.write(key, value)
@@cache[key] = value
end
def self.delete(key)
@@cache.delete(key)
end
def self.clear
@@cache = {}
end
end
describe Concurrency do
before :each do
TestConcurrencyCache.clear
class ConcurrencyTest
include Concurrency
self.concurrency_cache = TestConcurrencyCache
def self.class_test_method
sleep(1)
end
def instance_test_method
sleep(1)
end
def self.both_instance_and_class_test_method; end
def both_instance_and_class_test_method; end
end
end
after :each do
Object.send(:remove_const, :ConcurrencyTest)
end
it "should allow specifying which methods should implement the concurrency check" do
expect { ConcurrencyTest.send(:concurrency_safe, :instance_test_method) }
.to_not raise_error
expect { ConcurrencyTest.send(:concurrency_safe, :class_test_method) }
.to_not raise_error
expect { ConcurrencyTest.send(:concurrency_safe, :both_instance_and_class_test_method) }
.to raise_error(Concurrency::AmbiguousMethodException)
expect { ConcurrencyTest.send(:concurrency_safe, :both_instance_and_class_test_method, type: :instance) }
.to_not raise_error
expect { ConcurrencyTest.send(:concurrency_safe, :both_instance_and_class_test_method, type: :class) }
.to_not raise_error
expect { ConcurrencyTest.send(:concurrency_safe, :unknown_method) }
.to raise_error(Concurrency::NoMethodException)
end
it "should allow identyfying the type of a method" do
expect(ConcurrencyTest.send(:method_types, :class_test_method)).to eq ['class']
expect(ConcurrencyTest.send(:method_types, :instance_test_method)).to eq ['instance']
expect(ConcurrencyTest.send(:method_types, :both_instance_and_class_test_method)).to eq ['class','instance']
expect(ConcurrencyTest.send(:method_types, :unknown_method)).to be_blank
expect(ConcurrencyTest.send(:method_type, :class_test_method)).to eq 'class'
expect(ConcurrencyTest.send(:method_type, :instance_test_method)).to eq 'instance'
expect { ConcurrencyTest.send(:method_type, :both_instance_and_class_test_method) }
.to raise_error(Concurrency::AmbiguousMethodException)
expect { ConcurrencyTest.send(:method_type, :unknown_method) }
.to raise_error(Concurrency::NoMethodException)
end
it "should allow checking the concurrency lock for specified class methods" do
ConcurrencyTest.send(:concurrency_safe, :class_test_method)
started = false
expect(ConcurrencyTest.concurrency_safe_method_locked?(:class_test_method)).to be false
thread = Thread.new { ConcurrencyTest.send(:class_test_method); started = true }
thread.join
expect(ConcurrencyTest.concurrency_safe_method_locked?(:class_test_method)).to be true until started
end
it "should allow checking the concurrency lock for specified instance methods" do
ConcurrencyTest.send(:concurrency_safe, :class_test_method)
instance = ConcurrencyTest.new
started = false
expect(instance.concurrency_safe_method_locked?(:instance_test_method)).to be false
thread = Thread.new { instance.send(:instance_test_method); started = true }
thread.join
expect(instance.concurrency_safe_method_locked?(:instance_test_method)).to be true until started
end
it "should implement the concurrency check for specified class methods" do
ConcurrencyTest.send(:concurrency_safe, :class_test_method)
threads = 2.times.map { Thread.new { ConcurrencyTest.send(:class_test_method) } }
expect { threads.each(&:join) }
.to raise_error(Concurrency::ConcurrentCallException)
end
it "should implement the concurrency check for specified instance methods" do
ConcurrencyTest.send(:concurrency_safe, :instance_test_method)
instance = ConcurrencyTest.new
threads = 2.times.map { Thread.new { instance.send(:instance_test_method) } }
expect { threads.each(&:join) }
.to raise_error(Concurrency::ConcurrentCallException)
end
end
| 37.342342 | 112 | 0.75006 |
878f8a69503adf93aaf88c37027a17039a4532d3 | 12,623 | require_relative '../spec_helper'
describe GCOVTOOLS::Project do
describe "#name" do
it "can be given in the constructor" do
project = GCOVTOOLS::Project.new "Test"
expect(project.name).to eq("Test")
end
it "is optional in the constructor" do
project = GCOVTOOLS::Project.new
expect(project.name).to eq("")
end
it "can be modified" do
project = GCOVTOOLS::Project.new "Test"
project.name = "Test2"
expect(project.name).to eq("Test2")
end
end
describe "#files" do
it "returns no files if it is empty" do
project = GCOVTOOLS::Project.new "Test"
expect(project.files.count).to eq(0)
end
it "returns the files it has been given" do
project = GCOVTOOLS::Project.new "Test"
project << GCOVTOOLS::File.new("foobar.cpp")
project << GCOVTOOLS::File.new("boofar.cpp")
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include(a_string_ending_with("foobar.cpp"))
expect(project.files.map(&:name)).to include(a_string_ending_with("boofar.cpp"))
end
end
describe ".load_dir" do
it "loads all files in the given directory" do
project = GCOVTOOLS::Project.load_dir(File.join(File.dirname(__FILE__),"data"))
expect(project.files.count).to eq(3)
expect(project.files.map{|file|file.name}).to include(a_string_ending_with("test2.cpp"))
expect(project.files.map{|file|file.name}).not_to include(a_string_ending_with("test3.cpp"))
end
it "recursively loads all files in the given directory structure" do
project = GCOVTOOLS::Project.load_dir(File.join(File.dirname(__FILE__),"data"), :recursive => true)
expect(project.files.count).to eq(4)
expect(project.files.map{|file|file.name}).to include(a_string_ending_with("test2.cpp"))
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test3.cpp") )
end
end
describe "#add_file" do
it "should add the given file" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"data","test2.cpp.gcov"))
expect(project.files.count).to eq(1)
expect(project.files[0].name).to eq( "test2.cpp" )
end
it "should split concatenated gcov files into multiple objects" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"))
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include( "test.cpp" )
expect(project.files.map(&:name)).to include( "test1.cpp" )
end
it "should filter using given array of expressions" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"data","test2.cpp.gcov"), :exclude => [/test2\.cpp/,/test3\.cpp/])
expect(project.files.count).to eq(0)
end
it "should filter out concatenated files that match the filter" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"), :exclude => [/test\.cpp$/])
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
end
it "should not filter out concatenated files that don't match the filter" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"), :exclude => [/test_cat\.cpp\.gcov$/])
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
end
it "should filter inclusively if told to" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"), :include => [/test\.cpp$/] )
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
end
it "should apply all inclusive filters" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"), :include => [/test\.cpp$/,/test1\.cpp$/] )
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
end
it "should apply exclusive filters after inclusive ones" do
project = GCOVTOOLS::Project.new
project.add_file(File.join(File.dirname(__FILE__),"concat","test_cat.cpp.gcov"), :include => [/test.*\.cpp$/], :exclude => [/test.cpp/] )
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test.cpp") )
end
it "should merge file stats for identical filenames" do
project = GCOVTOOLS::Project.new
project.add_files do
file = GCOVTOOLS::File.new "myfile.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(0,:none,"Source:myfile.cpp")
file << GCOVTOOLS::Line.new(1,4,"line 1")
file << GCOVTOOLS::Line.new(2,23,"line 2")
file << GCOVTOOLS::Line.new(3,:none,"line 3")
file << GCOVTOOLS::Line.new(4,:missed,"line 4")
file << GCOVTOOLS::Line.new(5,:none,"line 5")
end
project << file
file = GCOVTOOLS::File.new "myfile.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(0,:none,"Source:myfile.cpp")
file << GCOVTOOLS::Line.new(1,:missed,"line 1")
file << GCOVTOOLS::Line.new(2,40,"line 2")
file << GCOVTOOLS::Line.new(3,:none,"line 3")
file << GCOVTOOLS::Line.new(4,:none,"line 4")
end
project << file
end # add_files
expect(project.files.count).to eq(1)
end # it
end # describe
describe "#add_dir" do
it "adds all files in the given directory" do
project = GCOVTOOLS::Project.load_dir(File.join(File.dirname(__FILE__),"data","data2"))
project.add_dir(File.join(File.dirname(__FILE__),"data"))
expect(project.files.count).to eq(4)
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test2.cpp") )
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test3.cpp") )
end
it "recursively adds all files in the given directory" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"data"), :recursive => true)
expect(project.files.count).to eq(4)
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test2.cpp") )
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test3.cpp") )
end
it "filters using given singular expression" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"data"), :recursive => true, :exclude => [/test2\.cpp/])
expect(project.files.count).to eq(3)
expect(project.files.map{|file|file.name}).not_to include( a_string_ending_with("test2.cpp") )
expect(project.files.map{|file|file.name}).to include( a_string_ending_with("test3.cpp") )
end
it "filters using given array of expressions" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"data"), :recursive => true, :exclude => [/test2\.cpp/,/test3\.cpp/])
expect(project.files.count).to eq(2)
expect(project.files.map{|file|file.name}).not_to include( a_string_ending_with("test2.cpp") )
expect(project.files.map{|file|file.name}).not_to include( a_string_ending_with("test3.cpp") )
end
it "should filter out concatenated files that match the filter" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"concat"), :recursive => true, :exclude => [/test\.cpp$/])
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
end
it "should not filter out concatenated files that don't match the filter" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"concat"), :recursive => true, :exclude => [/test_cat\.cpp/])
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
end
it "should filter inclusively if told to" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"concat"), :recursive => true, :include => [/test\.cpp/])
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
end
it "should apply all inclusive filters" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"concat"), :recursive => true, :include => [/test\.cpp$/,/test1\.cpp$/] )
expect(project.files.count).to eq(2)
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).to include( a_string_ending_with("test.cpp") )
end
it "should apply exclusive filters after inclusive ones" do
project = GCOVTOOLS::Project.new
project.add_dir(File.join(File.dirname(__FILE__),"concat"), :recursive => true, :include => [/test.*\.cpp$/], :exclude => [/test.cpp/] )
expect(project.files.count).to eq(1)
expect(project.files.map(&:name)).to include( a_string_ending_with("test1.cpp") )
expect(project.files.map(&:name)).not_to include( a_string_ending_with("test.cpp") )
end
end
describe "#stats" do
it "should be computed based on file stats" do
project = GCOVTOOLS::Project.new
project.add_files do
file = GCOVTOOLS::File.new "myfile.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(1,4,"line 1")
file << GCOVTOOLS::Line.new(2,23,"line 2")
file << GCOVTOOLS::Line.new(3,:none,"line 3")
file << GCOVTOOLS::Line.new(4,:missed,"line 4")
file << GCOVTOOLS::Line.new(5,:none,"line 5")
end
project << file
file = GCOVTOOLS::File.new "myfile2.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(1,:missed,"line 1")
file << GCOVTOOLS::Line.new(2,40,"line 2")
file << GCOVTOOLS::Line.new(3,:none,"line 3")
file << GCOVTOOLS::Line.new(4,:none,"line 4")
end
project << file
end
expect(project.stats[:lines]).to eq(5)
expect(project.stats[:total_lines]).to eq(9)
expect(project.stats[:total_exec]).to eq(67)
expect(project.stats[:empty_lines]).to eq(4)
expect(project.stats[:exec_lines]).to eq(3)
expect(project.stats[:missed_lines]).to eq(2)
expect(project.stats[:coverage]).to eq(3.0/5)
expect(project.stats[:hits_per_line]).to eq(67.0/5)
end
it "should be computed based on file stats" do
project = GCOVTOOLS::Project.new
project.add_files do
file = GCOVTOOLS::File.new "myfile.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(1,:none,"line 1")
file << GCOVTOOLS::Line.new(2,:none,"line 2")
file << GCOVTOOLS::Line.new(3,:none,"line 3")
file << GCOVTOOLS::Line.new(4,:none,"line 4")
end
project << file
file = GCOVTOOLS::File.new "myfile2.cpp"
file.add_lines do
file << GCOVTOOLS::Line.new(1,:none,"line 1")
file << GCOVTOOLS::Line.new(2,:none,"line 2")
end
project << file
end
expect(project.stats[:lines]).to eq(0)
expect(project.stats[:coverage]).to eq(1)
expect(project.stats[:hits_per_line]).to eq(0)
end # it
end # describe #stats
end # describe Project
| 42.076667 | 143 | 0.650083 |
e257bcf7553b11147c6ccd26d485a510033254d5 | 11,705 | # frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "helper"
require "gapic/grpc/service_stub"
require "google/cloud/dataproc/v1/clusters_pb"
require "google/cloud/dataproc/v1/clusters_services_pb"
require "google/cloud/dataproc/v1/cluster_controller"
class ::Google::Cloud::Dataproc::V1::ClusterController::OperationsTest < Minitest::Test
class ClientStub
attr_accessor :call_rpc_count, :requests
def initialize response, operation, &block
@response = response
@operation = operation
@block = block
@call_rpc_count = 0
@requests = []
end
def call_rpc *args, **kwargs
@call_rpc_count += 1
@requests << @block&.call(*args, **kwargs)
yield @response, @operation if block_given?
@response
end
end
def test_list_operations
# Create GRPC objects.
grpc_response = ::Google::Longrunning::ListOperationsResponse.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
filter = "hello world"
page_size = 42
page_token = "hello world"
list_operations_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :list_operations, name
assert_kind_of ::Google::Longrunning::ListOperationsRequest, request
assert_equal "hello world", request["name"]
assert_equal "hello world", request["filter"]
assert_equal 42, request["page_size"]
assert_equal "hello world", request["page_token"]
refute_nil options
end
Gapic::ServiceStub.stub :new, list_operations_client_stub do
# Create client
client = ::Google::Cloud::Dataproc::V1::ClusterController::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use named arguments
client.list_operations name: name, filter: filter, page_size: page_size, page_token: page_token do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.list_operations ::Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }, grpc_options) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.list_operations ::Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token), grpc_options do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, list_operations_client_stub.call_rpc_count
end
end
def test_get_operation
# Create GRPC objects.
grpc_response = ::Google::Longrunning::Operation.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
get_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :get_operation, name
assert_kind_of ::Google::Longrunning::GetOperationRequest, request
assert_equal "hello world", request["name"]
refute_nil options
end
Gapic::ServiceStub.stub :new, get_operation_client_stub do
# Create client
client = ::Google::Cloud::Dataproc::V1::ClusterController::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.get_operation({ name: name }) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use named arguments
client.get_operation name: name do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use protobuf object
client.get_operation ::Google::Longrunning::GetOperationRequest.new(name: name) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use hash object with options
client.get_operation({ name: name }, grpc_options) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.get_operation ::Google::Longrunning::GetOperationRequest.new(name: name), grpc_options do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, get_operation_client_stub.call_rpc_count
end
end
def test_delete_operation
# Create GRPC objects.
grpc_response = ::Google::Protobuf::Empty.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
delete_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :delete_operation, name
assert_kind_of ::Google::Longrunning::DeleteOperationRequest, request
assert_equal "hello world", request["name"]
refute_nil options
end
Gapic::ServiceStub.stub :new, delete_operation_client_stub do
# Create client
client = ::Google::Cloud::Dataproc::V1::ClusterController::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.delete_operation({ name: name }) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use named arguments
client.delete_operation name: name do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.delete_operation ::Google::Longrunning::DeleteOperationRequest.new(name: name) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.delete_operation({ name: name }, grpc_options) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.delete_operation ::Google::Longrunning::DeleteOperationRequest.new(name: name), grpc_options do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, delete_operation_client_stub.call_rpc_count
end
end
def test_cancel_operation
# Create GRPC objects.
grpc_response = ::Google::Protobuf::Empty.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
cancel_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :cancel_operation, name
assert_kind_of ::Google::Longrunning::CancelOperationRequest, request
assert_equal "hello world", request["name"]
refute_nil options
end
Gapic::ServiceStub.stub :new, cancel_operation_client_stub do
# Create client
client = ::Google::Cloud::Dataproc::V1::ClusterController::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.cancel_operation({ name: name }) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use named arguments
client.cancel_operation name: name do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.cancel_operation ::Google::Longrunning::CancelOperationRequest.new(name: name) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.cancel_operation({ name: name }, grpc_options) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.cancel_operation ::Google::Longrunning::CancelOperationRequest.new(name: name), grpc_options do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, cancel_operation_client_stub.call_rpc_count
end
end
def test_configure
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
client = block_config = config = nil
Gapic::ServiceStub.stub :new, nil do
client = ::Google::Cloud::Dataproc::V1::ClusterController::Operations.new do |config|
config.credentials = grpc_channel
end
end
config = client.configure do |c|
block_config = c
end
assert_same block_config, config
assert_kind_of ::Google::Cloud::Dataproc::V1::ClusterController::Operations::Configuration, config
end
end
| 37.041139 | 190 | 0.716531 |
87004873f6b082f09b2d4d1aae1b3214b6f196c5 | 878 | require_relative '../lib/github_repository'
require 'versionomy' # Versionomy.parse is used in #extract below; assumed not loaded elsewhere
# RUBY
# Source: https://github.com/ruby/ruby
class Ruby
attr_reader :name, :description
def initialize
@name = 'Ruby'
@description = 'The Ruby MRI runtime.'
extract
end
def latest_stable
@versions.sort.reverse.first
end
def latest_unstable
# Pre-release tags (e.g. _previewX or _rcX) are not parsed yet.
'Not supported'
end
def versions
@versions.sort.reverse
end
private
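# Fetches the repository tags and keeps only release tags shaped like
# vX_Y_Z or vX_Y_Z_NNN, converting them to dotted versions (an optional
# trailing _NNN becomes a pNNN patch suffix) before parsing with Versionomy.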
def extract
tags = GithubRepository.new('ruby', 'ruby').tags
arr = []
tags.each do |name|
match = /^v([0-9]+_[0-9]+_[0-9]+)(_[0-9]{3})?$/.match(name)
next if match.nil?
v, p = match.captures
v = v.tr('_', '.') unless v.nil?
v = v << p.tr('_', 'p') unless p.nil?
arr << v
end
arr = arr.compact.uniq
@versions = arr.collect! { |e| Versionomy.parse(e) }
end
end
| 19.511111 | 65 | 0.603645 |
1d3c09a48b7ea3ae2d77febbc351a74829b298a8 | 3,077 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Issue::Metrics do
let(:project) { create(:project) }
subject { create(:issue, project: project) }
describe '.for_issues' do
subject(:scope) { described_class.for_issues([issue1, issue2]) }
let(:issue1) { create(:issue) }
let(:issue2) { create(:issue) }
it 'returns metrics associated with given issues' do
create(:issue)
expect(scope).to match_array([issue1.metrics, issue2.metrics])
end
end
describe '.with_first_mention_not_earlier_than' do
subject(:scope) { described_class.with_first_mention_not_earlier_than(timestamp) }
let(:timestamp) { DateTime.current }
it 'returns metrics without mentioning in commit or with mentioning after given timestamp' do
issue1 = create(:issue)
issue2 = create(:issue).tap { |i| i.metrics.update!(first_mentioned_in_commit_at: timestamp + 1.day) }
create(:issue).tap { |i| i.metrics.update!(first_mentioned_in_commit_at: timestamp - 1.day) }
expect(scope).to match_array([issue1.metrics, issue2.metrics])
end
end
describe "when recording the default set of issue metrics on issue save" do
context "milestones" do
it "records the first time an issue is associated with a milestone" do
time = Time.current
travel_to(time) { subject.update(milestone: create(:milestone, project: project)) }
metrics = subject.metrics
expect(metrics).to be_present
expect(metrics.first_associated_with_milestone_at).to be_like_time(time)
end
it "does not record the second time an issue is associated with a milestone" do
time = Time.current
travel_to(time) { subject.update(milestone: create(:milestone, project: project)) }
travel_to(time + 2.hours) { subject.update(milestone: nil) }
travel_to(time + 6.hours) { subject.update(milestone: create(:milestone, project: project)) }
metrics = subject.metrics
expect(metrics).to be_present
expect(metrics.first_associated_with_milestone_at).to be_like_time(time)
end
end
context "list labels" do
it "records the first time an issue is associated with a list label" do
list_label = create(:list).label
time = Time.current
travel_to(time) { subject.update(label_ids: [list_label.id]) }
metrics = subject.metrics
expect(metrics).to be_present
expect(metrics.first_added_to_board_at).to be_like_time(time)
end
it "does not record the second time an issue is associated with a list label" do
time = Time.current
first_list_label = create(:list).label
travel_to(time) { subject.update(label_ids: [first_list_label.id]) }
second_list_label = create(:list).label
travel_to(time + 5.hours) { subject.update(label_ids: [second_list_label.id]) }
metrics = subject.metrics
expect(metrics).to be_present
expect(metrics.first_added_to_board_at).to be_like_time(time)
end
end
end
end
| 36.2 | 108 | 0.688008 |
e8e3d506a5a0b9fe61704b4ef04318c440ff50e7 | 1,838 | # frozen_string_literal: true
# Advent of Code 2016
#
# Robert Haines
#
# Public Domain
require 'aoc2016'
module AOC2016
class InternetProtocol7 < Day
def setup
@input = read_input_file.split("\n")
end
def part1
num = @input.reduce(0) do |acc, line|
acc + (supports_tls?(line) ? 1 : 0)
end
puts "Part 1: #{num}"
end
def part2
num = @input.reduce(0) do |acc, line|
acc + (supports_ssl?(line) ? 1 : 0)
end
puts "Part 2: #{num}"
end
def supports_tls?(line)
seq = ip7_sequence(line)
seq[:hypernet].each { return false if detect_abba(_1) }
seq[:supernet].each { return true if detect_abba(_1) }
false
end
def supports_ssl?(line)
seq = ip7_sequence(line)
hyp = seq[:hypernet].reduce([]){ |acc, s| acc + detect_aba(s) }
return false if hyp.empty?
hyp.each do |a, b|
seq[:supernet].each { return true if detect_aba(_1, b, a) }
end
false
end
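# True when str contains an ABBA: a four-character palindrome such as
# "xyyx" built from two different characters.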
def detect_abba(str)
(0..(str.length - 4)).each do |i|
if str[i] == str[i + 3] &&
str[i + 1] == str[i + 2] &&
str[i] != str[i + 1]
return true
end
end
false
end
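# With a and b given, checks whether str contains that exact aba sequence;
# otherwise returns every [a, b] pair forming an ABA ("xyx") in str.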
def detect_aba(str, a = nil, b = nil)
return /#{a}#{b}#{a}/.match(str) != nil unless a.nil? || b.nil?
result = []
(0..(str.length - 3)).each do |i|
if str[i] == str[i + 2] && str[i] != str[i + 1]
result << [str[i], str[i + 1]]
end
end
result
end
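# Splits an IPv7 address on square brackets: segments outside brackets are
# supernet sequences, segments inside brackets are hypernet sequences.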
def ip7_sequence(line)
seq = { :supernet => [], :hypernet => [] }
line.split(/\[|\]/).each_with_index do |s, i|
if i % 2 == 0
seq[:supernet] << s
else
seq[:hypernet] << s
end
end
seq
end
end
end
| 19.978261 | 69 | 0.501088 |
ac86d8ab23b6cdaef4b1abdf71040bae34eff3e3 | 141 | # Be sure to restart your server when you modify this file.
Rails.application.config.session_store :cookie_store, key: '_mini-blog_session'
| 35.25 | 79 | 0.801418 |
1d2ee9c34fcf9b52c2a3a6d66bbcbd507c8f3454 | 111 | # frozen_string_literal: true
module Quiz
class TableQuestion < Question
belongs_to :question
end
end
| 13.875 | 32 | 0.765766 |
e2d425b609b1a69316e8fb4b82f612da2e09c206 | 5,660 | require "logstash/inputs/threadable"
require "logstash/namespace"
require "logstash/plugin_mixins/aws_config"
# Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
#
# SQS is a simple, scalable queue system that is part of the
# Amazon Web Services suite of tools.
#
# Although SQS is similar to other queuing systems like AMQP, it
# uses a custom API and requires that you have an AWS account.
# See http://aws.amazon.com/sqs/ for more details on how SQS works,
# what the pricing schedule looks like and how to setup a queue.
#
# To use this plugin, you *must*:
#
# * Have an AWS account
# * Set up an SQS queue
# * Create an identity that has access to consume messages from the queue.
#
# The "consumer" identity must have the following permissions on the queue:
#
# * sqs:ChangeMessageVisibility
# * sqs:ChangeMessageVisibilityBatch
# * sqs:DeleteMessage
# * sqs:DeleteMessageBatch
# * sqs:GetQueueAttributes
# * sqs:GetQueueUrl
# * sqs:ListQueues
# * sqs:ReceiveMessage
#
# Typically, you should set up an IAM policy, create a user and apply the IAM policy to the user.
# A sample policy is as follows:
#
# {
# "Statement": [
# {
# "Action": [
#             "sqs:ChangeMessageVisibility",
#             "sqs:ChangeMessageVisibilityBatch",
#             "sqs:DeleteMessage",
#             "sqs:DeleteMessageBatch",
#             "sqs:GetQueueAttributes",
#             "sqs:GetQueueUrl",
#             "sqs:ListQueues",
#             "sqs:ReceiveMessage"
# ],
# "Effect": "Allow",
# "Resource": [
# "arn:aws:sqs:us-east-1:123456789012:Logstash"
# ]
# }
# ]
# }
#
# See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
#
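# A minimal pipeline configuration might look like the following (the queue
# name and region here are illustrative placeholders, not required values):
#
#     input {
#       sqs {
#         queue => "Logstash"
#         region => "us-east-1"
#       }
#     }
#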
class LogStash::Inputs::SQS < LogStash::Inputs::Threadable
include LogStash::PluginMixins::AwsConfig
config_name "sqs"
milestone 1
default :codec, "json"
# Name of the SQS Queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
config :queue, :validate => :string, :required => true
# Name of the event field in which to store the SQS message ID
config :id_field, :validate => :string
# Name of the event field in which to store the SQS message MD5 checksum
config :md5_field, :validate => :string
# Name of the event field in which to store the SQS message Sent Timestamp
config :sent_timestamp_field, :validate => :string
public
def aws_service_endpoint(region)
return {
:sqs_endpoint => "sqs.#{region}.amazonaws.com"
}
end
public
def register
@logger.info("Registering SQS input", :queue => @queue)
require "aws-sdk"
@sqs = AWS::SQS.new(aws_options_hash)
begin
@logger.debug("Connecting to AWS SQS queue", :queue => @queue)
@sqs_queue = @sqs.queues.named(@queue)
@logger.info("Connected to AWS SQS queue successfully.", :queue => @queue)
rescue Exception => e
@logger.error("Unable to access SQS queue.", :error => e.to_s, :queue => @queue)
raise e
end # begin/rescue
end # def register
public
def run(output_queue)
@logger.debug("Polling SQS queue", :queue => @queue)
receive_opts = {
:limit => 10,
:visibility_timeout => 30,
:attributes => [:sent_at]
}
continue_polling = true
while running? && continue_polling
continue_polling = run_with_backoff(60, 1) do
@sqs_queue.receive_message(receive_opts) do |message|
if message
@codec.decode(message.body) do |event|
decorate(event)
if @id_field
event[@id_field] = message.id
end
if @md5_field
event[@md5_field] = message.md5
end
if @sent_timestamp_field
event[@sent_timestamp_field] = message.sent_timestamp.utc
end
@logger.debug? && @logger.debug("Processed SQS message", :message_id => message.id, :message_md5 => message.md5, :sent_timestamp => message.sent_timestamp, :queue => @queue)
output_queue << event
message.delete
end # codec.decode
end # valid SQS message
end # receive_message
end # run_with_backoff
end # polling loop
end # def run
def teardown
@sqs_queue = nil
finished
end # def teardown
private
# Runs an AWS request inside a Ruby block with an exponential backoff in case
# we exceed the allowed AWS RequestLimit.
#
# @param [Integer] max_time maximum amount of time to sleep before giving up.
# @param [Integer] sleep_time the initial amount of time to sleep before retrying.
# @param [Block] block Ruby code block to execute.
def run_with_backoff(max_time, sleep_time, &block)
if sleep_time > max_time
@logger.error("AWS::EC2::Errors::RequestLimitExceeded ... failed.", :queue => @queue)
return false
end # retry limit exceeded
begin
block.call
rescue AWS::EC2::Errors::RequestLimitExceeded
@logger.info("AWS::EC2::Errors::RequestLimitExceeded ... retrying SQS request", :queue => @queue, :sleep_time => sleep_time)
sleep sleep_time
run_with_backoff(max_time, sleep_time * 2, &block)
rescue AWS::EC2::Errors::InstanceLimitExceeded
@logger.warn("AWS::EC2::Errors::InstanceLimitExceeded ... aborting SQS message retreival.")
return false
rescue Exception => bang
@logger.error("Error reading SQS queue.", :error => bang, :queue => @queue)
return false
end # begin/rescue
return true
end # def run_with_backoff
end # class LogStash::Inputs::SQS
| 33.099415 | 187 | 0.646466 |
7aad781ddcde548cbf83e68bd82a88107c965729 | 315 | require 'file_upload_status'
require 'encoding_detection'
class UploadCsvFile
include EncodingDetection
def initialize(csv_upload)
@csv_upload = csv_upload
@errors = {}
end
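# Resolves the upload to a local file path and processes it. initialize_data
# and @data are assumed to be provided by the EncodingDetection mixin.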
def call
path = Paperclip.io_adapters.for(@csv_upload.data).path
initialize_data(path)
@data.process
end
end
| 17.5 | 59 | 0.736508 |
e22b9c4109db6cc1a367418312a64de24e3dd6a0 | 3,113 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::ApiManagement::Mgmt::V2019_12_01
module Models
#
# List of Users Identity list representation.
#
class UserIdentityCollection
include MsRestAzure
include MsRest::JSONable
# @return [Array<UserIdentityContract>] User Identity values.
attr_accessor :value
# @return [Integer] Total record count number across all pages.
attr_accessor :count
# @return [String] Next page link if any.
attr_accessor :next_link
# @return [Proc] with next page method call.
attr_accessor :next_method
#
# Gets the rest of the items for the request, enabling auto-pagination.
#
# @return [Array<UserIdentityContract>] operation results.
#
def get_all_items
items = @value
page = self
while page.next_link != nil && !page.next_link.strip.empty? do
page = page.get_next_page
items.concat(page.value)
end
items
end
#
# Gets the next page of results.
#
# @return [UserIdentityCollection] with next page content.
#
def get_next_page
response = @next_method.call(@next_link).value! unless @next_method.nil?
unless response.nil?
@next_link = response.body.next_link
@value = response.body.value
self
end
end
#
# Mapper for UserIdentityCollection class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'UserIdentityCollection',
type: {
name: 'Composite',
class_name: 'UserIdentityCollection',
model_properties: {
value: {
client_side_validation: true,
required: false,
serialized_name: 'value',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'UserIdentityContractElementType',
type: {
name: 'Composite',
class_name: 'UserIdentityContract'
}
}
}
},
count: {
client_side_validation: true,
required: false,
serialized_name: 'count',
type: {
name: 'Number'
}
},
next_link: {
client_side_validation: true,
required: false,
serialized_name: 'nextLink',
type: {
name: 'String'
}
}
}
}
}
end
end
end
end
| 28.3 | 80 | 0.519435 |
796cf061e4f39a5ab03fb753a91d240b57d89dc2 | 807 | # frozen_string_literal: true
# Copyright 2016-2021 Copado NCS LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dummy namespace for Test Kitchen to allow loading of Kitchen::Terraform::Version from gem specification.
module Kitchen
# The namespace for Kitchen-Terraform.
module Terraform
end
end
| 35.086957 | 106 | 0.770756 |
08f0ca65cf2fc7079f63c9c59559a48abfc3f23f | 337 | class RemoveUniqueConstraintOnPhoneNumberFromReportingParties < ActiveRecord::Migration
def up
remove_index :reporting_parties, :phone_number
add_index :reporting_parties, :phone_number
end
def down
remove_index :reporting_parties, :phone_number
add_index :reporting_parties, :phone_number, unique: true
end
end
| 28.083333 | 87 | 0.801187 |
334e5574559e361a717b5d6ad6951ce0efd554d2 | 1,084 | class FixNamespaceDuplication < ActiveRecord::Migration
def up
# fixes path duplication
select_all('SELECT MAX(id) max, COUNT(id) cnt, path FROM namespaces GROUP BY path HAVING COUNT(id) > 1').each do |nms|
bad_nms_ids = select_all("SELECT id FROM namespaces WHERE path = '#{nms['path']}' AND id <> #{nms['max']}").map{|x| x["id"]}
execute("UPDATE projects SET namespace_id = #{nms["max"]} WHERE namespace_id IN(#{bad_nms_ids.join(', ')})")
execute("DELETE FROM namespaces WHERE id IN(#{bad_nms_ids.join(', ')})")
end
# fixes name duplication
select_all('SELECT MAX(id) max, COUNT(id) cnt, name FROM namespaces GROUP BY name HAVING COUNT(id) > 1').each do |nms|
bad_nms_ids = select_all("SELECT id FROM namespaces WHERE name = '#{nms['name']}' AND id <> #{nms['max']}").map{|x| x["id"]}
execute("UPDATE projects SET namespace_id = #{nms["max"]} WHERE namespace_id IN(#{bad_nms_ids.join(', ')})")
execute("DELETE FROM namespaces WHERE id IN(#{bad_nms_ids.join(', ')})")
end
end
def down
# not implemented
end
end
| 49.272727 | 130 | 0.654982 |
1d342d81e2a5b31252327b956b7364d2f19f8f9e | 346 | module Travis::API::V3
class Renderer::Log < ModelRenderer
def self.render(model, representation = :standard, **options)
return super unless options[:accept] == 'text/plain'.freeze
model.content
end
representation(:minimal, :id)
representation(:standard, *representations[:minimal], :content, :log_parts)
end
end
| 28.833333 | 79 | 0.699422 |
0191b935db4b25e0e7dc56cd816507f2397c5805 | 1,034 | require "appium_lib"
require "pry"
def caps
{ caps: { deviceName: "VD",
platformName: "Android",
app: (File.join(File.dirname(__FILE__), "PreciseUnitConversion.apk")),
appPackage: "com.ba.universalconverter",
newCommandTimeout: "3600",
appium_lib: { wait: 20,
debug: false
}
}
}
end
Appium::Driver.new(caps)
Appium.promote_appium_methods Object
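# Scrolls back to the top of the list, then pages through it swipe by swipe
# until an element with the exact text is visible or the screen stops
# changing between swipes; fails if the element never appears.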
def find_element_in_list(text)
4.times { Appium::TouchAction.new.swipe(start_x: 0.5, start_y: 0.2, end_x: 0.5, end_y: 0.8, duration: 600).perform }
current_screen = get_source
previous_screeen = ""
until (exists{text_exact("#{text}")})||(current_screen == previous_screeen) do
Appium::TouchAction.new.swipe(start_x: 0.5, start_y: 0.7, end_x: 0.5, end_y: 0.2, duration: 800).perform
previous_screeen = current_screen
current_screen = get_source
end
unless exists{text_exact("#{text}")}
fail ("Element with name #{text} not found")
end
end
| 28.722222 | 115 | 0.637331 |
d5bfa2b987b28816a1963ec2a7b40d69052c042d | 10,840 | require 'test/unit'
require 'fileutils'
# Mock out what we need from AR::Base
module ActiveRecord
class Base
class << self
attr_accessor :pluralize_table_names, :timestamped_migrations
end
self.pluralize_table_names = true
self.timestamped_migrations = true
end
module ConnectionAdapters
class Column
attr_reader :name, :default, :type, :limit, :null, :sql_type, :precision, :scale
def initialize(name, default, sql_type = nil)
@name = name
@default = default
@type = @sql_type = sql_type
end
def human_name
@name.humanize
end
end
end
end
# Mock up necessities from ActionView
module ActionView
module Helpers
module ActionRecordHelper; end
class InstanceTag; end
end
end
# Set RAILS_ROOT appropriately for fixture generation
tmp_dir = "#{File.dirname(__FILE__)}/../fixtures/tmp"
if defined? RAILS_ROOT
RAILS_ROOT.replace tmp_dir
else
RAILS_ROOT = tmp_dir
end
FileUtils.mkdir_p RAILS_ROOT
$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../../lib"
require 'initializer'
# Mocks out the configuration
module Rails
def self.configuration
Rails::Configuration.new
end
end
require 'rails_generator'
class GeneratorTestCase < Test::Unit::TestCase
include FileUtils
def setup
ActiveRecord::Base.pluralize_table_names = true
mkdir_p "#{RAILS_ROOT}/app/views/layouts"
mkdir_p "#{RAILS_ROOT}/config"
mkdir_p "#{RAILS_ROOT}/db"
mkdir_p "#{RAILS_ROOT}/test/fixtures"
mkdir_p "#{RAILS_ROOT}/public/stylesheets"
File.open("#{RAILS_ROOT}/config/routes.rb", 'w') do |f|
f << "ActionController::Routing::Routes.draw do |map|\n\nend"
end
end
def teardown
rm_rf "#{RAILS_ROOT}/app"
rm_rf "#{RAILS_ROOT}/test"
rm_rf "#{RAILS_ROOT}/config"
rm_rf "#{RAILS_ROOT}/db"
rm_rf "#{RAILS_ROOT}/public"
end
def test_truth
# don't complain, test/unit
end
# Instantiates the Generator.
def build_generator(name, params)
Rails::Generator::Base.instance(name, params)
end
# Runs the +create+ command (like the command line does).
def run_generator(name, params)
silence_generator do
build_generator(name, params).command(:create).invoke!
end
end
# Silences the logger temporarily and returns the output as a String.
def silence_generator
logger_original = Rails::Generator::Base.logger
myout = StringIO.new
Rails::Generator::Base.logger = Rails::Generator::SimpleLogger.new(myout)
yield if block_given?
Rails::Generator::Base.logger = logger_original
myout.string
end
# Asserts that the given controller was generated.
# It takes a name or symbol without the <tt>_controller</tt> part and an optional super class.
# The contents of the class source file is passed to a block.
def assert_generated_controller_for(name, parent = "ApplicationController")
assert_generated_class "app/controllers/#{name.to_s.underscore}_controller", parent do |body|
yield body if block_given?
end
end
# Asserts that the given model was generated.
# It takes a name or symbol and an optional super class.
# The contents of the class source file is passed to a block.
def assert_generated_model_for(name, parent = "ActiveRecord::Base")
assert_generated_class "app/models/#{name.to_s.underscore}", parent do |body|
yield body if block_given?
end
end
# Asserts that the given helper was generated.
# It takes a name or symbol without the <tt>_helper</tt> part.
# The contents of the module source file is passed to a block.
def assert_generated_helper_for(name)
assert_generated_module "app/helpers/#{name.to_s.underscore}_helper" do |body|
yield body if block_given?
end
end
# Asserts that the given functional test was generated.
# It takes a name or symbol without the <tt>_controller_test</tt> part and an optional super class.
# The contents of the class source file is passed to a block.
def assert_generated_functional_test_for(name, parent = "ActionController::TestCase")
assert_generated_class "test/functional/#{name.to_s.underscore}_controller_test",parent do |body|
yield body if block_given?
end
end
# Asserts that the given helper test was generated.
# It takes a name or symbol without the <tt>_helper_test</tt> part and an optional super class.
# The contents of the class source file is passed to a block.
def assert_generated_helper_test_for(name, parent = "ActionView::TestCase")
path = "test/unit/helpers/#{name.to_s.underscore}_helper_test"
# Have to pass the path without the "test/" part so that class_name_from_path will return a correct result
class_name = class_name_from_path(path.gsub(/^test\//, ''))
assert_generated_class path, parent, class_name do |body|
yield body if block_given?
end
end
# Asserts that the given unit test was generated.
# It takes a name or symbol without the <tt>_test</tt> part and an optional super class.
  # The contents of the class source file are passed to a block.
def assert_generated_unit_test_for(name, parent = "ActiveSupport::TestCase")
assert_generated_class "test/unit/#{name.to_s.underscore}_test", parent do |body|
yield body if block_given?
end
end
# Asserts that the given file was generated.
  # The contents of the file are passed to a block.
def assert_generated_file(path)
assert_file_exists(path)
File.open("#{RAILS_ROOT}/#{path}") do |f|
yield f.read if block_given?
end
end
  # Asserts that the given file exists.
def assert_file_exists(path)
assert File.exist?("#{RAILS_ROOT}/#{path}"),
"The file '#{RAILS_ROOT}/#{path}' should exist"
end
# Asserts that the given class source file was generated.
# It takes a path without the <tt>.rb</tt> part and an optional super class.
  # The contents of the class source file are passed to a block.
def assert_generated_class(path, parent = nil, class_name = class_name_from_path(path))
assert_generated_file("#{path}.rb") do |body|
      assert_match(/class #{class_name}#{parent.nil? ? '' : " < #{parent}"}/, body, "the file '#{path}.rb' should be a class")
yield body if block_given?
end
end
def class_name_from_path(path)
# FIXME: Sucky way to detect namespaced classes
if path.split('/').size > 3
path =~ /\/?(\d+_)?(\w+)\/(\w+)$/
"#{$2.camelize}::#{$3.camelize}"
else
path =~ /\/?(\d+_)?(\w+)$/
$2.camelize
end
end
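  # Sketch of the mapping this implements (the paths are illustrative):
  #
  #   class_name_from_path('app/models/product')
  #     # => "Product"
  #   class_name_from_path('app/controllers/admin/users_controller')
  #     # => "Admin::UsersController" (the namespaced branch)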
# Asserts that the given module source file was generated.
# It takes a path without the <tt>.rb</tt> part.
  # The contents of the module source file are passed to a block.
def assert_generated_module(path)
# FIXME: Sucky way to detect namespaced modules
if path.split('/').size > 3
path =~ /\/?(\w+)\/(\w+)$/
module_name = "#{$1.camelize}::#{$2.camelize}"
else
path =~ /\/?(\w+)$/
module_name = $1.camelize
end
assert_generated_file("#{path}.rb") do |body|
      assert_match(/module #{module_name}/, body, "the file '#{path}.rb' should be a module")
yield body if block_given?
end
end
# Asserts that the given CSS stylesheet file was generated.
# It takes a path without the <tt>.css</tt> part.
  # The contents of the stylesheet source file are passed to a block.
def assert_generated_stylesheet(path)
assert_generated_file("public/stylesheets/#{path}.css") do |body|
yield body if block_given?
end
end
# Asserts that the given YAML file was generated.
# It takes a path without the <tt>.yml</tt> part.
# The parsed YAML tree is passed to a block.
def assert_generated_yaml(path)
assert_generated_file("#{path}.yml") do |body|
yaml = YAML.load(body)
assert yaml, 'YAML data missing'
yield yaml if block_given?
end
end
# Asserts that the given fixtures YAML file was generated.
# It takes a fixture name without the <tt>.yml</tt> part.
# The parsed YAML tree is passed to a block.
def assert_generated_fixtures_for(name)
assert_generated_yaml "test/fixtures/#{name.to_s.underscore}" do |yaml|
yield yaml if block_given?
end
end
# Asserts that the given views were generated.
# It takes a controller name and a list of views (including extensions).
# The body of each view is passed to a block.
def assert_generated_views_for(name, *actions)
actions.each do |action|
assert_generated_file("app/views/#{name.to_s.underscore}/#{action}") do |body|
yield body if block_given?
end
end
end
def assert_generated_migration(name, parent = "ActiveRecord::Migration")
file = Dir.glob("#{RAILS_ROOT}/db/migrate/*_#{name.to_s.underscore}.rb").first
assert !file.nil?, "should have generated the migration file but didn't"
file = file.match(/db\/migrate\/[0-9]+_\w+/).to_s
assert_generated_class file, parent do |body|
      assert_match(/timestamps/, body, "should have timestamps defined")
yield body if block_given?
end
end
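  # e.g. (a sketch; the migration name is hypothetical):
  #
  #   assert_generated_migration :create_products do |body|
  #     assert_generated_table body, :products
  #   end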
# Asserts that the given migration file was not generated.
# It takes the name of the migration as a parameter.
def assert_skipped_migration(name)
migration_file = "#{RAILS_ROOT}/db/migrate/001_#{name.to_s.underscore}.rb"
assert !File.exist?(migration_file), "should not create migration #{migration_file}"
end
# Asserts that the given resource was added to the routes.
def assert_added_route_for(name)
assert_generated_file("config/routes.rb") do |body|
      assert_match(/map\.resources :#{name.to_s.underscore}/, body,
        "should add route for :#{name.to_s.underscore}")
end
end
# Asserts that the given methods are defined in the body.
  # This assumes standard Rails code conventions in the source code.
# The body of each individual method is passed to a block.
def assert_has_method(body, *methods)
methods.each do |name|
assert body =~ /^ def #{name}(\(.+\))?\n((\n| .*\n)*) end/, "should have method #{name}"
yield(name, $2) if block_given?
end
end
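  # Sketch: inspect each matched method body (the action names are
  # hypothetical):
  #
  #   assert_has_method(body, :index, :show) do |name, method_body|
  #     assert_match(/respond_to/, method_body)
  #   end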
# Asserts that the given column is defined in the migration.
def assert_generated_column(body, name, type)
    assert_match(/t\.#{type} :#{name}/, body, "should have column #{name} defined")
end
# Asserts that the given table is defined in the migration.
def assert_generated_table(body, name)
    assert_match(/create_table :#{name} do/, body, "should have table #{name} defined")
end
# Asserts the given field name gets translated to an attribute type
# properly.
#
# assert_attribute_type 'date', :date_select
#
def assert_attribute_type(name, attribute_type)
assert_equal(
Rails::Generator::GeneratedAttribute.new('test', name).field_type,
attribute_type
)
end
end
| 33.560372 | 123 | 0.699262 |
acb25739678dbd558f084248fdceabe4ffa4c5ad | 963 | require File.expand_path(File.dirname(__FILE__) + '/test_helper.rb')
class TestFakerAvatar < Test::Unit::TestCase
def setup
@tester = Faker::Avatar
end
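  # The assertions below pin Faker::Avatar to robohash.org URLs and capture
  # the slug/size/format segments with regexes (per these tests, not the full
  # Faker API surface).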
def test_avatar
assert @tester.image.match(/http:\/\/robohash\.org\/(.+)\.png/)[1] != nil
end
def test_avatar_with_param
assert @tester.image('faker').match(/http:\/\/robohash\.org\/(.+)\.png/)[1] == 'faker'
end
def test_avatar_with_correct_size
assert @tester.image('faker', '150x320').match(/http:\/\/robohash\.org\/faker\.png\?size=(.+)/)[1] == '150x320'
end
def test_avatar_with_incorrect_size
assert_raise ArgumentError do
@tester.image(nil, '150x320z')
end
end
def test_avatar_with_supported_format
assert @tester.image('faker', '300x300', 'jpg').match(/http:\/\/robohash\.org\/faker\.jpg/)
end
def test_avatar_with_incorrect_format
assert_raise ArgumentError do
@tester.image(nil, '300x300', 'wrong_format')
end
end
end
| 26.027027 | 115 | 0.680166 |
39733eef718bd427d9c5f43feaf4caa3502cc591 | 5,893 | require 'rails_helper'
RSpec.describe "hbx_admin/_edit_aptc_csr", :dbclean => :after_each do
let(:person) { FactoryGirl.create(:person, :with_family ) }
let(:user) { FactoryGirl.create(:user, person: person) }
let(:year) { TimeKeeper.date_of_record.year }
before :each do
sign_in(user)
assign(:person, person)
assign(:family, person.primary_family)
assign(:months_array, Date::ABBR_MONTHNAMES.compact)
assign(:household_info, Admin::Aptc.build_household_level_aptc_csr_data(year, person.primary_family, [])) # Case with no Enrollment
assign(:household_members, [{ person.id =>[102.0, 102.0] }] )
assign(:year_options, [2016, 2017])
assign(:current_year, 2016)
family = person.primary_family
active_household = family.households.first
hbx_enrollments = active_household.hbx_enrollments
tax_household = FactoryGirl.create(:tax_household, household: active_household )
eligibility_determination = FactoryGirl.create(:eligibility_determination, tax_household: tax_household)
allow(family).to receive(:active_household).and_return active_household
allow(active_household).to receive(:latest_active_tax_household).and_return tax_household
allow(tax_household).to receive(:latest_eligibility_determination).and_return eligibility_determination
allow(active_household).to receive(:hbx_enrollments).and_return hbx_enrollments
end
context "without enrollment" do
it "Should display the Editing APTC/CSR text" do
render "hbx_admin/edit_aptc_csr_no_enrollment.html.erb", person: person, family: person.primary_family
expect(rendered).to match(/Editing APTC \/ CSR for:/)
end
it "Should display Person Demographics Information" do
render "hbx_admin/edit_aptc_csr_no_enrollment.html.erb", person: person, family: person.primary_family
expect(rendered).to match(/HBX ID/)
expect(rendered).to match(/Name/)
expect(rendered).to match(/DOB/)
expect(rendered).to match(/SSN/)
end
it "Should display the Household Information" do
render "hbx_admin/edit_aptc_csr_no_enrollment.html.erb", person: person, family: person.primary_family
months_array = Date::ABBR_MONTHNAMES.compact
months_array.each do |month|
        expect(rendered).to match(/#{month}/)
end
expect(rendered).to match(/MAX APTC/i)
expect(rendered).to match(/AVAILABLE APTC/i)
expect(rendered).to match(/CSR % AS INTEGER/i)
expect(rendered).to match(/SLCSP/i)
expect(rendered).to match(/Household Member\(s\)/i)
expect(rendered).to match(/APTC Amount \/ Percent Ratio/i)
end
end
context "with enrollment" do
let(:family) { FactoryGirl.create(:family, :with_primary_family_member) }
let(:household) {FactoryGirl.create(:household, family: family)}
let!(:hbx_with_aptc_1) {FactoryGirl.create(:hbx_enrollment, household: household, is_active: true, aasm_state: 'coverage_selected', changing: false, effective_on: (TimeKeeper.date_of_record.beginning_of_month - 40.days), applied_aptc_amount: 100)}
let!(:hbx_with_aptc_2) {FactoryGirl.create(:hbx_enrollment, household: household, is_active: true, aasm_state: 'coverage_selected', changing: false, effective_on: (TimeKeeper.date_of_record.beginning_of_month + 10.days), applied_aptc_amount: 210)}
let!(:hbx_enrollments) {[hbx_with_aptc_1, hbx_with_aptc_2]}
let!(:hbxs) { double("hbxs") }
before :each do
assign(:person, person)
assign(:family, person.primary_family)
family = person.primary_family
active_household = family.households.first
assign(:family, person.primary_family)
assign(:months_array, Date::ABBR_MONTHNAMES.compact)
      assign(:enrollments_info, Admin::Aptc.build_enrollments_data(person.primary_family, [], [], 112, 87, {})) # Enrollment data for the active household
assign(:household_members, [{ person.id =>[102.0, 102.0] }] )
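      # Stub the enrollment scope chain so the view renders without touching
      # the database; every scope returns the same double (a test sketch, not
      # the real HbxEnrollment scope behavior).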
allow(hbxs).to receive(:canceled_and_terminated).and_return hbxs
allow(hbxs).to receive(:with_plan).and_return hbxs
allow(hbxs).to receive(:with_aptc).and_return hbxs
      allow(hbxs).to receive(:by_year).and_return hbxs
allow(hbxs).to receive(:+).and_return hbxs
allow(hbxs).to receive(:without_aptc).and_return hbxs
allow(hbxs).to receive(:each).and_return hbx_with_aptc_1
allow(active_household).to receive(:hbx_enrollments).and_return hbxs
end
it "Should display the Editing APTC/CSR text" do
render "hbx_admin/edit_aptc_csr_active_enrollment.html.erb", person: person, family: person.primary_family
expect(rendered).to match(/Editing APTC \/ CSR for:/)
end
it "Should display Person Demographics Information" do
render "hbx_admin/edit_aptc_csr_no_enrollment.html.erb", person: person, family: person.primary_family
expect(rendered).to match(/HBX ID/)
expect(rendered).to match(/Name/)
expect(rendered).to match(/DOB/)
expect(rendered).to match(/SSN/)
end
it "Should display the Household Information" do
render "hbx_admin/edit_aptc_csr_no_enrollment.html.erb", person: person, family: person.primary_family
months_array = Date::ABBR_MONTHNAMES.compact
months_array.each do |month|
        expect(rendered).to match(/#{month}/)
end
expect(rendered).to match(/MAX APTC/i)
expect(rendered).to match(/AVAILABLE APTC/i)
expect(rendered).to match(/CSR % AS INTEGER/i)
expect(rendered).to match(/SLCSP/i)
expect(rendered).to match(/Household Member\(s\)/i)
expect(rendered).to match(/APTC Amount \/ Percent Ratio/i)
end
end
end
| 52.616071 | 253 | 0.698456 |
1853c32ce02b5cd232c9c132a739d7bad9e7c287 | 661 | Pod::Spec.new do |s|
s.name = "MTPocket"
s.version = "0.6.0"
s.summary = "Web request library that doesn't suck. Simple and powerful. Convenient and informative. JSON and XML to collection object conversion."
s.homepage = "https://github.com/mysterioustrousers/MTPocket"
s.license = 'BSD'
s.author = { "Adam Kirk" => "[email protected]" }
s.source = { :git => "https://github.com/mysterioustrousers/MTPocket.git", :tag => "0.6.0" }
s.source_files = 'MTPocket/*.{h,m}'
s.dependency 'XMLDictionary', '~> 2.0.0'
s.dependency 'MTJSONUtils', '~> 0.0.1'
s.dependency 'Base64'
s.requires_arc = false
end
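# Consuming this spec from a Podfile (illustrative; the version constraint
# tracks s.version above):
#
#   pod 'MTPocket', '~> 0.6.0'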
| 44.066667 | 154 | 0.632375 |