hexsha (string, 40-40) | size (int64, 2-1.01M) | content (string, 2-1.01M) | avg_line_length (float64, 1.5-100) | max_line_length (int64, 2-1k) | alphanum_fraction (float64, 0.25-1)
---|---|---|---|---|---
b97b21d7b9392c0d8723af18af4cb7f441337fa2 | 2,317 | $:.unshift File.join(File.dirname(__FILE__), 'lib')
require 'sinatra'
require 'tex_sanitizer'
require 'tex_template'
require "sinatra/config_file"
require 'digest'
class TeX2SVG < Sinatra::Base
register Sinatra::ConfigFile
Version = '1.0.10'
config_file 'config.yml'
pdflatex = settings.pdflatex
pdf2svg = settings.pdf2svg
max_length = settings.max_length
max_cpu = settings.max_cpu
usr_tikz_commands = settings.additional_tikz_commands
set :bind, settings.interface
set :port, settings.port
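# Usage sketch (assumed invocation, not part of the original source): the
# GET/POST handler below expects a 'tex' fragment and an optional 'type' of
# 'tikzcd' or 'xypic'; any other value falls back to 'tikzpicture'. For example:
#
#   curl --data-urlencode 'type=tikzcd' \
#        --data-urlencode 'tex=A \arrow[r] & B' \
#        http://localhost:<configured port>/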
%i(get post).each do |method|
send method, '/' do
response.headers['Server'] = "tex2svg #{Version}"
tex = params['tex']
type = params['type']
type.strip! if type
type = 'tikzpicture' unless type && ['tikzcd', 'xypic'].include?(type)
if (tex && tex.length <= max_length)
tex.strip!
case type
when 'tikzpicture'
san = TeXSanitizer.new(tex,
TeXSanitizer::Itex_control_sequences + TeXSanitizer::Tikzpicture_control_sequences + usr_tikz_commands,
TeXSanitizer::Itex_environments + TeXSanitizer::Tikz_environments)
clean = TeXTemplate.tikzpicture(san.sanitize)
when 'tikzcd'
san = TeXSanitizer.new(tex,
TeXSanitizer::Itex_control_sequences + TeXSanitizer::Tikzpicture_control_sequences +
TeXSanitizer::Tikzcd_control_sequences + usr_tikz_commands,
TeXSanitizer::Itex_environments + TeXSanitizer::Tikz_environments)
clean = TeXTemplate.tikzcd(san.sanitize)
when 'xypic'
end
i = Digest::SHA2.hexdigest(rand(1000000).to_s)
File.open("tmp/#{i}.tex", 'w') {|f| f.print(clean)}
system("#{pdflatex} --interaction=batchmode #{i}.tex; #{pdf2svg} #{i}.pdf #{i}.svg", {:rlimit_cpu=>max_cpu, :chdir=>'tmp'})
if File.exist?("tmp/#{i}.svg")
File.open("tmp/#{i}.svg") {|f| clean = f.readlines.join}
else
clean = "No SVG file was generated.\n"
end
%w[tex aux pdf log svg].each {|ext| File.delete("tmp/#{i}.#{ext}") if File.exist?("tmp/#{i}.#{ext}")}
clean
else
"TeX fragment must be less than #{max_length} characters. Yours was #{tex.length}.\n" if (tex && tex.length > max_length)
end
end
end
end
| 37.983607 | 131 | 0.634441 |
11784bc2ea35318eafaf7f85006c91bab0fa5c56 | 285 | require 'redis'
module Mallory
module Backend
class Redis
def initialize(host, port)
redis = ::Redis.new(:host => host, :port => port)
@proxies = redis.smembers("good_proxies")
end
def any
@proxies.sample
end
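# Usage sketch (assumed, not part of the original source):
#
#   backend = Mallory::Backend::Redis.new('127.0.0.1', 6379)
#   backend.any #=> one proxy string sampled from the "good_proxies" set, or nil if the set is empty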
end
end
end
| 15 | 57 | 0.575439 |
61d6b6039b19f6c26b5db3de8bfbb4b163af608f | 25 | module RoutineHelper
end
| 8.333333 | 20 | 0.88 |
1831320892d69fe77ebcf800d3cb97b9d848fc83 | 5,387 | require 'English'
require 'digest' # required for {set,reset}_api_token
require 'base64' # required for {set,reset}_api_token
# We always assume the following fields exist:
# => :user_name, :last_name, :first_name
# If there are added columns, add the default values to default_values
class User < ApplicationRecord
before_validation :strip_name
before_validation :nillify_empty_email_and_id_number
enum theme: { light: 1, dark: 2 }
# Group relationships
has_many :key_pairs, dependent: :destroy
validates_format_of :type, with: /\A(EndUser|AutotestUser|AdminUser)\z/
validates_presence_of :user_name, :last_name, :first_name, :time_zone, :display_name
validates_uniqueness_of :user_name
validates_uniqueness_of :email, allow_nil: true
validates_uniqueness_of :id_number, allow_nil: true
validates_inclusion_of :time_zone, in: ActiveSupport::TimeZone.all.map(&:name)
validates :user_name,
format: { with: /\A[a-zA-Z0-9\-_]+\z/,
message: 'user_name must be alphanumeric, hyphen, or underscore' },
unless: ->(u) { u.autotest_user? || u.admin_user? }
after_initialize :set_display_name, :set_time_zone
validates_inclusion_of :locale, in: I18n.available_locales.map(&:to_s)
# Authentication constants to be used as return values
# see self.authenticated? and main_controller for details
AUTHENTICATE_SUCCESS = 'success'.freeze
AUTHENTICATE_ERROR = 'error'.freeze
AUTHENTICATE_BAD_PLATFORM = 'bad_platform'.freeze
AUTHENTICATE_BAD_CHAR = 'bad_char'.freeze
# Authenticates login against its password
# through a script specified by Settings.validate_file
def self.authenticate(login, password, ip: nil)
# Do not allow the following characters in usernames/passwords
# Right now, this is \n and \0 only, since username and password
# are delimited by \n and C programs use \0 to terminate strings
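#
# For illustration (restating the protocol implemented further below, with
# placeholder values): the program named by Settings.validate_file receives
# "<login>\n<password>\n<ip, if given>" on stdin and is expected to exit
# with status 0 on success and any other status on failure.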
not_allowed_regexp = Regexp.new(/[\n\0]+/)
if not_allowed_regexp.match(login) || not_allowed_regexp.match(password)
m_logger = MarkusLogger.instance
m_logger.log("User '#{login}' failed to log in. Username/password contained " \
'illegal characters', MarkusLogger::ERROR)
AUTHENTICATE_BAD_CHAR
else
# Open a pipe and write to stdin of the program specified by Settings.validate_file.
# We could read something from the program's stdout, but there is no need
# for that at the moment (you would do it by e.g. pipe.readlines)
# External validation is supported on *NIX only
if RUBY_PLATFORM =~ /(?:mswin|mingw)/ # should match for Windows only
return AUTHENTICATE_BAD_PLATFORM
end
# In general, the external password validation program should exit with 0 for success
# and exit with any other integer for failure.
pipe = IO.popen("'#{Settings.validate_file}'", 'w+') # quotes to avoid choking on spaces
to_stdin = [login, password, ip].compact.join("\n")
pipe.puts(to_stdin) # write to stdin of Settings.validate_file
pipe.close
m_logger = MarkusLogger.instance
custom_message = Settings.validate_custom_status_message[$CHILD_STATUS.exitstatus.to_s]
if $CHILD_STATUS.exitstatus == 0
m_logger.log("User '#{login}' logged in.", MarkusLogger::INFO)
AUTHENTICATE_SUCCESS
elsif custom_message
m_logger.log("Login failed for user #{login}. Reason: #{custom_message}", MarkusLogger::ERROR)
$CHILD_STATUS.exitstatus.to_s
else
m_logger.log("User '#{login}' failed to log in.", MarkusLogger::ERROR)
AUTHENTICATE_ERROR
end
end
end
# Helper methods -----------------------------------------------------
def autotest_user?
self.instance_of?(AutotestUser)
end
def admin_user?
self.instance_of?(AdminUser)
end
def set_display_name
strip_name
self.display_name ||= "#{self.first_name} #{self.last_name}"
end
def set_time_zone
self.time_zone ||= Time.zone.name
end
# Reset API key for user model. The key is a SHA2 512 bit long digest,
# which is in turn MD5 digested and Base64 encoded so that it doesn't
# include bad HTTP characters.
#
# TODO: If we end up using this heavily we should probably let this key
# expire every X days/hours/weeks. When it does, a new token should be
# automatically generated.
def reset_api_key
key = generate_api_key
md5 = Digest::MD5.new
md5.update(key)
# base64 encode md5 hash
self.update(api_key: Base64.encode64(md5.to_s).strip)
end
private
# Create some random, hard to guess SHA2 512 bit long
# digest.
def generate_api_key
digest = Digest::SHA2.new(512)
# generate a unique token
unique_seed = SecureRandom.hex(20)
digest.update("#{unique_seed} SECRET! #{Time.current.to_f}").to_s
end
# strip input string
def strip_name
if self.user_name
self.user_name = self.user_name.strip
end
if self.last_name
self.last_name = self.last_name.strip
end
if self.first_name
self.first_name = self.first_name.strip
end
if self.email
self.email = self.email.strip
end
if self.id_number
self.id_number = self.id_number.strip
end
end
def nillify_empty_email_and_id_number
self.email = nil if self.email.blank?
self.id_number = nil if self.id_number.blank?
end
end
| 36.154362 | 102 | 0.702432 |
1c1111796f6209175a88daf2da94eee183d21b20 | 486 | class ContactUsPage < Fae::StaticPage
@slug = 'contact_us'
# Required to set the has_one associations; Fae::StaticPage will build these associations dynamically
def self.fae_fields
{
hero: { type: Fae::Image, languages: Fae.languages.keys },
email: { type: Fae::TextField },
body: {
type: Fae::TextArea,
languages: [:en, :zh],
validates: {
length: {
maximum: 150
}
}
}
}
end
end
| 21.130435 | 103 | 0.559671 |
1c1f18e72dbc408f48e0d9138fba2aad2bd7d568 | 26 | module HousesHelper
end
| 5.2 | 19 | 0.807692 |
ff286c13e1773f2b144d46fa0d3b940dbc685bea | 4,221 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
# Attempt to read encrypted secrets from `config/secrets.yml.enc`.
# Requires an encryption key in `ENV["RAILS_MASTER_KEY"]` or
# `config/secrets.yml.key`.
config.read_encrypted_secrets = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.public_file_server.enabled = ENV['RAILS_SERVE_STATIC_FILES'].present?
# Compress JavaScripts and CSS.
config.assets.js_compressor = :uglifier
# config.assets.css_compressor = :sass
# Do not fallback to assets pipeline if a precompiled asset is missed.
config.assets.compile = false
# `config.assets.precompile` and `config.assets.version` have moved to config/initializers/assets.rb
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.action_controller.asset_host = 'http://assets.example.com'
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache
# config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX
# Mount Action Cable outside main process or domain
# config.action_cable.mount_path = nil
# config.action_cable.url = 'wss://example.com/cable'
# config.action_cable.allowed_request_origins = [ 'http://example.com', /http:\/\/example.*/ ]
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
config.force_ssl = true
# Use the lowest log level to ensure availability of diagnostic information
# when problems arise.
config.log_level = :debug
# Prepend all log lines with the following tags.
config.log_tags = [ :request_id ]
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Use a real queuing backend for Active Job (and separate queues per environment)
# config.active_job.queue_adapter = :resque
# config.active_job.queue_name_prefix = "sample_app_#{Rails.env}"
config.action_mailer.perform_caching = false
# Ignore bad email addresses and do not raise email delivery errors.
# Set this to true and configure the email server for immediate delivery to raise delivery errors.
config.action_mailer.raise_delivery_errors = true
config.action_mailer.delivery_method = :smtp
host = 'radiant-chamber-90388.herokuapp.com'
config.action_mailer.default_url_options = { host: host }
ActionMailer::Base.smtp_settings = {
:address => 'smtp.sendgrid.net',
:port => '587',
:authentication => :plain,
:user_name => ENV['SENDGRID_USERNAME'],
:password => ENV['SENDGRID_PASSWORD'],
:domain => 'herokuapp.com',
:enable_starttls_auto => true
}
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Send deprecation notices to registered listeners.
config.active_support.deprecation = :notify
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Use a different logger for distributed setups.
# require 'syslog/logger'
# config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new 'app-name')
if ENV["RAILS_LOG_TO_STDOUT"].present?
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = config.log_formatter
config.logger = ActiveSupport::TaggedLogging.new(logger)
end
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
end
| 40.586538 | 102 | 0.749112 |
3378b1438bd2705f6c42e099a869f0931452711f | 484 | cask 'petite-chez-scheme' do
version '8.4'
sha256 '3ed7200c3e265d36d03129569a78bfde8aedaea1ec7565d3c7a4daca26389701'
url "http://www.scheme.com/download/pcsv#{version}-ta6osx-1.pkg.tar.gz"
name 'Petite Chez Scheme'
homepage 'http://www.scheme.com/petitechezscheme.html'
license :unknown # TODO: change license and remove this comment; ':unknown' is a machine-generated placeholder
pkg "pcsv#{version}-ta6osx-1.pkg"
uninstall :pkgutil => 'com.scheme.chezscheme'
end
| 34.571429 | 112 | 0.760331 |
ac4981ee76913402e5a9c3c3f77c7fe38758da33 | 9,623 | require 'sexp_processor'
require 'set'
require 'active_support/inflector'
#This is a mixin containing utility methods.
module Brakeman::Util
QUERY_PARAMETERS = Sexp.new(:call, Sexp.new(:call, nil, :request, Sexp.new(:arglist)), :query_parameters, Sexp.new(:arglist))
PATH_PARAMETERS = Sexp.new(:call, Sexp.new(:call, nil, :request, Sexp.new(:arglist)), :path_parameters, Sexp.new(:arglist))
REQUEST_PARAMETERS = Sexp.new(:call, Sexp.new(:call, nil, :request, Sexp.new(:arglist)), :request_parameters, Sexp.new(:arglist))
REQUEST_ENV = Sexp.new(:call, Sexp.new(:call, nil, :request, Sexp.new(:arglist)), :env, Sexp.new(:arglist))
PARAMETERS = Sexp.new(:call, nil, :params, Sexp.new(:arglist))
COOKIES = Sexp.new(:call, nil, :cookies, Sexp.new(:arglist))
SESSION = Sexp.new(:call, nil, :session, Sexp.new(:arglist))
ALL_PARAMETERS = Set[PARAMETERS, QUERY_PARAMETERS, PATH_PARAMETERS, REQUEST_PARAMETERS]
#Convert a string from "something_like_this" to "SomethingLikeThis"
#
#Taken from ActiveSupport.
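#
#Example (illustrative, not in the original source):
#
# camelize "checks/check_evil" #=> "Checks::CheckEvil"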
def camelize lower_case_and_underscored_word
lower_case_and_underscored_word.to_s.gsub(/\/(.?)/) { "::#{$1.upcase}" }.gsub(/(?:^|_)(.)/) { $1.upcase }
end
#Convert a string from "Something::LikeThis" to "something/like_this"
#
#Taken from ActiveSupport.
def underscore camel_cased_word
camel_cased_word.to_s.gsub(/::/, '/').
gsub(/([A-Z]+)([A-Z][a-z])/,'\1_\2').
gsub(/([a-z\d])([A-Z])/,'\1_\2').
tr("-", "_").
downcase
end
#Use ActiveSupport::Inflector to pluralize a word.
def pluralize word
ActiveSupport::Inflector.pluralize word
end
#Takes an Sexp like
# (:hash, (:lit, :key), (:str, "value"))
#and yields the key and value pairs to the given block.
#
#For example:
#
# h = Sexp.new(:hash, (:lit, :name), (:str, "bob"), (:lit, :name), (:str, "jane"))
# names = []
# hash_iterate(h) do |key, value|
# if symbol? key and key[1] == :name
# names << value[1]
# end
# end
# names #["bob"]
def hash_iterate hash
1.step(hash.length - 1, 2) do |i|
yield hash[i], hash[i + 1]
end
end
#Insert value into Hash Sexp
def hash_insert hash, key, value
index = 1
hash_iterate hash.dup do |k,v|
if k == key
hash[index + 1] = value
return hash
end
index += 2
end
hash << key << value
hash
end
#Get value from hash using key.
#
#If _key_ is a Symbol, it will be converted to a Sexp(:lit, key).
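#
#Example (illustrative, not in the original source):
#
# h = Sexp.new(:hash, Sexp.new(:lit, :name), Sexp.new(:str, "bob"))
# hash_access(h, :name) #=> s(:str, "bob")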
def hash_access hash, key
if key.is_a? Symbol
key = Sexp.new(:lit, key)
end
if index = hash.find_index(key) and index > 0
return hash[index + 1]
end
nil
end
#Adds params, session, and cookies to environment
#so they can be replaced by their respective Sexps.
def set_env_defaults
@env[PARAMETERS] = Sexp.new(:params)
@env[SESSION] = Sexp.new(:session)
@env[COOKIES] = Sexp.new(:cookies)
end
#Check if _exp_ represents a hash: s(:hash, {...})
#This also includes pseudo hashes params, session, and cookies.
def hash? exp
exp.is_a? Sexp and (exp.node_type == :hash or
exp.node_type == :params or
exp.node_type == :session or
exp.node_type == :cookies)
end
#Check if _exp_ represents an array: s(:array, [...])
def array? exp
exp.is_a? Sexp and exp.node_type == :array
end
#Check if _exp_ represents a String: s(:str, "...")
def string? exp
exp.is_a? Sexp and exp.node_type == :str
end
#Check if _exp_ represents a Symbol: s(:lit, :...)
def symbol? exp
exp.is_a? Sexp and exp.node_type == :lit and exp[1].is_a? Symbol
end
#Check if _exp_ represents a method call: s(:call, ...)
def call? exp
exp.is_a? Sexp and exp.node_type == :call
end
#Check if _exp_ represents a Regexp: s(:lit, /.../)
def regexp? exp
exp.is_a? Sexp and exp.node_type == :lit and exp[1].is_a? Regexp
end
#Check if _exp_ represents an Integer: s(:lit, ...)
def integer? exp
exp.is_a? Sexp and exp.node_type == :lit and exp[1].is_a? Integer
end
#Check if _exp_ represents a number: s(:lit, ...)
def number? exp
exp.is_a? Sexp and exp.node_type == :lit and exp[1].is_a? Numeric
end
#Check if _exp_ represents a result: s(:result, ...)
def result? exp
exp.is_a? Sexp and exp.node_type == :result
end
#Check if _exp_ represents a :true, :lit, or :string node
def true? exp
exp.is_a? Sexp and (exp.node_type == :true or
exp.node_type == :lit or
exp.node_type == :string)
end
#Check if _exp_ represents a :false or :nil node
def false? exp
exp.is_a? Sexp and (exp.node_type == :false or
exp.node_type == :nil)
end
#Check if _exp_ is a params hash
def params? exp
if exp.is_a? Sexp
return true if exp.node_type == :params or ALL_PARAMETERS.include? exp
if exp.node_type == :call
if params? exp[1]
return true
elsif exp[2] == :[]
return params? exp[1]
end
end
end
false
end
def cookies? exp
if exp.is_a? Sexp
return true if exp.node_type == :cookies or exp == COOKIES
if exp.node_type == :call
if cookies? exp[1]
return true
elsif exp[2] == :[]
return cookies? exp[1]
end
end
end
false
end
def request_env? exp
call? exp and (exp == REQUEST_ENV or exp[1] == REQUEST_ENV)
end
#Check if exp is params, cookies, or request_env
def request_value? exp
params? exp or
cookies? exp or
request_env? exp
end
#Check if _exp_ is a Sexp.
def sexp? exp
exp.is_a? Sexp
end
#Check if _exp_ is a Sexp and the node type matches one of the given types.
def node_type? exp, *types
exp.is_a? Sexp and types.include? exp.node_type
end
#Returns true if the given _exp_ contains a :class node.
#
#Useful for checking if a module is just a module or if it is a namespace.
def contains_class? exp
todo = [exp]
until todo.empty?
current = todo.shift
if node_type? current, :class
return true
elsif sexp? current
todo = current[1..-1].concat todo
end
end
false
end
#Return file name related to given warning. Uses +warning.file+ if it exists
def file_for warning, tracker = nil
if tracker.nil?
tracker = @tracker || self.tracker
end
if warning.file
File.expand_path warning.file, tracker.options[:app_path]
else
case warning.warning_set
when :controller
file_by_name warning.controller, :controller, tracker
when :template
file_by_name warning.template[:name], :template, tracker
when :model
file_by_name warning.model, :model, tracker
when :warning
file_by_name warning.class, nil, tracker
else
nil
end
end
end
#Attempt to determine path to context file based on the reported name
#in the warning.
#
#For example,
#
# file_by_name FileController #=> "/rails/root/app/controllers/file_controller.rb"
def file_by_name name, type, tracker = nil
return nil unless name
string_name = name.to_s
name = name.to_sym
unless type
if string_name =~ /Controller$/
type = :controller
elsif camelize(string_name) == string_name
type = :model
else
type = :template
end
end
path = tracker.options[:app_path]
case type
when :controller
if tracker.controllers[name] and tracker.controllers[name][:file]
path = tracker.controllers[name][:file]
else
path += "/app/controllers/#{underscore(string_name)}.rb"
end
when :model
if tracker.models[name] and tracker.models[name][:file]
path = tracker.models[name][:file]
else
path += "/app/controllers/#{underscore(string_name)}.rb"
end
when :template
if tracker.templates[name] and tracker.templates[name][:file]
path = tracker.templates[name][:file]
elsif string_name.include? " "
name = string_name.split[0].to_sym
path = file_for tracker, name, :template
else
path = nil
end
end
path
end
#Return array of lines surrounding the warning location from the original
#file.
def context_for warning, tracker = nil
file = file_for warning, tracker
context = []
return context unless warning.line and file and File.exist? file
current_line = 0
start_line = warning.line - 5
end_line = warning.line + 5
start_line = 1 if start_line < 0
File.open file do |f|
f.each_line do |line|
current_line += 1
next if line.strip == ""
if current_line > end_line
break
end
if current_line >= start_line
context << [current_line, line]
end
end
end
context
end
def truncate_table str
@terminal_width ||= ::HighLine::SystemExtensions::terminal_size[0]
lines = str.lines
lines.map do |line|
if line.chomp.length > @terminal_width
line[0..(@terminal_width - 3)] + ">>"
else
line
end
end.join
end
# rely on Terminal::Table to build the structure, extract the data out in CSV format
def table_to_csv table
output = CSV.generate_line(table.headings.cells.map{|cell| cell.to_s.strip})
table.rows.each do |row|
output << CSV.generate_line(row.cells.map{|cell| cell.to_s.strip})
end
output
end
end
| 26.078591 | 131 | 0.622467 |
1c727a7911068711dab4b657dc4dba93bd93bab9 | 1,485 | module PaginatingFind
module Helpers
DEFAULT_OPTIONS = { :name => :page, :window_size => 2,
:always_show_anchors => true, :link_to_current_page => false,
:params => {} }
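# Usage sketch (assumed view code, not part of the original source):
#
#   <%= paginating_links @paginator, :name => :page, :window_size => 3 %>
#
# renders numbered page links around the current page, keeping first/last
# anchors unless :always_show_anchors is disabled.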
def paginating_links(paginator, options = {}, html_options = {})
name = options[:name] || DEFAULT_OPTIONS[:name]
params = (options[:params] || DEFAULT_OPTIONS[:params]).clone
paginating_links_each(paginator, options) do |n|
params[name] = n
link_to(n.to_s, params, html_options)
end
end
def paginating_links_each(paginator, options = {})
options = DEFAULT_OPTIONS.merge(options)
window = ((paginator.page - options[:window_size] + 1)..(paginator.page + options[:window_size] - 1)).select {|w| w >= paginator.first_page && w <= paginator.last_page }
html = ''
if options[:always_show_anchors] && !window.include?(paginator.first_page)
html << yield(paginator.first_page)
html << ' ... ' unless window.first - 1 == paginator.first_page
html << ' '
end
window.each do |p|
if paginator.page == p && !options[:link_to_current_page]
html << p.to_s
else
html << yield(p)
end
html << ' '
end
if options[:always_show_anchors] && !window.include?(paginator.last_page)
html << ' ... ' unless window.last + 1 == paginator.last_page
html << yield(paginator.last_page)
end
html
end
end
end
| 30.9375 | 175 | 0.602694 |
26bb3224cf86976b4bf5227c83c6ccffb026d8a6 | 192 | require File.expand_path('../1.8.7', __FILE__)
module Regexp::Syntax
module Ruby
class V18 < Regexp::Syntax::Ruby::V187
def initialize
super
end
end
end
end
| 13.714286 | 46 | 0.619792 |
263b408133ff540f03e02a5cfebde47894eb6d9f | 1,955 | # This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2019_02_28_150942) do
create_table "assignments", force: :cascade do |t|
t.integer "color_id"
t.integer "product_id"
t.integer "quantity"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.index ["color_id"], name: "index_assignments_on_color_id"
t.index ["product_id"], name: "index_assignments_on_product_id"
end
create_table "colors", force: :cascade do |t|
t.string "color"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.integer "quantity"
end
create_table "order_items", force: :cascade do |t|
t.integer "quantity"
t.integer "product_id"
t.integer "order_id"
t.decimal "total"
t.decimal "unit_price"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "orders", force: :cascade do |t|
t.decimal "subtotal"
t.decimal "total"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "products", force: :cascade do |t|
t.string "title"
t.decimal "price"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
end
| 34.298246 | 86 | 0.720716 |
6a066ffef3184febb9e5edd9160d6115f0f92938 | 12,858 | ##
# This module requires Metasploit: http://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core'
require 'net/ssh'
require 'sshkey' # TODO: Actually include this!
class Metasploit3 < Msf::Auxiliary
include Msf::Auxiliary::Scanner
include Msf::Auxiliary::AuthBrute
include Msf::Auxiliary::Report
def initialize
super(
'Name' => 'SSH Public Key Acceptance Scanner',
'Description' => %q{
This module can determine what public keys are configured for
key-based authentication across a range of machines, users, and
sets of known keys. The SSH protocol indicates whether a particular
key is accepted prior to the client performing the actual signed
authentication request. To use this module, a text file containing
one or more SSH keys should be provided. These can be private or
public, so long as no passphrase is set on the private keys.
If you have loaded a database plugin and connected to a database
this module will record authorized public keys and hosts so you can
track your progress.
Key files may be a single public (unencrypted) key, or several public
keys concatenated together as an ASCII text file. Non-key data should be
silently ignored. Private keys will only utilize the public key component
stored within the key file.
},
'Author' => [
'todb',
'hdm',
'Stuart Morgan <stuart.morgan[at]mwrinfosecurity.com>', # Reworked the storage (db, credentials, notes, loot) only
],
'License' => MSF_LICENSE
)
register_options(
[
Opt::RPORT(22),
OptPath.new('KEY_FILE', [false, 'Filename of one or several cleartext public keys.'])
], self.class
)
register_advanced_options(
[
OptBool.new('SSH_DEBUG', [ false, 'Enable SSH debugging output (Extreme verbosity!)', false]),
OptBool.new('SSH_BYPASS', [ false, 'Verify that authentication was not bypassed when keys are found', false]),
OptString.new('SSH_KEYFILE_B64', [false, 'Raw data of an unencrypted SSH public key. This should be used by programmatic interfaces to this module only.', '']),
OptPath.new('KEY_DIR', [false, 'Directory of several keys. Filenames must not begin with a dot in order to be read.']),
OptInt.new('SSH_TIMEOUT', [ false, 'Specify the maximum time to negotiate a SSH session', 30])
]
)
deregister_options('RHOST','PASSWORD','PASS_FILE','BLANK_PASSWORDS','USER_AS_PASS')
@good_credentials = {}
@good_key = ''
@strip_passwords = true
end
def key_dir
datastore['KEY_DIR']
end
def rport
datastore['RPORT']
end
def ip
datastore['RHOST']
end
def read_keyfile(file)
if file == :keyfile_b64
keyfile = datastore['SSH_KEYFILE_B64'].unpack("m*").first
elsif file.kind_of? Array
keyfile = ''
file.each do |dir_entry|
next unless ::File.readable? dir_entry
keyfile << ::File.open(dir_entry, "rb") {|f| f.read(f.stat.size)}
end
else
keyfile = ::File.open(file, "rb") {|f| f.read(f.stat.size)}
end
keys = []
this_key = []
in_key = false
keyfile.split("\n").each do |line|
if line =~ /ssh-(dss|rsa)\s+/
keys << line
next
end
in_key = true if(line =~ /^-----BEGIN [RD]SA (PRIVATE|PUBLIC) KEY-----/)
this_key << line if in_key
if(line =~ /^-----END [RD]SA (PRIVATE|PUBLIC) KEY-----/)
in_key = false
keys << (this_key.join("\n") + "\n")
this_key = []
end
end
if keys.empty?
print_error "#{ip}:#{rport} SSH - No valid keys found"
end
return validate_keys(keys)
end
# Validates that the key isn't total garbage, and converts PEM formatted
# keys to SSH formatted keys.
def validate_keys(keys)
keepers = []
keys.each do |key|
if key =~ /ssh-(dss|rsa)/
# A public key has been provided
keepers << { :public => key, :private => "" }
next
else
# Use the mighty SSHKey library from James Miller to convert them on the fly.
# This is where a PRIVATE key has been provided
ssh_version = SSHKey.new(key).ssh_public_key rescue nil
keepers << { :public => ssh_version, :private => key } if ssh_version
next
end
# Needs a beginning
next unless key =~ /^-----BEGIN [RD]SA (PRIVATE|PUBLIC) KEY-----\x0d?\x0a/m
# Needs an end
next unless key =~ /\n-----END [RD]SA (PRIVATE|PUBLIC) KEY-----\x0d?\x0a?$/m
# Shouldn't have binary.
next unless key.scan(/[\x00-\x08\x0b\x0c\x0e-\x1f\x80-\xff]/).empty?
# Add more tests to test
keepers << { :public => key, :private => "" }
end
if keepers.empty?
print_error "#{ip}:#{rport} SSH - No valid keys found"
end
return keepers.uniq
end
def pull_cleartext_keys(keys)
cleartext_keys = []
keys.each do |key|
next unless key[:public]
next if key[:private] =~ /Proc-Type:.*ENCRYPTED/
this_key = { :public => key[:public].gsub(/\x0d/,""), :private => key[:private] }
next if cleartext_keys.include? this_key
cleartext_keys << this_key
end
if cleartext_keys.empty?
print_error "#{ip}:#{rport} SSH - No valid cleartext keys found"
end
return cleartext_keys
end
def do_login(ip, port, user)
if datastore['KEY_FILE'] and File.readable?(datastore['KEY_FILE'])
keys = read_keyfile(datastore['KEY_FILE'])
cleartext_keys = pull_cleartext_keys(keys)
msg = "#{ip}:#{rport} SSH - Trying #{cleartext_keys.size} cleartext key#{(cleartext_keys.size > 1) ? "s" : ""} per user."
elsif datastore['SSH_KEYFILE_B64'] && !datastore['SSH_KEYFILE_B64'].empty?
keys = read_keyfile(:keyfile_b64)
cleartext_keys = pull_cleartext_keys(keys)
msg = "#{ip}:#{rport} SSH - Trying #{cleartext_keys.size} cleartext key#{(cleartext_keys.size > 1) ? "s" : ""} per user (read from datastore)."
elsif datastore['KEY_DIR']
return :missing_keyfile unless(File.directory?(key_dir) && File.readable?(key_dir))
unless @key_files
@key_files = Dir.entries(key_dir).reject {|f| f =~ /^\x2e/}
end
these_keys = @key_files.map {|f| File.join(key_dir,f)}
keys = read_keyfile(these_keys)
cleartext_keys = pull_cleartext_keys(keys)
msg = "#{ip}:#{rport} SSH - Trying #{cleartext_keys.size} cleartext key#{(cleartext_keys.size > 1) ? "s" : ""} per user."
else
return :missing_keyfile
end
unless @alerted_with_msg
print_status msg
@alerted_with_msg = true
end
cleartext_keys.each_with_index do |key_data,key_idx|
key_info = ""
if key_data[:public] =~ /ssh\-(rsa|dss)\s+([^\s]+)\s+(.*)/
key_info = "- #{$3.strip}"
end
accepted = []
opt_hash = {
:auth_methods => ['publickey'],
:msframework => framework,
:msfmodule => self,
:port => port,
:key_data => key_data[:public],
:disable_agent => true,
:record_auth_info => true,
:skip_private_keys => true,
:config =>false,
:accepted_key_callback => Proc.new {|key| accepted << { :data => key_data, :key => key, :info => key_info } },
:proxies => datastore['Proxies']
}
opt_hash.merge!(:verbose => :debug) if datastore['SSH_DEBUG']
begin
ssh_socket = nil
::Timeout.timeout(datastore['SSH_TIMEOUT']) { ssh_socket = Net::SSH.start(ip, user, opt_hash) } rescue nil
if datastore['SSH_BYPASS'] and ssh_socket
data = nil
print_status("#{ip}:#{rport} SSH - User #{user} is being tested for authentication bypass...")
begin
::Timeout.timeout(5) { data = ssh_socket.exec!("help\nid\nuname -a").to_s }
rescue ::Exception
end
print_brute(:level => :good, :msg => "User #{user} successfully bypassed authentication: #{data.inspect} ") if data
end
::Timeout.timeout(1) { ssh_socket.close if ssh_socket } rescue nil
rescue Rex::ConnectionError
return :connection_error
rescue Net::SSH::Disconnect, ::EOFError
return :connection_disconnect
rescue Net::SSH::AuthenticationFailed
rescue Net::SSH::Exception
return [:fail,nil] # For whatever reason.
end
if accepted.length == 0
if @key_files
print_brute :level => :verror, :msg => "User #{user} does not accept key #{@key_files[key_idx+1]} #{key_info}"
else
print_brute :level => :verror, :msg => "User #{user} does not accept key #{key_idx+1} #{key_info}"
end
end
accepted.each do |key|
private_key_present = (key[:data][:private]!="") ? 'Yes' : 'No'
print_brute :level => :good, :msg => "Public key accepted: '#{user}' with key '#{key[:key][:fingerprint]}' (Private Key: #{private_key_present}) #{key_info}"
do_report(ip, rport, user, key)
end
end
end
def do_report(ip, port, user, key)
return unless framework.db.active
store_public_keyfile(ip,user,key[:fingerprint],key[:data][:public])
private_key_present = (key[:data][:private]!="") ? 'Yes' : 'No'
# Store a note relating to the public key test
note_information = {
user: user,
public_key: key[:data][:public],
private_key: private_key_present,
info: key[:info]
}
report_note(host: ip, port: port, type: "ssh.publickey.accepted", data: note_information, update: :unique_data)
if key[:data][:private] != ""
# Store these keys in loot
private_keyfile_path = store_private_keyfile(ip,user,key[:fingerprint],key[:data][:private])
# Use the proper credential method to store credentials that we have
service_data = {
address: ip,
port: port,
service_name: 'ssh',
protocol: 'tcp',
workspace_id: myworkspace_id
}
credential_data = {
module_fullname: self.fullname,
origin_type: :service,
private_data: key[:data][:private],
private_type: :ssh_key,
username: key[:key][:user],
}.merge(service_data)
login_data = {
core: create_credential(credential_data),
last_attempted_at: DateTime.now,
status: Metasploit::Model::Login::Status::SUCCESSFUL,
proof: private_keyfile_path
}.merge(service_data)
create_credential_login(login_data)
end
end
def existing_loot(ltype, key_id)
framework.db.loots(myworkspace).where(ltype: ltype).select {|l| l.info == key_id}.first
end
def store_public_keyfile(ip,user,key_id,key_data)
safe_username = user.gsub(/[^A-Za-z0-9]/,"_")
ktype = key_data.match(/ssh-(rsa|dss)/)[1] rescue nil
return unless ktype
ktype = "dsa" if ktype == "dss"
ltype = "host.unix.ssh.#{user}_#{ktype}_public"
keyfile = existing_loot(ltype, key_id)
return keyfile.path if keyfile
keyfile_path = store_loot(
ltype,
"application/octet-stream", # Text, but always want to mime-type attach it
ip,
(key_data + "\n"),
"#{safe_username}_#{ktype}.pub",
key_id
)
return keyfile_path
end
def store_private_keyfile(ip,user,key_id,key_data)
safe_username = user.gsub(/[^A-Za-z0-9]/,"_")
ktype = key_data.match(/-----BEGIN ([RD]SA) (?:PRIVATE|PUBLIC) KEY-----/)[1].downcase rescue nil
return unless ktype
ltype = "host.unix.ssh.#{user}_#{ktype}_private"
keyfile = existing_loot(ltype, key_id)
return keyfile.path if keyfile
keyfile_path = store_loot(
ltype,
"application/octet-stream", # Text, but always want to mime-type attach it
ip,
(key_data + "\n"),
"#{safe_username}_#{ktype}.private",
key_id
)
return keyfile_path
end
def run_host(ip)
# Since SSH collects keys and tries them all on one authentication session, it doesn't
# make sense to iteratively go through all the keys individually. So, ignore the pass variable,
# and try all available keys for all users.
each_user_pass do |user,pass|
ret, _ = do_login(ip, rport, user)
case ret
when :connection_error
vprint_error "#{ip}:#{rport} SSH - Could not connect"
:abort
when :connection_disconnect
vprint_error "#{ip}:#{rport} SSH - Connection timed out"
:abort
when :fail
vprint_error "#{ip}:#{rport} SSH - Failed: '#{user}'"
when :missing_keyfile
vprint_error "#{ip}:#{rport} SSH - Cannot read keyfile"
when :no_valid_keys
vprint_error "#{ip}:#{rport} SSH - No readable keys in keyfile"
end
end
end
end
| 34.657682 | 168 | 0.620625 |
1cddd87e91732c3c4b38d463bc480c79306880ea | 392 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe MergeRequestBasicEntity do
let(:resource) { build(:merge_request) }
subject do
described_class.new(resource).as_json
end
it 'has public_merge_status as merge_status' do
expect(resource).to receive(:public_merge_status).and_return('checking')
expect(subject[:merge_status]).to eq 'checking'
end
end
| 21.777778 | 76 | 0.760204 |
6117be810e1fc8ffb75c02ff96fed267664c39bb | 1,819 | require_dependency "front_end_builds/application_controller"
module FrontEndBuilds
# This controller is responsible for serving the index.html
# based on the incoming params. This is what serves you front
# end.
#
# Best is not a resource, but we are going to isolate serving the
# best build to its own controller.
class BestsController < ApplicationController
include Rails.application.routes.url_helpers
before_filter :find_front_end, only: [:show]
def show
if @front_end
respond_to do |format|
format.html { render text: @front_end.with_head_tag(meta_tags) }
format.json { render json: { version: @front_end.id } }
end
else
# TODO install instructions, user needs to push build
render text: "not found", status: 404
end
end
private
def meta_tags
tags = {
csrf_param: request_forgery_protection_token,
csrf_token: form_authenticity_token,
front_end_build_version: @front_end.id,
front_end_build_params: use_params(:build_search_params).to_query,
front_end_build_url: front_end_builds_best_path(
use_params(:build_search_params).merge(format: :json)
)
}
tags
.map { |name, content|
"<meta name='#{name.to_s.dasherize}' content='#{content}' />"
}
.join("\n")
.to_s
end
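# Illustrative output (placeholder values, not from the original source):
# each entry above becomes one meta tag with a dasherized name, e.g.
#
#   <meta name='front-end-build-version' content='42' />
#   <meta name='csrf-param' content='authenticity_token' />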
def find_front_end
@front_end = FrontEndBuilds::Build.find_best(use_params(:build_search_params))
end
def build_search_params_rails_3
params.slice(:app_name, :id, :branch, :sha, :job)
end
def build_search_params_rails_4
params.permit(:app_name, :id, :branch, :sha, :job)
end
alias_method :build_search_params_rails_5, :build_search_params_rails_4
end
end
| 27.984615 | 84 | 0.669049 |
91fdb29c857821bd98ab92efd8d095cd7f4aa1bb | 55 | Qpdf.config = {
:exe_path => '/usr/local/bin/qpdf'
}
| 13.75 | 36 | 0.6 |
ace281b3ff2caa9d51cb378fa16b47f053f4a00f | 249 | maintainer "YOUR_COMPANY_NAME"
maintainer_email "YOUR_EMAIL"
license "All rights reserved"
description "Installs/Configures irssi"
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version "0.0.1"
| 35.571429 | 72 | 0.714859 |
013dfb416edde6ea323deaec86e51192a582a987 | 238 | require 'spec_helper'
require 'cantango/config/permit_registry_ex'
describe CanTango::Config::Permits::Registration do
subject { CanTango::Config::Permits.instance }
it_should_behave_like CanTango::Config::Permits::Registration
end | 26.444444 | 63 | 0.810924 |
284d2b778c482bf9181d8dbf845bbcc52b7eda1e | 181 | class RemoveThemeIdFromUsers < ActiveRecord::Migration
def self.up
remove_column :users, :theme_id
end
def self.down
add_column :users, :theme_id, :integer
end
end
| 18.1 | 54 | 0.734807 |
01599b086942d12aa05f6f2e84198240ccb64bde | 971 | require 'rails_helper'
module ActiveAdmin
RSpec.describe Resource, "Ordering" do
describe "#order_by" do
let(:application) { ActiveAdmin::Application.new }
let(:namespace) { ActiveAdmin::Namespace.new application, :admin }
let(:resource_config) { ActiveAdmin::Resource.new namespace, Post }
let(:dsl){ ActiveAdmin::ResourceDSL.new(resource_config) }
it "should register the ordering in the config" do
dsl.run_registration_block do
order_by(:age, &:to_sql)
end
expect(resource_config.ordering.size).to eq(1)
end
it "should allow to setup custom ordering class" do
MyOrderClause = Class.new(ActiveAdmin::OrderClause)
dsl.run_registration_block do
config.order_clause = MyOrderClause
end
expect(resource_config.order_clause).to eq(MyOrderClause)
expect(application.order_clause).to eq(ActiveAdmin::OrderClause)
end
end
end
end
| 33.482759 | 73 | 0.683831 |
5d995dda72f51c4f0c57f8a863da3ec85ed54b78 | 772 | require "test_helper"
class Nonsensor::MidpointDisplacementTest < Minitest::Test
def test_first_and_last_values_are_set
[1, 2, 3, 5, 7, 9, 10, 10.0, 11, 100, 1000].each do |batch_size|
gen = Nonsensor::MidpointDisplacement.new(start: batch_size, batch_size: batch_size)
values = gen.take!(batch_size)
assert_equal batch_size, values.first
assert_equal batch_size, values.last
end
end
def test_it_returns_numbers
gen = Nonsensor::MidpointDisplacement.new(batch_size: 100)
values = gen.take!(100)
values.each do |value|
assert_kind_of Numeric, value
end
end
def test_it_makes_more_batches
gen = Nonsensor::MidpointDisplacement.new(batch_size: 10)
values = gen.take!(50)
assert true
end
end
| 25.733333 | 90 | 0.713731 |
f7ac8f473d56ab65226386c240760697e0f953ac | 3,038 | module AASM
class InstanceBase
attr_accessor :from_state, :to_state, :current_event
def initialize(instance, name=:default) # instance of the class including AASM, name of the state machine
@instance = instance
@name = name
end
def current_state
@instance.aasm_read_state(@name)
end
def current_state=(state)
@instance.aasm_write_state_without_persistence(state, @name)
# @current_state = state
end
def enter_initial_state
state_name = determine_state_name(@instance.class.aasm(@name).initial_state)
state_object = state_object_for_name(state_name)
state_object.fire_callbacks(:before_enter, @instance)
# state_object.fire_callbacks(:enter, @instance)
self.current_state = state_name
state_object.fire_callbacks(:after_enter, @instance)
state_name
end
def human_state
AASM::Localizer.new.human_state_name(@instance.class, state_object_for_name(current_state))
end
def states(options={})
if options[:permitted]
# ugliness level 1000
permitted_event_names = events(:permitted => true).map(&:name)
transitions = @instance.class.aasm(@name).state_machine.events.values_at(*permitted_event_names).compact.map {|e| e.transitions_from_state(current_state) }
tos = transitions.map {|t| t[0] ? t[0].to : nil}.flatten.compact.map(&:to_sym).uniq
@instance.class.aasm(@name).states.select {|s| tos.include?(s.name.to_sym)}
else
@instance.class.aasm(@name).states
end
end
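# For illustration (assumes a model instance named `job`, not part of the
# original source):
#
#   job.aasm.states(:permitted => true).map(&:name)
#
# returns only the states reachable from the current state via events whose
# guards currently pass.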
def events(options={})
state = options[:state] || current_state
events = @instance.class.aasm(@name).events.select {|e| e.transitions_from_state?(state) }
options[:reject] = Array(options[:reject])
events.reject! { |e| options[:reject].include?(e.name) }
if options[:permitted]
# filters the results of events_for_current_state so that only those that
# are really currently possible (given transition guards) are shown.
events.select! { |e| @instance.send("may_#{e.name}?") }
end
events
end
def state_object_for_name(name)
obj = @instance.class.aasm(@name).states.find {|s| s.name == name}
raise AASM::UndefinedState, "State :#{name} doesn't exist" if obj.nil?
obj
end
def determine_state_name(state)
case state
when Symbol, String
state
when Proc
state.call(@instance)
else
raise NotImplementedError, "Unrecognized state-type given. Expected Symbol, String, or Proc."
end
end
def may_fire_event?(name, *args)
if event = @instance.class.aasm(@name).state_machine.events[name]
!!event.may_fire?(@instance, *args)
else
false # unknown event
end
end
def set_current_state_with_persistence(state)
save_success = @instance.aasm_write_state(state, @name)
self.current_state = state if save_success
save_success
end
end
end
| 31.319588 | 163 | 0.666228 |
019bb4c88d9d78a3b5333367cd02d71d0f17b9f6 | 1,972 | class TinyFugue < Formula
desc "Programmable MUD client"
homepage "https://tinyfugue.sourceforge.io/"
url "https://downloads.sourceforge.net/project/tinyfugue/tinyfugue/5.0%20beta%208/tf-50b8.tar.gz"
version "5.0b8"
sha256 "3750a114cf947b1e3d71cecbe258cb830c39f3186c369e368d4662de9c50d989"
license "GPL-2.0-or-later"
revision 2
livecheck do
url :stable
regex(%r{url=.*?/tf[._-]v?(\d+(?:\.\d+)*(?:[a-z]\d+?)?)\.t}i)
strategy :sourceforge do |page, regex|
page.scan(regex).map { |match| match.first.sub(/^(\d)(\d)([a-z])/i, '\1.\2\3') }
end
end
bottle do
sha256 arm64_big_sur: "de2a1d16b807c1cede3b8f574a1dbaa5a8bda47b4c65307b33b975b9eec665f7"
sha256 big_sur: "c7e39f8d3cf009ff749208b5b2efa718a802a2ca82368273b1076a0607a10e76"
sha256 catalina: "d10777dd98ae76a048caed1179f7a65f8ee59256dcb94cfcd89ac1da0e135209"
sha256 mojave: "ea162f2b1644a44d95a2847ec34133661008fff66306e3eda790a25f253f2165"
sha256 high_sierra: "b1ddefa5c2a52f3399f5a90c0586d65e5e7ccc9940715cbe682a1a30e8dc6e76"
sha256 x86_64_linux: "c92a44ad82e402fb01b555a22f7e276a344d799b1b666ef76286a3397617770c"
end
depends_on "libnet"
depends_on "[email protected]"
depends_on "pcre"
uses_from_macos "ncurses"
conflicts_with "tee-clc", because: "both install a `tf` binary"
# pcre deprecated pcre_info. Switch to HB pcre-8.31 and pcre_fullinfo.
# Not reported upstream; project is in stasis since 2007.
patch do
url "https://raw.githubusercontent.com/Homebrew/formula-patches/85fa66a9dc80757ba32bf5d818d70fc26bb24b6f/tiny-fugue/5.0b8.patch"
sha256 "22f660dc0c0d0691ccaaacadf2f3c47afefbdc95639e46c6b4b77a0545b6a17c"
end
def install
system "./configure", "--disable-debug", "--disable-dependency-tracking",
"--prefix=#{prefix}",
"--enable-getaddrinfo",
"--enable-termcap=ncurses"
system "make", "install"
end
end
| 39.44 | 132 | 0.719067 |
e96067534e84b66d7b6fffc88e90774270684614 | 7,472 | require 'json'
def get_auth_info(job_url)
<<-DOC
You need to pass the environment variable JENKINS_USER_AUTH
It should be in the format <LDAP username>:<access token>
To find your access token, go to http://<jenkins-url>/me/configure
DOC
end
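# e.g. (illustrative value only):
#   export JENKINS_USER_AUTH='jdoe:11aabbccddeeff00112233445566778899'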
namespace :pl do
desc "do a local build"
task :local_build => "pl:fetch" do
# If we have a dirty source, bail, because changes won't get reflected in
# the package builds
Pkg::Util::Git.fail_on_dirty_source
Pkg::Util::RakeUtils.invoke_task("package:tar")
# where we want the packages to be copied to for the local build
nested_output = '../../../output'
pkg_path = '../pkg'
staging_path = 'pkg_artifacts'
FileUtils.cp(Dir.glob("pkg/*.gz").join(''), FileUtils.pwd)
# unpack the tarball we made during the build step
stdout, stderr, exitstatus = Pkg::Util::Execution.capture3(%(tar xf #{Dir.glob("*.gz").join('')}))
Pkg::Util::Execution.success?(exitstatus) or raise "Error unpacking tarball: #{stderr}"
Dir.chdir("#{Pkg::Config.project}-#{Pkg::Config.version}") do
Pkg::Config.final_mocks.split(" ").each do |mock|
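# e.g. (assumed mock name format): "pl-el-7-x86_64" yields platform "el-7",
# which collapses to "el7" and is then split into os "el" / ver "7" below.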
platform = mock.split('-')[1..-2].join('-')
platform_path = platform.gsub(/\-/, '')
os, ver = /([a-zA-Z]+)(\d+)/.match(platform_path).captures
puts "===================================="
puts "Packaging for #{os} #{ver}"
puts "===================================="
stdout, stderr, exitstatus = Pkg::Util::Execution.capture3(%(bash controller.sh #{os} #{ver} #{staging_path}))
Pkg::Util::Execution.success?(exitstatus) or raise "Error running packaging: #{stdout}\n#{stderr}"
puts "#{stdout}\n#{stderr}"
# I'm so sorry
# These paths are hard-coded in packaging, so hard code here too.
# When everything is moved to artifactory this should be able
# to be fixed. --MMR, 2017-08-30
if Pkg::Config.build_pe
platform_path = "pe/rpm/#{os}-#{ver}-"
else
# carry forward defaults from mock.rake
repo = Pkg::Config.yum_repo_name || 'products'
platform_path = "#{os}/#{ver}/#{repo}/"
end
# We want to include the arches for el/sles/fedora/redhatfips paths
['x86_64', 'i386'].each do |arch|
target_dir = "#{pkg_path}/#{platform_path}#{arch}"
FileUtils.mkdir_p(target_dir) unless File.directory?(target_dir)
FileUtils.cp(Dir.glob("*#{os}#{ver}*.rpm"), target_dir)
end
end
Pkg::Config.cows.split(" ").each do |cow|
# So you might think, from looking at
# https://github.com/puppetlabs/packaging/blob/551be049ae0261f0dd1b632993d4fbe1ada63d9c/lib/packaging/deb/repo.rb#L72
# that we want the repo to default to 'main' if unset. However, looking
# deeper into that method we need repo to be '' if apt_repo_name is unset
# https://github.com/puppetlabs/packaging/blob/551be049ae0261f0dd1b632993d4fbe1ada63d9c/lib/packaging/deb/repo.rb#L106
repo = Pkg::Config.apt_repo_name || ''
platform = cow.split('-')[1..-2].join('-')
# Keep on keepin' on with hardcoded paths in packaging
# Hopefully this goes away with artifactory.
# --MMR, 2017-08-30
platform_path = "pe/deb/#{platform}"
unless Pkg::Config.build_pe
# get rid of the trailing slash if repo = ''
platform_path = "deb/#{platform}/#{repo}".sub(/\/$/, '')
end
FileUtils.mkdir_p("#{pkg_path}/#{platform_path}") unless File.directory?("#{pkg_path}/#{platform_path}")
# there's no differences in packaging for deb vs ubuntu so picking debian
# if that changes we'll need to fix that
puts "===================================="
puts "Packaging for #{platform}"
puts "===================================="
stdout, stderr, exitstatus = Pkg::Util::Execution.capture3(%(bash controller.sh debian #{platform} #{staging_path}))
Pkg::Util::Execution.success?(exitstatus) or raise "Error running packaging: #{stdout}\n#{stderr}"
puts "#{stdout}\n#{stderr}"
FileUtils.cp(Dir.glob("*#{platform}*.deb"), "#{pkg_path}/#{platform_path}")
end
FileUtils.cp_r(pkg_path, nested_output)
FileUtils.rm_r(staging_path)
end
end
desc "get the property and bundle artifacts ready"
task :prep_artifacts, [:output_dir] => "pl:fetch" do |t, args|
props = Pkg::Config.config_to_yaml
bundle = Pkg::Util::Git.git_bundle('HEAD')
FileUtils.cp(props, "#{args[:output_dir]}/BUILD_PROPERTIES")
FileUtils.cp(bundle, "#{args[:output_dir]}/PROJECT_BUNDLE")
end
namespace :jenkins do
desc "trigger jenkins packaging job"
task :trigger_build, [:auth_string, :job_url] do |t, args|
unless args[:auth_string] =~ /:/
# Old-style bare-token "build?token=<foo>" is no longer supported.
raise "JENKINS_USER_AUTH must have the format <LDAP username>:<access token>"
end
Pkg::Util::RakeUtils.invoke_task("pl:prep_artifacts", Dir.pwd)
curl_opts = [
'--location',
"--user #{args[:auth_string]}",
'--request POST',
"--form file0=@#{Dir.pwd}/BUILD_PROPERTIES",
"--form file1=@#{Dir.pwd}/PROJECT_BUNDLE",
]
parameter_json = {
parameter: [
{
name: 'BUILD_PROPERTIES',
file: 'file0'
},
{
name: 'PROJECT_BUNDLE',
file: 'file1'
},
{
name: 'COWS',
value: Pkg::Config.cows
},
{
name: 'MOCKS',
value: Pkg::Config.final_mocks
}
]
}
if Pkg::Config.build_pe
Pkg::Util.check_var('PE_VER', ENV['PE_VER'])
parameter_json[:parameter] << {
name: 'PE_VER',
value: ENV['PE_VER']
}
end
curl_opts << %(--form json='#{parameter_json.to_json}')
curl_url = "#{args[:job_url]}/build"
output, _ = Pkg::Util::Net.curl_form_data(curl_url, curl_opts)
http_response = output.scan(/^HTTP.*$/).last
# Print an error unless it looks like we curl'd successfully
unless http_response =~ /2\d\d/
raise "HTTP response: #{http_response}\n\n#{get_auth_info(args[:job_url])}"
end
Pkg::Util::Net.print_url_info(args[:job_url])
package_url = "#{Pkg::Config.builds_server}/#{Pkg::Config.project}/#{Pkg::Config.ref}"
puts "After the build job is completed, packages will be available at:"
puts package_url
end
desc "trigger jenkins packaging job with local auth"
task :trigger_build_local_auth => "pl:fetch" do
if Pkg::Config.build_pe
jenkins_hostname = 'jenkins-enterprise.delivery.puppetlabs.net'
stream = 'enterprise'
else
jenkins_hostname = 'jenkins-platform.delivery.puppetlabs.net'
stream = 'platform'
end
job_url = "https://#{jenkins_hostname}/job/#{stream}_various-packaging-jobs_packaging-os-clj_lein-ezbake-generic"
begin
auth = Pkg::Util.check_var('JENKINS_USER_AUTH', ENV['JENKINS_USER_AUTH'])
rescue
STDERR.puts(get_auth_info(job_url))
end
begin
Pkg::Util::RakeUtils.invoke_task("pl:jenkins:trigger_build", auth, job_url)
rescue => e
STDERR.puts("\nError triggering job: #{job_url}")
STDERR.puts(e)
end
end
end
end
| 39.326316 | 126 | 0.602516 |
6a36641171dd2900175d8e61869926851f9c180b | 8,126 | require 'spec_helper'
require 'rest_helper'
require 'active_support'
require 'pp'
describe ActiveOrient::OrientDB do
before(:all) do
reset_database
ORD.create_class "V","E"
ORD.create_vertex_class 'dataset', :the_dataset
ORD.create_class 'linked_data'
end
# let(:rest_class) { (Class.new { include HCTW::Rest } ).new }
context "create ActiveOrient::Model classes" do
before(:all) do
# create classes Abstract, Depends and DependsOn
ORD.create_class 'abstract'
ORD.create_class( 'depends') { Abstract }
ORD.create_class( 'depends_on' ){ Depends }
end
it 'create a abstract class' do
expect( Abstract.superclass ).to be ActiveOrient::Model
end
it 'create a class hierarchy' do
expect(Depends.new).to be_a ActiveOrient::Base
expect(Depends.new).to be_a ActiveOrient::Model
expect(Depends.superclass ).to be Abstract
expect(Depends.superclass.superclass ).to be ActiveOrient::Model
expect(Depends.superclass.superclass.superclass ).to be ActiveOrient::Base
end
it 'ensure that methods defined later are passed through the object-tree' do
class Abstract # :nodoc:
def test
"test"
end
end
doa = Depends.new
expect( doa.test).to eq "test"
end
it "operations on dependson" do
expect( DependsOn.new ).to be_a ActiveOrient::Model
expect( DependsOn.superclass).to be Depends
doa = DependsOn.new
expect( doa.test).to eq "test" ## this works only if the previous test is performed prior to this one
end
# ## raises NameError: uninitialized Constant Quatsch
# it "allocate with a non existing superclass", focus:true do
# ActiveOrient::Model.orientdb_class name: 'quatsch', superclass: 'unsinn'
# expect( Quatsch.new ).to be_a ActiveOrient::Model
# expect( Quatsch.superclass ).to be Unsinn
# expect( Unsinn.superclass).to be ActiveOrient::Model
# end
end
context "create classes" do
it "create a single class" do
ORD.create_class "erste_klasse"
expect( ErsteKlasse.new ).to be_a ActiveOrient::Model
expect( ErsteKlasse.ref_name).to eq "erste_klasse"
m= ORD.create_class "erste_SYMBOL_klasse"
expect(m).to be ErsteSymbolKlasse
expect(m.ref_name).to eq "erste_SYMBOL_klasse"
end
# ## deactivated for now
# it "create a class hierachy " do
# cl_hash= { Z: [ :test1, :test2, 'test3'], :UZ => 'reisser' }
#
# m = Hash[ ORD.create_class( cl_hash ){ ORD.create_class( 'GT') } ]
# expect(m).to eq Z => [Test1, Test2, Test3], UZ => Reisser
# end
#
# it "complex hierarchy" do
#
# m= Hash[ ORD.create_class( { TZV: [ :A, :B, C: [:c1,:c3,:c2] ], EIZR: [:has_content, :becomes_hot ]} ) ]
# expect( m.keys ).to eq [TZV, EIZR ]
# expect( m[TZV] ).to eq [A, B, [[C, [C1, C3, C2]]]]
# expect( m[EIZR] ).to eq [HasContent, BecomesHot]
#
# end
it "create vertex classes through block" do
classes_simple = [ :one_z, :two_z, :three_z]
klasses = ORD.create_class( *classes_simple ){ 'V' }
classes_simple.each{|y| expect( ActiveOrient.database_classes.keys).to include y.to_s }
expect( klasses ).to have( 3 ).items
klasses.each{|x| expect(x.superclass).to eq V }
end
it "create and delete an Edge " do
edge_name = 'the_edge'
# ActiveOrient::Model::E.delete_class
model = ORD.create_edge_class edge_name
expect( model.new ).to be_a ActiveOrient::Model
expect( model.superclass ).to eq E
## a freshly initialized edge does not have "in" and "out" properties and thus does not look like an edge
expect( model.new.is_edge? ).to be_falsy
expect( ORD.classname model ).to eq edge_name.underscore
model.delete_class
expect( ORD.database_classes ).not_to include edge_name
end
end
context "create and delete records " do
before(:all) do
ORD.create_edge_class :the_edge
ORD.create_vertex_class :vertex1,:vertex2
end
it "populate database-table with data and subsequent delete them" do
records = (1 .. 100).map{|y| Vertex1.create testentry: y }
cachesize= ActiveOrient::Base.display_rid.size
expect( Vertex1.count ).to eq 100
expect( records ).to have(100).items
Vertex1.delete_record *records
expect( Vertex1.count ).to be_zero
newcachesize= ActiveOrient::Base.display_rid.size
expect( cachesize - newcachesize).to eq 100
end
it "populate database with data and connect them via an edge" do
record1 = (0 .. 99).map{|y| Vertex1.create testentry: y }
record2 = (:a .. :z).map{|y| Vertex2.create testentry: y }
expect(record1).to have(100).items
expect(record2).to have(26).items
cachesize= ActiveOrient::Base.display_rid.size
expect {
DB.create_edge TheEdge do | attributes |
('a'.ord .. 'z'.ord).map do |o|
{ from: record1.find{|x| x.testentry == o },
to: record2.find{ |x| x.testentry.to_s.ord == o } ,
attributes: attributes.merge( key: o.chr ) }
end
end }.to change{ TheEdge.count }
newcachesize= ActiveOrient::Base.display_rid.size
expect( cachesize - newcachesize).to be > 0
end
end
context "populate records with data" do
context "update records " do
before(:all) do
TheDataset.create_property :the_date, type: 'Date', index: :unique
TheDataset.create_property :the_value, type: 'String' , index: :unique
TheDataset.create_property :the_other_element, type: 'String'
end
it "add to records" do
TheDataset.create the_value: 'TestValue', the_other_value: 'a string',
the_date: Date.new(2015,11,11)
TheDataset.create the_value: 'TestValue2', the_other_value: 'a string2',
the_date: Date.new(2015,11,14)
expect( TheDataset.count).to eq 2
end
it "update via upsert" do
TheDataset.create the_value: 'TestValue3', the_other_value: 'a string2',
the_date: Date.new(2015,11,17)
## insert dataset
expect{ @orginal= DB.upsert TheDataset, set: {the_value: 'TestValue4', the_other_value: 'a string2'},
where: {the_date: Date.new(2015,11,15) } }.to change{ TheDataset.count }.by 1
## update dataset
# orginal = ORD.get_records(from: TheDataset, where: { the_date: Date.new(2015,11,14) }, limit: 1).pop
expect{ @updated= DB.upsert TheDataset, set: {the_value: 'TestValue5', the_other_value: 'a string6'},
where: { the_date: Date.new(2015,11,14) } }.not_to change { TheDataset.count }
# updated = ORD.get_records(from: TheDataset, where: { the_date: Date.new(2015,11,14) }, limit: 1).pop
#puts "The original: "+ @orginal.to_human
#puts "The update : "+ @updated.to_human
        expect( @original.the_value).not_to eq @updated.the_value
        # insert dataset and perform action with created object
new_record = DB.upsert( TheDataset,
set: {the_value: 'TestValue40', the_other_value: 'a string02'},
where: {the_date: Date.new(2015,11,14)} ) do | the_new_record |
# expect( the_new_record ).to be_a ActiveOrient::Model
# expect( the_new_record.the_value).to eq 'TestValue40'
end
# }.to change{ TheDataset.count }.by 1
expect( new_record.the_value ).to eq 'TestValue40'
end
end
end
# this interferes with other test, thus placing it to the end
context "play with naming conventions" do
it "the standard case" do
m = ORD.create_class "erster_test"
expect(m).to be ErsterTest
expect(m.ref_name).to eq "erster_test"
end
it "change the naming convention" do
## We want to represent all Edges with Uppercase-Letters
class E < ActiveOrient::Model # :nodoc:
def self.naming_convention name=nil
name.present? ? name.upcase : ref_name.upcase
end
end
m = ORD.create_class( "zweiter" ){ :E }
puts m.classname
expect(m.superclass).to be E
expect(m).to be ZWEITER
expect(m.ref_name).to eq "zweiter"
end
end
end
| 35.484716 | 113 | 0.654689 |
6a82c3ced3ff5fff76eb921873e278031c20e7af | 59 | module Frank
class Action < ActiveRecord::Base
end
end
| 11.8 | 35 | 0.745763 |
0353c45d233477e9aee53fed6f9e61cb73ccfb3b | 4,021 | Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
# Enable Rack::Cache to put a simple HTTP cache in front of your application
# Add `rack-cache` to your Gemfile before enabling this.
# For large-scale production use, consider using a caching reverse proxy like
# NGINX, varnish or squid.
# config.action_dispatch.rack_cache = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.serve_static_files = ENV['RAILS_SERVE_STATIC_FILES'].present?
# Compress JavaScripts and CSS.
config.assets.js_compressor = :uglifier
# config.assets.css_compressor = :sass
# Do not fallback to assets pipeline if a precompiled asset is missed.
config.assets.compile = false
# Asset digests allow you to set far-future HTTP expiration dates on all assets,
# yet still be able to expire them through the digest params.
config.assets.digest = true
# `config.assets.precompile` and `config.assets.version` have moved to config/initializers/assets.rb
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = 'X-Sendfile' # for Apache
# config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for NGINX
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Use the lowest log level to ensure availability of diagnostic information
# when problems arise.
config.log_level = :debug
# Prepend all log lines with the following tags.
# config.log_tags = [ :subdomain, :uuid ]
# Use a different logger for distributed setups.
# config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.action_controller.asset_host = 'http://assets.example.com'
# Ignore bad email addresses and do not raise email delivery errors.
# Set this to true and configure the email server for immediate delivery to raise delivery errors.
# config.action_mailer.raise_delivery_errors = false
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Send deprecation notices to registered listeners.
config.active_support.deprecation = :notify
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
config.action_mailer.default_url_options = { host: 'http://goodbooks4devs.herokuapp.com/' }
ActionMailer::Base.delivery_method = :smtp
ActionMailer::Base.perform_deliveries = true
ActionMailer::Base.smtp_settings = {
address: 'smtp.sendgrid.net',
port: '587',
authentication: :plain,
user_name: ENV['username_sendgrid'],
password: ENV['password_sendgrid'],
domain: 'heroku.com',
enable_starttls_auto: true
}
config.paperclip_defaults = {
storage: :s3,
s3_host_name: 's3-sa-east-1.amazonaws.com',
s3_credentials: {
bucket: ENV['S3_BUCKET_NAME'],
access_key_id: ENV['AWS_ACCESS_KEY_ID'],
secret_access_key: ENV['AWS_SECRET_ACCESS_KEY']
}
}
end
| 38.663462 | 102 | 0.751057 |
012f22ccf2a66591e9a4e18083332411c93fb8bc | 814 | class Issue < ApplicationRecord
validates :issue_date, uniqueness: true, presence: true
scope :published, -> { where(published: true) }
has_many :articles
after_save :touch_articles
validate :is_wednesday
paginates_per 12
def to_param
self.issue_date.to_s
end
def standard_articles
self.articles.standard.by_position
end
def month
self.issue_date.strftime('%B')
end
def featured_article
standard_articles.by_position.first
end
def self.current
where(published: true).order('issue_date desc').first
end
private
def touch_articles
articles.update_all(updated_at: Time.zone.now)
end
def is_wednesday
return false unless self.issue_date
if !issue_date.wednesday?
errors.add(:issue_date, "is not a Wednesday")
end
end
end
| 17.319149 | 57 | 0.723587 |
e8b29572af7894825bacfef767acca91f125739b | 2,632 | module FBEvents
$: << './'
require 'open-uri'
require 'openssl'
require 'yaml'
require 'json'
require 'date'
VERSION = '1.0.0'
def FBEvents.get(group_id, client_id, client_secret, resource)
ScmsUtils.log( "Getting FaceBook Access Token" )
access_token = FBEvents.get_access_token(client_id, client_secret)
ScmsUtils.successLog( access_token )
ScmsUtils.log( "Getting FaceBook Events" )
events = FBEvents.get_events(group_id, access_token)
if events != nil && events["data"].length > 0
ScmsUtils.log( "Getting individual event details" )
events_resource = FBEvents.load(resource)
if events_resource == FALSE.class
ScmsUtils.log( "Creating events list" )
events_resource = Hash.new
end
events_array = Array.new
if events_resource != false
events["data"].sort_by{|start_time|}.reverse.each {|event|
start_time = Date.parse(event["start_time"])
if start_time < Date.today
puts "Skiping #{event["name"]} because its in the past"
else
ScmsUtils.successLog( "#{event["name"]} - start_time: #{start_time.strftime("%d/%m/%Y")}" )
events_array << FBEvents.get_event(event["id"], access_token)
end
}
end
events_resource["events"] = events_array
FBEvents.save(resource, events_resource)
else
puts "No Future Events"
puts events
end
end
def FBEvents.get_access_token(client_id, client_secret)
accessTokenUri = URI.encode("https://graph.facebook.com/oauth/access_token?client_id=#{client_id}&client_secret=#{client_secret}&grant_type=client_credentials")
accessToken = open(accessTokenUri, :ssl_verify_mode => OpenSSL::SSL::VERIFY_NONE) {|io|
io.read
}
return accessToken
end
def FBEvents.get_events(group_id, accessToken)
eventsUri = "https://graph.facebook.com/#{group_id}/events?#{accessToken}"
#puts eventsUri
eventsUri = URI.encode(eventsUri)
events = open(eventsUri, :ssl_verify_mode => OpenSSL::SSL::VERIFY_NONE) {|io|
io.read
}
return JSON.parse(events)
end
def FBEvents.get_event(event_id, accessToken)
eventUri = URI.encode("https://graph.facebook.com/#{event_id}?#{accessToken}")
event = open(eventUri, :ssl_verify_mode => OpenSSL::SSL::VERIFY_NONE) {|io|
io.read
}
return JSON.parse(event)
end
def FBEvents.save(file_path, hash)
if hash != nil
File.open(file_path, "w") {|file| file.puts(hash.to_yaml) }
else
ScmsUtils.errLog( "No event data from Facebook" )
end
end
def FBEvents.load(file_path)
yaml = ""
File.open(file_path, "r") {|file|
yaml = YAML.load(file)
}
return yaml
end
end
| 29.244444 | 162 | 0.68921 |
03589bc117d21ee74a51fc639c44d9d5d14f67eb | 962 | #ruby 1.9.3
inp = gets.chomp
arr = inp.split(' ').map(&:to_i)
avg = arr.inject(:+) / arr.count
puts case avg
when 91..100 then 'A'
when 81..90 then 'B'
when 71..80 then 'C'
when 61..70 then 'D'
else 'F'
end
=begin
Title: The Report Card!
Problem: Priyanka is Professor at Zing University. Help the professor in assigning grades to her students.
The mean score of three subjects is to graded into 'A', 'B', 'C' or 'D' or 'F' depending upon the marks scored.
For the score >90 and score =<100, 'A' is graded.
For score >80 and score =<90, 'B' is graded.
For the score >70 and score <=80, 'C' is graded.
For the score >60 and score <=70, 'D' is graded.
For the score =< 60 'F' is graded.
Input: Input contains the marks obtained by student in 3 subjects separated by a space.
Output: Output should be the grade scored by the student.
Constraints: 0 ≤ Marks obtained in each subject ≤ 100
Sample Input: 100 75 54
Sample Output:
C
=end
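# Worked example (illustrative only, values taken from the sample above):
# for the input "100 75 54" the script computes
#   [100, 75, 54].inject(:+) / 3   # => 76 (integer division)
# and 76 falls into the 71..80 branch, so the output is 'C'.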
| 28.294118 | 113 | 0.682952 |
ed8c8275ba10d8843770a79cef54b0a037a4b05d | 340 | require 'rdoc/test_case'
class TestRDocRubyToken < RDoc::TestCase
def test_Token_text
token = RDoc::RubyToken::Token.new 0, 0, 0, 'text'
assert_equal 'text', token.text
end
def test_TkOp_name
token = RDoc::RubyToken::TkOp.new 0, 0, 0, '&'
assert_equal '&', token.text
assert_equal '&', token.name
end
end
| 17 | 54 | 0.664706 |
ffa051d4b4788f5a1f7a5f8eeec66e875db67350 | 2,199 | require File.expand_path("../../spec_helper", __FILE__)
describe CouchRest::Exception do
it "returns a 'message' equal to the class name if the message is not set, because 'message' should not be nil" do
e = CouchRest::Exception.new
expect(e.message).to eq "CouchRest::Exception"
end
it "returns the 'message' that was set" do
e = CouchRest::Exception.new
message = "An explicitly set message"
e.message = message
expect(e.message).to eq message
end
it "sets the exception message to ErrorMessage" do
expect(CouchRest::NotFound.new.message).to eq 'Not Found'
end
it "contains exceptions in CouchRest" do
expect(CouchRest::Unauthorized.new).to be_a_kind_of(CouchRest::Exception)
end
end
describe CouchRest::RequestFailed do
before do
@response = double('HTTP Response', :status => 500)
end
it "stores the http response on the exception" do
response = "response"
begin
raise CouchRest::RequestFailed, response
rescue CouchRest::RequestFailed => e
expect(e.response).to eq response
end
end
it "http_code convenience method for fetching the code as an integer" do
expect(CouchRest::RequestFailed.new(@response).http_code).to eq 500
end
it "http_body convenience method for fetching the body (decoding when necessary)" do
expect(CouchRest::RequestFailed.new(@response).http_code).to eq 500
expect(CouchRest::RequestFailed.new(@response).message).to eq 'HTTP status code 500'
end
it "shows the status code in the message" do
expect(CouchRest::RequestFailed.new(@response).to_s).to match(/500/)
end
end
describe CouchRest::NotFound do
it "also has the http response attached" do
response = "response"
begin
raise CouchRest::NotFound, response
rescue CouchRest::NotFound => e
expect(e.response).to eq response
end
end
it 'stores the body on the response of the exception' do
body = "body"
stub_request(:get, "http://www.example.com").to_return(:body => body, :status => 404)
begin
CouchRest.get "http://www.example.com"
raise
rescue CouchRest::NotFound => e
expect(e.response.body).to eq body
end
end
end
| 29.32 | 116 | 0.703956 |
5d070b3e51c21454db601ff8581ed1765b516c60 | 149 | class CreateWithCarrierwaves < ActiveRecord::Migration
def change
create_table :with_carrierwaves do |t|
t.string :pdf
end
end
end
| 18.625 | 54 | 0.724832 |
26e8c4fb7a9da08b63933d03ed0cffee89e4bb77 | 2,835 | ##########################################################################
# Copyright 2018 ThoughtWorks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
module Pages
class GeneralSettingsPage < AppBase
element :env_variable ,"#variables"
element :parameter, ".popup_form #params"
element :secure_variable ,"#variables_secure"
element :task_save, "button[value='SAVE']"
element :save_message, "#message_pane > p"
def on_tab(tab_name)
page.find('a', text: tab_name).click
end
def add_env_variable(variable_name , variable_value)
env_variable.find('tbody').find_all('tr').last.find('.environment_variable_name').set(variable_name)
env_variable.find('tbody').find_all('tr').last.find('.environment_variable_value').set(variable_value)
env_variable.find('a' , text: 'Add').click
end
def add_sec_env_variable(variable_name , variable_value)
secure_variable.find('tbody').find_all('tr').last.find('.environment_variable_name').set(variable_name)
secure_variable.find('tbody').find_all('tr').last.find('.environment_variable_value').set(variable_value)
secure_variable.find('a' , text: 'Add').click
end
def get_message
save_message.text
end
def verify_secure_variables_table_row(key)
secure_variable.find('tbody').find_all('tr').each do |tr|
return if tr.find('.environment_variable_name').value == key
end
end
def verify_variables_table_row(key,value)
env_variable.find('tbody').find_all('tr').each do |tr|
return if tr.find('.environment_variable_name').value == key && tr.find('.environment_variable_value').value == value
end
end
def verify_parameters_table_row(key,value)
parameter.find('tbody').find_all('tr').each do |tr|
return if tr.find('.environment_variable_name').value == key && tr.find('.environment_variable_value').value == value
end
end
def is_link_exist?(link)
page.has_css?('.menu_link',text:link)
end
end
end
| 38.310811 | 133 | 0.617284 |
282970a699de317a7f79dc74ee719c22d04cb6fb | 170 | # This file is used by Rack-based servers to start the application.
require ::File.expand_path('../config/environment', __FILE__)
run EndangeredPetsSample::Application
| 34 | 67 | 0.788235 |
38146a44fce13862494529223d0f7ae43cef7e0f | 934 | # frozen_string_literal: true
require File.expand_path('lib/jekyll-last-modified-at/version.rb', __dir__)
Gem::Specification.new do |s|
s.name = 'jekyll-last-modified-at'
s.version = Jekyll::LastModifiedAt::VERSION
s.summary = 'A liquid tag for Jekyll to indicate the last time a file was modified.'
s.authors = 'Garen J. Torikian'
s.homepage = 'https://github.com/gjtorikian/jekyll-last-modified-at'
s.license = 'MIT'
s.files = Dir['lib/**/*.rb']
s.add_dependency 'jekyll', '>= 3.7', ' < 5.0'
s.add_dependency 'posix-spawn', '~> 0.3.9'
s.add_development_dependency 'rake'
s.add_development_dependency 'rspec', '~> 3.4'
s.add_development_dependency 'rubocop'
s.add_development_dependency 'rubocop-performance'
s.add_development_dependency 'rubocop-standard'
s.add_development_dependency 'spork'
end
| 40.608696 | 100 | 0.648822 |
616030cf8eb7cd839b74a798bc0445ee05631d98 | 1,140 | test_name 'C3426 - clone (file path)'
# Globals
repo_name = 'testrepo_clone'
hosts.each do |host|
tmpdir = host.tmpdir('vcsrepo')
step 'setup - create repo' do
install_package(host, 'git')
my_root = File.expand_path(File.join(File.dirname(__FILE__), '../../../..'))
scp_to(host, "#{my_root}/acceptance/files/create_git_repo.sh", tmpdir)
on(host, "cd #{tmpdir} && ./create_git_repo.sh")
end
teardown do
on(host, "rm -fr #{tmpdir}")
end
step 'clone with puppet' do
pp = <<-EOS
vcsrepo { "#{tmpdir}/#{repo_name}":
ensure => present,
source => "#{tmpdir}/testrepo.git",
provider => git,
}
EOS
apply_manifest_on(host, pp, :catch_failures => true)
apply_manifest_on(host, pp, :catch_changes => true)
end
step "verify checkout is on the master branch" do
on(host, "ls #{tmpdir}/#{repo_name}/.git/") do |res|
fail_test('checkout not found') unless res.stdout.include? "HEAD"
end
on(host, "cat #{tmpdir}/#{repo_name}/.git/HEAD") do |res|
fail_test('master not found') unless res.stdout.include? "ref: refs/heads/master"
end
end
end
| 26.511628 | 87 | 0.630702 |
e9702c981bb06ad22ec9c01a5f66322088874f65 | 243 | # frozen_string_literal: true
class CreateComments < ActiveRecord::Migration[6.0]
def change
create_table :comments do |t|
t.integer :user_id
t.integer :opinion_id
t.text :content
t.timestamps
end
end
end
| 17.357143 | 51 | 0.674897 |
1c0f68f508a3ee693cff39441a46fffdfa25e75a | 275 | # encoding: utf-8
require "logstash/devutils/rspec/spec_helper"
require "logstash/outputs/websocket"
describe "output/websocket" do
subject(:output) { LogStash::Outputs::WebSocket.new }
it "should register" do
expect {output.register}.to_not raise_error
end
end
| 21.153846 | 55 | 0.756364 |
6af3d4ea1ddb541faf678acb9de99df07911d4ce | 6,908 | #
# Author:: Adam Jacob (<[email protected]>)
# Copyright:: Copyright (c) 2009 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'chef/knife'
class Chef
class Knife
class Configure < Knife
attr_reader :chef_server, :new_client_name, :admin_client_name, :admin_client_key
attr_reader :chef_repo, :new_client_key, :validation_client_name, :validation_key
deps do
require 'ohai'
Chef::Knife::ClientCreate.load_deps
Chef::Knife::UserCreate.load_deps
end
banner "knife configure (options)"
option :repository,
:short => "-r REPO",
:long => "--repository REPO",
:description => "The path to the chef-repo"
option :initial,
:short => "-i",
:long => "--initial",
:boolean => true,
:description => "Use to create a API client, typically an administrator client on a freshly-installed server"
option :admin_client_name,
:long => "--admin-client-name NAME",
:description => "The name of the client, typically the name of the admin client"
option :admin_client_key,
:long => "--admin-client-key PATH",
:description => "The path to the private key used by the client, typically a file named admin.pem"
option :validation_client_name,
:long => "--validation-client-name NAME",
:description => "The name of the validation client, typically a client named chef-validator"
option :validation_key,
:long => "--validation-key PATH",
:description => "The path to the validation key used by the client, typically a file named validation.pem"
def configure_chef
# We are just faking out the system so that you can do this without a key specified
Chef::Config[:node_name] = 'woot'
super
Chef::Config[:node_name] = nil
end
def run
ask_user_for_config_path
FileUtils.mkdir_p(chef_config_path)
ask_user_for_config
::File.open(config[:config_file], "w") do |f|
f.puts <<-EOH
log_level :info
log_location STDOUT
node_name '#{new_client_name}'
client_key '#{new_client_key}'
validation_client_name '#{validation_client_name}'
validation_key '#{validation_key}'
chef_server_url '#{chef_server}'
syntax_check_cache_path '#{File.join(chef_config_path, "syntax_check_cache")}'
EOH
unless chef_repo.empty?
f.puts "cookbook_path [ '#{chef_repo}/cookbooks' ]"
end
end
if config[:initial]
ui.msg("Creating initial API user...")
Chef::Config[:chef_server_url] = chef_server
Chef::Config[:node_name] = admin_client_name
Chef::Config[:client_key] = admin_client_key
user_create = Chef::Knife::UserCreate.new
user_create.name_args = [ new_client_name ]
user_create.config[:user_password] = config[:user_password] ||
ui.ask("Please enter a password for the new user: ") {|q| q.echo = false}
user_create.config[:admin] = true
user_create.config[:file] = new_client_key
user_create.config[:yes] = true
user_create.config[:disable_editing] = true
user_create.run
else
ui.msg("*****")
ui.msg("")
ui.msg("You must place your client key in:")
ui.msg(" #{new_client_key}")
ui.msg("Before running commands with Knife!")
ui.msg("")
ui.msg("*****")
ui.msg("")
ui.msg("You must place your validation key in:")
ui.msg(" #{validation_key}")
ui.msg("Before generating instance data with Knife!")
ui.msg("")
ui.msg("*****")
end
ui.msg("Configuration file written to #{config[:config_file]}")
end
def ask_user_for_config_path
config[:config_file] ||= ask_question("Where should I put the config file? ", :default => "#{Chef::Config[:user_home]}/.chef/knife.rb")
# have to use expand path to expand the tilde character to the user's home
config[:config_file] = File.expand_path(config[:config_file])
if File.exists?(config[:config_file])
confirm("Overwrite #{config[:config_file]}")
end
end
def ask_user_for_config
server_name = guess_servername
@chef_server = config[:chef_server_url] || ask_question("Please enter the chef server URL: ", :default => "https://#{server_name}:443")
if config[:initial]
@new_client_name = config[:node_name] || ask_question("Please enter a name for the new user: ", :default => Etc.getlogin)
@admin_client_name = config[:admin_client_name] || ask_question("Please enter the existing admin name: ", :default => 'admin')
@admin_client_key = config[:admin_client_key] || ask_question("Please enter the location of the existing admin's private key: ", :default => '/etc/chef-server/admin.pem')
@admin_client_key = File.expand_path(@admin_client_key)
else
@new_client_name = config[:node_name] || ask_question("Please enter an existing username or clientname for the API: ", :default => Etc.getlogin)
end
@validation_client_name = config[:validation_client_name] || ask_question("Please enter the validation clientname: ", :default => 'chef-validator')
@validation_key = config[:validation_key] || ask_question("Please enter the location of the validation key: ", :default => '/etc/chef-server/chef-validator.pem')
@validation_key = File.expand_path(@validation_key)
@chef_repo = config[:repository] || ask_question("Please enter the path to a chef repository (or leave blank): ")
@new_client_key = config[:client_key] || File.join(chef_config_path, "#{@new_client_name}.pem")
@new_client_key = File.expand_path(@new_client_key)
end
def guess_servername
o = Ohai::System.new
o.require_plugin 'os'
o.require_plugin 'hostname'
o[:fqdn] || 'localhost'
end
def config_file
config[:config_file]
end
def chef_config_path
File.dirname(config_file)
end
end
end
end
| 40.397661 | 186 | 0.632455 |
624ff9a29206ec4cbace16ed28915eb1af3ba6a7 | 2,050 | require 'net/https'
require 'json'
require 'rest-client'
OSU_AUTH_URL = 'https://osu.ppy.sh/oauth/authorize'.freeze
OSU_TOKEN_URL = 'https://osu.ppy.sh/oauth/token'.freeze
OSU_SELF_REQUEST_URL = 'https://osu.ppy.sh/api/v2/me'.freeze
class OsuAuthRequest < ApplicationRecord
before_create { self.nonce = SecureRandom.uuid }
belongs_to :player
belongs_to :discord_server
def authorisation_link
params = {
'client_id': ENV.fetch('OSU_CLIENT_ID'),
'redirect_uri': ENV.fetch('OSU_CALLBACK_URL'),
'response_type': 'code',
'scope': 'identify',
'state': nonce,
}
"#{OSU_AUTH_URL}?#{params.to_query}"
end
def process_code_response(code)
if created_at < 10.minutes.ago
raise OsuAuthErrors::TimeoutError, 'Auth request timed out. Please restart the registration process.'
end
params = {
'client_id': ENV.fetch('OSU_CLIENT_ID'),
'client_secret': ENV.fetch('OSU_CLIENT_SECRET'),
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': ENV.fetch('OSU_CALLBACK_URL'),
}
begin
token_response = RestClient.post(
OSU_TOKEN_URL,
params,
{ accept: :json, content_type: :json }
)
rescue RestClient::ExceptionWithResponse => e
logger.error("Failed to exchange code for osu! API token for auth request #{id}. Got #{e.response.code} response #{e.response.body}")
raise OsuAuthErrors::UnauthorisedError, 'osu! authorisation failed!'
end
token_json = JSON.parse(token_response.body)
begin
user_response = RestClient.get(
OSU_SELF_REQUEST_URL,
{ accept: :json, content_type: :json, authorization: "Bearer #{token_json['access_token']}" }
)
rescue RestClient::ExceptionWithResponse => e
logger.error("Failed to retrieve user details for auth request #{id}. Got #{e.response.code} response #{e.response.body}")
raise OsuAuthErrors::OsuAuthError, 'Failed to retrieve user from osu! API.'
end
JSON.parse(user_response.body)
end
end
| 31.060606 | 139 | 0.678049 |
abf926a9550e01ae643f44a0d47d2ec621d01c29 | 1,312 | require_relative '../spec_helper'
describe Dogapi::Client do
USER_HANDLE = '[email protected]'.freeze
USER_DESCRIPTION = {
handle: USER_HANDLE,
name: 'TEST'
}.freeze
USER_EMAILS = (1..4).map { |i| "test#{i}@example.com" }
describe '#invite' do
it_behaves_like 'an api method with options',
:invite, [USER_EMAILS],
:post, '/invite_users', 'emails' => USER_EMAILS
end
describe '#create_user' do
it_behaves_like 'an api method',
:create_user, [USER_DESCRIPTION],
:post, '/user', USER_DESCRIPTION
end
describe '#get_user' do
it_behaves_like 'an api method',
:get_user, [USER_HANDLE],
:get, "/user/#{USER_HANDLE}"
end
describe '#get_all_users' do
it_behaves_like 'an api method',
:get_all_users, [],
:get, '/user'
end
describe '#update_user' do
it_behaves_like 'an api method',
:update_user, [USER_HANDLE, USER_DESCRIPTION],
:put, "/user/#{USER_HANDLE}", USER_DESCRIPTION
end
describe '#disable_user' do
it_behaves_like 'an api method',
:disable_user, [USER_HANDLE],
:delete, "/user/#{USER_HANDLE}"
end
end
| 27.914894 | 67 | 0.565549 |
21d9ae5442d5daa042a63d354cd3e85e325bfffa | 823 | # == Schema Information
#
# Table name: browsing_histories
#
# id :integer not null, primary key
# user_id :integer
# listing_id :integer
# viewed_at :datetime
# created_at :datetime not null
# updated_at :datetime not null
#
# Indexes
#
# index_browsing_histories_on_listing_id (listing_id)
# index_browsing_histories_on_user_id (user_id)
# index_browsing_histories_on_viewed_at (viewed_at)
#
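# Illustrative usage (identifiers such as `user` and `listing` are assumed,
# not part of the original file):
#   BrowsingHistory.insert_record(user.id, listing.id)
# creates a row for the pair with viewed_at set to today's date, via the
# class method defined below.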
class BrowsingHistory < ApplicationRecord
belongs_to :user
belongs_to :listing
validates :user_id, presence: true
validates :listing_id, presence: true
class << self
def insert_record(user_id, listing_id)
BrowsingHistory.create(
user_id: user_id,
listing_id: listing_id,
viewed_at: Time.zone.now.to_date
)
end
end
end
| 23.514286 | 55 | 0.693803 |
d52f0d7273135bdb6a1a09bda03914ae9be198b5 | 9,305 | #==============================================================================
# ** Scene_Battle
#------------------------------------------------------------------------------
# This class performs battle screen processing.
#==============================================================================
class Scene_Battle < Scene_Base
include EBJB
#//////////////////////////////////////////////////////////////////////////
# * Public Methods
#//////////////////////////////////////////////////////////////////////////
#--------------------------------------------------------------------------
# * Alias start
#--------------------------------------------------------------------------
alias start_bc_inputskills start unless $@
def start
start_bc_inputskills
@button_inputting = false
@input = false
@input_window = Window_InputSkill.new(440, 50, 180, 56, nil)
@input_window.opacity = 0
@input_window.active = false
@input_window.visible = false
end
#--------------------------------------------------------------------------
# * Alias terminate
#--------------------------------------------------------------------------
alias terminate_bc_inputskills terminate unless $@
def terminate
terminate_bc_inputskills
@input_window.dispose if @input_window != nil
end
#--------------------------------------------------------------------------
# * Alias update_basic
#--------------------------------------------------------------------------
alias update_basic_bc_inputskills update_basic unless $@
def update_basic(main = false)
update_basic_bc_inputskills(main)
@input_window.update
end
#--------------------------------------------------------------------------
# * Alias custom_actor_command_active?
#--------------------------------------------------------------------------
alias custom_actor_command_active_bc_inputskills? custom_actor_command_active? unless $@
def custom_actor_command_active?
if @button_inputting
return true
else
return custom_actor_command_active_bc_inputskills?
end
end
#--------------------------------------------------------------------------
# * Alias update_actor_command_input
#--------------------------------------------------------------------------
alias update_actor_command_input_bc_inputskills update_actor_command_input unless $@
def update_actor_command_input
if @button_inputting
update_button_inputting # Button inputting (for Blitz Command)
else
update_actor_command_input_bc_inputskills
end
end
#--------------------------------------------------------------------------
# * Alias execute_battle_commands
#--------------------------------------------------------------------------
alias execute_battle_commands_bc_inputskills execute_battle_commands unless $@
def execute_battle_commands(actor)
command = @actor_command_window.selected_command
if (command.is_a?(List_Command) && command.filter != nil && command.filter.mode == "inputskills" ) ||
command.is_a?(InputSkill_Command)
@input = true
end
execute_battle_commands_bc_inputskills(actor)
end
#--------------------------------------------------------------------------
# * Execute Battle Actions
#--------------------------------------------------------------------------
alias execute_action_bc_inputskills execute_action unless $@
def execute_action
if @active_battler.action.failed_input?
execute_action_failed_input
@active_battler.action.clear
else
execute_action_bc_inputskills
end
end
#--------------------------------------------------------------------------
# * Execute Battle Action: Failed Skill Input
#--------------------------------------------------------------------------
def execute_action_failed_input
@top_help_window.set_text("Incorrect Skill input")
@top_help_window.visible = true
@active_battler.stamina_wait = true
@active_battler.empty_stamina
execute_basic_action(nil)
@top_help_window.visible = false
end
#//////////////////////////////////////////////////////////////////////////
# * Scene Commands
#//////////////////////////////////////////////////////////////////////////
#--------------------------------------------------------------------------
# * Alias confirm_enemy_selection
#--------------------------------------------------------------------------
alias confirm_enemy_selection_bc_inputskills confirm_enemy_selection unless $@
def confirm_enemy_selection(actor)
if @input
start_button_inputting
else
confirm_enemy_selection_bc_inputskills(actor)
end
end
#--------------------------------------------------------------------------
# * Alias confirm_actor_selection
#--------------------------------------------------------------------------
alias confirm_actor_selection_bc_inputskills confirm_actor_selection unless $@
def confirm_actor_selection(actor)
if @input
start_button_inputting
else
confirm_actor_selection_bc_inputskills(actor)
end
end
#--------------------------------------------------------------------------
# * Alias confirm_no_selection_skill
#--------------------------------------------------------------------------
alias confirm_no_selection_skill_bc_inputskills confirm_no_selection_skill unless $@
def confirm_no_selection_skill(skill)
if @input
start_button_inputting
else
confirm_no_selection_skill_bc_inputskills(skill)
end
end
#--------------------------------------------------------------------------
# * Setup everything needed to start listening for button inputs
#--------------------------------------------------------------------------
def start_button_inputting
actor = $game_party.members[@actor_index]
input_command = BATTLECOMMANDS_CONFIG::BC_INPUTSKILLS_COMMANDS.select{|x| x.skill_id == actor.action.skill.id}.first
@button_inputting = true
@button_index = 0
@skill_input = input_command.button_inputs
@button_time = input_command.button_time
@input_window.window_update(input_command)
@input_window.visible = true
deactivate_stamina(0)
end
#--------------------------------------------------------------------------
# * Reset everything changed when listening for button inputs ends
#--------------------------------------------------------------------------
def end_button_inputting
end_skill_selection()
end_target_actor_selection(false)
end_target_enemy_selection(false)
@input = false
@button_inputting = false
@button_index = nil
@skill_input = nil
@button_time = nil
@input_window.visible = false
activate_stamina(0)
end
#--------------------------------------------------------------------------
  # * Wait until the player presses a button. A wrong button or an expired
  #     timer marks the skill input as failed; a completed input sequence
  #     queues the actor's action.
#--------------------------------------------------------------------------
def update_button_inputting
actor = $game_party.members[@actor_index]
current_input = get_current_input()
@button_time -= 1
@input_window.update_values(@button_index, @button_time)
if current_input != nil
if @skill_input[@button_index] == current_input
@button_index += 1
if @skill_input[@button_index].nil?
Sound.play_decision
add_to_battleline(actor)
end_actor_command_selection()
end_button_inputting()
else
Sound.play_cursor
end
else
Sound.play_buzzer
actor.action.set_failed_input
add_to_battleline(actor)
end_actor_command_selection()
end_button_inputting()
end
    end
if @button_time == 0
Sound.play_buzzer
actor.action.set_failed_input
add_to_battleline(actor)
end_actor_command_selection()
end_button_inputting()
end
end
#//////////////////////////////////////////////////////////////////////////
# * Private Methods
#//////////////////////////////////////////////////////////////////////////
#--------------------------------------------------------------------------
# *
#--------------------------------------------------------------------------
def get_current_input()
if Input.trigger?(Input::A)
return Input::A
elsif Input.trigger?(Input::B)
return Input::B
elsif Input.trigger?(Input::C)
return Input::C
elsif Input.trigger?(Input::X)
return Input::X
elsif Input.trigger?(Input::Y)
return Input::Y
elsif Input.trigger?(Input::Z)
return Input::Z
elsif Input.trigger?(Input::L)
return Input::L
elsif Input.trigger?(Input::R)
return Input::R
elsif Input.trigger?(Input::LEFT)
return Input::LEFT
elsif Input.trigger?(Input::RIGHT)
return Input::RIGHT
elsif Input.trigger?(Input::DOWN)
return Input::DOWN
elsif Input.trigger?(Input::UP)
return Input::UP
end
end
private :get_current_input
end
| 34.850187 | 120 | 0.478882 |
d53dd2acbd8a32c2f2e51454852449348993867b | 966 | #
# Copyright 2016, SUSE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
define :openstack_pacemaker_controller_location_ignoring_upgrade_for do
resource = params[:name]
location_name = "l-#{resource}-controller"
pacemaker_location location_name do
definition OpenStackHAHelper.controller_only_location_ignoring_upgrade(location_name, resource)
action :update
only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) }
end
location_name
end
| 35.777778 | 99 | 0.781573 |
1ccc17d625295a2381e7c1d5931ea6c085563a57 | 525 | require "spec_helper"
describe "Element#append_to" do
html <<-HTML
<div id="foo"></div>
<div id="bar"></div>
<div id="baz"></div>
HTML
it "should insert the receiver into the target element" do
Element.find('#foo').children.size.should == 0
Element.parse('<ul class="kapow"></ul>').append_to Element.find('#foo')
Element.find('#foo').children.class_name.should == "kapow"
Element.find('#bar').append_to Element.find('#baz')
Element.find('#baz').children.id.should == "bar"
end
end
| 26.25 | 75 | 0.641905 |
6254fcc7605cf69b7ee4e53a25a51a6ffb8d875f | 6,331 | module Evening_Standup_Commands
def gathering_responds_from_form_evening(responds:,
arguments_from_form:,
creating_standup: )
arguments_from_form.each_with_index do |u, index|
if !creating_standup && first_argument(index)
responds.append( u[1][:edit_option][:selected_option][:value])
else
responds.append u[1][:input][:value].nil? ? ":ultrafast_serin_spiner:" : u[1][:input][:value]
end
end
end
def post_public_evening(slack_client:,
command_channel:,
name_of_user:,
word:,
pic:,
value_1_if_editing_existing_standup:)
slack_client.chat_postMessage(
channel: command_channel,
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "Standup wieczorny: "+
"#{name_of_user}",
"emoji": true
}
},
],
"attachments": [
fields:[
{
"title": "1. Co ukoΕczone?",
"value": word[0 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "2. Co nieukoΕczone?",
"value": word[1 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "3. Blockery podczas dnia",
"value": word[2 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "4. Jakie wnioski?",
"value": word[3 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "PR'ki, tickeciki itd.",
"value": word[4 + value_1_if_editing_existing_standup],
"short": false
},
],
color: "#1B4D3E",
thumb_url: "#{pic}",
],
)
end
def edit_public_evening(slack_client:,
command_channel:,
name_of_user:,
word:,
pic:,
ts:,
value_1_if_editing_existing_standup:)
slack_client.chat_update(
channel: command_channel,
ts: ts,
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "Standup wieczorny: "+
"#{name_of_user}",
"emoji": true
}
},
],
"attachments": [
fields:[
{
"title": "1. Co ukoΕczone?",
"value": word[0 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "2. Co nieukoΕczone?",
"value": word[1 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "3. Blockery podczas dnia",
"value": word[2 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "4. Jakie wnioski?",
"value": word[3 + value_1_if_editing_existing_standup],
"short": false
},
{
"title": "PR'ki, tickeciki itd.",
"value": word[4 + value_1_if_editing_existing_standup],
"short": false
},
],
color: "#1B4D3E",
thumb_url: "#{pic}",
],
)
end
def get_json_evening
[
{
"type": "header",
"text": {
"type": "plain_text",
"text": "Wieczorny Standup",
"emoji": true
}
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": true,
"action_id": "input"
},
"label": {
"type": "plain_text",
"text": "1. Co udaΕo ci sie dzisiaj skoΕczyΔ?",
"emoji": true
},
},
{
"type": "divider"
},
{
"type": "divider"
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": true,
"action_id": "input"
},
"label": {
"type": "plain_text",
"text": "2. KtΓ³re zadaΕ nie zostaΕy zakoΕczone i na jakim etapie dzisiaj "+
"je pozostawiasz ? (pamiΔtaΕeΕ ΕΌeby wypchnΔ
Δ je do repo?)",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "divider"
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": true,
"action_id": "input"
},
"label": {
"type": "plain_text",
"text": "3. PojawiΕy siΔ jakieΕ blockery?",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "divider"
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": true,
"action_id": "input"
},
"label": {
"type": "plain_text",
"text": "4. Czego nowego siΔ dziΕ nauczyΕeΕ / dowiedziaΕeΕ?"+
"A jeΕli niczego to czego w danym temacie chciaΕbyΕ siΔ +"+
"dowiedzieΔ ? Daj nam sobie pomΓ³c",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "divider"
},
{
"type": "input",
"element": {
"type": "plain_text_input",
"multiline": true,
"action_id": "input"
},
"label": {
"type": "plain_text",
"text": "Tutaj wrzuΔ swoje tickety/pry oraz czas ich wykonania - spokojnie, opcjonalne",
"emoji": true
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "PotwierdΕΊ",
"emoji": true
},
"value": "click_me_123",
"action_id": "actionId-1"
}
]
}
]
end
end | 26.161157 | 101 | 0.423788 |
6278436a224d7911421f502c558e81570b63f2cc | 137,592 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'date'
require 'google/apis/core/base_service'
require 'google/apis/core/json_representation'
require 'google/apis/core/hashable'
require 'google/apis/errors'
module Google
module Apis
module AppengineV1beta
# Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/
# endpoints/) configuration for API handlers.
class ApiConfigHandler
include Google::Apis::Core::Hashable
# Action to take when users access resources that require authentication.
# Defaults to redirect.
# Corresponds to the JSON property `authFailAction`
# @return [String]
attr_accessor :auth_fail_action
# Level of login required to access this resource. Defaults to optional.
# Corresponds to the JSON property `login`
# @return [String]
attr_accessor :login
# Path to the script from the application root directory.
# Corresponds to the JSON property `script`
# @return [String]
attr_accessor :script
# Security (HTTPS) enforcement for this URL.
# Corresponds to the JSON property `securityLevel`
# @return [String]
attr_accessor :security_level
# URL to serve the endpoint at.
# Corresponds to the JSON property `url`
# @return [String]
attr_accessor :url
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@auth_fail_action = args[:auth_fail_action] if args.key?(:auth_fail_action)
@login = args[:login] if args.key?(:login)
@script = args[:script] if args.key?(:script)
@security_level = args[:security_level] if args.key?(:security_level)
@url = args[:url] if args.key?(:url)
end
end
# Uses Google Cloud Endpoints to handle requests.
class ApiEndpointHandler
include Google::Apis::Core::Hashable
# Path to the script from the application root directory.
# Corresponds to the JSON property `scriptPath`
# @return [String]
attr_accessor :script_path
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@script_path = args[:script_path] if args.key?(:script_path)
end
end
# An Application resource contains the top-level configuration of an App Engine
# application.
class Application
include Google::Apis::Core::Hashable
# Google Apps authentication domain that controls which users can access this
# application.Defaults to open access for any Google Account.
# Corresponds to the JSON property `authDomain`
# @return [String]
attr_accessor :auth_domain
# Google Cloud Storage bucket that can be used for storing files associated with
# this application. This bucket is associated with the application and can be
# used by the gcloud deployment commands.@OutputOnly
# Corresponds to the JSON property `codeBucket`
# @return [String]
attr_accessor :code_bucket
# Google Cloud Storage bucket that can be used by this application to store
# content.@OutputOnly
# Corresponds to the JSON property `defaultBucket`
# @return [String]
attr_accessor :default_bucket
# Cookie expiration policy for this application.
# Corresponds to the JSON property `defaultCookieExpiration`
# @return [String]
attr_accessor :default_cookie_expiration
# Hostname used to reach this application, as resolved by App Engine.@OutputOnly
# Corresponds to the JSON property `defaultHostname`
# @return [String]
attr_accessor :default_hostname
# HTTP path dispatch rules for requests to the application that do not
# explicitly target a service or version. Rules are order-dependent. Up to 20
# dispatch rules can be supported.@OutputOnly
# Corresponds to the JSON property `dispatchRules`
# @return [Array<Google::Apis::AppengineV1beta::UrlDispatchRule>]
attr_accessor :dispatch_rules
# The feature specific settings to be used in the application. These define
# behaviors that are user configurable.
# Corresponds to the JSON property `featureSettings`
# @return [Google::Apis::AppengineV1beta::FeatureSettings]
attr_accessor :feature_settings
# The Google Container Registry domain used for storing managed build docker
# images for this application.
# Corresponds to the JSON property `gcrDomain`
# @return [String]
attr_accessor :gcr_domain
# Identity-Aware Proxy
# Corresponds to the JSON property `iap`
# @return [Google::Apis::AppengineV1beta::IdentityAwareProxy]
attr_accessor :iap
# Identifier of the Application resource. This identifier is equivalent to the
# project ID of the Google Cloud Platform project where you want to deploy your
# application. Example: myapp.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Location from which this application runs. Application instances run out of
# the data centers in the specified location, which is also where all of the
# application's end user content is stored.Defaults to us-central.View the list
# of supported locations (https://cloud.google.com/appengine/docs/locations).
# Corresponds to the JSON property `locationId`
# @return [String]
attr_accessor :location_id
# Full path to the Application resource in the API. Example: apps/myapp.@
# OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Serving status of this application.
# Corresponds to the JSON property `servingStatus`
# @return [String]
attr_accessor :serving_status
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@auth_domain = args[:auth_domain] if args.key?(:auth_domain)
@code_bucket = args[:code_bucket] if args.key?(:code_bucket)
@default_bucket = args[:default_bucket] if args.key?(:default_bucket)
@default_cookie_expiration = args[:default_cookie_expiration] if args.key?(:default_cookie_expiration)
@default_hostname = args[:default_hostname] if args.key?(:default_hostname)
@dispatch_rules = args[:dispatch_rules] if args.key?(:dispatch_rules)
@feature_settings = args[:feature_settings] if args.key?(:feature_settings)
@gcr_domain = args[:gcr_domain] if args.key?(:gcr_domain)
@iap = args[:iap] if args.key?(:iap)
@id = args[:id] if args.key?(:id)
@location_id = args[:location_id] if args.key?(:location_id)
@name = args[:name] if args.key?(:name)
@serving_status = args[:serving_status] if args.key?(:serving_status)
end
end
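      # Illustrative sketch only (field values assumed): like the other
      # generated classes in this file, an Application is built from keyword
      # arguments named after its attributes and can be updated in place, e.g.
      #   app = Google::Apis::AppengineV1beta::Application.new(
      #     id: 'myapp', location_id: 'us-central')
      #   app.update!(auth_domain: 'example.com')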
# An SSL certificate that a user has been authorized to administer. A user is
# authorized to administer any certificate that applies to one of their
# authorized domains.
class AuthorizedCertificate
include Google::Apis::Core::Hashable
# An SSL certificate obtained from a certificate authority.
# Corresponds to the JSON property `certificateRawData`
# @return [Google::Apis::AppengineV1beta::CertificateRawData]
attr_accessor :certificate_raw_data
# The user-specified display name of the certificate. This is not guaranteed to
# be unique. Example: My Certificate.
# Corresponds to the JSON property `displayName`
# @return [String]
attr_accessor :display_name
# Aggregate count of the domain mappings with this certificate mapped. This
# count includes domain mappings on applications for which the user does not
# have VIEWER permissions.Only returned by GET or LIST requests when
# specifically requested by the view=FULL_CERTIFICATE option.@OutputOnly
# Corresponds to the JSON property `domainMappingsCount`
# @return [Fixnum]
attr_accessor :domain_mappings_count
# Topmost applicable domains of this certificate. This certificate applies to
# these domains and their subdomains. Example: example.com.@OutputOnly
# Corresponds to the JSON property `domainNames`
# @return [Array<String>]
attr_accessor :domain_names
# The time when this certificate expires. To update the renewal time on this
# certificate, upload an SSL certificate with a different expiration time using
# AuthorizedCertificates.UpdateAuthorizedCertificate.@OutputOnly
# Corresponds to the JSON property `expireTime`
# @return [String]
attr_accessor :expire_time
# Relative name of the certificate. This is a unique value autogenerated on
# AuthorizedCertificate resource creation. Example: 12345.@OutputOnly
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# A certificate managed by App Engine.
# Corresponds to the JSON property `managedCertificate`
# @return [Google::Apis::AppengineV1beta::ManagedCertificate]
attr_accessor :managed_certificate
# Full path to the AuthorizedCertificate resource in the API. Example: apps/
# myapp/authorizedCertificates/12345.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# The full paths to user visible Domain Mapping resources that have this
# certificate mapped. Example: apps/myapp/domainMappings/example.com.This may
# not represent the full list of mapped domain mappings if the user does not
# have VIEWER permissions on all of the applications that have this certificate
# mapped. See domain_mappings_count for a complete count.Only returned by GET or
# LIST requests when specifically requested by the view=FULL_CERTIFICATE option.@
# OutputOnly
# Corresponds to the JSON property `visibleDomainMappings`
# @return [Array<String>]
attr_accessor :visible_domain_mappings
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@certificate_raw_data = args[:certificate_raw_data] if args.key?(:certificate_raw_data)
@display_name = args[:display_name] if args.key?(:display_name)
@domain_mappings_count = args[:domain_mappings_count] if args.key?(:domain_mappings_count)
@domain_names = args[:domain_names] if args.key?(:domain_names)
@expire_time = args[:expire_time] if args.key?(:expire_time)
@id = args[:id] if args.key?(:id)
@managed_certificate = args[:managed_certificate] if args.key?(:managed_certificate)
@name = args[:name] if args.key?(:name)
@visible_domain_mappings = args[:visible_domain_mappings] if args.key?(:visible_domain_mappings)
end
end
# A domain that a user has been authorized to administer. To authorize use of a
# domain, verify ownership via Webmaster Central (https://www.google.com/
# webmasters/verification/home).
class AuthorizedDomain
include Google::Apis::Core::Hashable
# Fully qualified domain name of the domain authorized for use. Example: example.
# com.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Full path to the AuthorizedDomain resource in the API. Example: apps/myapp/
# authorizedDomains/example.com.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@id = args[:id] if args.key?(:id)
@name = args[:name] if args.key?(:name)
end
end
# Automatic scaling is based on request rate, response latencies, and other
# application metrics.
class AutomaticScaling
include Google::Apis::Core::Hashable
# The time period that the Autoscaler (https://cloud.google.com/compute/docs/
# autoscaler/) should wait before it starts collecting information from a new
# instance. This prevents the autoscaler from collecting information when the
# instance is initializing, during which the collected usage would not be
# reliable. Only applicable in the App Engine flexible environment.
# Corresponds to the JSON property `coolDownPeriod`
# @return [String]
attr_accessor :cool_down_period
# Target scaling by CPU usage.
# Corresponds to the JSON property `cpuUtilization`
# @return [Google::Apis::AppengineV1beta::CpuUtilization]
attr_accessor :cpu_utilization
# Target scaling by user-provided metrics.
# Corresponds to the JSON property `customMetrics`
# @return [Array<Google::Apis::AppengineV1beta::CustomMetric>]
attr_accessor :custom_metrics
# Target scaling by disk usage. Only applicable in the App Engine flexible
# environment.
# Corresponds to the JSON property `diskUtilization`
# @return [Google::Apis::AppengineV1beta::DiskUtilization]
attr_accessor :disk_utilization
# Number of concurrent requests an automatic scaling instance can accept before
# the scheduler spawns a new instance.Defaults to a runtime-specific value.
# Corresponds to the JSON property `maxConcurrentRequests`
# @return [Fixnum]
attr_accessor :max_concurrent_requests
# Maximum number of idle instances that should be maintained for this version.
# Corresponds to the JSON property `maxIdleInstances`
# @return [Fixnum]
attr_accessor :max_idle_instances
# Maximum amount of time that a request should wait in the pending queue before
# starting a new instance to handle it.
# Corresponds to the JSON property `maxPendingLatency`
# @return [String]
attr_accessor :max_pending_latency
# Maximum number of instances that should be started to handle requests for this
# version.
# Corresponds to the JSON property `maxTotalInstances`
# @return [Fixnum]
attr_accessor :max_total_instances
# Minimum number of idle instances that should be maintained for this version.
# Only applicable for the default version of a service.
# Corresponds to the JSON property `minIdleInstances`
# @return [Fixnum]
attr_accessor :min_idle_instances
# Minimum amount of time a request should wait in the pending queue before
# starting a new instance to handle it.
# Corresponds to the JSON property `minPendingLatency`
# @return [String]
attr_accessor :min_pending_latency
# Minimum number of running instances that should be maintained for this version.
# Corresponds to the JSON property `minTotalInstances`
# @return [Fixnum]
attr_accessor :min_total_instances
# Target scaling by network usage. Only applicable in the App Engine flexible
# environment.
# Corresponds to the JSON property `networkUtilization`
# @return [Google::Apis::AppengineV1beta::NetworkUtilization]
attr_accessor :network_utilization
# Target scaling by request utilization. Only applicable in the App Engine
# flexible environment.
# Corresponds to the JSON property `requestUtilization`
# @return [Google::Apis::AppengineV1beta::RequestUtilization]
attr_accessor :request_utilization
# Scheduler settings for standard environment.
# Corresponds to the JSON property `standardSchedulerSettings`
# @return [Google::Apis::AppengineV1beta::StandardSchedulerSettings]
attr_accessor :standard_scheduler_settings
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cool_down_period = args[:cool_down_period] if args.key?(:cool_down_period)
@cpu_utilization = args[:cpu_utilization] if args.key?(:cpu_utilization)
@custom_metrics = args[:custom_metrics] if args.key?(:custom_metrics)
@disk_utilization = args[:disk_utilization] if args.key?(:disk_utilization)
@max_concurrent_requests = args[:max_concurrent_requests] if args.key?(:max_concurrent_requests)
@max_idle_instances = args[:max_idle_instances] if args.key?(:max_idle_instances)
@max_pending_latency = args[:max_pending_latency] if args.key?(:max_pending_latency)
@max_total_instances = args[:max_total_instances] if args.key?(:max_total_instances)
@min_idle_instances = args[:min_idle_instances] if args.key?(:min_idle_instances)
@min_pending_latency = args[:min_pending_latency] if args.key?(:min_pending_latency)
@min_total_instances = args[:min_total_instances] if args.key?(:min_total_instances)
@network_utilization = args[:network_utilization] if args.key?(:network_utilization)
@request_utilization = args[:request_utilization] if args.key?(:request_utilization)
@standard_scheduler_settings = args[:standard_scheduler_settings] if args.key?(:standard_scheduler_settings)
end
end
# A service with basic scaling will create an instance when the application
# receives a request. The instance will be turned down when the app becomes idle.
# Basic scaling is ideal for work that is intermittent or driven by user
# activity.
class BasicScaling
include Google::Apis::Core::Hashable
# Duration of time after the last request that an instance must wait before the
# instance is shut down.
# Corresponds to the JSON property `idleTimeout`
# @return [String]
attr_accessor :idle_timeout
# Maximum number of instances to create for this version.
# Corresponds to the JSON property `maxInstances`
# @return [Fixnum]
attr_accessor :max_instances
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@idle_timeout = args[:idle_timeout] if args.key?(:idle_timeout)
@max_instances = args[:max_instances] if args.key?(:max_instances)
end
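# Usage sketch (illustrative values only, not generated output): shut an idle
# instance down after five minutes and never run more than five instances.
# The '300s' duration string is an assumption about the duration wire format.
#
#   scaling = Google::Apis::AppengineV1beta::BasicScaling.new(
#     idle_timeout: '300s',
#     max_instances: 5)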
end
# Request message for Firewall.BatchUpdateIngressRules.
class BatchUpdateIngressRulesRequest
include Google::Apis::Core::Hashable
# A list of FirewallRules to replace the existing set.
# Corresponds to the JSON property `ingressRules`
# @return [Array<Google::Apis::AppengineV1beta::FirewallRule>]
attr_accessor :ingress_rules
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@ingress_rules = args[:ingress_rules] if args.key?(:ingress_rules)
end
end
# Response message for Firewall.UpdateAllIngressRules.
class BatchUpdateIngressRulesResponse
include Google::Apis::Core::Hashable
# The full list of ingress FirewallRules for this application.
# Corresponds to the JSON property `ingressRules`
# @return [Array<Google::Apis::AppengineV1beta::FirewallRule>]
attr_accessor :ingress_rules
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@ingress_rules = args[:ingress_rules] if args.key?(:ingress_rules)
end
end
# Google Cloud Build information.
class BuildInfo
include Google::Apis::Core::Hashable
# The Google Cloud Build id. Example: "f966068f-08b2-42c8-bdfe-74137dff2bf9"
# Corresponds to the JSON property `cloudBuildId`
# @return [String]
attr_accessor :cloud_build_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cloud_build_id = args[:cloud_build_id] if args.key?(:cloud_build_id)
end
end
# An SSL certificate obtained from a certificate authority.
class CertificateRawData
include Google::Apis::Core::Hashable
# Unencrypted PEM encoded RSA private key. This field is set once on certificate
# creation and then encrypted. The key size must be 2048 bits or fewer. Must
# include the header and footer. Example: -----BEGIN RSA PRIVATE KEY-----
# <unencrypted_key_value> -----END RSA PRIVATE KEY----- @InputOnly
# Corresponds to the JSON property `privateKey`
# @return [String]
attr_accessor :private_key
# PEM encoded x.509 public key certificate. This field is set once on
# certificate creation. Must include the header and footer. Example:
# -----BEGIN CERTIFICATE----- <certificate_value> -----END CERTIFICATE-----
# Corresponds to the JSON property `publicCertificate`
# @return [String]
attr_accessor :public_certificate
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@private_key = args[:private_key] if args.key?(:private_key)
@public_certificate = args[:public_certificate] if args.key?(:public_certificate)
end
end
# Options for the build operations performed as a part of the version deployment.
# Only applicable for App Engine flexible environment when creating a version
# using source code directly.
class CloudBuildOptions
include Google::Apis::Core::Hashable
# Path to the yaml file used in deployment, used to determine runtime
# configuration details. Required for flexible environment builds. See https://
# cloud.google.com/appengine/docs/standard/python/config/appref for more details.
# Corresponds to the JSON property `appYamlPath`
# @return [String]
attr_accessor :app_yaml_path
# The Cloud Build timeout used as part of any dependent builds performed by
# version creation. Defaults to 10 minutes.
# Corresponds to the JSON property `cloudBuildTimeout`
# @return [String]
attr_accessor :cloud_build_timeout
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@app_yaml_path = args[:app_yaml_path] if args.key?(:app_yaml_path)
@cloud_build_timeout = args[:cloud_build_timeout] if args.key?(:cloud_build_timeout)
end
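# Usage sketch (illustrative values): point the build at an app.yaml and raise
# the Cloud Build timeout. The '600s' duration string is an assumed format for
# this duration field.
#
#   options = Google::Apis::AppengineV1beta::CloudBuildOptions.new(
#     app_yaml_path: 'app.yaml',
#     cloud_build_timeout: '600s')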
end
# Docker image that is used to create a container and start a VM instance for
# the version that you deploy. Only applicable for instances running in the App
# Engine flexible environment.
class ContainerInfo
include Google::Apis::Core::Hashable
# URI to the hosted container image in Google Container Registry. The URI must
# be fully qualified and include a tag or digest. Examples: "gcr.io/my-project/
# image:tag" or "gcr.io/my-project/image@digest"
# Corresponds to the JSON property `image`
# @return [String]
attr_accessor :image
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@image = args[:image] if args.key?(:image)
end
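# Usage sketch, using the fully qualified image URI form documented above:
#
#   container = Google::Apis::AppengineV1beta::ContainerInfo.new(
#     image: 'gcr.io/my-project/image:tag')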
end
# Target scaling by CPU usage.
class CpuUtilization
include Google::Apis::Core::Hashable
# Period of time over which CPU utilization is calculated.
# Corresponds to the JSON property `aggregationWindowLength`
# @return [String]
attr_accessor :aggregation_window_length
# Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1.
# Corresponds to the JSON property `targetUtilization`
# @return [Float]
attr_accessor :target_utilization
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@aggregation_window_length = args[:aggregation_window_length] if args.key?(:aggregation_window_length)
@target_utilization = args[:target_utilization] if args.key?(:target_utilization)
end
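# Usage sketch (illustrative values): hold CPU at roughly 60%, averaged over a
# one-minute window. The '60s' duration string is an assumed format.
#
#   cpu = Google::Apis::AppengineV1beta::CpuUtilization.new(
#     aggregation_window_length: '60s',
#     target_utilization: 0.6)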
end
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1.CreateVersionRequest.
class CreateVersionMetadataV1
include Google::Apis::Core::Hashable
# The Cloud Build ID if one was created as part of the version create. @
# OutputOnly
# Corresponds to the JSON property `cloudBuildId`
# @return [String]
attr_accessor :cloud_build_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cloud_build_id = args[:cloud_build_id] if args.key?(:cloud_build_id)
end
end
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1alpha.CreateVersionRequest.
class CreateVersionMetadataV1Alpha
include Google::Apis::Core::Hashable
# The Cloud Build ID if one was created as part of the version create. @
# OutputOnly
# Corresponds to the JSON property `cloudBuildId`
# @return [String]
attr_accessor :cloud_build_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cloud_build_id = args[:cloud_build_id] if args.key?(:cloud_build_id)
end
end
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1beta.CreateVersionRequest.
class CreateVersionMetadataV1Beta
include Google::Apis::Core::Hashable
# The Cloud Build ID if one was created as part of the version create. @
# OutputOnly
# Corresponds to the JSON property `cloudBuildId`
# @return [String]
attr_accessor :cloud_build_id
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cloud_build_id = args[:cloud_build_id] if args.key?(:cloud_build_id)
end
end
# Allows autoscaling based on Stackdriver metrics.
class CustomMetric
include Google::Apis::Core::Hashable
# Allows filtering on the metric's fields.
# Corresponds to the JSON property `filter`
# @return [String]
attr_accessor :filter
# The name of the metric.
# Corresponds to the JSON property `metricName`
# @return [String]
attr_accessor :metric_name
# May be used instead of target_utilization when an instance can handle a
# specific amount of work/resources and the metric value is equal to the current
# amount of work remaining. The autoscaler will try to keep the number of
# instances equal to the metric value divided by single_instance_assignment.
# Corresponds to the JSON property `singleInstanceAssignment`
# @return [Float]
attr_accessor :single_instance_assignment
# The type of the metric. Must be a string representing a Stackdriver metric
# type e.g. GAUGE, DELTA_PER_SECOND, etc.
# Corresponds to the JSON property `targetType`
# @return [String]
attr_accessor :target_type
# The target value for the metric.
# Corresponds to the JSON property `targetUtilization`
# @return [Float]
attr_accessor :target_utilization
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@filter = args[:filter] if args.key?(:filter)
@metric_name = args[:metric_name] if args.key?(:metric_name)
@single_instance_assignment = args[:single_instance_assignment] if args.key?(:single_instance_assignment)
@target_type = args[:target_type] if args.key?(:target_type)
@target_utilization = args[:target_utilization] if args.key?(:target_utilization)
end
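# Usage sketch (illustrative only): autoscale on a Stackdriver metric. The
# metric name and assignment value are examples, not recommendations; set
# either target_utilization or single_instance_assignment, as described above.
#
#   metric = Google::Apis::AppengineV1beta::CustomMetric.new(
#     metric_name: 'pubsub.googleapis.com/subscription/num_undelivered_messages',
#     target_type: 'GAUGE',
#     single_instance_assignment: 100.0)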
end
# Request message for Instances.DebugInstance.
class DebugInstanceRequest
include Google::Apis::Core::Hashable
# Public SSH key to add to the instance. Examples:
# [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]
# [USERNAME]:ssh-rsa [KEY_VALUE] google-ssh `"userName":"[USERNAME]","expireOn":"
# [EXPIRE_TIME]"`For more information, see Adding and Removing SSH Keys (https://
# cloud.google.com/compute/docs/instances/adding-removing-ssh-keys).
# Corresponds to the JSON property `sshKey`
# @return [String]
attr_accessor :ssh_key
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@ssh_key = args[:ssh_key] if args.key?(:ssh_key)
end
end
# Code and application artifacts used to deploy a version to App Engine.
class Deployment
include Google::Apis::Core::Hashable
# Google Cloud Build information.
# Corresponds to the JSON property `build`
# @return [Google::Apis::AppengineV1beta::BuildInfo]
attr_accessor :build
# Options for the build operations performed as a part of the version deployment.
# Only applicable for App Engine flexible environment when creating a version
# using source code directly.
# Corresponds to the JSON property `cloudBuildOptions`
# @return [Google::Apis::AppengineV1beta::CloudBuildOptions]
attr_accessor :cloud_build_options
# Docker image that is used to create a container and start a VM instance for
# the version that you deploy. Only applicable for instances running in the App
# Engine flexible environment.
# Corresponds to the JSON property `container`
# @return [Google::Apis::AppengineV1beta::ContainerInfo]
attr_accessor :container
# Manifest of the files stored in Google Cloud Storage that are included as part
# of this version. All files must be readable using the credentials supplied
# with this call.
# Corresponds to the JSON property `files`
# @return [Hash<String,Google::Apis::AppengineV1beta::FileInfo>]
attr_accessor :files
# The zip file information for a zip deployment.
# Corresponds to the JSON property `zip`
# @return [Google::Apis::AppengineV1beta::ZipInfo]
attr_accessor :zip
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@build = args[:build] if args.key?(:build)
@cloud_build_options = args[:cloud_build_options] if args.key?(:cloud_build_options)
@container = args[:container] if args.key?(:container)
@files = args[:files] if args.key?(:files)
@zip = args[:zip] if args.key?(:zip)
end
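# Usage sketch (illustrative values): a flexible-environment deployment built
# from a hosted container image, using the URI form documented on ContainerInfo.
#
#   deployment = Google::Apis::AppengineV1beta::Deployment.new(
#     container: Google::Apis::AppengineV1beta::ContainerInfo.new(
#       image: 'gcr.io/my-project/image@digest'))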
end
# Target scaling by disk usage. Only applicable in the App Engine flexible
# environment.
class DiskUtilization
include Google::Apis::Core::Hashable
# Target bytes read per second.
# Corresponds to the JSON property `targetReadBytesPerSecond`
# @return [Fixnum]
attr_accessor :target_read_bytes_per_second
# Target ops read per second.
# Corresponds to the JSON property `targetReadOpsPerSecond`
# @return [Fixnum]
attr_accessor :target_read_ops_per_second
# Target bytes written per second.
# Corresponds to the JSON property `targetWriteBytesPerSecond`
# @return [Fixnum]
attr_accessor :target_write_bytes_per_second
# Target ops written per second.
# Corresponds to the JSON property `targetWriteOpsPerSecond`
# @return [Fixnum]
attr_accessor :target_write_ops_per_second
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@target_read_bytes_per_second = args[:target_read_bytes_per_second] if args.key?(:target_read_bytes_per_second)
@target_read_ops_per_second = args[:target_read_ops_per_second] if args.key?(:target_read_ops_per_second)
@target_write_bytes_per_second = args[:target_write_bytes_per_second] if args.key?(:target_write_bytes_per_second)
@target_write_ops_per_second = args[:target_write_ops_per_second] if args.key?(:target_write_ops_per_second)
end
end
# A domain serving an App Engine application.
class DomainMapping
include Google::Apis::Core::Hashable
# Relative name of the domain serving the application. Example: example.com.
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Full path to the DomainMapping resource in the API. Example: apps/myapp/
# domainMapping/example.com.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# The resource records required to configure this domain mapping. These records
# must be added to the domain's DNS configuration in order to serve the
# application via this domain mapping.@OutputOnly
# Corresponds to the JSON property `resourceRecords`
# @return [Array<Google::Apis::AppengineV1beta::ResourceRecord>]
attr_accessor :resource_records
# SSL configuration for a DomainMapping resource.
# Corresponds to the JSON property `sslSettings`
# @return [Google::Apis::AppengineV1beta::SslSettings]
attr_accessor :ssl_settings
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@id = args[:id] if args.key?(:id)
@name = args[:name] if args.key?(:name)
@resource_records = args[:resource_records] if args.key?(:resource_records)
@ssl_settings = args[:ssl_settings] if args.key?(:ssl_settings)
end
end
# A generic empty message that you can re-use to avoid defining duplicated empty
# messages in your APIs. A typical example is to use it as the request or the
# response type of an API method. For instance:
# service Foo `
# rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
# `
# The JSON representation for Empty is empty JSON object ``.
class Empty
include Google::Apis::Core::Hashable
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
end
end
# Cloud Endpoints (https://cloud.google.com/endpoints) configuration. The
# Endpoints API Service provides tooling for serving Open API and gRPC endpoints
# via an NGINX proxy. Only valid for App Engine Flexible environment deployments.
# The fields here refer to the name and configuration ID of a "service" resource
# in the Service Management API (https://cloud.google.com/service-management/
# overview).
class EndpointsApiService
include Google::Apis::Core::Hashable
# Endpoints service configuration ID as specified by the Service Management API.
# For example "2016-09-19r1".By default, the rollout strategy for Endpoints is
# RolloutStrategy.FIXED. This means that Endpoints starts up with a particular
# configuration ID. When a new configuration is rolled out, Endpoints must be
# given the new configuration ID. The config_id field is used to give the
# configuration ID and is required in this case.Endpoints also has a rollout
# strategy called RolloutStrategy.MANAGED. When using this, Endpoints fetches
# the latest configuration and does not need the configuration ID. In this case,
# config_id must be omitted.
# Corresponds to the JSON property `configId`
# @return [String]
attr_accessor :config_id
# Enable or disable trace sampling. By default, this is set to false, which
# means trace sampling is enabled.
# Corresponds to the JSON property `disableTraceSampling`
# @return [Boolean]
attr_accessor :disable_trace_sampling
alias_method :disable_trace_sampling?, :disable_trace_sampling
# Endpoints service name which is the name of the "service" resource in the
# Service Management API. For example "myapi.endpoints.myproject.cloud.goog"
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Endpoints rollout strategy. If FIXED, config_id must be specified. If MANAGED,
# config_id must be omitted.
# Corresponds to the JSON property `rolloutStrategy`
# @return [String]
attr_accessor :rollout_strategy
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@config_id = args[:config_id] if args.key?(:config_id)
@disable_trace_sampling = args[:disable_trace_sampling] if args.key?(:disable_trace_sampling)
@name = args[:name] if args.key?(:name)
@rollout_strategy = args[:rollout_strategy] if args.key?(:rollout_strategy)
end
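# Usage sketch: a managed rollout, where config_id is omitted as described
# above. The service name is illustrative.
#
#   endpoints = Google::Apis::AppengineV1beta::EndpointsApiService.new(
#     name: 'myapi.endpoints.myproject.cloud.goog',
#     rollout_strategy: 'MANAGED')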
end
# The entrypoint for the application.
class Entrypoint
include Google::Apis::Core::Hashable
# The format should be a shell command that can be fed to bash -c.
# Corresponds to the JSON property `shell`
# @return [String]
attr_accessor :shell
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@shell = args[:shell] if args.key?(:shell)
end
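# Usage sketch: the shell string is fed to bash -c, so any command works; the
# gunicorn invocation below is purely illustrative and assumes a Python app.
#
#   entrypoint = Google::Apis::AppengineV1beta::Entrypoint.new(
#     shell: 'gunicorn -b :$PORT main:app')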
end
# Custom static error page to be served when an error occurs.
class ErrorHandler
include Google::Apis::Core::Hashable
# Error condition this handler applies to.
# Corresponds to the JSON property `errorCode`
# @return [String]
attr_accessor :error_code
# MIME type of file. Defaults to text/html.
# Corresponds to the JSON property `mimeType`
# @return [String]
attr_accessor :mime_type
# Static file content to be served for this error.
# Corresponds to the JSON property `staticFile`
# @return [String]
attr_accessor :static_file
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@error_code = args[:error_code] if args.key?(:error_code)
@mime_type = args[:mime_type] if args.key?(:mime_type)
@static_file = args[:static_file] if args.key?(:static_file)
end
end
# The feature specific settings to be used in the application. These define
# behaviors that are user configurable.
class FeatureSettings
include Google::Apis::Core::Hashable
# Boolean value indicating if split health checks should be used instead of the
# legacy health checks. At an app.yaml level, this means defaulting to '
# readiness_check' and 'liveness_check' values instead of 'health_check' ones.
# Once the legacy 'health_check' behavior is deprecated, and this value is
# always true, this setting can be removed.
# Corresponds to the JSON property `splitHealthChecks`
# @return [Boolean]
attr_accessor :split_health_checks
alias_method :split_health_checks?, :split_health_checks
# If true, use Container-Optimized OS (https://cloud.google.com/container-
# optimized-os/) base image for VMs, rather than a base Debian image.
# Corresponds to the JSON property `useContainerOptimizedOs`
# @return [Boolean]
attr_accessor :use_container_optimized_os
alias_method :use_container_optimized_os?, :use_container_optimized_os
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@split_health_checks = args[:split_health_checks] if args.key?(:split_health_checks)
@use_container_optimized_os = args[:use_container_optimized_os] if args.key?(:use_container_optimized_os)
end
end
# Single source file that is part of the version to be deployed. Each source
# file that is deployed must be specified separately.
class FileInfo
include Google::Apis::Core::Hashable
# The MIME type of the file. Defaults to the value from Google Cloud Storage.
# Corresponds to the JSON property `mimeType`
# @return [String]
attr_accessor :mime_type
# The SHA1 hash of the file, in hex.
# Corresponds to the JSON property `sha1Sum`
# @return [String]
attr_accessor :sha1_sum
# URL source to use to fetch this file. Must be a URL to a resource in Google
# Cloud Storage in the form 'http(s)://storage.googleapis.com/<bucket>/<object>'.
# Corresponds to the JSON property `sourceUrl`
# @return [String]
attr_accessor :source_url
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@mime_type = args[:mime_type] if args.key?(:mime_type)
@sha1_sum = args[:sha1_sum] if args.key?(:sha1_sum)
@source_url = args[:source_url] if args.key?(:source_url)
end
end
# A single firewall rule that is evaluated against incoming traffic and provides
# an action to take on matched requests.
class FirewallRule
include Google::Apis::Core::Hashable
# The action to take on matched requests.
# Corresponds to the JSON property `action`
# @return [String]
attr_accessor :action
# An optional string description of this rule. This field has a maximum length
# of 100 characters.
# Corresponds to the JSON property `description`
# @return [String]
attr_accessor :description
# A positive integer between 1 and Int32.MaxValue-1 that defines the order of
# rule evaluation. Rules with the lowest priority are evaluated first. A default
# rule at priority Int32.MaxValue matches all IPv4 and IPv6 traffic when no
# previous rule matches. Only the action of this rule can be modified by the user.
# Corresponds to the JSON property `priority`
# @return [Fixnum]
attr_accessor :priority
# IP address or range, defined using CIDR notation, of requests that this rule
# applies to. You can use the wildcard character "*" to match all IPs equivalent
# to "0/0" and "::/0" together. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:
# db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334. Truncation will be
# silently performed on addresses which are not properly truncated. For example,
# 1.2.3.4/24 is accepted as the same address as 1.2.3.0/24. Similarly, for IPv6,
# 2001:db8::1/32 is accepted as the same address as 2001:db8::/32.
# Corresponds to the JSON property `sourceRange`
# @return [String]
attr_accessor :source_range
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@action = args[:action] if args.key?(:action)
@description = args[:description] if args.key?(:description)
@priority = args[:priority] if args.key?(:priority)
@source_range = args[:source_range] if args.key?(:source_range)
end
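# Usage sketch (illustrative values; 'ALLOW' is an assumed action value):
# allow traffic from a private range ahead of the default rule.
#
#   rule = Google::Apis::AppengineV1beta::FirewallRule.new(
#     action: 'ALLOW',
#     description: 'corp network',
#     priority: 100,
#     source_range: '192.168.0.0/16')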
end
# Health checking configuration for VM instances. Unhealthy instances are killed
# and replaced with new instances. Only applicable for instances in App Engine
# flexible environment.
class HealthCheck
include Google::Apis::Core::Hashable
# Interval between health checks.
# Corresponds to the JSON property `checkInterval`
# @return [String]
attr_accessor :check_interval
# Whether to explicitly disable health checks for this instance.
# Corresponds to the JSON property `disableHealthCheck`
# @return [Boolean]
attr_accessor :disable_health_check
alias_method :disable_health_check?, :disable_health_check
# Number of consecutive successful health checks required before receiving
# traffic.
# Corresponds to the JSON property `healthyThreshold`
# @return [Fixnum]
attr_accessor :healthy_threshold
# Host header to send when performing an HTTP health check. Example: "myapp.
# appspot.com"
# Corresponds to the JSON property `host`
# @return [String]
attr_accessor :host
# Number of consecutive failed health checks required before an instance is
# restarted.
# Corresponds to the JSON property `restartThreshold`
# @return [Fixnum]
attr_accessor :restart_threshold
# Time before the health check is considered failed.
# Corresponds to the JSON property `timeout`
# @return [String]
attr_accessor :timeout
# Number of consecutive failed health checks required before removing traffic.
# Corresponds to the JSON property `unhealthyThreshold`
# @return [Fixnum]
attr_accessor :unhealthy_threshold
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@check_interval = args[:check_interval] if args.key?(:check_interval)
@disable_health_check = args[:disable_health_check] if args.key?(:disable_health_check)
@healthy_threshold = args[:healthy_threshold] if args.key?(:healthy_threshold)
@host = args[:host] if args.key?(:host)
@restart_threshold = args[:restart_threshold] if args.key?(:restart_threshold)
@timeout = args[:timeout] if args.key?(:timeout)
@unhealthy_threshold = args[:unhealthy_threshold] if args.key?(:unhealthy_threshold)
end
end
# Identity-Aware Proxy
class IdentityAwareProxy
include Google::Apis::Core::Hashable
# Whether the serving infrastructure will authenticate and authorize all
# incoming requests. If true, the oauth2_client_id and oauth2_client_secret
# fields must be non-empty.
# Corresponds to the JSON property `enabled`
# @return [Boolean]
attr_accessor :enabled
alias_method :enabled?, :enabled
# OAuth2 client ID to use for the authentication flow.
# Corresponds to the JSON property `oauth2ClientId`
# @return [String]
attr_accessor :oauth2_client_id
# InputOnly OAuth client info required to generate client id to be used for IAP.
# Corresponds to the JSON property `oauth2ClientInfo`
# @return [Google::Apis::AppengineV1beta::OAuth2ClientInfo]
attr_accessor :oauth2_client_info
# OAuth2 client secret to use for the authentication flow.For security reasons,
# this value cannot be retrieved via the API. Instead, the SHA-256 hash of the
# value is returned in the oauth2_client_secret_sha256 field.@InputOnly
# Corresponds to the JSON property `oauth2ClientSecret`
# @return [String]
attr_accessor :oauth2_client_secret
# Hex-encoded SHA-256 hash of the client secret.@OutputOnly
# Corresponds to the JSON property `oauth2ClientSecretSha256`
# @return [String]
attr_accessor :oauth2_client_secret_sha256
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@enabled = args[:enabled] if args.key?(:enabled)
@oauth2_client_id = args[:oauth2_client_id] if args.key?(:oauth2_client_id)
@oauth2_client_info = args[:oauth2_client_info] if args.key?(:oauth2_client_info)
@oauth2_client_secret = args[:oauth2_client_secret] if args.key?(:oauth2_client_secret)
@oauth2_client_secret_sha256 = args[:oauth2_client_secret_sha256] if args.key?(:oauth2_client_secret_sha256)
end
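# Usage sketch: enabling IAP requires both OAuth2 client fields, as noted
# above. The credential values are placeholders, not real credentials.
#
#   iap = Google::Apis::AppengineV1beta::IdentityAwareProxy.new(
#     enabled: true,
#     oauth2_client_id: 'CLIENT_ID',
#     oauth2_client_secret: 'CLIENT_SECRET')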
end
# An Instance resource is the computing unit that App Engine uses to
# automatically scale an application.
class Instance
include Google::Apis::Core::Hashable
# App Engine release this instance is running on.@OutputOnly
# Corresponds to the JSON property `appEngineRelease`
# @return [String]
attr_accessor :app_engine_release
# Availability of the instance.@OutputOnly
# Corresponds to the JSON property `availability`
# @return [String]
attr_accessor :availability
# Average latency (ms) over the last minute.@OutputOnly
# Corresponds to the JSON property `averageLatency`
# @return [Fixnum]
attr_accessor :average_latency
# Number of errors since this instance was started.@OutputOnly
# Corresponds to the JSON property `errors`
# @return [Fixnum]
attr_accessor :errors
# Relative name of the instance within the version. Example: instance-1.@
# OutputOnly
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Total memory in use (bytes).@OutputOnly
# Corresponds to the JSON property `memoryUsage`
# @return [Fixnum]
attr_accessor :memory_usage
# Full path to the Instance resource in the API. Example: apps/myapp/services/
# default/versions/v1/instances/instance-1.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Average queries per second (QPS) over the last minute.@OutputOnly
# Corresponds to the JSON property `qps`
# @return [Float]
attr_accessor :qps
# Number of requests since this instance was started.@OutputOnly
# Corresponds to the JSON property `requests`
# @return [Fixnum]
attr_accessor :requests
# Time that this instance was started.@OutputOnly
# Corresponds to the JSON property `startTime`
# @return [String]
attr_accessor :start_time
# Whether this instance is in debug mode. Only applicable for instances in App
# Engine flexible environment.@OutputOnly
# Corresponds to the JSON property `vmDebugEnabled`
# @return [Boolean]
attr_accessor :vm_debug_enabled
alias_method :vm_debug_enabled?, :vm_debug_enabled
# Virtual machine ID of this instance. Only applicable for instances in App
# Engine flexible environment.@OutputOnly
# Corresponds to the JSON property `vmId`
# @return [String]
attr_accessor :vm_id
# The IP address of this instance. Only applicable for instances in App Engine
# flexible environment.@OutputOnly
# Corresponds to the JSON property `vmIp`
# @return [String]
attr_accessor :vm_ip
# Name of the virtual machine where this instance lives. Only applicable for
# instances in App Engine flexible environment.@OutputOnly
# Corresponds to the JSON property `vmName`
# @return [String]
attr_accessor :vm_name
# Status of the virtual machine where this instance lives. Only applicable for
# instances in App Engine flexible environment.@OutputOnly
# Corresponds to the JSON property `vmStatus`
# @return [String]
attr_accessor :vm_status
# Zone where the virtual machine is located. Only applicable for instances in
# App Engine flexible environment.@OutputOnly
# Corresponds to the JSON property `vmZoneName`
# @return [String]
attr_accessor :vm_zone_name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@app_engine_release = args[:app_engine_release] if args.key?(:app_engine_release)
@availability = args[:availability] if args.key?(:availability)
@average_latency = args[:average_latency] if args.key?(:average_latency)
@errors = args[:errors] if args.key?(:errors)
@id = args[:id] if args.key?(:id)
@memory_usage = args[:memory_usage] if args.key?(:memory_usage)
@name = args[:name] if args.key?(:name)
@qps = args[:qps] if args.key?(:qps)
@requests = args[:requests] if args.key?(:requests)
@start_time = args[:start_time] if args.key?(:start_time)
@vm_debug_enabled = args[:vm_debug_enabled] if args.key?(:vm_debug_enabled)
@vm_id = args[:vm_id] if args.key?(:vm_id)
@vm_ip = args[:vm_ip] if args.key?(:vm_ip)
@vm_name = args[:vm_name] if args.key?(:vm_name)
@vm_status = args[:vm_status] if args.key?(:vm_status)
@vm_zone_name = args[:vm_zone_name] if args.key?(:vm_zone_name)
end
end
# Third-party Python runtime library that is required by the application.
class Library
include Google::Apis::Core::Hashable
# Name of the library. Example: "django".
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Version of the library to select, or "latest".
# Corresponds to the JSON property `version`
# @return [String]
attr_accessor :version
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@name = args[:name] if args.key?(:name)
@version = args[:version] if args.key?(:version)
end
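# Usage sketch, using the examples from the field descriptions above:
#
#   library = Google::Apis::AppengineV1beta::Library.new(
#     name: 'django',
#     version: 'latest')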
end
# Response message for AuthorizedCertificates.ListAuthorizedCertificates.
class ListAuthorizedCertificatesResponse
include Google::Apis::Core::Hashable
# The SSL certificates the user is authorized to administer.
# Corresponds to the JSON property `certificates`
# @return [Array<Google::Apis::AppengineV1beta::AuthorizedCertificate>]
attr_accessor :certificates
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@certificates = args[:certificates] if args.key?(:certificates)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# Response message for AuthorizedDomains.ListAuthorizedDomains.
class ListAuthorizedDomainsResponse
include Google::Apis::Core::Hashable
# The authorized domains belonging to the user.
# Corresponds to the JSON property `domains`
# @return [Array<Google::Apis::AppengineV1beta::AuthorizedDomain>]
attr_accessor :domains
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@domains = args[:domains] if args.key?(:domains)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# Response message for DomainMappings.ListDomainMappings.
class ListDomainMappingsResponse
include Google::Apis::Core::Hashable
# The domain mappings for the application.
# Corresponds to the JSON property `domainMappings`
# @return [Array<Google::Apis::AppengineV1beta::DomainMapping>]
attr_accessor :domain_mappings
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@domain_mappings = args[:domain_mappings] if args.key?(:domain_mappings)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# Response message for Firewall.ListIngressRules.
class ListIngressRulesResponse
include Google::Apis::Core::Hashable
# The ingress FirewallRules for this application.
# Corresponds to the JSON property `ingressRules`
# @return [Array<Google::Apis::AppengineV1beta::FirewallRule>]
attr_accessor :ingress_rules
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@ingress_rules = args[:ingress_rules] if args.key?(:ingress_rules)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# Response message for Instances.ListInstances.
class ListInstancesResponse
include Google::Apis::Core::Hashable
# The instances belonging to the requested version.
# Corresponds to the JSON property `instances`
# @return [Array<Google::Apis::AppengineV1beta::Instance>]
attr_accessor :instances
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@instances = args[:instances] if args.key?(:instances)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# The response message for Locations.ListLocations.
class ListLocationsResponse
include Google::Apis::Core::Hashable
# A list of locations that matches the specified filter in the request.
# Corresponds to the JSON property `locations`
# @return [Array<Google::Apis::AppengineV1beta::Location>]
attr_accessor :locations
# The standard List next-page token.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@locations = args[:locations] if args.key?(:locations)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
end
end
# The response message for Operations.ListOperations.
class ListOperationsResponse
include Google::Apis::Core::Hashable
# The standard List next-page token.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
# A list of operations that matches the specified filter in the request.
# Corresponds to the JSON property `operations`
# @return [Array<Google::Apis::AppengineV1beta::Operation>]
attr_accessor :operations
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
@operations = args[:operations] if args.key?(:operations)
end
end
# Response message for Services.ListServices.
class ListServicesResponse
include Google::Apis::Core::Hashable
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
# The services belonging to the requested application.
# Corresponds to the JSON property `services`
# @return [Array<Google::Apis::AppengineV1beta::Service>]
attr_accessor :services
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
@services = args[:services] if args.key?(:services)
end
end
# Response message for Versions.ListVersions.
class ListVersionsResponse
include Google::Apis::Core::Hashable
# Continuation token for fetching the next page of results.
# Corresponds to the JSON property `nextPageToken`
# @return [String]
attr_accessor :next_page_token
# The versions belonging to the requested service.
# Corresponds to the JSON property `versions`
# @return [Array<Google::Apis::AppengineV1beta::Version>]
attr_accessor :versions
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@next_page_token = args[:next_page_token] if args.key?(:next_page_token)
@versions = args[:versions] if args.key?(:versions)
end
end
# Health checking configuration for VM instances. Unhealthy instances are killed
# and replaced with new instances.
class LivenessCheck
include Google::Apis::Core::Hashable
# Interval between health checks.
# Corresponds to the JSON property `checkInterval`
# @return [String]
attr_accessor :check_interval
# Number of consecutive failed checks required before considering the VM
# unhealthy.
# Corresponds to the JSON property `failureThreshold`
# @return [Fixnum]
attr_accessor :failure_threshold
# Host header to send when performing an HTTP liveness check. Example: "myapp.
# appspot.com"
# Corresponds to the JSON property `host`
# @return [String]
attr_accessor :host
# The initial delay before starting to execute the checks.
# Corresponds to the JSON property `initialDelay`
# @return [String]
attr_accessor :initial_delay
# The request path.
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# Number of consecutive successful checks required before considering the VM
# healthy.
# Corresponds to the JSON property `successThreshold`
# @return [Fixnum]
attr_accessor :success_threshold
# Time before the check is considered failed.
# Corresponds to the JSON property `timeout`
# @return [String]
attr_accessor :timeout
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@check_interval = args[:check_interval] if args.key?(:check_interval)
@failure_threshold = args[:failure_threshold] if args.key?(:failure_threshold)
@host = args[:host] if args.key?(:host)
@initial_delay = args[:initial_delay] if args.key?(:initial_delay)
@path = args[:path] if args.key?(:path)
@success_threshold = args[:success_threshold] if args.key?(:success_threshold)
@timeout = args[:timeout] if args.key?(:timeout)
end
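# Usage sketch (illustrative values; the duration strings assume the "Ns"
# format used by this API's duration fields):
#
#   liveness = Google::Apis::AppengineV1beta::LivenessCheck.new(
#     path: '/liveness_check',
#     check_interval: '30s',
#     timeout: '4s',
#     failure_threshold: 2,
#     success_threshold: 2)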
end
# A resource that represents Google Cloud Platform location.
class Location
include Google::Apis::Core::Hashable
# The friendly name for this location, typically a nearby city name. For example,
# "Tokyo".
# Corresponds to the JSON property `displayName`
# @return [String]
attr_accessor :display_name
# Cross-service attributes for the location. For example
# `"cloud.googleapis.com/region": "us-east1"`
# Corresponds to the JSON property `labels`
# @return [Hash<String,String>]
attr_accessor :labels
# The canonical id for this location. For example: "us-east1".
# Corresponds to the JSON property `locationId`
# @return [String]
attr_accessor :location_id
# Service-specific metadata. For example the available capacity at the given
# location.
# Corresponds to the JSON property `metadata`
# @return [Hash<String,Object>]
attr_accessor :metadata
# Resource name for the location, which may vary between implementations. For
# example: "projects/example-project/locations/us-east1"
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@display_name = args[:display_name] if args.key?(:display_name)
@labels = args[:labels] if args.key?(:labels)
@location_id = args[:location_id] if args.key?(:location_id)
@metadata = args[:metadata] if args.key?(:metadata)
@name = args[:name] if args.key?(:name)
end
end
# Metadata for the given google.cloud.location.Location.
class LocationMetadata
include Google::Apis::Core::Hashable
# App Engine flexible environment is available in the given location.@OutputOnly
# Corresponds to the JSON property `flexibleEnvironmentAvailable`
# @return [Boolean]
attr_accessor :flexible_environment_available
alias_method :flexible_environment_available?, :flexible_environment_available
# App Engine standard environment is available in the given location.@OutputOnly
# Corresponds to the JSON property `standardEnvironmentAvailable`
# @return [Boolean]
attr_accessor :standard_environment_available
alias_method :standard_environment_available?, :standard_environment_available
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@flexible_environment_available = args[:flexible_environment_available] if args.key?(:flexible_environment_available)
@standard_environment_available = args[:standard_environment_available] if args.key?(:standard_environment_available)
end
end
# A certificate managed by App Engine.
class ManagedCertificate
include Google::Apis::Core::Hashable
# Time at which the certificate was last renewed. The renewal process is fully
# managed. Certificate renewal will automatically occur before the certificate
# expires. Renewal errors can be tracked via ManagementStatus.@OutputOnly
# Corresponds to the JSON property `lastRenewalTime`
# @return [String]
attr_accessor :last_renewal_time
# Status of certificate management. Refers to the most recent certificate
# acquisition or renewal attempt.@OutputOnly
# Corresponds to the JSON property `status`
# @return [String]
attr_accessor :status
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@last_renewal_time = args[:last_renewal_time] if args.key?(:last_renewal_time)
@status = args[:status] if args.key?(:status)
end
end
# A service with manual scaling runs continuously, allowing you to perform
# complex initialization and rely on the state of its memory over time.
class ManualScaling
include Google::Apis::Core::Hashable
# Number of instances to assign to the service at the start. This number can
# later be altered by using the Modules API (https://cloud.google.com/appengine/
# docs/python/modules/functions) set_num_instances() function.
# Corresponds to the JSON property `instances`
# @return [Fixnum]
attr_accessor :instances
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@instances = args[:instances] if args.key?(:instances)
end
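# Usage sketch: run a fixed pool of three instances (the count is illustrative).
#
#   manual = Google::Apis::AppengineV1beta::ManualScaling.new(instances: 3)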
end
# Extra network settings. Only applicable in the App Engine flexible environment.
class Network
include Google::Apis::Core::Hashable
# List of ports, or port pairs, to forward from the virtual machine to the
# application container. Only applicable in the App Engine flexible environment.
# Corresponds to the JSON property `forwardedPorts`
# @return [Array<String>]
attr_accessor :forwarded_ports
# Tag to apply to the instance during creation. Only applicable in the App
# Engine flexible environment.
# Corresponds to the JSON property `instanceTag`
# @return [String]
attr_accessor :instance_tag
# Google Compute Engine network where the virtual machines are created. Specify
# the short name, not the resource path. Defaults to default.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Enable session affinity.
# Corresponds to the JSON property `sessionAffinity`
# @return [Boolean]
attr_accessor :session_affinity
alias_method :session_affinity?, :session_affinity
# Google Cloud Platform sub-network where the virtual machines are created.
# Specify the short name, not the resource path.If a subnetwork name is
# specified, a network name will also be required unless it is for the default
# network.
# If the network that the instance is being created in is a Legacy network, then
# the IP address is allocated from the IPv4Range.
# If the network that the instance is being created in is an auto Subnet Mode
# Network, then only network name should be specified (not the subnetwork_name)
# and the IP address is created from the IPCidrRange of the subnetwork that
# exists in that zone for that network.
# If the network that the instance is being created in is a custom Subnet Mode
# Network, then the subnetwork_name must be specified and the IP address is
# created from the IPCidrRange of the subnetwork. If specified, the subnetwork
# must exist in the same region as the App Engine flexible environment
# application.
# Corresponds to the JSON property `subnetworkName`
# @return [String]
attr_accessor :subnetwork_name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@forwarded_ports = args[:forwarded_ports] if args.key?(:forwarded_ports)
@instance_tag = args[:instance_tag] if args.key?(:instance_tag)
@name = args[:name] if args.key?(:name)
@session_affinity = args[:session_affinity] if args.key?(:session_affinity)
@subnetwork_name = args[:subnetwork_name] if args.key?(:subnetwork_name)
end
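# Usage sketch (illustrative values): forward a port, tag the VMs, and keep
# requests from one client on the same instance via session affinity.
#
#   network = Google::Apis::AppengineV1beta::Network.new(
#     name: 'default',
#     instance_tag: 'my-tag',
#     forwarded_ports: ['8080'],
#     session_affinity: true)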
end
# Target scaling by network usage. Only applicable in the App Engine flexible
# environment.
class NetworkUtilization
include Google::Apis::Core::Hashable
# Target bytes received per second.
# Corresponds to the JSON property `targetReceivedBytesPerSecond`
# @return [Fixnum]
attr_accessor :target_received_bytes_per_second
# Target packets received per second.
# Corresponds to the JSON property `targetReceivedPacketsPerSecond`
# @return [Fixnum]
attr_accessor :target_received_packets_per_second
# Target bytes sent per second.
# Corresponds to the JSON property `targetSentBytesPerSecond`
# @return [Fixnum]
attr_accessor :target_sent_bytes_per_second
# Target packets sent per second.
# Corresponds to the JSON property `targetSentPacketsPerSecond`
# @return [Fixnum]
attr_accessor :target_sent_packets_per_second
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@target_received_bytes_per_second = args[:target_received_bytes_per_second] if args.key?(:target_received_bytes_per_second)
@target_received_packets_per_second = args[:target_received_packets_per_second] if args.key?(:target_received_packets_per_second)
@target_sent_bytes_per_second = args[:target_sent_bytes_per_second] if args.key?(:target_sent_bytes_per_second)
@target_sent_packets_per_second = args[:target_sent_packets_per_second] if args.key?(:target_sent_packets_per_second)
end
end
#
class OAuth2ClientInfo
include Google::Apis::Core::Hashable
# Application name to be used in OAuth consent screen.
# Corresponds to the JSON property `applicationName`
# @return [String]
attr_accessor :application_name
# Name of the client to be generated. Optional; if not provided, the name will
# be autogenerated by the backend.
# Corresponds to the JSON property `clientName`
# @return [String]
attr_accessor :client_name
# Developer's information to be used in OAuth consent screen.
# Corresponds to the JSON property `developerEmailAddress`
# @return [String]
attr_accessor :developer_email_address
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@application_name = args[:application_name] if args.key?(:application_name)
@client_name = args[:client_name] if args.key?(:client_name)
@developer_email_address = args[:developer_email_address] if args.key?(:developer_email_address)
end
end
# This resource represents a long-running operation that is the result of a
# network API call.
class Operation
include Google::Apis::Core::Hashable
# If the value is false, it means the operation is still in progress. If true,
# the operation is completed, and either error or response is available.
# Corresponds to the JSON property `done`
# @return [Boolean]
attr_accessor :done
alias_method :done?, :done
# The Status type defines a logical error model that is suitable for different
# programming environments, including REST APIs and RPC APIs. It is used by gRPC
# (https://github.com/grpc). The error model is designed to be:
# Simple to use and understand for most users
# Flexible enough to meet unexpected needs. Overview: The Status message contains
# three pieces of data: error code, error message, and error details. The error
# code should be an enum value of google.rpc.Code, but it may accept additional
# error codes if needed. The error message should be a developer-facing English
# message that helps developers understand and resolve the error. If a localized
# user-facing error message is needed, put the localized message in the error
# details or localize it in the client. The optional error details may contain
# arbitrary information about the error. There is a predefined set of error
# detail types in the package google.rpc that can be used for common error
# conditions. Language mapping: The Status message is the logical representation of
# the error model, but it is not necessarily the actual wire format. When the
# Status message is exposed in different client libraries and different wire
# protocols, it can be mapped differently. For example, it will likely be mapped
# to some exceptions in Java, but more likely mapped to some error codes in C.
# Other uses: The error model and the Status message can be used in a variety of
# environments, either with or without APIs, to provide a consistent developer
# experience across different environments. Example uses of this error model
# include:
# Partial errors. If a service needs to return partial errors to the client, it
# may embed the Status in the normal response to indicate the partial errors.
# Workflow errors. A typical workflow has multiple steps. Each step may have a
# Status message for error reporting.
# Batch operations. If a client uses batch request and batch response, the
# Status message should be used directly inside batch response, one for each
# error sub-response.
# Asynchronous operations. If an API call embeds asynchronous operation results
# in its response, the status of those operations should be represented directly
# using the Status message.
# Logging. If some API errors are stored in logs, the message Status could be
# used directly after any stripping needed for security/privacy reasons.
# Corresponds to the JSON property `error`
# @return [Google::Apis::AppengineV1beta::Status]
attr_accessor :error
# Service-specific metadata associated with the operation. It typically contains
# progress information and common metadata such as create time. Some services
# might not provide such metadata. Any method that returns a long-running
# operation should document the metadata type, if any.
# Corresponds to the JSON property `metadata`
# @return [Hash<String,Object>]
attr_accessor :metadata
# The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the name should
# have the format of operations/some/unique/name.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# The normal response of the operation in case of success. If the original
# method returns no data on success, such as Delete, the response is google.
# protobuf.Empty. If the original method is standard Get/Create/Update, the
# response should be the resource. For other methods, the response should have
# the type XxxResponse, where Xxx is the original method name. For example, if
# the original method name is TakeSnapshot(), the inferred response type is
# TakeSnapshotResponse.
# Corresponds to the JSON property `response`
# @return [Hash<String,Object>]
attr_accessor :response
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@done = args[:done] if args.key?(:done)
@error = args[:error] if args.key?(:error)
@metadata = args[:metadata] if args.key?(:metadata)
@name = args[:name] if args.key?(:name)
@response = args[:response] if args.key?(:response)
end
end
# Metadata for the given google.longrunning.Operation.
class OperationMetadata
include Google::Apis::Core::Hashable
# Timestamp that this operation completed.@OutputOnly
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# Timestamp that this operation was created.@OutputOnly
# Corresponds to the JSON property `insertTime`
# @return [String]
attr_accessor :insert_time
# API method that initiated this operation. Example: google.appengine.v1beta4.
# Version.CreateVersion.@OutputOnly
# Corresponds to the JSON property `method`
# @return [String]
attr_accessor :method_prop
# Type of this operation. Deprecated, use method field instead. Example: "
# create_version".@OutputOnly
# Corresponds to the JSON property `operationType`
# @return [String]
attr_accessor :operation_type
# Name of the resource that this operation is acting on. Example: apps/myapp/
# modules/default.@OutputOnly
# Corresponds to the JSON property `target`
# @return [String]
attr_accessor :target
# User who requested this operation.@OutputOnly
# Corresponds to the JSON property `user`
# @return [String]
attr_accessor :user
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@end_time = args[:end_time] if args.key?(:end_time)
@insert_time = args[:insert_time] if args.key?(:insert_time)
@method_prop = args[:method_prop] if args.key?(:method_prop)
@operation_type = args[:operation_type] if args.key?(:operation_type)
@target = args[:target] if args.key?(:target)
@user = args[:user] if args.key?(:user)
end
end
# Metadata for the given google.longrunning.Operation.
class OperationMetadataV1
include Google::Apis::Core::Hashable
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1.CreateVersionRequest.
# Corresponds to the JSON property `createVersionMetadata`
# @return [Google::Apis::AppengineV1beta::CreateVersionMetadataV1]
attr_accessor :create_version_metadata
# Time that this operation completed.@OutputOnly
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# Ephemeral message that may change every time the operation is polled. @
# OutputOnly
# Corresponds to the JSON property `ephemeralMessage`
# @return [String]
attr_accessor :ephemeral_message
# Time that this operation was created.@OutputOnly
# Corresponds to the JSON property `insertTime`
# @return [String]
attr_accessor :insert_time
# API method that initiated this operation. Example: google.appengine.v1.
# Versions.CreateVersion.@OutputOnly
# Corresponds to the JSON property `method`
# @return [String]
attr_accessor :method_prop
# Name of the resource that this operation is acting on. Example: apps/myapp/
# services/default.@OutputOnly
# Corresponds to the JSON property `target`
# @return [String]
attr_accessor :target
# User who requested this operation.@OutputOnly
# Corresponds to the JSON property `user`
# @return [String]
attr_accessor :user
# Durable messages that persist on every operation poll. @OutputOnly
# Corresponds to the JSON property `warning`
# @return [Array<String>]
attr_accessor :warning
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@create_version_metadata = args[:create_version_metadata] if args.key?(:create_version_metadata)
@end_time = args[:end_time] if args.key?(:end_time)
@ephemeral_message = args[:ephemeral_message] if args.key?(:ephemeral_message)
@insert_time = args[:insert_time] if args.key?(:insert_time)
@method_prop = args[:method_prop] if args.key?(:method_prop)
@target = args[:target] if args.key?(:target)
@user = args[:user] if args.key?(:user)
@warning = args[:warning] if args.key?(:warning)
end
end
# Metadata for the given google.longrunning.Operation.
class OperationMetadataV1Alpha
include Google::Apis::Core::Hashable
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1alpha.CreateVersionRequest.
# Corresponds to the JSON property `createVersionMetadata`
# @return [Google::Apis::AppengineV1beta::CreateVersionMetadataV1Alpha]
attr_accessor :create_version_metadata
# Time that this operation completed.@OutputOnly
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# Ephemeral message that may change every time the operation is polled. @
# OutputOnly
# Corresponds to the JSON property `ephemeralMessage`
# @return [String]
attr_accessor :ephemeral_message
# Time that this operation was created.@OutputOnly
# Corresponds to the JSON property `insertTime`
# @return [String]
attr_accessor :insert_time
# API method that initiated this operation. Example: google.appengine.v1alpha.
# Versions.CreateVersion.@OutputOnly
# Corresponds to the JSON property `method`
# @return [String]
attr_accessor :method_prop
# Name of the resource that this operation is acting on. Example: apps/myapp/
# services/default.@OutputOnly
# Corresponds to the JSON property `target`
# @return [String]
attr_accessor :target
# User who requested this operation.@OutputOnly
# Corresponds to the JSON property `user`
# @return [String]
attr_accessor :user
# Durable messages that persist on every operation poll. @OutputOnly
# Corresponds to the JSON property `warning`
# @return [Array<String>]
attr_accessor :warning
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@create_version_metadata = args[:create_version_metadata] if args.key?(:create_version_metadata)
@end_time = args[:end_time] if args.key?(:end_time)
@ephemeral_message = args[:ephemeral_message] if args.key?(:ephemeral_message)
@insert_time = args[:insert_time] if args.key?(:insert_time)
@method_prop = args[:method_prop] if args.key?(:method_prop)
@target = args[:target] if args.key?(:target)
@user = args[:user] if args.key?(:user)
@warning = args[:warning] if args.key?(:warning)
end
end
# Metadata for the given google.longrunning.Operation.
class OperationMetadataV1Beta
include Google::Apis::Core::Hashable
# Metadata for the given google.longrunning.Operation during a google.appengine.
# v1beta.CreateVersionRequest.
# Corresponds to the JSON property `createVersionMetadata`
# @return [Google::Apis::AppengineV1beta::CreateVersionMetadataV1Beta]
attr_accessor :create_version_metadata
# Time that this operation completed.@OutputOnly
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# Ephemeral message that may change every time the operation is polled. @
# OutputOnly
# Corresponds to the JSON property `ephemeralMessage`
# @return [String]
attr_accessor :ephemeral_message
# Time that this operation was created.@OutputOnly
# Corresponds to the JSON property `insertTime`
# @return [String]
attr_accessor :insert_time
# API method that initiated this operation. Example: google.appengine.v1beta.
# Versions.CreateVersion.@OutputOnly
# Corresponds to the JSON property `method`
# @return [String]
attr_accessor :method_prop
# Name of the resource that this operation is acting on. Example: apps/myapp/
# services/default.@OutputOnly
# Corresponds to the JSON property `target`
# @return [String]
attr_accessor :target
# User who requested this operation.@OutputOnly
# Corresponds to the JSON property `user`
# @return [String]
attr_accessor :user
# Durable messages that persist on every operation poll. @OutputOnly
# Corresponds to the JSON property `warning`
# @return [Array<String>]
attr_accessor :warning
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@create_version_metadata = args[:create_version_metadata] if args.key?(:create_version_metadata)
@end_time = args[:end_time] if args.key?(:end_time)
@ephemeral_message = args[:ephemeral_message] if args.key?(:ephemeral_message)
@insert_time = args[:insert_time] if args.key?(:insert_time)
@method_prop = args[:method_prop] if args.key?(:method_prop)
@target = args[:target] if args.key?(:target)
@user = args[:user] if args.key?(:user)
@warning = args[:warning] if args.key?(:warning)
end
end
# Metadata for the given google.longrunning.Operation.
class OperationMetadataV1Beta5
include Google::Apis::Core::Hashable
# Timestamp that this operation completed.@OutputOnly
# Corresponds to the JSON property `endTime`
# @return [String]
attr_accessor :end_time
# Timestamp that this operation was created.@OutputOnly
# Corresponds to the JSON property `insertTime`
# @return [String]
attr_accessor :insert_time
# API method name that initiated this operation. Example: google.appengine.
# v1beta5.Version.CreateVersion.@OutputOnly
# Corresponds to the JSON property `method`
# @return [String]
attr_accessor :method_prop
# Name of the resource that this operation is acting on. Example: apps/myapp/
# services/default.@OutputOnly
# Corresponds to the JSON property `target`
# @return [String]
attr_accessor :target
# User who requested this operation.@OutputOnly
# Corresponds to the JSON property `user`
# @return [String]
attr_accessor :user
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@end_time = args[:end_time] if args.key?(:end_time)
@insert_time = args[:insert_time] if args.key?(:insert_time)
@method_prop = args[:method_prop] if args.key?(:method_prop)
@target = args[:target] if args.key?(:target)
@user = args[:user] if args.key?(:user)
end
end
# Readiness checking configuration for VM instances. Unhealthy instances are
# removed from traffic rotation.
class ReadinessCheck
include Google::Apis::Core::Hashable
# A maximum time limit on application initialization, measured from the moment the
# application successfully replies to a healthcheck until it is ready to serve
# traffic.
# Corresponds to the JSON property `appStartTimeout`
# @return [String]
attr_accessor :app_start_timeout
# Interval between health checks.
# Corresponds to the JSON property `checkInterval`
# @return [String]
attr_accessor :check_interval
# Number of consecutive failed checks required before removing traffic.
# Corresponds to the JSON property `failureThreshold`
# @return [Fixnum]
attr_accessor :failure_threshold
# Host header to send when performing a HTTP Readiness check. Example: "myapp.
# appspot.com"
# Corresponds to the JSON property `host`
# @return [String]
attr_accessor :host
# The request path.
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# Number of consecutive successful checks required before receiving traffic.
# Corresponds to the JSON property `successThreshold`
# @return [Fixnum]
attr_accessor :success_threshold
# Time before the check is considered failed.
# Corresponds to the JSON property `timeout`
# @return [String]
attr_accessor :timeout
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@app_start_timeout = args[:app_start_timeout] if args.key?(:app_start_timeout)
@check_interval = args[:check_interval] if args.key?(:check_interval)
@failure_threshold = args[:failure_threshold] if args.key?(:failure_threshold)
@host = args[:host] if args.key?(:host)
@path = args[:path] if args.key?(:path)
@success_threshold = args[:success_threshold] if args.key?(:success_threshold)
@timeout = args[:timeout] if args.key?(:timeout)
end
end
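# Usage sketch (illustrative, not part of the generated client): constructing a
# ReadinessCheck through the keyword-argument initializer defined above. The
# "300s"-style duration strings follow the JSON duration convention and are an
# assumption here rather than something this file enforces.
#
#   check = Google::Apis::AppengineV1beta::ReadinessCheck.new(
#     path: '/readiness',
#     check_interval: '5s',
#     timeout: '4s',
#     failure_threshold: 2,
#     success_threshold: 2,
#     app_start_timeout: '300s'
#   )
#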
# Request message for 'Applications.RepairApplication'.
class RepairApplicationRequest
include Google::Apis::Core::Hashable
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
end
end
# Target scaling by request utilization. Only applicable in the App Engine
# flexible environment.
class RequestUtilization
include Google::Apis::Core::Hashable
# Target number of concurrent requests.
# Corresponds to the JSON property `targetConcurrentRequests`
# @return [Fixnum]
attr_accessor :target_concurrent_requests
# Target requests per second.
# Corresponds to the JSON property `targetRequestCountPerSecond`
# @return [Fixnum]
attr_accessor :target_request_count_per_second
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@target_concurrent_requests = args[:target_concurrent_requests] if args.key?(:target_concurrent_requests)
@target_request_count_per_second = args[:target_request_count_per_second] if args.key?(:target_request_count_per_second)
end
end
# A DNS resource record.
class ResourceRecord
include Google::Apis::Core::Hashable
# Relative name of the object affected by this record. Only applicable for CNAME
# records. Example: 'www'.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Data for this record. Values vary by record type, as defined in RFC 1035 (
# section 5) and RFC 1034 (section 3.6.1).
# Corresponds to the JSON property `rrdata`
# @return [String]
attr_accessor :rrdata
# Resource record type. Example: AAAA.
# Corresponds to the JSON property `type`
# @return [String]
attr_accessor :type
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@name = args[:name] if args.key?(:name)
@rrdata = args[:rrdata] if args.key?(:rrdata)
@type = args[:type] if args.key?(:type)
end
end
# Machine resources for a version.
class Resources
include Google::Apis::Core::Hashable
# Number of CPU cores needed.
# Corresponds to the JSON property `cpu`
# @return [Float]
attr_accessor :cpu
# Disk size (GB) needed.
# Corresponds to the JSON property `diskGb`
# @return [Float]
attr_accessor :disk_gb
# Memory (GB) needed.
# Corresponds to the JSON property `memoryGb`
# @return [Float]
attr_accessor :memory_gb
# User specified volumes.
# Corresponds to the JSON property `volumes`
# @return [Array<Google::Apis::AppengineV1beta::Volume>]
attr_accessor :volumes
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@cpu = args[:cpu] if args.key?(:cpu)
@disk_gb = args[:disk_gb] if args.key?(:disk_gb)
@memory_gb = args[:memory_gb] if args.key?(:memory_gb)
@volumes = args[:volumes] if args.key?(:volumes)
end
end
# Executes a script to handle the request that matches the URL pattern.
class ScriptHandler
include Google::Apis::Core::Hashable
# Path to the script from the application root directory.
# Corresponds to the JSON property `scriptPath`
# @return [String]
attr_accessor :script_path
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@script_path = args[:script_path] if args.key?(:script_path)
end
end
# A Service resource is a logical component of an application that can share
# state and communicate in a secure fashion with other services. For example, an
# application that handles customer requests might include separate services to
# handle tasks such as backend data analysis or API requests from mobile devices.
# Each service has a collection of versions that define a specific set of code
# used to implement the functionality of that service.
class Service
include Google::Apis::Core::Hashable
# Relative name of the service within the application. Example: default.@
# OutputOnly
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Full path to the Service resource in the API. Example: apps/myapp/services/
# default.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Traffic routing configuration for versions within a single service. Traffic
# splits define how traffic directed to the service is assigned to versions.
# Corresponds to the JSON property `split`
# @return [Google::Apis::AppengineV1beta::TrafficSplit]
attr_accessor :split
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@id = args[:id] if args.key?(:id)
@name = args[:name] if args.key?(:name)
@split = args[:split] if args.key?(:split)
end
end
# SSL configuration for a DomainMapping resource.
class SslSettings
include Google::Apis::Core::Hashable
# ID of the AuthorizedCertificate resource configuring SSL for the application.
# Clearing this field will remove SSL support.By default, a managed certificate
# is automatically created for every domain mapping. To omit SSL support or to
# configure SSL manually, specify SslManagementType.MANUAL on a CREATE or UPDATE
# request. You must be authorized to administer the AuthorizedCertificate
# resource to manually map it to a DomainMapping resource. Example: 12345.
# Corresponds to the JSON property `certificateId`
# @return [String]
attr_accessor :certificate_id
# ID of the managed AuthorizedCertificate resource currently being provisioned,
# if applicable. Until the new managed certificate has been successfully
# provisioned, the previous SSL state will be preserved. Once the provisioning
# process completes, the certificate_id field will reflect the new managed
# certificate and this field will be left empty. To remove SSL support while
# there is still a pending managed certificate, clear the certificate_id field
# with an UpdateDomainMappingRequest.@OutputOnly
# Corresponds to the JSON property `pendingManagedCertificateId`
# @return [String]
attr_accessor :pending_managed_certificate_id
# SSL management type for this domain. If AUTOMATIC, a managed certificate is
# automatically provisioned. If MANUAL, certificate_id must be manually
# specified in order to configure SSL for this domain.
# Corresponds to the JSON property `sslManagementType`
# @return [String]
attr_accessor :ssl_management_type
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@certificate_id = args[:certificate_id] if args.key?(:certificate_id)
@pending_managed_certificate_id = args[:pending_managed_certificate_id] if args.key?(:pending_managed_certificate_id)
@ssl_management_type = args[:ssl_management_type] if args.key?(:ssl_management_type)
end
end
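# Usage sketch (illustrative): opting a domain mapping into manually managed SSL.
# The certificate ID is a placeholder; as noted above, you must be authorized to
# administer that AuthorizedCertificate resource for the mapping to succeed.
#
#   ssl = Google::Apis::AppengineV1beta::SslSettings.new(
#     ssl_management_type: 'MANUAL',
#     certificate_id: '12345'
#   )
#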
# Scheduler settings for standard environment.
class StandardSchedulerSettings
include Google::Apis::Core::Hashable
# Maximum number of instances to run for this version. Set to zero to disable
# max_instances configuration.
# Corresponds to the JSON property `maxInstances`
# @return [Fixnum]
attr_accessor :max_instances
# Minimum number of instances to run for this version. Set to zero to disable
# min_instances configuration.
# Corresponds to the JSON property `minInstances`
# @return [Fixnum]
attr_accessor :min_instances
# Target CPU utilization ratio to maintain when scaling.
# Corresponds to the JSON property `targetCpuUtilization`
# @return [Float]
attr_accessor :target_cpu_utilization
# Target throughput utilization ratio to maintain when scaling
# Corresponds to the JSON property `targetThroughputUtilization`
# @return [Float]
attr_accessor :target_throughput_utilization
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@max_instances = args[:max_instances] if args.key?(:max_instances)
@min_instances = args[:min_instances] if args.key?(:min_instances)
@target_cpu_utilization = args[:target_cpu_utilization] if args.key?(:target_cpu_utilization)
@target_throughput_utilization = args[:target_throughput_utilization] if args.key?(:target_throughput_utilization)
end
end
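# Usage sketch (illustrative): scheduler settings for a standard-environment
# version. The utilization targets are ratios between 0 and 1; the numbers below
# are examples only.
#
#   scheduler = Google::Apis::AppengineV1beta::StandardSchedulerSettings.new(
#     min_instances: 1,
#     max_instances: 10,
#     target_cpu_utilization: 0.6,
#     target_throughput_utilization: 0.6
#   )
#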
# Files served directly to the user for a given URL, such as images, CSS
# stylesheets, or JavaScript source files. Static file handlers describe which
# files in the application directory are static files, and which URLs serve them.
class StaticFilesHandler
include Google::Apis::Core::Hashable
# Whether files should also be uploaded as code data. By default, files declared
# in static file handlers are uploaded as static data and are only served to end
# users; they cannot be read by the application. If enabled, uploads are charged
# against both your code and static data storage resource quotas.
# Corresponds to the JSON property `applicationReadable`
# @return [Boolean]
attr_accessor :application_readable
alias_method :application_readable?, :application_readable
# Time a static file served by this handler should be cached by web proxies and
# browsers.
# Corresponds to the JSON property `expiration`
# @return [String]
attr_accessor :expiration
# HTTP headers to use for all responses from these URLs.
# Corresponds to the JSON property `httpHeaders`
# @return [Hash<String,String>]
attr_accessor :http_headers
# MIME type used to serve all files served by this handler.Defaults to file-
# specific MIME types, which are derived from each file's filename extension.
# Corresponds to the JSON property `mimeType`
# @return [String]
attr_accessor :mime_type
# Path to the static files matched by the URL pattern, from the application root
# directory. The path can refer to text matched in groupings in the URL pattern.
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# Whether this handler should match the request if the file referenced by the
# handler does not exist.
# Corresponds to the JSON property `requireMatchingFile`
# @return [Boolean]
attr_accessor :require_matching_file
alias_method :require_matching_file?, :require_matching_file
# Regular expression that matches the file paths for all files that should be
# referenced by this handler.
# Corresponds to the JSON property `uploadPathRegex`
# @return [String]
attr_accessor :upload_path_regex
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@application_readable = args[:application_readable] if args.key?(:application_readable)
@expiration = args[:expiration] if args.key?(:expiration)
@http_headers = args[:http_headers] if args.key?(:http_headers)
@mime_type = args[:mime_type] if args.key?(:mime_type)
@path = args[:path] if args.key?(:path)
@require_matching_file = args[:require_matching_file] if args.key?(:require_matching_file)
@upload_path_regex = args[:upload_path_regex] if args.key?(:upload_path_regex)
end
end
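# Usage sketch (illustrative): a handler that serves one uploaded file with an
# explicit cache header. The expiration duration format and the header value are
# illustrative assumptions, not rules defined in this file.
#
#   static = Google::Apis::AppengineV1beta::StaticFilesHandler.new(
#     path: 'static/index.html',
#     upload_path_regex: 'static/index\.html',
#     expiration: '3600s',
#     http_headers: { 'Cache-Control' => 'public' }
#   )
#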
# The Status type defines a logical error model that is suitable for different
# programming environments, including REST APIs and RPC APIs. It is used by gRPC
# (https://github.com/grpc). The error model is designed to be:
# - Simple to use and understand for most users
# - Flexible enough to meet unexpected needs
# Overview: The Status message contains three pieces of data: error code, error
# message, and error details. The error code should be an enum value of
# google.rpc.Code, but it may accept additional error codes if needed. The error
# message should be a developer-facing English message that helps developers
# understand and resolve the error. If a localized user-facing error message is
# needed, put the localized message in the error details or localize it in the
# client. The optional error details may contain arbitrary information about the
# error. There is a predefined set of error detail types in the package google.rpc
# that can be used for common error conditions.
# Language mapping: The Status message is the logical representation of the error
# model, but it is not necessarily the actual wire format. When the Status message
# is exposed in different client libraries and different wire protocols, it can be
# mapped differently. For example, it will likely be mapped to some exceptions in
# Java, but more likely mapped to some error codes in C.
# Other uses: The error model and the Status message can be used in a variety of
# environments, either with or without APIs, to provide a consistent developer
# experience across different environments. Example uses of this error model
# include:
# - Partial errors. If a service needs to return partial errors to the client, it
#   may embed the Status in the normal response to indicate the partial errors.
# - Workflow errors. A typical workflow has multiple steps. Each step may have a
#   Status message for error reporting.
# - Batch operations. If a client uses batch request and batch response, the
#   Status message should be used directly inside batch response, one for each
#   error sub-response.
# - Asynchronous operations. If an API call embeds asynchronous operation results
#   in its response, the status of those operations should be represented directly
#   using the Status message.
# - Logging. If some API errors are stored in logs, the message Status could be
#   used directly after any stripping needed for security/privacy reasons.
class Status
include Google::Apis::Core::Hashable
# The status code, which should be an enum value of google.rpc.Code.
# Corresponds to the JSON property `code`
# @return [Fixnum]
attr_accessor :code
# A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
# Corresponds to the JSON property `details`
# @return [Array<Hash<String,Object>>]
attr_accessor :details
# A developer-facing error message, which should be in English. Any user-facing
# error message should be localized and sent in the google.rpc.Status.details
# field, or localized by the client.
# Corresponds to the JSON property `message`
# @return [String]
attr_accessor :message
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@code = args[:code] if args.key?(:code)
@details = args[:details] if args.key?(:details)
@message = args[:message] if args.key?(:message)
end
end
# Traffic routing configuration for versions within a single service. Traffic
# splits define how traffic directed to the service is assigned to versions.
class TrafficSplit
include Google::Apis::Core::Hashable
# Mapping from version IDs within the service to fractional (0.000, 1]
# allocations of traffic for that version. Each version can be specified only
# once, but some versions in the service may not have any traffic allocation.
# Services that have traffic allocated cannot be deleted until either the
# service is deleted or their traffic allocation is removed. Allocations must
# sum to 1. Up to two decimal place precision is supported for IP-based splits
# and up to three decimal places is supported for cookie-based splits.
# Corresponds to the JSON property `allocations`
# @return [Hash<String,Float>]
attr_accessor :allocations
# Mechanism used to determine which version a request is sent to. The traffic
# selection algorithm will be stable for either type until allocations are
# changed.
# Corresponds to the JSON property `shardBy`
# @return [String]
attr_accessor :shard_by
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@allocations = args[:allocations] if args.key?(:allocations)
@shard_by = args[:shard_by] if args.key?(:shard_by)
end
end
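# Usage sketch (illustrative): an IP-based split across two versions. As noted
# above, allocations must sum to 1 and IP-based splits support at most two decimal
# places of precision.
#
#   split = Google::Apis::AppengineV1beta::TrafficSplit.new(
#     shard_by: 'IP',
#     allocations: { 'v1' => 0.75, 'v2' => 0.25 }
#   )
#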
# Rules to match an HTTP request and dispatch that request to a service.
class UrlDispatchRule
include Google::Apis::Core::Hashable
# Domain name to match against. The wildcard "*" is supported if specified
# before a period: "*.".Defaults to matching all domains: "*".
# Corresponds to the JSON property `domain`
# @return [String]
attr_accessor :domain
# Pathname within the host. Must start with a "/". A single "*" can be included
# at the end of the path.The sum of the lengths of the domain and path may not
# exceed 100 characters.
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# Resource ID of a service in this application that should serve the matched
# request. The service must already exist. Example: default.
# Corresponds to the JSON property `service`
# @return [String]
attr_accessor :service
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@domain = args[:domain] if args.key?(:domain)
@path = args[:path] if args.key?(:path)
@service = args[:service] if args.key?(:service)
end
end
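# Usage sketch (illustrative): route every request under /api/ on any domain to a
# service named "api" (the service name is a placeholder and must already exist).
#
#   rule = Google::Apis::AppengineV1beta::UrlDispatchRule.new(
#     domain: '*',
#     path: '/api/*',
#     service: 'api'
#   )
#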
# URL pattern and description of how the URL should be handled. App Engine can
# handle URLs by executing application code or by serving static files uploaded
# with the version, such as images, CSS, or JavaScript.
class UrlMap
include Google::Apis::Core::Hashable
# Uses Google Cloud Endpoints to handle requests.
# Corresponds to the JSON property `apiEndpoint`
# @return [Google::Apis::AppengineV1beta::ApiEndpointHandler]
attr_accessor :api_endpoint
# Action to take when users access resources that require authentication.
# Defaults to redirect.
# Corresponds to the JSON property `authFailAction`
# @return [String]
attr_accessor :auth_fail_action
# Level of login required to access this resource. Not supported for Node.js in
# the App Engine standard environment.
# Corresponds to the JSON property `login`
# @return [String]
attr_accessor :login
# 30x code to use when performing redirects for the secure field. Defaults to
# 302.
# Corresponds to the JSON property `redirectHttpResponseCode`
# @return [String]
attr_accessor :redirect_http_response_code
# Executes a script to handle the request that matches the URL pattern.
# Corresponds to the JSON property `script`
# @return [Google::Apis::AppengineV1beta::ScriptHandler]
attr_accessor :script
# Security (HTTPS) enforcement for this URL.
# Corresponds to the JSON property `securityLevel`
# @return [String]
attr_accessor :security_level
# Files served directly to the user for a given URL, such as images, CSS
# stylesheets, or JavaScript source files. Static file handlers describe which
# files in the application directory are static files, and which URLs serve them.
# Corresponds to the JSON property `staticFiles`
# @return [Google::Apis::AppengineV1beta::StaticFilesHandler]
attr_accessor :static_files
# URL prefix. Uses regular expression syntax, which means regexp special
# characters must be escaped, but should not contain groupings. All URLs that
# begin with this prefix are handled by this handler, using the portion of the
# URL after the prefix as part of the file path.
# Corresponds to the JSON property `urlRegex`
# @return [String]
attr_accessor :url_regex
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@api_endpoint = args[:api_endpoint] if args.key?(:api_endpoint)
@auth_fail_action = args[:auth_fail_action] if args.key?(:auth_fail_action)
@login = args[:login] if args.key?(:login)
@redirect_http_response_code = args[:redirect_http_response_code] if args.key?(:redirect_http_response_code)
@script = args[:script] if args.key?(:script)
@security_level = args[:security_level] if args.key?(:security_level)
@static_files = args[:static_files] if args.key?(:static_files)
@url_regex = args[:url_regex] if args.key?(:url_regex)
end
end
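# Usage sketch (illustrative): an ordered pair of handlers, a static route followed
# by a catch-all that executes application code. The 'auto' script path is a
# convention used by several runtimes and is an assumption here, not something this
# file defines.
#
#   handlers = [
#     Google::Apis::AppengineV1beta::UrlMap.new(
#       url_regex: '/favicon\.ico',
#       static_files: static,              # e.g. the StaticFilesHandler sketched earlier
#       security_level: 'SECURE_ALWAYS'
#     ),
#     Google::Apis::AppengineV1beta::UrlMap.new(
#       url_regex: '/.*',
#       script: Google::Apis::AppengineV1beta::ScriptHandler.new(script_path: 'auto')
#     )
#   ]
#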
# A Version resource is a specific set of source code and configuration files
# that are deployed into a service.
class Version
include Google::Apis::Core::Hashable
# Google Cloud Endpoints (https://cloud.google.com/appengine/docs/python/
# endpoints/) configuration for API handlers.
# Corresponds to the JSON property `apiConfig`
# @return [Google::Apis::AppengineV1beta::ApiConfigHandler]
attr_accessor :api_config
# Automatic scaling is based on request rate, response latencies, and other
# application metrics.
# Corresponds to the JSON property `automaticScaling`
# @return [Google::Apis::AppengineV1beta::AutomaticScaling]
attr_accessor :automatic_scaling
# A service with basic scaling will create an instance when the application
# receives a request. The instance will be turned down when the app becomes idle.
# Basic scaling is ideal for work that is intermittent or driven by user
# activity.
# Corresponds to the JSON property `basicScaling`
# @return [Google::Apis::AppengineV1beta::BasicScaling]
attr_accessor :basic_scaling
# Metadata settings that are supplied to this version to enable beta runtime
# features.
# Corresponds to the JSON property `betaSettings`
# @return [Hash<String,String>]
attr_accessor :beta_settings
# Time that this version was created.@OutputOnly
# Corresponds to the JSON property `createTime`
# @return [String]
attr_accessor :create_time
# Email address of the user who created this version.@OutputOnly
# Corresponds to the JSON property `createdBy`
# @return [String]
attr_accessor :created_by
# Duration that static files should be cached by web proxies and browsers. Only
# applicable if the corresponding StaticFilesHandler (https://cloud.google.com/
# appengine/docs/admin-api/reference/rest/v1beta/apps.services.versions#
# staticfileshandler) does not specify its own expiration time.Only returned in
# GET requests if view=FULL is set.
# Corresponds to the JSON property `defaultExpiration`
# @return [String]
attr_accessor :default_expiration
# Code and application artifacts used to deploy a version to App Engine.
# Corresponds to the JSON property `deployment`
# @return [Google::Apis::AppengineV1beta::Deployment]
attr_accessor :deployment
# Total size in bytes of all the files that are included in this version and
# currently hosted on the App Engine disk.@OutputOnly
# Corresponds to the JSON property `diskUsageBytes`
# @return [Fixnum]
attr_accessor :disk_usage_bytes
# Cloud Endpoints (https://cloud.google.com/endpoints) configuration. The
# Endpoints API Service provides tooling for serving Open API and gRPC endpoints
# via an NGINX proxy. Only valid for App Engine Flexible environment deployments.
# The fields here refer to the name and configuration ID of a "service" resource
# in the Service Management API (https://cloud.google.com/service-management/
# overview).
# Corresponds to the JSON property `endpointsApiService`
# @return [Google::Apis::AppengineV1beta::EndpointsApiService]
attr_accessor :endpoints_api_service
# The entrypoint for the application.
# Corresponds to the JSON property `entrypoint`
# @return [Google::Apis::AppengineV1beta::Entrypoint]
attr_accessor :entrypoint
# App Engine execution environment for this version.Defaults to standard.
# Corresponds to the JSON property `env`
# @return [String]
attr_accessor :env
# Environment variables available to the application.Only returned in GET
# requests if view=FULL is set.
# Corresponds to the JSON property `envVariables`
# @return [Hash<String,String>]
attr_accessor :env_variables
# Custom static error pages. Limited to 10KB per page.Only returned in GET
# requests if view=FULL is set.
# Corresponds to the JSON property `errorHandlers`
# @return [Array<Google::Apis::AppengineV1beta::ErrorHandler>]
attr_accessor :error_handlers
# An ordered list of URL-matching patterns that should be applied to incoming
# requests. The first matching URL handles the request and other request
# handlers are not attempted.Only returned in GET requests if view=FULL is set.
# Corresponds to the JSON property `handlers`
# @return [Array<Google::Apis::AppengineV1beta::UrlMap>]
attr_accessor :handlers
# Health checking configuration for VM instances. Unhealthy instances are killed
# and replaced with new instances. Only applicable for instances in App Engine
# flexible environment.
# Corresponds to the JSON property `healthCheck`
# @return [Google::Apis::AppengineV1beta::HealthCheck]
attr_accessor :health_check
# Relative name of the version within the service. Example: v1. Version names
# can contain only lowercase letters, numbers, or hyphens. Reserved names: "
# default", "latest", and any name with the prefix "ah-".
# Corresponds to the JSON property `id`
# @return [String]
attr_accessor :id
# Before an application can receive email or XMPP messages, the application must
# be configured to enable the service.
# Corresponds to the JSON property `inboundServices`
# @return [Array<String>]
attr_accessor :inbound_services
# Instance class that is used to run this version. Valid values are:
# - AutomaticScaling: F1, F2, F4, F4_1G
# - ManualScaling or BasicScaling: B1, B2, B4, B8, B4_1G
# Defaults to F1 for AutomaticScaling and B1 for ManualScaling or BasicScaling.
# Corresponds to the JSON property `instanceClass`
# @return [String]
attr_accessor :instance_class
# Configuration for third-party Python runtime libraries that are required by
# the application.Only returned in GET requests if view=FULL is set.
# Corresponds to the JSON property `libraries`
# @return [Array<Google::Apis::AppengineV1beta::Library>]
attr_accessor :libraries
# Health checking configuration for VM instances. Unhealthy instances are killed
# and replaced with new instances.
# Corresponds to the JSON property `livenessCheck`
# @return [Google::Apis::AppengineV1beta::LivenessCheck]
attr_accessor :liveness_check
# A service with manual scaling runs continuously, allowing you to perform
# complex initialization and rely on the state of its memory over time.
# Corresponds to the JSON property `manualScaling`
# @return [Google::Apis::AppengineV1beta::ManualScaling]
attr_accessor :manual_scaling
# Full path to the Version resource in the API. Example: apps/myapp/services/
# default/versions/v1.@OutputOnly
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Extra network settings. Only applicable in the App Engine flexible environment.
# Corresponds to the JSON property `network`
# @return [Google::Apis::AppengineV1beta::Network]
attr_accessor :network
# Files that match this pattern will not be built into this version. Only
# applicable for Go runtimes.Only returned in GET requests if view=FULL is set.
# Corresponds to the JSON property `nobuildFilesRegex`
# @return [String]
attr_accessor :nobuild_files_regex
# Readiness checking configuration for VM instances. Unhealthy instances are
# removed from traffic rotation.
# Corresponds to the JSON property `readinessCheck`
# @return [Google::Apis::AppengineV1beta::ReadinessCheck]
attr_accessor :readiness_check
# Machine resources for a version.
# Corresponds to the JSON property `resources`
# @return [Google::Apis::AppengineV1beta::Resources]
attr_accessor :resources
# Desired runtime. Example: python27.
# Corresponds to the JSON property `runtime`
# @return [String]
attr_accessor :runtime
# The version of the API in the given runtime environment. Please see the app.
# yaml reference for valid values at https://cloud.google.com/appengine/docs/
# standard/<language>/config/appref
# Corresponds to the JSON property `runtimeApiVersion`
# @return [String]
attr_accessor :runtime_api_version
# The channel of the runtime to use. Only available for some runtimes. Defaults
# to the default channel.
# Corresponds to the JSON property `runtimeChannel`
# @return [String]
attr_accessor :runtime_channel
# Current serving status of this version. Only the versions with a SERVING
# status create instances and can be billed.SERVING_STATUS_UNSPECIFIED is an
# invalid value. Defaults to SERVING.
# Corresponds to the JSON property `servingStatus`
# @return [String]
attr_accessor :serving_status
# Whether multiple requests can be dispatched to this version at once.
# Corresponds to the JSON property `threadsafe`
# @return [Boolean]
attr_accessor :threadsafe
alias_method :threadsafe?, :threadsafe
# Serving URL for this version. Example: "https://myversion-dot-myservice-dot-
# myapp.appspot.com"@OutputOnly
# Corresponds to the JSON property `versionUrl`
# @return [String]
attr_accessor :version_url
# Whether to deploy this version in a container on a virtual machine.
# Corresponds to the JSON property `vm`
# @return [Boolean]
attr_accessor :vm
alias_method :vm?, :vm
# VPC access connector specification.
# Corresponds to the JSON property `vpcAccessConnector`
# @return [Google::Apis::AppengineV1beta::VpcAccessConnector]
attr_accessor :vpc_access_connector
# The Google Compute Engine zones that are supported by this version in the App
# Engine flexible environment.
# Corresponds to the JSON property `zones`
# @return [Array<String>]
attr_accessor :zones
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@api_config = args[:api_config] if args.key?(:api_config)
@automatic_scaling = args[:automatic_scaling] if args.key?(:automatic_scaling)
@basic_scaling = args[:basic_scaling] if args.key?(:basic_scaling)
@beta_settings = args[:beta_settings] if args.key?(:beta_settings)
@create_time = args[:create_time] if args.key?(:create_time)
@created_by = args[:created_by] if args.key?(:created_by)
@default_expiration = args[:default_expiration] if args.key?(:default_expiration)
@deployment = args[:deployment] if args.key?(:deployment)
@disk_usage_bytes = args[:disk_usage_bytes] if args.key?(:disk_usage_bytes)
@endpoints_api_service = args[:endpoints_api_service] if args.key?(:endpoints_api_service)
@entrypoint = args[:entrypoint] if args.key?(:entrypoint)
@env = args[:env] if args.key?(:env)
@env_variables = args[:env_variables] if args.key?(:env_variables)
@error_handlers = args[:error_handlers] if args.key?(:error_handlers)
@handlers = args[:handlers] if args.key?(:handlers)
@health_check = args[:health_check] if args.key?(:health_check)
@id = args[:id] if args.key?(:id)
@inbound_services = args[:inbound_services] if args.key?(:inbound_services)
@instance_class = args[:instance_class] if args.key?(:instance_class)
@libraries = args[:libraries] if args.key?(:libraries)
@liveness_check = args[:liveness_check] if args.key?(:liveness_check)
@manual_scaling = args[:manual_scaling] if args.key?(:manual_scaling)
@name = args[:name] if args.key?(:name)
@network = args[:network] if args.key?(:network)
@nobuild_files_regex = args[:nobuild_files_regex] if args.key?(:nobuild_files_regex)
@readiness_check = args[:readiness_check] if args.key?(:readiness_check)
@resources = args[:resources] if args.key?(:resources)
@runtime = args[:runtime] if args.key?(:runtime)
@runtime_api_version = args[:runtime_api_version] if args.key?(:runtime_api_version)
@runtime_channel = args[:runtime_channel] if args.key?(:runtime_channel)
@serving_status = args[:serving_status] if args.key?(:serving_status)
@threadsafe = args[:threadsafe] if args.key?(:threadsafe)
@version_url = args[:version_url] if args.key?(:version_url)
@vm = args[:vm] if args.key?(:vm)
@vpc_access_connector = args[:vpc_access_connector] if args.key?(:vpc_access_connector)
@zones = args[:zones] if args.key?(:zones)
end
end
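# Usage sketch (illustrative): a minimal Version payload. Output-only fields such
# as name, create_time and version_url are set by the service, not by callers; the
# runtime and instance class values below are examples only.
#
#   version = Google::Apis::AppengineV1beta::Version.new(
#     id: 'v1',
#     runtime: 'python39',
#     instance_class: 'F1',
#     env_variables: { 'ENVIRONMENT' => 'production' },
#     handlers: handlers                   # e.g. the UrlMap array sketched earlier
#   )
#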
# Volumes mounted within the app container. Only applicable in the App Engine
# flexible environment.
class Volume
include Google::Apis::Core::Hashable
# Unique name for the volume.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
# Volume size in gigabytes.
# Corresponds to the JSON property `sizeGb`
# @return [Float]
attr_accessor :size_gb
# Underlying volume type, e.g. 'tmpfs'.
# Corresponds to the JSON property `volumeType`
# @return [String]
attr_accessor :volume_type
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@name = args[:name] if args.key?(:name)
@size_gb = args[:size_gb] if args.key?(:size_gb)
@volume_type = args[:volume_type] if args.key?(:volume_type)
end
end
# VPC access connector specification.
class VpcAccessConnector
include Google::Apis::Core::Hashable
# Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/
# us-central1/connectors/c1.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@name = args[:name] if args.key?(:name)
end
end
# The zip file information for a zip deployment.
class ZipInfo
include Google::Apis::Core::Hashable
# An estimate of the number of files in a zip for a zip deployment. If set, must
# be greater than or equal to the actual number of files. Used for optimizing
# performance; if not provided, deployment may be slow.
# Corresponds to the JSON property `filesCount`
# @return [Fixnum]
attr_accessor :files_count
# URL of the zip file to deploy from. Must be a URL to a resource in Google
# Cloud Storage in the form 'http(s)://storage.googleapis.com/<bucket>/<object>'.
# Corresponds to the JSON property `sourceUrl`
# @return [String]
attr_accessor :source_url
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@files_count = args[:files_count] if args.key?(:files_count)
@source_url = args[:source_url] if args.key?(:source_url)
end
end
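# Usage sketch (illustrative): a zip deployment source. The bucket and object names
# are placeholders; files_count is an optional upper-bound estimate as noted above.
#
#   zip = Google::Apis::AppengineV1beta::ZipInfo.new(
#     source_url: 'https://storage.googleapis.com/my-bucket/my-app.zip',
#     files_count: 200
#   )
#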
end
end
end
| 42.730435 | 139 | 0.642254 |
ff8052189c1cbeeaf0cf0da41795d10e9b396845 | 1,461 | module ThreadedTestString
class ::String
include Doing::Color
def highlight_errors
cols = `tput cols`.strip.to_i
string = dup
errs = string.scan(/(?<==\n)(?:Failure|Error):.*?(?=\n=+)/m)
errs.map! do |error|
err = error.dup
err.gsub!(%r{^(/.*?/)([^/:]+):(\d+):in (.*?)$}) do
m = Regexp.last_match
"#{m[1].white}#{m[2].bold.white}:#{m[3].yellow}:in #{m[4].cyan}"
end
err.gsub!(/(Failure|Error): (.*?)\((.*?)\):\n (.*?)(?=\n)/m) do
m = Regexp.last_match
[
m[1].bold.boldbgred.white,
m[3].bold.boldbgcyan.white,
m[2].bold.boldbgyellow.black,
" #{m[4]} ".bold.boldbgwhite.black.reset
].join(':'.boldblack.boldbgblack.reset)
end
err.gsub!(/(<.*?>) (was expected to) (.*?)\n( *<.*?>)./m) do
m = Regexp.last_match
"#{m[1].bold.green} #{m[2].white} #{m[3].boldwhite.boldbgred.reset}\n#{m[4].bold.white}"
end
err.gsub!(/(Finished in) ([\d.]+) (seconds)/) do
m = Regexp.last_match
"#{m[1].green} #{m[2].bold.white} #{m[3].green}"
end
err.gsub!(/(\d+) (failures)/) do
m = Regexp.last_match
"#{m[1].bold.red} #{m[2].red}"
end
err.gsub!(/100% passed/) do |m|
m.bold.green
end
err
end
errs.join("\n#{('=' * cols).blue}\n")
end
end
end
| 28.647059 | 98 | 0.461328 |
9100c176552e3b1dd6ebf53ee61213f97d80296b | 1,096 | require 'test_helper'
class ProductsControllerTest < ActionController::TestCase
setup do
@product = products(:one)
end
test "should get index" do
get :index
assert_response :success
assert_not_nil assigns(:products)
end
test "should get new" do
get :new
assert_response :success
end
test "should create product" do
assert_difference('Product.count') do
post :create, :product => @product.attributes
end
assert_redirected_to product_path(assigns(:product))
end
test "should show product" do
get :show, :id => @product.to_param
assert_response :success
end
test "should get edit" do
get :edit, :id => @product.to_param
assert_response :success
end
test "should update product" do
put :update, :id => @product.to_param, :product => @product.attributes
assert_redirected_to product_path(assigns(:product))
end
test "should destroy product" do
assert_difference('Product.count', -1) do
delete :destroy, :id => @product.to_param
end
assert_redirected_to products_path
end
end
| 21.92 | 74 | 0.69708 |
7a30e1dff06127e82196518992e9bd4105df5b29 | 1,779 | #!/usr/bin/env ruby
#
# $Id$
# $Revision$
#
# This script lists each payload module along with its length
# NOTE: No encoding or BadChar handling is performed
#
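# Example invocation (script name and values are illustrative): var=val pairs on
# the command line are imported into each payload's datastore below, so you can
# filter by architecture/platform or pre-set required options, e.g.
#   ruby payload_lengths.rb ARCH=x86 PLATFORM=windows LHOST=192.0.2.1
#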
msfbase = __FILE__
while File.symlink?(msfbase)
msfbase = File.expand_path(File.readlink(msfbase), File.dirname(msfbase))
end
$:.unshift(File.expand_path(File.join(File.dirname(msfbase), '..', 'lib')))
require 'msfenv'
$:.unshift(ENV['MSF_LOCAL_LIB']) if ENV['MSF_LOCAL_LIB']
require 'rex'
require 'msf/ui'
require 'msf/base'
Indent = ' '
# Initialize the simplified framework instance.
$framework = Msf::Simple::Framework.create(
:module_types => [ Msf::MODULE_PAYLOAD ],
'DisableDatabase' => true
)
# Process special var/val pairs...
Msf::Ui::Common.process_cli_arguments($framework, ARGV)
options = ARGV.join(',')
tbl = Rex::Ui::Text::Table.new(
'Header' => 'Payload Lengths',
'Indent' => Indent.length,
'Columns' => [ 'Payload', 'Length' ]
)
enc = nil
$framework.payloads.each_module { |payload_name, mod|
len = 'Error: Unknown error!'
begin
# Create the payload instance
payload = mod.new
raise "Invalid payload" if not payload
# Set the variables from the cmd line
payload.datastore.import_options_from_s(options)
# Skip non-specified architectures
if (ds_arch = payload.datastore['ARCH'])
next if not payload.arch?(ds_arch)
end
# Skip non-specified platforms
if (ds_plat = payload.datastore['PLATFORM'])
ds_plat = Msf::Module::PlatformList.transform(ds_plat)
next if not payload.platform.supports?(ds_plat)
end
len = payload.size
if len > 0
len = len.to_s
else
len = "Error: Empty payload"
end
rescue
len = "Error: #{$!}"
end
tbl << [ payload_name, len ]
}
puts tbl.to_s
| 21.695122 | 75 | 0.672288 |
bf60b96938dc42d1e6c4d8c7ea47f759aa3a2f9f | 902 | module Users
module ParticipableService
extend ActiveSupport::Concern
included do
attr_reader :noteable
end
def noteable_owner
return [] unless noteable && noteable.author.present?
[as_hash(noteable.author)]
end
def participants_in_noteable
return [] unless noteable
users = noteable.participants(current_user)
sorted(users)
end
def sorted(users)
users.uniq.to_a.compact.sort_by(&:username).map do |user|
as_hash(user)
end
end
def groups
current_user.authorized_groups.sort_by(&:path).map do |group|
count = group.users.count
{ username: group.full_path, name: group.full_name, count: count, avatar_url: group.avatar_url }
end
end
private
def as_hash(user)
{ username: user.username, name: user.name, avatar_url: user.avatar_url }
end
end
end
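# Usage sketch (illustrative): this concern expects the including service to
# expose `current_user` and to populate `@noteable`; the class below is
# hypothetical and not part of this codebase.
#
#   class ParticipantsService
#     include Users::ParticipableService
#
#     def initialize(current_user, noteable)
#       @current_user = current_user
#       @noteable = noteable
#     end
#
#     def execute
#       noteable_owner + participants_in_noteable + groups
#     end
#
#     private
#
#     attr_reader :current_user
#   end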
| 21.47619 | 104 | 0.659645 |
21d95093e41908d28f66ee86057ecfe428c10c7d | 367 | class AddOgColumnsToGpArticleDocs < ActiveRecord::Migration[4.2]
def change
add_column :gp_article_docs, :og_type, :string
add_column :gp_article_docs, :og_title, :string
add_column :gp_article_docs, :og_description, :text
add_column :gp_article_docs, :og_description_use_body, :boolean
add_column :gp_article_docs, :og_image, :string
end
end
| 36.7 | 67 | 0.773842 |
1d6443e4feaf8392e5b4a947a7a64845aeb284db | 33,548 | # frozen_string_literal: true
#
# Cookbook:: aws-parallelcluster
# Attributes:: default
#
# Copyright:: 2013-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# Base dir
default['cluster']['base_dir'] = '/opt/parallelcluster'
default['cluster']['sources_dir'] = "#{node['cluster']['base_dir']}/sources"
default['cluster']['scripts_dir'] = "#{node['cluster']['base_dir']}/scripts"
default['cluster']['license_dir'] = "#{node['cluster']['base_dir']}/licenses"
default['cluster']['configs_dir'] = "#{node['cluster']['base_dir']}/configs"
default['cluster']['shared_dir'] = "#{node['cluster']['base_dir']}/shared"
# Cluster config
default['cluster']['cluster_s3_bucket'] = nil
default['cluster']['cluster_config_s3_key'] = nil
default['cluster']['cluster_config_version'] = nil
default['cluster']['change_set_s3_key'] = nil
default['cluster']['instance_types_data_s3_key'] = nil
default['cluster']['cluster_config_path'] = "#{node['cluster']['shared_dir']}/cluster-config.yaml"
default['cluster']['previous_cluster_config_path'] = "#{node['cluster']['shared_dir']}/previous-cluster-config.yaml"
default['cluster']['change_set_path'] = "#{node['cluster']['shared_dir']}/change-set.json"
default['cluster']['launch_templates_config_path'] = "#{node['cluster']['shared_dir']}/launch-templates-config.json"
default['cluster']['instance_types_data_path'] = "#{node['cluster']['shared_dir']}/instance-types-data.json"
default['cluster']['computefleet_status_path'] = "#{node['cluster']['shared_dir']}/computefleet-status.json"
default['cluster']['reserved_base_uid'] = 400
# Python Version
default['cluster']['python-version'] = '3.7.10'
# pcluster-specific pyenv system installation root
default['cluster']['system_pyenv_root'] = "#{node['cluster']['base_dir']}/pyenv"
# Virtualenv Cookbook Name
default['cluster']['cookbook_virtualenv'] = 'cookbook_virtualenv'
# Virtualenv Node Name
default['cluster']['node_virtualenv'] = 'node_virtualenv'
# Virtualenv AWSBatch Name
default['cluster']['awsbatch_virtualenv'] = 'awsbatch_virtualenv'
# Virtualenv cfn-bootstrap Name
default['cluster']['cfn_bootstrap_virtualenv'] = 'cfn_bootstrap_virtualenv'
# Cookbook Virtualenv Path
default['cluster']['cookbook_virtualenv_path'] = "#{node['cluster']['system_pyenv_root']}/versions/#{node['cluster']['python-version']}/envs/#{node['cluster']['cookbook_virtualenv']}"
# Node Virtualenv Path
default['cluster']['node_virtualenv_path'] = "#{node['cluster']['system_pyenv_root']}/versions/#{node['cluster']['python-version']}/envs/#{node['cluster']['node_virtualenv']}"
# AWSBatch Virtualenv Path
default['cluster']['awsbatch_virtualenv_path'] = "#{node['cluster']['system_pyenv_root']}/versions/#{node['cluster']['python-version']}/envs/#{node['cluster']['awsbatch_virtualenv']}"
# cfn-bootstrap Virtualenv Path
default['cluster']['cfn_bootstrap_virtualenv_path'] = "#{node['cluster']['system_pyenv_root']}/versions/#{node['cluster']['python-version']}/envs/#{node['cluster']['cfn_bootstrap_virtualenv']}"
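# With the default attributes above, these virtualenv paths resolve to
# /opt/parallelcluster/pyenv/versions/3.7.10/envs/<virtualenv name>, for example
# /opt/parallelcluster/pyenv/versions/3.7.10/envs/cookbook_virtualenv.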
# Intel Packages
default['cluster']['psxe']['version'] = '2020.4-17'
default['cluster']['psxe']['noarch_packages'] = %w(intel-tbb-common-runtime intel-mkl-common-runtime intel-psxe-common-runtime
intel-ipp-common-runtime intel-ifort-common-runtime intel-icc-common-runtime
intel-daal-common-runtime intel-comp-common-runtime)
default['cluster']['psxe']['archful_packages']['i486'] = %w(intel-tbb-runtime intel-tbb-libs-runtime intel-comp-runtime
intel-daal-runtime intel-icc-runtime intel-ifort-runtime
intel-ipp-runtime intel-mkl-runtime intel-openmp-runtime)
default['cluster']['psxe']['archful_packages']['x86_64'] = node['cluster']['psxe']['archful_packages']['i486'] + %w(intel-mpi-runtime)
default['cluster']['intelhpc']['platform_name'] = value_for_platform(
'centos' => {
'~>7' => 'el7',
}
)
default['cluster']['intelhpc']['packages'] = %w(intel-hpc-platform-core-intel-runtime-advisory intel-hpc-platform-compat-hpc-advisory
intel-hpc-platform-core intel-hpc-platform-core-advisory intel-hpc-platform-hpc-cluster
intel-hpc-platform-compat-hpc intel-hpc-platform-core-intel-runtime)
default['cluster']['intelhpc']['version'] = '2018.0-7'
default['cluster']['intelhpc']['dependencies'] = %w(compat-libstdc++-33 nscd nss-pam-ldapd openssl098e)
default['cluster']['intelpython2']['version'] = '2019.4-088'
default['cluster']['intelpython3']['version'] = '2020.2-902'
# Intel MPI
default['cluster']['intelmpi']['version'] = '2021.4.0'
default['cluster']['intelmpi']['full_version'] = "#{node['cluster']['intelmpi']['version']}.441"
default['cluster']['intelmpi']['modulefile'] = "/opt/intel/mpi/#{node['cluster']['intelmpi']['version']}/modulefiles/mpi"
default['cluster']['intelmpi']['kitchen_test_string'] = 'Version 2021.4'
default['cluster']['intelmpi']['qt_version'] = '5.15.2'
# Arm Performance Library
default['cluster']['armpl']['major_minor_version'] = '21.0'
default['cluster']['armpl']['patch_version'] = '0'
default['cluster']['armpl']['version'] = "#{node['cluster']['armpl']['major_minor_version']}.#{node['cluster']['armpl']['patch_version']}"
default['cluster']['armpl']['gcc']['major_minor_version'] = '9.3'
default['cluster']['armpl']['gcc']['patch_version'] = '0'
default['cluster']['armpl']['gcc']['url'] = [
'https://ftp.gnu.org/gnu/gcc',
"gcc-#{node['cluster']['armpl']['gcc']['major_minor_version']}.#{node['cluster']['armpl']['gcc']['patch_version']}",
"gcc-#{node['cluster']['armpl']['gcc']['major_minor_version']}.#{node['cluster']['armpl']['gcc']['patch_version']}.tar.gz",
].join('/')
default['cluster']['armpl']['platform'] = value_for_platform(
'centos' => { '~>7' => 'RHEL-7' },
'amazon' => { '2' => 'RHEL-8' },
'ubuntu' => {
'18.04' => 'Ubuntu-18.04',
'20.04' => 'Ubuntu-20.04',
}
)
default['cluster']['armpl']['url'] = [
'archives/armpl',
node['cluster']['armpl']['platform'],
"arm-performance-libraries_#{node['cluster']['armpl']['version']}_#{node['cluster']['armpl']['platform']}_gcc-#{node['cluster']['armpl']['gcc']['major_minor_version']}.tar",
].join('/')
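# With the default attributes above this joins to, for example on Ubuntu 20.04,
# archives/armpl/Ubuntu-20.04/arm-performance-libraries_21.0.0_Ubuntu-20.04_gcc-9.3.tar
# (a relative key, presumably combined with a base download URL elsewhere).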
# Python packages
default['cluster']['parallelcluster-version'] = '3.2.0'
default['cluster']['parallelcluster-cookbook-version'] = '3.2.0'
default['cluster']['parallelcluster-node-version'] = '3.2.0'
default['cluster']['parallelcluster-awsbatch-cli-version'] = '1.0.0'
# URLs to software packages used during install recipes
# Slurm software
default['cluster']['slurm_plugin_dir'] = '/etc/parallelcluster/slurm_plugin'
default['cluster']['slurm']['version'] = '21-08-8-2'
default['cluster']['slurm']['url'] = "https://github.com/SchedMD/slurm/archive/slurm-#{node['cluster']['slurm']['version']}.tar.gz"
default['cluster']['slurm']['sha1'] = 'f7687c11f024fbbe5399b93906d1179adc5c3fb6'
default['cluster']['slurm']['user'] = 'slurm'
default['cluster']['slurm']['user_id'] = node['cluster']['reserved_base_uid'] + 1
default['cluster']['slurm']['group'] = node['cluster']['slurm']['user']
default['cluster']['slurm']['group_id'] = node['cluster']['slurm']['user_id']
default['cluster']['slurm']['install_dir'] = "/opt/slurm"
# Scheduler plugin Configuration
default['cluster']['scheduler_plugin']['name'] = 'pcluster-scheduler-plugin'
default['cluster']['scheduler_plugin']['user'] = default['cluster']['scheduler_plugin']['name']
default['cluster']['scheduler_plugin']['user_id'] = node['cluster']['reserved_base_uid'] + 4
default['cluster']['scheduler_plugin']['group'] = default['cluster']['scheduler_plugin']['user']
default['cluster']['scheduler_plugin']['group_id'] = default['cluster']['scheduler_plugin']['user_id']
default['cluster']['scheduler_plugin']['system_user_id_start'] = node['cluster']['reserved_base_uid'] + 10
default['cluster']['scheduler_plugin']['system_group_id_start'] = default['cluster']['scheduler_plugin']['system_user_id_start']
# Scheduler plugin event handler
default['cluster']['scheduler_plugin']['home'] = '/home/pcluster-scheduler-plugin'
default['cluster']['scheduler_plugin']['handler_log_out'] = '/var/log/parallelcluster/scheduler-plugin.out.log'
default['cluster']['scheduler_plugin']['handler_log_err'] = '/var/log/parallelcluster/scheduler-plugin.err.log'
default['cluster']['scheduler_plugin']['shared_dir'] = "#{node['cluster']['shared_dir']}/scheduler-plugin"
default['cluster']['scheduler_plugin']['local_dir'] = "#{node['cluster']['base_dir']}/scheduler-plugin"
default['cluster']['scheduler_plugin']['handler_dir'] = "#{node['cluster']['scheduler_plugin']['local_dir']}/.configs"
default['cluster']['scheduler_plugin']['scheduler_plugin_substack_outputs_path'] = "#{node['cluster']['shared_dir']}/scheduler-plugin-substack-outputs.json"
default['cluster']['scheduler_plugin']['python_version'] = '3.9.9'
default['cluster']['scheduler_plugin']['pyenv_root'] = "#{node['cluster']['scheduler_plugin']['shared_dir']}/pyenv"
default['cluster']['scheduler_plugin']['virtualenv'] = 'scheduler_plugin_virtualenv'
default['cluster']['scheduler_plugin']['virtualenv_path'] = [
node['cluster']['scheduler_plugin']['pyenv_root'],
'versions',
node['cluster']['scheduler_plugin']['python_version'],
'envs',
node['cluster']['scheduler_plugin']['virtualenv'],
].join('/')
# PMIx software
default['cluster']['pmix']['version'] = '3.2.3'
default['cluster']['pmix']['url'] = "https://github.com/openpmix/openpmix/releases/download/v#{node['cluster']['pmix']['version']}/pmix-#{node['cluster']['pmix']['version']}.tar.gz"
default['cluster']['pmix']['sha1'] = 'ed5c525baf1330d2303afb2b6bd2fd53ab0406a0'
# Munge
default['cluster']['munge']['munge_version'] = '0.5.14'
default['cluster']['munge']['munge_url'] = "https://github.com/dun/munge/archive/munge-#{node['cluster']['munge']['munge_version']}.tar.gz"
default['cluster']['munge']['user'] = 'munge'
default['cluster']['munge']['user_id'] = node['cluster']['reserved_base_uid'] + 2
default['cluster']['munge']['group'] = node['cluster']['munge']['user']
default['cluster']['munge']['group_id'] = node['cluster']['munge']['user_id']
# JWT
default['cluster']['jwt']['version'] = '1.12.0'
default['cluster']['jwt']['url'] = "https://github.com/benmcollins/libjwt/archive/refs/tags/v#{node['cluster']['jwt']['version']}.tar.gz"
default['cluster']['jwt']['sha1'] = '1c6fec984a8e0ca1122bfc3552a49f45bdb0c4e8'
# NVIDIA
default['cluster']['nvidia']['enabled'] = 'no'
default['cluster']['nvidia']['driver_version'] = '470.129.06'
default['cluster']['nvidia']['cuda_version'] = '11.4'
default['cluster']['nvidia']['driver_url_architecture_id'] = arm_instance? ? 'aarch64' : 'x86_64'
default['cluster']['nvidia']['cuda_url_architecture_id'] = arm_instance? ? 'linux_sbsa' : 'linux'
default['cluster']['nvidia']['driver_url'] = "https://us.download.nvidia.com/tesla/#{node['cluster']['nvidia']['driver_version']}/NVIDIA-Linux-#{node['cluster']['nvidia']['driver_url_architecture_id']}-#{node['cluster']['nvidia']['driver_version']}.run"
default['cluster']['nvidia']['cuda_url'] = "https://developer.download.nvidia.com/compute/cuda/11.4.4/local_installers/cuda_11.4.4_470.82.01_#{node['cluster']['nvidia']['cuda_url_architecture_id']}.run"
# NVIDIA fabric-manager
# The package name of Fabric Manager for alinux2 and centos7 is nvidia-fabric-manager-version
# For ubuntu, it is nvidia-fabricmanager-470_version
default['cluster']['nvidia']['fabricmanager']['package'] = value_for_platform(
'default' => "nvidia-fabric-manager",
'ubuntu' => { 'default' => "nvidia-fabricmanager-470" }
)
default['cluster']['nvidia']['fabricmanager']['repository_key'] = value_for_platform(
'default' => "D42D0685.pub",
'ubuntu' => { 'default' => "3bf863cc.pub" }
)
default['cluster']['nvidia']['fabricmanager']['version'] = value_for_platform(
'default' => node['cluster']['nvidia']['driver_version'],
# with apt a star is needed to match the package version
'ubuntu' => { 'default' => "#{node['cluster']['nvidia']['driver_version']}*" }
)
default['cluster']['nvidia']['fabricmanager']['repository_uri'] = value_for_platform(
'default' => "https://developer.download.nvidia._domain_/compute/cuda/repos/rhel7/x86_64",
'ubuntu' => { 'default' => "https://developer.download.nvidia._domain_/compute/cuda/repos/#{node['cluster']['base_os']}/x86_64" }
)
# NVIDIA GDRCopy
default['cluster']['nvidia']['gdrcopy']['version'] = '2.3'
default['cluster']['nvidia']['gdrcopy']['url'] = "https://github.com/NVIDIA/gdrcopy/archive/refs/tags/v#{node['cluster']['nvidia']['gdrcopy']['version']}.tar.gz"
default['cluster']['nvidia']['gdrcopy']['sha1'] = '8ee4f0e3c9d0454ff461742c69b0c0ee436e06e1'
default['cluster']['nvidia']['gdrcopy']['service'] = value_for_platform(
'ubuntu' => { 'default' => 'gdrdrv' },
'default' => 'gdrcopy'
)
# EFA
default['cluster']['efa']['installer_version'] = '1.16.0'
default['cluster']['efa']['installer_url'] = "https://efa-installer.amazonaws.com/aws-efa-installer-#{node['cluster']['efa']['installer_version']}.tar.gz"
default['cluster']['efa']['unsupported_aarch64_oses'] = %w(centos7)
# NICE DCV
default['cluster']['dcv_port'] = 8443
default['cluster']['dcv']['installed'] = 'yes'
default['cluster']['dcv']['version'] = '2022.0-12760'
if arm_instance?
default['cluster']['dcv']['supported_os'] = %w(centos7 ubuntu18 amazon2)
default['cluster']['dcv']['url_architecture_id'] = 'aarch64'
default['cluster']['dcv']['sha256sum'] = value_for_platform(
'centos' => {
'~>7' => "67c0260318916c12e63287c1e565d195b374590c1a90b027c405f34d0a6efa24",
},
'amazon' => { '2' => "67c0260318916c12e63287c1e565d195b374590c1a90b027c405f34d0a6efa24" },
'ubuntu' => { '18.04' => "0000bc8d51a695d48185ce31d514152e2788aba18c137d94b715912e9b092cab" }
)
else
default['cluster']['dcv']['supported_os'] = %w(centos7 ubuntu18 ubuntu20 amazon2)
default['cluster']['dcv']['url_architecture_id'] = 'x86_64'
default['cluster']['dcv']['sha256sum'] = value_for_platform(
'centos' => {
'~>7' => "c3f41fdbe4d9e5a5a92fe1619cdc22f015854f440012d291492c9fc8b0f0fce3",
},
'amazon' => { '2' => "c3f41fdbe4d9e5a5a92fe1619cdc22f015854f440012d291492c9fc8b0f0fce3" },
'ubuntu' => {
'18.04' => "d442b3c0a24ee03ec74e7f91f92c9b7eb260810085b642c04a67f6cc9b6d74ef",
'20.04' => "18546e3ce84e6790ad74159d0cb6088684477656520155c7f44f971b412b0db5",
}
)
end
if platform?('ubuntu')
# Unlike the other supported OSs, the DCV package names for Ubuntu use different architecture abbreviations than those used in the download URLs.
default['cluster']['dcv']['package_architecture_id'] = arm_instance? ? 'arm64' : 'amd64'
end
default['cluster']['dcv']['package'] = value_for_platform(
'centos' => {
'~>7' => "nice-dcv-#{node['cluster']['dcv']['version']}-el7-#{node['cluster']['dcv']['url_architecture_id']}",
},
'amazon' => { '2' => "nice-dcv-#{node['cluster']['dcv']['version']}-el7-#{node['cluster']['dcv']['url_architecture_id']}" },
'ubuntu' => {
'default' => "nice-dcv-#{node['cluster']['dcv']['version']}-#{node['cluster']['base_os']}-#{node['cluster']['dcv']['url_architecture_id']}",
}
)
default['cluster']['dcv']['server']['version'] = '2022.0.12760-1'
default['cluster']['dcv']['server'] = value_for_platform( # NICE DCV server package
'centos' => {
'~>7' => "nice-dcv-server-#{node['cluster']['dcv']['server']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm",
},
'amazon' => { '2' => "nice-dcv-server-#{node['cluster']['dcv']['server']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm" },
'ubuntu' => {
'default' => "nice-dcv-server_#{node['cluster']['dcv']['server']['version']}_#{node['cluster']['dcv']['package_architecture_id']}.#{node['cluster']['base_os']}.deb",
}
)
default['cluster']['dcv']['xdcv']['version'] = '2022.0.424-1'
default['cluster']['dcv']['xdcv'] = value_for_platform( # required to create virtual sessions
'centos' => {
'~>7' => "nice-xdcv-#{node['cluster']['dcv']['xdcv']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm",
},
'amazon' => { '2' => "nice-xdcv-#{node['cluster']['dcv']['xdcv']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm" },
'ubuntu' => {
'default' => "nice-xdcv_#{node['cluster']['dcv']['xdcv']['version']}_#{node['cluster']['dcv']['package_architecture_id']}.#{node['cluster']['base_os']}.deb",
}
)
default['cluster']['dcv']['gl']['version'] = '2022.0.961-1'
default['cluster']['dcv']['gl']['installer'] = value_for_platform( # required to enable GPU sharing
'centos' => {
'~>7' => "nice-dcv-gl-#{node['cluster']['dcv']['gl']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm",
},
'amazon' => { '2' => "nice-dcv-gl-#{node['cluster']['dcv']['gl']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm" },
'ubuntu' => {
'default' => "nice-dcv-gl_#{node['cluster']['dcv']['gl']['version']}_#{node['cluster']['dcv']['package_architecture_id']}.#{node['cluster']['base_os']}.deb",
}
)
default['cluster']['dcv']['web_viewer']['version'] = '2022.0.12760-1'
default['cluster']['dcv']['web_viewer'] = value_for_platform( # required to enable WEB client
'centos' => {
'~>7' => "nice-dcv-web-viewer-#{node['cluster']['dcv']['web_viewer']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm",
},
'amazon' => { '2' => "nice-dcv-web-viewer-#{node['cluster']['dcv']['web_viewer']['version']}.el7.#{node['cluster']['dcv']['url_architecture_id']}.rpm" },
'ubuntu' => {
'default' => "nice-dcv-web-viewer_#{node['cluster']['dcv']['web_viewer']['version']}_#{node['cluster']['dcv']['package_architecture_id']}.#{node['cluster']['base_os']}.deb",
}
)
default['cluster']['dcv']['url'] = "https://d1uj6qtbmh3dt5.cloudfront.net/2022.0/Servers/#{node['cluster']['dcv']['package']}.tgz"
# DCV external authenticator configuration
default['cluster']['dcv']['authenticator']['user'] = "dcvextauth"
default['cluster']['dcv']['authenticator']['user_id'] = node['cluster']['reserved_base_uid'] + 3
default['cluster']['dcv']['authenticator']['group'] = node['cluster']['dcv']['authenticator']['user']
default['cluster']['dcv']['authenticator']['group_id'] = node['cluster']['dcv']['authenticator']['user_id']
default['cluster']['dcv']['authenticator']['user_home'] = "/home/#{node['cluster']['dcv']['authenticator']['user']}"
default['cluster']['dcv']['authenticator']['certificate'] = "/etc/parallelcluster/ext-auth-certificate.pem"
default['cluster']['dcv']['authenticator']['private_key'] = "/etc/parallelcluster/ext-auth-private-key.pem"
default['cluster']['dcv']['authenticator']['virtualenv'] = "dcv_authenticator_virtualenv"
default['cluster']['dcv']['authenticator']['virtualenv_path'] = [
node['cluster']['system_pyenv_root'],
'versions',
node['cluster']['python-version'],
'envs',
node['cluster']['dcv']['authenticator']['virtualenv'],
].join('/')
# CloudWatch Agent
default['cluster']['cloudwatch']['public_key_url'] = "https://s3.amazonaws.com/amazoncloudwatch-agent/assets/amazon-cloudwatch-agent.gpg"
default['cluster']['cloudwatch']['public_key_local_path'] = "#{node['cluster']['sources_dir']}/amazon-cloudwatch-agent.gpg"
# OpenSSH settings for AWS ParallelCluster instances
default['openssh']['server']['protocol'] = '2'
default['openssh']['server']['syslog_facility'] = 'AUTHPRIV'
default['openssh']['server']['permit_root_login'] = 'forced-commands-only'
default['openssh']['server']['password_authentication'] = 'no'
default['openssh']['server']['gssapi_authentication'] = 'yes'
default['openssh']['server']['gssapi_clean_up_credentials'] = 'yes'
default['openssh']['server']['subsystem'] = 'sftp /usr/libexec/openssh/sftp-server'
default['openssh']['server']['ciphers'] = 'aes128-cbc,aes192-cbc,aes256-cbc,aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]'
default['openssh']['server']['m_a_cs'] = '[email protected],[email protected],hmac-sha2-512,hmac-sha2-256'
default['openssh']['client']['gssapi_authentication'] = 'yes'
default['openssh']['client']['match'] = 'exec "ssh_target_checker.sh %h"'
# Disable StrictHostKeyChecking for target host in the cluster VPC
default['openssh']['client'][' _strict_host_key_checking'] = 'no'
# Do not store the server key in the known hosts file to avoid clashes when scaling,
# that is, when a new host gets the same IP as a previously terminated host
default['openssh']['client'][' _user_known_hosts_file'] = '/dev/null'
# ulimit settings
default['cluster']['filehandle_limit'] = 10_000
default['cluster']['memory_limit'] = 'unlimited'
# Platform defaults
#
default['cluster']['kernel_release'] = node['kernel']['release'] unless default['cluster'].key?('kernel_release')
case node['platform_family']
when 'rhel', 'amazon'
default['cluster']['kernel_devel_pkg']['name'] = "kernel-devel"
default['cluster']['kernel_devel_pkg']['version'] = node['kernel']['release'].chomp('.x86_64').chomp('.aarch64')
# Modulefile Directory
default['cluster']['modulefile_dir'] = "/usr/share/Modules/modulefiles"
# MODULESHOME
default['cluster']['moduleshome'] = "/usr/share/Modules"
# Config file used to set default MODULEPATH list
default['cluster']['modulepath_config_file'] = value_for_platform(
'centos' => {
'~>7' => "#{node['cluster']['moduleshome']}/init/.modulespath",
},
'amazon' => { 'default' => "#{node['cluster']['moduleshome']}/init/.modulespath" }
)
case node['platform']
when 'centos', 'redhat', 'scientific'
default['cluster']['base_packages'] = %w(vim ksh tcsh zsh openssl-devel ncurses-devel pam-devel net-tools openmotif-devel
libXmu-devel hwloc-devel libdb-devel tcl-devel automake autoconf pyparted libtool
httpd boost-devel redhat-lsb mlocate lvm2 R atlas-devel
blas-devel libffi-devel openssl-devel dkms mariadb-devel libedit-devel
libical-devel postgresql-devel postgresql-server sendmail libxml2-devel libglvnd-devel
mdadm python python-pip libssh2-devel libgcrypt-devel libevent-devel glibc-static bind-utils
iproute NetworkManager-config-routing-rules python3 python3-pip iptables libcurl-devel yum-plugin-versionlock
coreutils moreutils sssd sssd-tools sssd-ldap curl)
default['cluster']['rhel']['extra_repo'] = 'rhui-REGION-rhel-server-optional'
if node['platform_version'].to_i == 7 && node['kernel']['machine'] == 'aarch64'
# Do not install bind-utils on centos7+arm due to issue with package checksum
default['cluster']['base_packages'].delete('bind-utils')
end
when 'amazon'
default['cluster']['base_packages'] = %w(vim ksh tcsh zsh openssl-devel ncurses-devel pam-devel net-tools openmotif-devel
libXmu-devel hwloc-devel libdb-devel tcl-devel automake autoconf pyparted libtool
httpd boost-devel system-lsb mlocate atlas-devel glibc-static iproute
libffi-devel dkms mysql-devel libedit-devel postgresql-devel postgresql-server
sendmail cmake byacc libglvnd-devel mdadm libgcrypt-devel libevent-devel
libxml2-devel perl-devel tar gzip bison flex gcc gcc-c++ patch
rpm-build rpm-sign system-rpm-config cscope ctags diffstat doxygen elfutils
gcc-gfortran git indent intltool patchutils rcs subversion swig systemtap curl
jq wget python-pip NetworkManager-config-routing-rules libibverbs-utils
librdmacm-utils python3 python3-pip iptables libcurl-devel yum-plugin-versionlock
coreutils moreutils sssd sssd-tools sssd-ldap)
# Install R via amazon linux extras
default['cluster']['alinux_extras'] = ['R3.4']
end
default['cluster']['chrony']['service'] = "chronyd"
default['cluster']['chrony']['conf'] = "/etc/chrony.conf"
when 'debian'
default['openssh']['server']['subsystem'] = 'sftp internal-sftp'
default['cluster']['base_packages'] = %w(vim ksh tcsh zsh libssl-dev ncurses-dev libpam-dev net-tools libhwloc-dev dkms
tcl-dev automake autoconf libtool librrd-dev libapr1-dev libconfuse-dev
apache2 libboost-dev libdb-dev tcsh libncurses5-dev libpam0g-dev libxt-dev
libmotif-dev libxmu-dev libxft-dev libhwloc-dev man-db lvm2 python
r-base libblas-dev libffi-dev libxml2-dev mdadm
libgcrypt20-dev libmysqlclient-dev libevent-dev iproute2 python3 python3-pip
libatlas-base-dev libglvnd-dev iptables libcurl4-openssl-dev
coreutils moreutils sssd sssd-tools sssd-ldap curl)
case node['platform_version']
when '18.04'
default['cluster']['base_packages'].push('python-pip', 'python-parted')
when '20.04'
default['cluster']['base_packages'].push('python3-parted')
end
# Modulefile Directory
default['cluster']['modulefile_dir'] = "/usr/share/modules/modulefiles"
# MODULESHOME
default['cluster']['moduleshome'] = "/usr/share/modules"
# Config file used to set default MODULEPATH list
default['cluster']['modulepath_config_file'] = "#{node['cluster']['moduleshome']}/init/.modulespath"
default['cluster']['kernel_headers_pkg'] = "linux-headers-#{node['kernel']['release']}"
default['cluster']['chrony']['service'] = "chrony"
default['cluster']['chrony']['conf'] = "/etc/chrony/chrony.conf"
if Chef::VersionConstraint.new('>= 15.04').include?(node['platform_version'])
default['nfs']['service_provider']['idmap'] = Chef::Provider::Service::Systemd
default['nfs']['service_provider']['portmap'] = Chef::Provider::Service::Systemd
default['nfs']['service_provider']['lock'] = Chef::Provider::Service::Systemd
default['nfs']['service']['lock'] = 'rpc-statd'
default['nfs']['service']['idmap'] = 'nfs-idmapd'
end
end
# Default NFS mount options
default['cluster']['nfs']['hard_mount_options'] = 'hard,_netdev,noatime'
# Lustre defaults (for CentOS >=7.7 and Ubuntu)
default['cluster']['lustre']['public_key'] = value_for_platform(
'centos' => { '>=7.7' => "https://fsx-lustre-client-repo-public-keys.s3.amazonaws.com/fsx-rpm-public-key.asc" },
'ubuntu' => { 'default' => "https://fsx-lustre-client-repo-public-keys.s3.amazonaws.com/fsx-ubuntu-public-key.asc" }
)
# Lustre repo string is built following the official doc
# https://docs.aws.amazon.com/fsx/latest/LustreGuide/install-lustre-client.html
# 'centos' is used for arm and 'el' for x86_64
default['cluster']['lustre']['centos7']['base_url_prefix'] = arm_instance? ? 'centos' : 'el'
default['cluster']['lustre']['base_url'] = value_for_platform(
'centos' => {
# node['kernel']['machine'] contains the architecture: 'x86_64' or 'aarch64'
'default' => "https://fsx-lustre-client-repo.s3.amazonaws.com/#{default['cluster']['lustre']['centos7']['base_url_prefix']}/7.#{find_rhel_minor_version}/#{node['kernel']['machine']}/",
},
'ubuntu' => { 'default' => "https://fsx-lustre-client-repo.s3.amazonaws.com/ubuntu" }
)
# Lustre defaults (for CentOS 7.6 and 7.5 only)
default['cluster']['lustre']['version'] = value_for_platform(
'centos' => {
'7.6' => "2.10.6",
'7.5' => "2.10.5",
}
)
default['cluster']['lustre']['kmod_url'] = value_for_platform(
'centos' => {
'7.6' => "https://downloads.whamcloud.com/public/lustre/lustre-2.10.6/el7/client/RPMS/x86_64/kmod-lustre-client-2.10.6-1.el7.x86_64.rpm",
'7.5' => "https://downloads.whamcloud.com/public/lustre/lustre-2.10.5/el7.5.1804/client/RPMS/x86_64/kmod-lustre-client-2.10.5-1.el7.x86_64.rpm",
}
)
default['cluster']['lustre']['client_url'] = value_for_platform(
'centos' => {
'7.6' => "https://downloads.whamcloud.com/public/lustre/lustre-2.10.6/el7/client/RPMS/x86_64/lustre-client-2.10.6-1.el7.x86_64.rpm",
'7.5' => "https://downloads.whamcloud.com/public/lustre/lustre-2.10.5/el7.5.1804/client/RPMS/x86_64/lustre-client-2.10.5-1.el7.x86_64.rpm",
}
)
# Default gc_thresh values for performance at scale
default['cluster']['sysctl']['ipv4']['gc_thresh1'] = 0
default['cluster']['sysctl']['ipv4']['gc_thresh2'] = 15_360
default['cluster']['sysctl']['ipv4']['gc_thresh3'] = 16_384
# ParallelCluster internal variables (also in /etc/parallelcluster/cfnconfig)
default['cluster']['region'] = 'us-east-1'
default['cluster']['stack_name'] = nil
default['cluster']['preinstall'] = 'NONE'
default['cluster']['preinstall_args'] = 'NONE'
default['cluster']['postinstall'] = 'NONE'
default['cluster']['postinstall_args'] = 'NONE'
default['cluster']['scheduler'] = 'slurm'
default['cluster']['scheduler_slots'] = 'vcpus'
default['cluster']['scheduler_queue_name'] = nil
default['cluster']['instance_slots'] = '1'
default['cluster']['ephemeral_dir'] = '/scratch'
default['cluster']['ebs_shared_dirs'] = '/shared'
default['cluster']['proxy'] = 'NONE'
default['cluster']['node_type'] = nil
default['cluster']['cluster_user'] = 'ec2-user'
default['cluster']['head_node_private_ip'] = nil
default['cluster']['volume'] = ''
# ParallelCluster internal variables to configure active directory service
default['cluster']["directory_service"]["enabled"] = 'false'
default['cluster']["directory_service"]["domain_name"] = nil
default['cluster']["directory_service"]["domain_addr"] = nil
default['cluster']["directory_service"]["password_secret_arn"] = nil
default['cluster']["directory_service"]["domain_read_only_user"] = nil
default['cluster']["directory_service"]["ldap_tls_ca_cert"] = nil
default['cluster']["directory_service"]["ldap_tls_req_cert"] = nil
default['cluster']["directory_service"]["ldap_access_filter"] = nil
default['cluster']["directory_service"]["generate_ssh_keys_for_users"] = nil
default['cluster']['directory_service']['additional_sssd_configs'] = nil
default['cluster']['directory_service']['disabled_on_compute_nodes'] = nil
# Other ParallelCluster internal variables
default['cluster']['ddb_table'] = nil
default['cluster']['slurm_ddb_table'] = nil
default['cluster']['log_group_name'] = "NONE"
default['cluster']['disable_hyperthreading_manually'] = 'false'
default['cluster']['volume_fs_type'] = 'ext4'
default['cluster']['efs_shared_dirs'] = ''
default['cluster']['efs_fs_ids'] = ''
default['cluster']['cluster_admin_user'] = 'pcluster-admin'
default['cluster']['cluster_admin_user_id'] = node['cluster']['reserved_base_uid']
default['cluster']['cluster_admin_group'] = node['cluster']['cluster_admin_user']
default['cluster']['cluster_admin_group_id'] = node['cluster']['cluster_admin_user_id']
default['cluster']['fsx_shared_dirs'] = ''
default['cluster']['fsx_fs_ids'] = ''
default['cluster']['fsx_dns_names'] = ''
default['cluster']['fsx_mount_names'] = ''
default['cluster']['fsx_fs_types'] = ''
default['cluster']['fsx_volume_junction_paths'] = ''
default['cluster']['custom_node_package'] = nil
default['cluster']['custom_awsbatchcli_package'] = nil
default['cluster']['raid_shared_dir'] = ''
default['cluster']['raid_type'] = ''
default['cluster']['raid_vol_ids'] = ''
default['cluster']['dns_domain'] = nil
default['cluster']['use_private_hostname'] = 'false'
default['cluster']['add_node_hostnames_in_hosts_file'] = node['cluster']['use_private_hostname']
default['cluster']['skip_install_recipes'] = 'yes'
default['cluster']['enable_nss_slurm'] = node['cluster']['directory_service']['enabled']
default['cluster']['realmemory_to_ec2memory_ratio'] = 0.95
default['cluster']['slurm_node_reg_mem_percent'] = 75
# AWS domain
default['cluster']['aws_domain'] = aws_domain
# Official ami build
default['cluster']['is_official_ami_build'] = false
# Additional instance types data
default['cluster']['instance_types_data'] = nil
# IMDS
default['cluster']['head_node_imds_secured'] = 'true'
default['cluster']['head_node_imds_allowed_users'] = ['root', node['cluster']['cluster_admin_user'], node['cluster']['cluster_user']]
default['cluster']['head_node_imds_allowed_users'].append('dcv') if node['cluster']['dcv_enabled'] == 'head_node' && platform_supports_dcv?
default['cluster']['head_node_imds_allowed_users'].append(node['cluster']['scheduler_plugin']['user']) if node['cluster']['scheduler'] == 'plugin'
# Compute nodes bootstrap timeout
default['cluster']['compute_node_bootstrap_timeout'] = 1800
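# Illustrative only, not part of the original attribute file: recipes consume these
# defaults through the node object, and the value_for_platform entries above are
# resolved at compile time. A hypothetical sketch of fetching the DCV package named above:
#   remote_file "#{node['cluster']['sources_dir']}/dcv.tgz" do
#     source node['cluster']['dcv']['url']
#     mode '0644'
#   end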
| 59.376991 | 253 | 0.672618 |
33bbbbbf2c1becb3f26ddc75bbba93beb02cbf4c | 128 | module Skyslope
class ChecklistType < BaseModel
attribute :checklist_type_id
attribute :checklist_type_name
end
end
| 18.285714 | 34 | 0.789063 |
617ad0739a9a26a5575a668bbd6b7003eba61b92 | 796 | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
require 'spec_helper'
describe 'client.snapshot#delete_repository' do
let(:expected_args) do
[
'DELETE',
url,
{},
nil,
nil
]
end
let(:url) do
'_snapshot/foo'
end
it 'performs the request' do
expect(client_double.snapshot.delete_repository(repository: 'foo')).to eq({})
end
context 'when multiple indices are specified' do
let(:url) do
'_snapshot/foo,bar'
end
it 'performs the request' do
expect(client_double.snapshot.delete_repository(repository: ['foo','bar'])).to eq({})
end
end
end
| 20.947368 | 91 | 0.658291 |
e9a4edaa527959e07fbbcbf15086c06201b265a1 | 65 | module RIM
module Version
Version = "1.4.8"
end
end
| 6.5 | 18 | 0.584615 |
ed551521b1dce6c638e9a787b750014d0a8972a0 | 1,117 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe ::Gitlab::SubscriptionPortal, skip: Gitlab.jh? do
using RSpec::Parameterized::TableSyntax
where(:method_name, :test, :development, :result) do
:default_subscriptions_url | false | false | 'https://customers.gitlab.com'
:default_subscriptions_url | false | true | 'https://customers.stg.gitlab.com'
:default_subscriptions_url | true | false | 'https://customers.stg.gitlab.com'
:payment_form_url | false | false | 'https://customers.gitlab.com/payment_forms/cc_validation'
:payment_form_url | false | true | 'https://customers.stg.gitlab.com/payment_forms/cc_validation'
:payment_form_url | true | false | 'https://customers.stg.gitlab.com/payment_forms/cc_validation'
end
with_them do
subject { described_class.method(method_name).call }
before do
allow(Rails).to receive_message_chain(:env, :test?).and_return(test)
allow(Rails).to receive_message_chain(:env, :development?).and_return(development)
end
it { is_expected.to eq(result) }
end
end
| 39.892857 | 112 | 0.709937 |
1c0ec674016a5a04f05daf79a971891d336d625c | 4,248 | require_dependency "webinius_cms/application_controller"
module WebiniusCms
class PagesController < ApplicationController
layout 'webinius_cms/admin'
before_action :authorize, except: :show
before_action :set_page, only: [:show]
before_action :set_page_with_id, only: [:update, :edit, :destroy, :higher, :lower]
before_action :lookup_partials, only: [:new, :edit, :create, :update]
# GET /pages
# GET /pages.json
def index
@pages = Page.arrange(order: :position)
end
# GET /pages/1
# GET /pages/1.json
def show
render layout: 'webinius_cms/sites'
end
# GET /pages/new
def new
@page = Page.new(parent_id: params[:parent_id])
@page.documents.build
@page.images.build
end
# GET /pages/1/edit
def edit
@page.documents.build
@page.images.build
end
# POST /pages
# POST /pages.json
def create
@page = Page.new(page_params)
respond_to do |format|
if @page.save
format.html { redirect_to pages_url, notice: 'New Page was created.' }
format.json { render action: 'show', status: :created, location: @page }
else
@page.documents.build unless @page.documents.any?
@page.images.build unless @page.images.any?
format.html { render action: 'new' }
format.json { render json: @page.errors, status: :unprocessable_entity }
end
end
end
# PATCH/PUT /pages/1
# PATCH/PUT /pages/1.json
def update
@page.docs = params[:page][:docs] if params[:page][:docs].present?
@page.ancestry = nil
respond_to do |format|
if @page.update(page_params)
format.html { redirect_to pages_url, notice: 'Page was updated successfully.' }
format.json { head :no_content }
else
format.html { render action: 'edit' }
format.json { render json: @page.errors, status: :unprocessable_entity }
end
end
end
# DELETE /pages/1
# DELETE /pages/1.json
def destroy
@page.destroy
respond_to do |format|
format.html { redirect_to pages_url }
format.json { head :no_content }
end
end
def higher
@page.move_higher
redirect_to pages_path
end
def lower
@page.move_lower
redirect_to pages_path
end
private
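    # Collect the names of the page partials available under
    # app/views/webinius_cms/pages/partials, with underscores stripped from the
    # file names (note that gsub removes every underscore, not just the prefix).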
def lookup_partials
@partials = Dir[Rails.root.join("app", "views", 'webinius_cms', 'pages', 'partials', '*.html.erb').to_s].collect do |path|
File.basename(path, '.html.erb').gsub(/_/, '')
end
@partials << 'standard' if @partials.blank?
end
# Use callbacks to share common setup or constraints between actions.
def set_page
if params[:id]
@page = Page.where("properties @> hstore(?, ?)", "#{I18n.locale}_slug", params[:id]).first
if @page.blank?
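          # No page matched the full slug, so treat the last segment as a list
          # item slug and look the page up by the remaining segments.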
slug_parts = params[:id].split('/')
item_list_id = slug_parts.slice(-1)
page_id = slug_parts.slice(0, slug_parts.size-1)
@page = Page.where("properties @> hstore(?, ?)", "#{I18n.locale}_slug", page_id.join('/')).first
@list_item = ListItem.where("properties @> hstore(?, ?)", "#{I18n.locale}_slug", item_list_id).first
end
else
@page = Page.roots.online.first
end
end
def set_page_with_id
@page = Page.find(params[:id])
end
# Never trust parameters from the scary internet, only allow the white list through.
def page_params
langs = Language.all
dynamic_fields = %w[title content meta_description slug description].collect do |field|
lang_field = []
langs.each do |lang|
lang_field << "#{lang.code}_#{field}"
end
lang_field
end.flatten!
      params.require(:page).permit(:parent_id, :status, :map_address, :kind, :navigation_type,
                                   :remove_picture, :picture, :picture_cache, :docs, :is_news, *dynamic_fields, documents_attributes: [:id, :name, :document, :document_cache, :_destroy], images_attributes: [:id, :title, :image, :image_cache, :_destroy, :description])
end
end
end
| 31.466667 | 242 | 0.608051 |
39396c493fa0ec91f021e09f91da866526d61716 | 5,788 | =begin
PureCloud Platform API
With the PureCloud Platform API, you can control all aspects of your PureCloud environment. With the APIs you can access the system configuration, manage conversations and more.
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
License: UNLICENSED
https://help.mypurecloud.com/articles/terms-and-conditions/
Terms of Service: https://help.mypurecloud.com/articles/terms-and-conditions/
=end
require 'date'
module PureCloud
class QualifierMappingObservationQueryResponse
# A mapping from system presence to a list of organization presence ids
attr_accessor :system_to_organization_mappings
attr_accessor :results
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'system_to_organization_mappings' => :'systemToOrganizationMappings',
:'results' => :'results'
}
end
# Attribute type mapping.
def self.swagger_types
{
:'system_to_organization_mappings' => :'Hash<String, Array<String>>',
:'results' => :'Array<ObservationDataContainer>'
}
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
return unless attributes.is_a?(Hash)
# convert string to symbol for hash key
attributes = attributes.each_with_object({}){|(k,v), h| h[k.to_sym] = v}
if attributes.has_key?(:'systemToOrganizationMappings')
if (value = attributes[:'systemToOrganizationMappings']).is_a?(Array)
self.system_to_organization_mappings = value
end
end
if attributes.has_key?(:'results')
if (value = attributes[:'results']).is_a?(Array)
self.results = value
end
end
end
# Show invalid properties with the reasons. Usually used together with valid?
    # @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
return invalid_properties
end
    # Check to see if all the properties in the model are valid
    # @return true if the model is valid
    def valid?
      true
    end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
system_to_organization_mappings == o.system_to_organization_mappings &&
results == o.results
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Fixnum] Hash code
def hash
[system_to_organization_mappings, results].hash
end
# build the object from hash
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /^Array<(.*)>/i
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map{ |v| _deserialize($1, v) } )
else
#TODO show warning in debug mode
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
else
# data not found in attributes(hash), not an issue as the data can be optional
end
end
self
end
def _deserialize(type, value)
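    # Deserializes the data based on the declared swagger type: primitives are
    # coerced, Array/Hash types recurse, and anything else is treated as a
    # nested PureCloud model built via build_from_hash.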
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :BOOLEAN
if value.to_s =~ /^(true|t|yes|y|1)$/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
_model = Object.const_get("PureCloud").const_get(type).new
_model.build_from_hash(value)
end
end
def to_s
to_hash.to_s
end
    # to_body is an alias to to_hash (backward compatibility)
def to_body
to_hash
end
# return the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
next if value.nil?
hash[param] = _to_hash(value)
end
hash
end
# Method to output non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
def _to_hash(value)
if value.is_a?(Array)
value.compact.map{ |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 24.948276 | 177 | 0.60038 |
e865ed137e18741c8330c221d7642570da0d8e0f | 4,793 |
Pod::Spec.new do |spec|
# βββ Spec Metadata ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# These will help people to find your library, and whilst it
# can feel like a chore to fill in it's definitely to your advantage. The
# summary should be tweet-length, and the description more in depth.
#
spec.name = "TTTableIndexView"
spec.version = "1.0.0"
spec.summary = "TableIndexView"
# This description is used to generate tags and improve search results.
# * Think: What does it do? Why did you write it? What is the focus?
# * Try to keep it short, snappy and to the point.
# * Write the description between the DESC delimiters below.
# * Finally, don't worry about the indent, CocoaPods strips it!
spec.description = <<-DESC
customize the indexView of UITableView
DESC
spec.homepage = "https://github.com/heqiang945/TTTableIndexView"
# spec.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif"
# βββ Spec License βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# Licensing your code is important. See https://choosealicense.com for more info.
# CocoaPods will detect a license file if there is a named LICENSE*
# Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'.
#
spec.license = "MIT"
# spec.license = { :type => "MIT", :file => "FILE_LICENSE" }
# βββ Author Metadata βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# Specify the authors of the library, with email addresses. Email addresses
# of the authors are extracted from the SCM log. E.g. $ git log. CocoaPods also
# accepts just a name if you'd rather not provide an email address.
#
# Specify a social_media_url where others can refer to, for example a twitter
# profile URL.
#
spec.author = { "heqiang" => "[email protected]" }
# Or just: spec.author = "heqiang"
# spec.authors = { "heqiang" => "[email protected]" }
# spec.social_media_url = "https://twitter.com/heqiang"
# βββ Platform Specifics βββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# If this Pod runs only on iOS or OS X, then specify the platform and
# the deployment target. You can optionally include the target after the platform.
#
spec.platform = :ios, "11.0"
# When using multiple platforms
# spec.ios.deployment_target = "11.0"
# spec.osx.deployment_target = "10.7"
# spec.watchos.deployment_target = "2.0"
# spec.tvos.deployment_target = "9.0"
# βββ Source Location ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# Specify the location from where the source should be retrieved.
# Supports git, hg, bzr, svn and HTTP.
#
spec.source = { :git => "https://github.com/heqiang945/TTTableIndexView.git", :tag => "#{spec.version}" }
# βββ Source Code ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# CocoaPods is smart about how it includes source code. For source files
# giving a folder will include any swift, h, m, mm, c & cpp files.
# For header files it will include any header in the folder.
# Not including the public_header_files will make all headers public.
#
spec.source_files = "TTTableIndexViewDemo/TableViewSectionIndexView/*.{h,m}"
spec.exclude_files = ""
# βββ Resources ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# A list of resources included with the Pod. These are copied into the
# target bundle with a build phase script. Anything else will be cleaned.
# You can preserve files from being cleaned, please don't preserve
# non-essential files like tests, examples and documentation.
#
# spec.resource = "icon.png"
# spec.resources = "Resources/*.png"
# spec.preserve_paths = "FilesToSave", "MoreFilesToSave"
# βββ Project Linking ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# Link your library with frameworks, or libraries. Libraries do not include
# the lib prefix of their name.
#
# spec.framework = "SomeFramework"
# spec.frameworks = "SomeFramework", "AnotherFramework"
# spec.library = "iconv"
# spec.libraries = "iconv", "xml2"
# βββ Project Settings βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ #
#
# If your library depends on compiler flags you can set them in the xcconfig hash
# where they will only apply to your library. If you depend on other Podspecs
# you can include multiple dependencies to ensure it works.
spec.requires_arc = true
# spec.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
# spec.dependency "JSONKit", "~> 1.4"
end
| 36.587786 | 113 | 0.592948 |
019d7b967fc653978a8cb87f8d5a9f1b49e83d9b | 1,653 | class NatsStreamingServer < Formula
desc "Lightweight cloud messaging system"
homepage "https://nats.io"
url "https://github.com/nats-io/nats-streaming-server/archive/refs/tags/v0.22.1.tar.gz"
sha256 "116035f0c3c7e6154b7b1352d53ab16bd90b89afbce4afb70fe5d686ca4f24b0"
license "Apache-2.0"
head "https://github.com/nats-io/nats-streaming-server.git"
bottle do
rebuild 1
sha256 cellar: :any_skip_relocation, arm64_big_sur: "7be754512246cbc8d4b4885c94eb8210736b0794a98615e7fe006e31e7b62270"
sha256 cellar: :any_skip_relocation, big_sur: "cebeb9cf1dc610edc6ebe295618e83b64b8afbffd5d2a8451e10d6a22bba9394"
sha256 cellar: :any_skip_relocation, catalina: "5c160c9dfd4652d8eb58a975641968fdc302b60fb1a43a7c864f5a4ac0663b24"
sha256 cellar: :any_skip_relocation, mojave: "81e07ce9a896ea551e332bb831ed5d05282f4a7865377f41837330a567cb2c03"
sha256 cellar: :any_skip_relocation, x86_64_linux: "7df96690d23eb8bddfc8acee69810ee712568f81d64e53fa0849cc168420a72e"
end
depends_on "go" => :build
def install
system "go", "build", "-ldflags", "-s -w", "-trimpath", "-o", bin/"nats-streaming-server"
prefix.install_metafiles
end
service do
run opt_bin/"nats-streaming-server"
end
test do
pid = fork do
exec "#{bin}/nats-streaming-server --port=8085 --pid=#{testpath}/pid --log=#{testpath}/log"
end
sleep 3
begin
assert_match "INFO", shell_output("curl localhost:8085")
assert_predicate testpath/"log", :exist?
assert_match version.to_s, File.read(testpath/"log")
ensure
Process.kill "SIGINT", pid
Process.wait pid
end
end
end
| 36.733333 | 122 | 0.743497 |
bf23a71f2f69b41b2a59c62954c37a7fead4f6c0 | 867 | module Shoulda
module Matchers
module ActiveModel
# @private
module Helpers
def pretty_error_messages(object)
format_validation_errors(object.errors)
end
def format_validation_errors(errors)
list_items = errors.to_hash.keys.map do |attribute|
messages = errors[attribute]
"* #{attribute}: #{messages}"
end
list_items.join("\n")
end
def default_error_message(type, options = {})
model_name = options.delete(:model_name)
attribute = options.delete(:attribute)
instance = options.delete(:instance)
RailsShim.generate_validation_message(
instance,
attribute.to_sym,
type,
model_name,
options,
)
end
end
end
end
end
| 24.083333 | 61 | 0.566321 |
61c660a7774784953206e7beb66948e2f4cd0941 | 2,002 | # Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::ContainerInstance::Mgmt::V2018_10_01
module Models
#
# The instance view of the container group. Only valid in response.
#
class ContainerGroupPropertiesInstanceView
include MsRestAzure
# @return [Array<Event>] The events of this container group.
attr_accessor :events
# @return [String] The state of the container group. Only valid in
# response.
attr_accessor :state
#
# Mapper for ContainerGroupPropertiesInstanceView class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'ContainerGroup_properties_instanceView',
type: {
name: 'Composite',
class_name: 'ContainerGroupPropertiesInstanceView',
model_properties: {
events: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'events',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'EventElementType',
type: {
name: 'Composite',
class_name: 'Event'
}
}
}
},
state: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'state',
type: {
name: 'String'
}
}
}
}
}
end
end
end
end
| 29.014493 | 75 | 0.507493 |
3960ce9ac4411984a7cf2396df5628b4eb635362 | 993 | require_relative '../test_helper'
module Batch
class ModelTest < Minitest::Test
class SimpleJob < RocketJob::Job
include RocketJob::Batch
self.destroy_on_complete = false
self.collect_output = true
self.slice_size = 10
def perform(record)
record
end
end
describe RocketJob::Batch::Model do
before do
@blah_exception = begin
begin
blah
rescue StandardError => exc
exc
end
end
end
after do
        @job.destroy if @job && !@job.new_record?
end
describe '#exception' do
it 'saves' do
@job = SimpleJob.new
@job.exception = RocketJob::JobException.from_exception(@blah_exception)
assert_equal true, @job.save!
end
it 'fails' do
@job = SimpleJob.new
assert_equal true, @job.fail!(@blah_exception)
end
end
end
end
end
| 21.586957 | 82 | 0.551863 |
6aac2004dd18835891dd7ba4cae92bbb513cc9da | 538 | cask 'ledger-live' do
version '1.15.0'
sha256 '5feba1d89b4da67e229d5a6fd0e0e2b10a407d80c124aefbfba60d6ce7d3dc72'
# github.com/LedgerHQ/ledger-live-desktop was verified as official when first introduced to the cask
url "https://github.com/LedgerHQ/ledger-live-desktop/releases/download/v#{version}/ledger-live-desktop-#{version}-mac.dmg"
appcast 'https://github.com/LedgerHQ/ledger-live-desktop/releases.atom'
name 'Ledger Live'
homepage 'https://www.ledgerwallet.com/live'
auto_updates true
app 'Ledger Live.app'
end
| 35.866667 | 124 | 0.776952 |
110930088ea0c2ca67770096997671c69a8d2b42 | 45 | Style::Lib::Rails::Engine.routes.draw do
end
| 15 | 40 | 0.755556 |
f7e095514ec43e99997a0932354d618af013760c | 216 | require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../../../shared/complex/numeric/conj', __FILE__)
describe "Numeric#conjugate" do
it_behaves_like(:numeric_conj, :conjugate)
end
| 30.857143 | 74 | 0.740741 |
f7305ccaba5a6f3a3a761235bcd29f48f9cf24bc | 352 | # encoding: binary
module RbNaCl
# Serialization features shared across all "key-like" classes
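  # Including classes are expected to implement #to_bytes.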
module Serializable
def to_s; to_bytes; end
def to_str; to_bytes; end
# Inspect this key
#
# @return [String] a string representing this key
def inspect
"#<#{self.class}:#{Util.bin2hex(to_bytes)[0,8]}>"
end
end
end | 23.466667 | 63 | 0.661932 |
1c7a10b3f11aa816787eb444bec8b4c380f8085e | 1,101 | require "test_helper"
class CoinsControllerTest < ActionDispatch::IntegrationTest
setup do
@coin = coins(:one)
end
test "should get index" do
get coins_url
assert_response :success
end
test "should get new" do
get new_coin_url
assert_response :success
end
test "should create coin" do
assert_difference('Coin.count') do
post coins_url, params: { coin: { acronym: @coin.acronym, description: @coin.description, url_image: @coin.url_image } }
end
assert_redirected_to coin_url(Coin.last)
end
test "should show coin" do
get coin_url(@coin)
assert_response :success
end
test "should get edit" do
get edit_coin_url(@coin)
assert_response :success
end
test "should update coin" do
patch coin_url(@coin), params: { coin: { acronym: @coin.acronym, description: @coin.description, url_image: @coin.url_image } }
assert_redirected_to coin_url(@coin)
end
test "should destroy coin" do
assert_difference('Coin.count', -1) do
delete coin_url(@coin)
end
assert_redirected_to coins_url
end
end
| 22.469388 | 131 | 0.700272 |
034b530dafe199f8faa1e067481cd2e14f1d5272 | 690 |
require 'logger'
require 'mysql2'
# let's establish some basic universal stuff to work against
# where the hell are we?
DIR = File.dirname(File.realpath(__FILE__))
# here's our basic data structure
Transaction = Struct.new(:account, :date, :label, :amount, :category)
# prep the logger
Log = Logger.new "#{DIR}/log/ghetto-mint.log" rescue nil
Log ||= Logger.new STDOUT
Log << "\n\n#{Time.new}\n"
# create the table if it doesn't exist yet
D = Mysql2::Client.new username: "root"
D.query "create database if not exists finance;"
D.query "use finance;"
D.query "create table if not exists transactions( account varchar(255), date date, label varchar(255), amount decimal(10, 2) );"
| 25.555556 | 128 | 0.718841 |
ed2430655161ed50e5d36966ccaec7320638be39 | 3,618 | require 'cocoapods'
require 'pathname'
module CocoapodsHelper
UI = FastlaneCore::UI
class CocoapodsHelperException < RuntimeError; end
# Following the lead of npm ci (https://docs.npmjs.com/cli/v7/commands/npm-ci):
# The Podfile.lock must exist.
# The Podfile and Podfile.lock must be in sync.
# A pod install may be required, but it is an error to generate a change to
# the Podfile.lock, including when it's missing. If pod install is run, it
# may regenerate Pods/Manifest.lock or the entire Pods folder if the cache
# is out of sync.
def pod_install_required?(podfile_folder)
podfile_folder = File.expand_path podfile_folder
podfile_path = File.join podfile_folder, 'Podfile'
raise ArgumentError, "No Podfile at #{podfile_folder}" unless File.readable?(podfile_path)
# Podfile must be evalled in its current directory in order to resolve
# the require_relative at the top.
podfile = Dir.chdir(podfile_folder) { Pod::Podfile.from_file podfile_path }
# From here on we expect pod install to succeed. We just check whether it's
# necessary. The Podfile.from_file call above can raise if the Podfile
# contains errors. In that case, pod install will also fail, so we allow
# the exception to be raised instead of returning true.
lockfile_path = File.join podfile_folder, 'Podfile.lock'
manifest_path = File.join podfile_folder, 'Pods', 'Manifest.lock'
# Don't regenerate the lockfile
raise CocoapodsHelperException, "#{lockfile_path} missing or not readable." unless File.readable?(lockfile_path)
return true unless File.readable?(manifest_path)
# This validates the Podfile.lock for yaml formatting at least and makes
# the lockfile hash available to check the Podfile checksum later.
lockfile = Pod::Lockfile.from_file Pathname.new lockfile_path
lockfile_contents = File.read lockfile_path
begin
# diff the contents of Podfile.lock and Pods/Manifest.lock
# This is just what is done in the "[CP] Check Pods Manifest.lock" script
# build phase in a project using CocoaPods. This is a stricter requirement
# than semantic comparison of the two lockfile hashes.
return true unless lockfile_contents == File.read(manifest_path)
# compare checksum of Podfile with checksum in Podfile.lock in case Podfile
# updated since last pod install/update.
return false if lockfile.to_hash["PODFILE CHECKSUM"] == podfile.checksum
rescue StandardError, Pod::PlainInformative => e
# Any error from Pod::Lockfile.from_file or File.read after verifying a
# file exists and is readable. pod install will regenerate these files.
UI.error e.message
return true
end
# Don't regenerate the lockfile.
raise CocoapodsHelperException, "Podfile checksum #{podfile.checksum} does not match PODFILE CHECKSUM in Podfile.lock."
end
def pod_install_if_required(podfile_folder, verbose: false, repo_update: true)
podfile_folder = File.expand_path podfile_folder
install_required = pod_install_required? podfile_folder
UI.message "pod install #{install_required ? '' : 'not '}required in #{podfile_folder}"
return unless install_required
command = %w[pod install]
command << '--silent' unless verbose
command << '--repo-update' if repo_update
Dir.chdir(podfile_folder) { Fastlane::Action.sh(*command) }
end
def current_pod_version
# Get current version from podspec
podspec = File.open('../Branch.podspec', 'r') do |f|
eval f.read
end
podspec.version
end
end
include CocoapodsHelper
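# Illustrative usage from a Fastfile lane (paths and messages are hypothetical):
#   pod_install_if_required('../carthage-files', verbose: true, repo_update: false)
#   UI.message "Current Branch podspec version: #{current_pod_version}"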
| 41.586207 | 123 | 0.734107 |
ac11de2d0d25fa1e2dda6f6fb515abbe7a5558ba | 103 | require 'rom/memory/gateway'
require 'rom/memory/relation'
ROM.register_adapter(:memory, ROM::Memory)
| 20.6 | 42 | 0.786408 |
182582eed3b721bff42c4cb54132d29cfc14eebd | 641 | require "date"
module Clockedin
class ClockEntry
attr_reader :line, :time_span
def initialize(line)
@line = line
@time_span = init_time_span
end
def starts_at
time_span.starts_at
end
def ends_at
time_span.ends_at
end
private
def init_time_span
TimeSpan.new(timestamps[:starts_at], timestamps[:ends_at])
end
def timestamps
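      # Extracts both timestamps from an org-mode clock line such as
      # "CLOCK: [2016-01-01 Fri 09:00]--[2016-01-01 Fri 10:30] =>  1:30"
      # (example line is illustrative).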
match_data = /CLOCK\: \[(.+?)(?=\])\]--\[(.+?)(?=\])/.match(line)
starts_at, ends_at = match_data[1], match_data[2]
{ starts_at: DateTime.parse(starts_at),
ends_at: DateTime.parse(ends_at) }
end
end
end
| 18.852941 | 71 | 0.614665 |
61fc4c7ef0e8a670f7ab508a6abaad7dda94f759 | 3,010 | # Copyright (c) 2018 Public Library of Science
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
class RepetitionsController < ApplicationController
before_action :authenticate_user!
respond_to :json
def index
requires_user_can(:view, task)
respond_with task.repetitions
end
def create
requires_user_can(:edit, task)
repetition = Repetition.create(repetition_params)
render json: repetition_positions(first: repetition)
end
def update
repetition = Repetition.find(params[:id])
requires_user_can(:edit, repetition.task)
repetition.update(repetition_params)
render json: repetition_positions(first: repetition)
end
def destroy
repetition = Repetition.find(params[:id])
requires_user_can(:edit, repetition.task)
repetition.destroy
if params[:destroying_all]
# client has reported that all repetitions are being destroyed
# and does not care about returning a position for reordering
head :no_content
else
render json: repetition_positions(first: repetition)
end
end
private
def task
@task ||= Task.find(params[:task_id] || repetition_params[:task_id])
end
def card_content
@card_content ||= CardContent.find(repetition_params[:card_content_id])
end
def repetition_params
params.require(:repetition).permit(
:card_content_id,
:task_id,
:parent_id,
:position
)
end
# Return an array of sibling repetitions so that positions can be returned to
# the client. Ember expects that when returning an array for a single object
# action (create / update), the first object must be the one that
# was modified. In the case of destroy, do not return the deleted model.
def repetition_positions(first:)
rest = Repetition.where(parent_id: first.parent_id, card_content: first.card_content, task: first.task)
.where.not(id: first.id)
if first.destroyed?
rest
else
[first] + rest
end
end
end
| 33.076923 | 107 | 0.736545 |
2609363be0143096205c0a7fbf35a2c6acce67cf | 3,479 | class Filebeat < Formula
desc "File harvester to ship log files to Elasticsearch or Logstash"
homepage "https://www.elastic.co/products/beats/filebeat"
url "https://github.com/elastic/beats.git",
tag: "v7.11.2",
revision: "1d9cced55410003f5d0b4594ff5471d15a4e2900"
# Outside of the "x-pack" folder, source code in a given file is licensed
# under the Apache License Version 2.0
license "Apache-2.0"
head "https://github.com/elastic/beats.git"
bottle do
sha256 cellar: :any_skip_relocation, arm64_big_sur: "6005cd842b44938b1e897e60af284bd0922f963156a24218676cad6cb826fbf9"
sha256 cellar: :any_skip_relocation, big_sur: "4802438b9cb623d1bd09ef09829b876ba56fff0b1caf42c0225fc72d82eee94a"
sha256 cellar: :any_skip_relocation, catalina: "3a80390119d59c5e8d76729fb4bdc177b07841946cbc36d523b0a04878952fb0"
sha256 cellar: :any_skip_relocation, mojave: "bc410a0c8b9d1370f695e3fd336bad6a70c1fc108251aa37a271792273fb80b2"
end
depends_on "go" => :build
depends_on "mage" => :build
depends_on "[email protected]" => :build
uses_from_macos "rsync" => :build
def install
# remove non open source files
rm_rf "x-pack"
cd "filebeat" do
# don't build docs because it would fail creating the combined OSS/x-pack
# docs and we aren't installing them anyway
inreplace "magefile.go", "mg.SerialDeps(Fields, Dashboards, Config, includeList, fieldDocs,",
"mg.SerialDeps(Fields, Dashboards, Config, includeList,"
# prevent downloading binary wheels during python setup
system "make", "PIP_INSTALL_PARAMS=--no-binary :all", "python-env"
system "mage", "-v", "build"
system "mage", "-v", "update"
(etc/"filebeat").install Dir["filebeat.*", "fields.yml", "modules.d"]
(etc/"filebeat"/"module").install Dir["build/package/modules/*"]
(libexec/"bin").install "filebeat"
prefix.install "build/kibana"
end
(bin/"filebeat").write <<~EOS
#!/bin/sh
exec #{libexec}/bin/filebeat \
--path.config #{etc}/filebeat \
--path.data #{var}/lib/filebeat \
--path.home #{prefix} \
--path.logs #{var}/log/filebeat \
"$@"
EOS
end
plist_options manual: "filebeat"
def plist
<<~EOS
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>#{plist_name}</string>
<key>Program</key>
<string>#{opt_bin}/filebeat</string>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
EOS
end
test do
log_file = testpath/"test.log"
touch log_file
(testpath/"filebeat.yml").write <<~EOS
filebeat:
inputs:
-
paths:
- #{log_file}
scan_frequency: 0.1s
output:
file:
path: #{testpath}
EOS
(testpath/"log").mkpath
(testpath/"data").mkpath
fork do
exec "#{bin}/filebeat", "-c", "#{testpath}/filebeat.yml",
"-path.config", "#{testpath}/filebeat",
"-path.home=#{testpath}",
"-path.logs", "#{testpath}/log",
"-path.data", testpath
end
sleep 1
log_file.append_lines "foo bar baz"
sleep 5
assert_predicate testpath/"filebeat", :exist?
end
end
| 31.342342 | 122 | 0.622305 |
01365b94dbf39518b40861b00d4fdf65c185152f | 1,017 | require 'tempfile'
require 'json'
require 'English'
require 'open3'
module Packer
class Runner
class ErrorInvalidConfig < RuntimeError
end
def initialize(config)
@config = config
end
def run(command, args={})
config_file = Tempfile.new('')
config_file.write(@config)
config_file.close
      # Build the -var flags, joined with spaces so multiple variables don't run together
      args_combined = args.map { |name, value| "-var \"#{name}=#{value}\"" }.join(' ')
logConfig(config_file.path)
packer_command = "packer #{command} -machine-readable #{args_combined} #{config_file.path}"
puts packer_command
Open3.popen2e(packer_command) do |stdin, out, wait_thr|
yield(out) if block_given?
return wait_thr.value
end
end
def logConfig(path)
config_contents = File.read(path)
if !ENV['NEW_PASSWORD'].to_s.empty?
config_contents.gsub! ENV['NEW_PASSWORD'], "( redacted )"
end
puts "config file contents: #{config_contents}"
end
end
end
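# Illustrative usage (template path and variables are hypothetical):
#   runner = Packer::Runner.new(File.read('template.json'))
#   status = runner.run('build', 'region' => 'us-east-1') { |out| out.each_line { |l| puts l } }
#   raise Packer::Runner::ErrorInvalidConfig unless status.success?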
| 22.6 | 97 | 0.635202 |
e277c6fa2e4a4c5a35827065679e6c2173ac9204 | 371 | module AccountReactivationConcern
extend ActiveSupport::Concern
def confirm_password_reset_profile
return if current_user.decorate.password_reset_profile
redirect_to root_url
end
def reactivate_account_session
@_reactivate_account_session ||= ReactivateAccountSession.new(
user: current_user,
user_session: user_session,
)
end
end
| 23.1875 | 66 | 0.787062 |
33637737d382d2debb7190cca197c6a8612c27e2 | 476 | FactoryGirl.define do
sequence(:store_credits_order_number) { |n| "R1000#{n}" }
factory :store_credit, class: Spree::StoreCredit do
user
created_by { create(:user) }
category { create(:store_credit_category) }
amount { 150.00 }
currency { 'USD' }
credit_type { create(:primary_credit_type) }
end
factory :store_credits_order_without_user, class: Spree::Order do
number { generate(:store_credits_order_number) }
bill_address
end
end
| 26.444444 | 67 | 0.705882 |
792150342cd9f7c4c2456909f112e3b7ae026b6d | 179 | class EmailAddressExhibit < DisplayCase::Exhibit
def self.applicable_to?(object)
object.class.name == 'EmailAddress'
end
def to_s
@context.mail_to(email)
end
end
| 17.9 | 48 | 0.726257 |
7a17bc5fbd6724627fb8b29a931bb99fd6159192 | 15,894 | module RSpec
module Mocks
# @private
class Proxy
SpecificMessage = Struct.new(:object, :message, :args) do
def ==(expectation)
expectation.orig_object == object && expectation.matches?(message, *args)
end
end
# @private
def ensure_implemented(*_args)
# noop for basic proxies, see VerifyingProxy for behaviour.
end
# @private
def initialize(object, order_group, options={})
@object = object
@order_group = order_group
@error_generator = ErrorGenerator.new(object)
@messages_received = []
@options = options
@null_object = false
@method_doubles = Hash.new { |h, k| h[k] = MethodDouble.new(@object, k, self) }
end
# @private
attr_reader :object
# @private
def null_object?
@null_object
end
# @private
# Tells the object to ignore any messages that aren't explicitly set as
# stubs or message expectations.
def as_null_object
@null_object = true
@object
end
# @private
def original_method_handle_for(_message)
nil
end
DEFAULT_MESSAGE_EXPECTATION_OPTS = {}.freeze
# @private
def add_message_expectation(method_name, opts=DEFAULT_MESSAGE_EXPECTATION_OPTS, &block)
location = opts.fetch(:expected_from) { CallerFilter.first_non_rspec_line }
meth_double = method_double_for(method_name)
if null_object? && !block
meth_double.add_default_stub(@error_generator, @order_group, location, opts) do
@object
end
end
meth_double.add_expectation @error_generator, @order_group, location, opts, &block
end
# @private
def add_simple_expectation(method_name, response, location)
method_double_for(method_name).add_simple_expectation method_name, response, @error_generator, location
end
# @private
def build_expectation(method_name)
meth_double = method_double_for(method_name)
meth_double.build_expectation(
@error_generator,
@order_group
)
end
# @private
def replay_received_message_on(expectation, &block)
expected_method_name = expectation.message
meth_double = method_double_for(expected_method_name)
if meth_double.expectations.any?
@error_generator.raise_expectation_on_mocked_method(expected_method_name)
end
unless null_object? || meth_double.stubs.any?
@error_generator.raise_expectation_on_unstubbed_method(expected_method_name)
end
@messages_received.each do |(actual_method_name, args, received_block)|
next unless expectation.matches?(actual_method_name, *args)
expectation.safe_invoke(nil)
block.call(*args, &received_block) if block
end
end
# @private
def check_for_unexpected_arguments(expectation)
return if @messages_received.empty?
return if @messages_received.any? { |method_name, args, _| expectation.matches?(method_name, *args) }
name_but_not_args, others = @messages_received.partition do |(method_name, args, _)|
expectation.matches_name_but_not_args(method_name, *args)
end
return if name_but_not_args.empty? && !others.empty?
expectation.raise_unexpected_message_args_error(name_but_not_args.map { |args| args[1] })
end
# @private
def add_stub(method_name, opts={}, &implementation)
location = opts.fetch(:expected_from) { CallerFilter.first_non_rspec_line }
method_double_for(method_name).add_stub @error_generator, @order_group, location, opts, &implementation
end
# @private
def add_simple_stub(method_name, response)
method_double_for(method_name).add_simple_stub method_name, response
end
# @private
def remove_stub(method_name)
method_double_for(method_name).remove_stub
end
# @private
def remove_stub_if_present(method_name)
method_double_for(method_name).remove_stub_if_present
end
# @private
def verify
@method_doubles.each_value { |d| d.verify }
end
# @private
def reset
@messages_received.clear
end
# @private
def received_message?(method_name, *args, &block)
@messages_received.any? { |array| array == [method_name, args, block] }
end
# @private
def messages_arg_list
@messages_received.map { |_, args, _| args }
end
# @private
def has_negative_expectation?(message)
method_double_for(message).expectations.find { |expectation| expectation.negative_expectation_for?(message) }
end
# @private
def record_message_received(message, *args, &block)
@order_group.invoked SpecificMessage.new(object, message, args)
@messages_received << [message, args, block]
end
# @private
def message_received(message, *args, &block)
record_message_received message, *args, &block
expectation = find_matching_expectation(message, *args)
stub = find_matching_method_stub(message, *args)
if (stub && expectation && expectation.called_max_times?) || (stub && !expectation)
expectation.increase_actual_received_count! if expectation && expectation.actual_received_count_matters?
if (expectation = find_almost_matching_expectation(message, *args))
expectation.advise(*args) unless expectation.expected_messages_received?
end
stub.invoke(nil, *args, &block)
elsif expectation
expectation.unadvise(messages_arg_list)
expectation.invoke(stub, *args, &block)
elsif (expectation = find_almost_matching_expectation(message, *args))
expectation.advise(*args) if null_object? unless expectation.expected_messages_received?
if null_object? || !has_negative_expectation?(message)
expectation.raise_unexpected_message_args_error([args])
end
elsif (stub = find_almost_matching_stub(message, *args))
stub.advise(*args)
raise_missing_default_stub_error(stub, [args])
elsif Class === @object
@object.superclass.__send__(message, *args, &block)
else
@object.__send__(:method_missing, message, *args, &block)
end
end
# @private
def raise_unexpected_message_error(method_name, args)
@error_generator.raise_unexpected_message_error method_name, args
end
# @private
def raise_missing_default_stub_error(expectation, args_for_multiple_calls)
@error_generator.raise_missing_default_stub_error(expectation, args_for_multiple_calls)
end
# @private
def visibility_for(_method_name)
# This is the default (for test doubles). Subclasses override this.
:public
end
if Support::RubyFeatures.module_prepends_supported?
def self.prepended_modules_of(klass)
ancestors = klass.ancestors
# `|| 0` is necessary for Ruby 2.0, where the singleton class
# is only in the ancestor list when there are prepended modules.
singleton_index = ancestors.index(klass) || 0
ancestors[0, singleton_index]
end
def prepended_modules_of_singleton_class
@prepended_modules_of_singleton_class ||= RSpec::Mocks::Proxy.prepended_modules_of(@object.singleton_class)
end
end
private
def method_double_for(message)
@method_doubles[message.to_sym]
end
def find_matching_expectation(method_name, *args)
find_best_matching_expectation_for(method_name) do |expectation|
expectation.matches?(method_name, *args)
end
end
def find_almost_matching_expectation(method_name, *args)
find_best_matching_expectation_for(method_name) do |expectation|
expectation.matches_name_but_not_args(method_name, *args)
end
end
def find_best_matching_expectation_for(method_name)
first_match = nil
method_double_for(method_name).expectations.each do |expectation|
next unless yield expectation
return expectation unless expectation.called_max_times?
first_match ||= expectation
end
first_match
end
def find_matching_method_stub(method_name, *args)
method_double_for(method_name).stubs.find { |stub| stub.matches?(method_name, *args) }
end
def find_almost_matching_stub(method_name, *args)
method_double_for(method_name).stubs.find { |stub| stub.matches_name_but_not_args(method_name, *args) }
end
end
# @private
class TestDoubleProxy < Proxy
def reset
@method_doubles.clear
object.__disallow_further_usage!
super
end
end
# @private
class PartialDoubleProxy < Proxy
def original_method_handle_for(message)
if any_instance_class_recorder_observing_method?(@object.class, message)
message = ::RSpec::Mocks.space.
any_instance_recorder_for(@object.class).
build_alias_method_name(message)
end
::RSpec::Support.method_handle_for(@object, message)
rescue NameError
nil
end
# @private
def add_simple_expectation(method_name, response, location)
method_double_for(method_name).configure_method
super
end
# @private
def add_simple_stub(method_name, response)
method_double_for(method_name).configure_method
super
end
# @private
def visibility_for(method_name)
# We fall back to :public because by default we allow undefined methods
# to be stubbed, and when we do so, we make them public.
MethodReference.method_visibility_for(@object, method_name) || :public
end
def reset
@method_doubles.each_value { |d| d.reset }
super
end
def message_received(message, *args, &block)
RSpec::Mocks.space.any_instance_recorders_from_ancestry_of(object).each do |subscriber|
subscriber.notify_received_message(object, message, args, block)
end
super
end
private
def any_instance_class_recorder_observing_method?(klass, method_name)
only_return_existing = true
recorder = ::RSpec::Mocks.space.any_instance_recorder_for(klass, only_return_existing)
return true if recorder && recorder.already_observing?(method_name)
superklass = klass.superclass
return false if superklass.nil?
any_instance_class_recorder_observing_method?(superklass, method_name)
end
end
# @private
# When we mock or stub a method on a class, we have to treat it a bit differently,
# because normally singleton method definitions only affect the object on which
# they are defined, but on classes they affect subclasses, too. As a result,
# we need some special handling to get the original method.
module PartialClassDoubleProxyMethods
def initialize(source_space, *args)
@source_space = source_space
super(*args)
end
# Consider this situation:
#
# class A; end
# class B < A; end
#
# allow(A).to receive(:new)
# expect(B).to receive(:new).and_call_original
#
# When getting the original definition for `B.new`, we cannot rely purely on
# using `B.method(:new)` before our redefinition is defined on `B`, because
# `B.method(:new)` will return a method that will execute the stubbed version
# of the method on `A` since singleton methods on classes are in the lookup
# hierarchy.
#
# To do it properly, we need to find the original definition of `new` from `A`
# from _before_ `A` was stubbed, and we need to rebind it to `B` so that it will
# run with the proper `self`.
#
# That's what this method (together with `original_unbound_method_handle_from_ancestor_for`)
# does.
def original_method_handle_for(message)
unbound_method = superclass_proxy &&
superclass_proxy.original_unbound_method_handle_from_ancestor_for(message.to_sym)
return super unless unbound_method
unbound_method.bind(object)
# :nocov:
rescue TypeError
if RUBY_VERSION == '1.8.7'
# In MRI 1.8.7, a singleton method on a class cannot be rebound to its subclass
if unbound_method && unbound_method.owner.ancestors.first != unbound_method.owner
# This is a singleton method; we can't do anything with it
# But we can work around this using a different implementation
double = method_double_from_ancestor_for(message)
return object.method(double.method_stasher.stashed_method_name)
end
end
raise
# :nocov:
end
protected
def original_unbound_method_handle_from_ancestor_for(message)
double = method_double_from_ancestor_for(message)
double && double.original_method.unbind
end
def method_double_from_ancestor_for(message)
@method_doubles.fetch(message) do
# The fact that there is no method double for this message indicates
# that it has not been redefined by rspec-mocks. We need to continue
# looking up the ancestor chain.
return superclass_proxy &&
superclass_proxy.method_double_from_ancestor_for(message)
end
end
def superclass_proxy
return @superclass_proxy if defined?(@superclass_proxy)
if (superclass = object.superclass)
@superclass_proxy = @source_space.superclass_proxy_for(superclass)
else
@superclass_proxy = nil
end
end
end
# @private
class PartialClassDoubleProxy < PartialDoubleProxy
include PartialClassDoubleProxyMethods
end
# @private
class ProxyForNil < PartialDoubleProxy
def initialize(order_group)
set_expectation_behavior
super(nil, order_group)
end
attr_accessor :disallow_expectations
attr_accessor :warn_about_expectations
def add_message_expectation(method_name, opts={}, &block)
warn_or_raise!(method_name)
super
end
def add_negative_message_expectation(location, method_name, &implementation)
warn_or_raise!(method_name)
super
end
def add_stub(method_name, opts={}, &implementation)
warn_or_raise!(method_name)
super
end
private
def set_expectation_behavior
case RSpec::Mocks.configuration.allow_message_expectations_on_nil
when false
@warn_about_expectations = false
@disallow_expectations = true
when true
@warn_about_expectations = false
@disallow_expectations = false
else
@warn_about_expectations = true
@disallow_expectations = false
end
end
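# A sketch of how the flags above are exercised (using the public config setter):
#
#   RSpec::Mocks.configuration.allow_message_expectations_on_nil = false
#   allow(nil).to receive(:foo) # now routed through raise_error below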
def warn_or_raise!(method_name)
# This method intentionally swallows the message when
# neither disallow_expectations nor warn_about_expectations
# are set to true.
if disallow_expectations
raise_error(method_name)
elsif warn_about_expectations
warn(method_name)
end
end
def warn(method_name)
warning_msg = @error_generator.expectation_on_nil_message(method_name)
RSpec.warning(warning_msg)
end
def raise_error(method_name)
@error_generator.raise_expectation_on_nil_error(method_name)
end
end
end
end
| 32.771134 | 117 | 0.666604 |
7a4bc23dd90ab51f76560e0069ea3ff2cd9f9eda | 3,358 | # frozen_string_literal: true
module Hcaptcha
module Helpers
DEFAULT_ERRORS = {
hcaptcha_unreachable: 'Oops, we failed to validate your hCaptcha response. Please try again.',
verification_failed: 'hCaptcha verification failed, please try again.'
}.freeze
def self.hcaptcha(options)
if options.key?(:stoken)
raise(HcaptchaError, "Secure Token is deprecated. Please remove 'stoken' from your calls to hcaptcha_tags.")
end
if options.key?(:ssl)
raise(HcaptchaError, "SSL is now always true. Please remove 'ssl' from your calls to hcaptcha_tags.")
end
html, tag_attributes = components(options.dup)
html << %(<div #{tag_attributes}></div>\n)
html << <<-HTML
<div class="h-captcha" data-sitekey="#{Hcaptcha.configuration.site_key!}"></div>
HTML
html.respond_to?(:html_safe) ? html.html_safe : html
end
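# A quick sketch of the lookup below (value taken from DEFAULT_ERRORS above):
#
#   Hcaptcha::Helpers.to_error_message(:verification_failed)
#   # => "hCaptcha verification failed, please try again." (unless overridden via I18n)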
def self.to_error_message(key)
default = DEFAULT_ERRORS.fetch(key) { raise ArgumentError, "Unknown hCaptcha error - #{key}" }
to_message("hcaptcha.errors.#{key}", default)
end
if defined?(I18n)
def self.to_message(key, default)
I18n.translate(key, default: default)
end
else
def self.to_message(_key, default)
default
end
end
private_class_method def self.components(options)
html = +''
attributes = {}
options = options.dup
class_attribute = options.delete(:class)
site_key = options.delete(:site_key)
hl = options.delete(:hl)
onload = options.delete(:onload)
render = options.delete(:render)
script_async = options.delete(:script_async)
script_defer = options.delete(:script_defer)
nonce = options.delete(:nonce)
skip_script = (options.delete(:script) == false) || (options.delete(:external_script) == false)
ui = options.delete(:ui)
data_attribute_keys = [:badge, :theme, :type, :callback, :expired_callback, :error_callback, :size]
data_attribute_keys << :tabindex unless ui == :button
data_attributes = {}
data_attribute_keys.each do |data_attribute|
value = options.delete(data_attribute)
data_attributes["data-#{data_attribute.to_s.tr('_', '-')}"] = value if value
end
site_key ||= Hcaptcha.configuration.site_key!
script_url = Hcaptcha.configuration.api_server_url
query_params = hash_to_query(
hl: hl,
onload: onload,
render: render
)
script_url += "?#{query_params}" unless query_params.empty?
async_attr = "async" if script_async != false
defer_attr = "defer" if script_defer != false
nonce_attr = " nonce='#{nonce}'" if nonce
html << %(<script src="#{script_url}" #{async_attr} #{defer_attr} #{nonce_attr}></script>\n) unless skip_script
attributes["data-sitekey"] = site_key
attributes.merge! data_attributes
# The remaining options will be added as attributes on the tag.
attributes["class"] = "hcaptcha #{class_attribute}"
tag_attributes = attributes.merge(options).map { |k, v| %(#{k}="#{v}") }.join(" ")
[html, tag_attributes]
end
private_class_method def self.hash_to_query(hash)
hash.delete_if { |_, val| val.nil? || val.empty? }.to_a.map { |pair| pair.join('=') }.join('&')
end
end
end
| 35.723404 | 117 | 0.648898 |
ed7e888e21467684165ee9417c4d90665b2c9b11 | 135 | class AddDoneToProject < ActiveRecord::Migration[5.2]
def change
add_column :projects, :done, :boolean, default: false
end
end
| 22.5 | 57 | 0.740741 |
bb243adab6c9ebf0e0e0d648ebe7d02a5de48af8 | 2,377 | require 'fog/core/collection'
require 'fog/kubevirt/compute/models/service'
module Fog
module Kubevirt
class Compute
class Services < Fog::Collection
attr_reader :kind, :resource_version
model Fog::Kubevirt::Compute::Service
def all(filters = {})
begin
srvs = service.list_services(filters)
rescue ::Fog::Kubevirt::Errors::ClientError
# we assume that we get 404
srvs = []
end
@kind = srvs.kind
@resource_version = srvs.resource_version
load srvs
end
def get(name)
new service.get_service(name)
end
# Creates a service using the provided parameters:
# :name [String] - name of a service
# :port [int] - a port which will be exposed on node and cluster
# :target_port [int] - a vmi port which will be forwarded
# :vmi_name [String] - name of a vmi to be selected
# :service_type [String] - service type used to create service
#
# @param [Hash] attributes containing details about service to be
# created.
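#
# Example call (illustrative values only):
#
#   services.create(:name         => "vm-ssh",
#                   :port         => 30022,
#                   :target_port  => 22,
#                   :vmi_name     => "demo-vm",
#                   :service_type => "NodePort")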
def create(args = {})
port = args[:port]
name = args[:name]
target_port = args[:target_port]
vmi_name = args[:vmi_name]
service_type = args[:service_type]
srv = {
:apiVersion => "v1",
:kind => "Service",
:metadata => {
:name => name,
:namespace => service.namespace
},
:spec => {
:externalTrafficPolicy => "Cluster",
:ports => [
{:nodePort => port,
:port => port,
:protocol => "TCP",
:targetPort => target_port
}
],
:selector => {
:"kubevirt.io/vm" => vmi_name
},
:type => service_type
}
}
service.create_service(srv)
end
def delete(name)
begin
srv = get(name)
rescue ::Fog::Kubevirt::Errors::ClientError
# the service doesn't exist
srv = nil
end
service.delete_service(name, service.namespace) unless srv.nil?
end
end
end
end
end
| 28.297619 | 73 | 0.496845 |
79cbcc0ba8e23b62995e9fb1ac49638c36108f2b | 576 | #
# Cookbook Name:: cgit-cookbook
# Recipe:: default
#
# Copyright 2015, Kai Sasaki
#
# MIT License
template "/etc/apache2/conf-available/cgit.conf" do
source "cgit_httpd.conf.erb"
mode '0440'
owner 'root'
group 'root'
variables(
'ip_address' => node[:ipaddress]
)
end
template "/etc/apache2/envvars" do
source "envvars.erb"
mode '0440'
owner 'root'
group 'root'
end
execute "a2enconf" do
user "root"
command "a2enconf cgit"
end
execute "a2enmodule" do
user "root"
command "a2enmod cgi cgid"
end
service "apache2" do
action :restart
end
| 14.769231 | 51 | 0.6875 |
289e0f2354f040f95232fcc61effe7bfc1a95c3e | 327 | require 'test_helper'
class AdminNotifierTest < ActionMailer::TestCase
test "contact_form" do
mail = AdminNotifier.contact_form
assert_equal "Contact form", mail.subject
assert_equal ["[email protected]"], mail.to
assert_equal ["[email protected]"], mail.from
assert_match "Hi", mail.body.encoded
end
end
| 25.153846 | 48 | 0.733945 |
1d7c244ca0f05fd003535dca8dab1d99e8fed914 | 53,454 | class MiqExpression
require_nested :Tag
include Vmdb::Logging
attr_accessor :exp, :context_type, :preprocess_options
config = YAML.load(ERB.new(File.read(Rails.root.join("config", "miq_expression.yml"))).result) # rubocop:disable Security/YAMLLoad
BASE_TABLES = config[:base_tables]
INCLUDE_TABLES = config[:include_tables]
EXCLUDE_COLUMNS = config[:exclude_columns]
EXCLUDE_ID_COLUMNS = config[:exclude_id_columns]
EXCLUDE_EXCEPTIONS = config[:exclude_exceptions]
TAG_CLASSES = config[:tag_classes]
EXCLUDE_FROM_RELATS = config[:exclude_from_relats]
FORMAT_SUB_TYPES = config[:format_sub_types]
FORMAT_BYTE_SUFFIXES = FORMAT_SUB_TYPES[:bytes][:units].to_h.invert
BYTE_FORMAT_WHITELIST = Hash[FORMAT_BYTE_SUFFIXES.keys.collect(&:to_s).zip(FORMAT_BYTE_SUFFIXES.keys)]
NUM_OPERATORS = config[:num_operators].freeze
STRING_OPERATORS = config[:string_operators]
SET_OPERATORS = config[:set_operators]
REGKEY_OPERATORS = config[:regkey_operators]
BOOLEAN_OPERATORS = config[:boolean_operators]
DATE_TIME_OPERATORS = config[:date_time_operators]
DEPRECATED_OPERATORS = config[:deprecated_operators]
UNQUOTABLE_OPERATORS = (STRING_OPERATORS + DEPRECATED_OPERATORS - ['=', 'IS NULL', 'IS NOT NULL', 'IS EMPTY', 'IS NOT EMPTY']).freeze
def initialize(exp, ctype = nil)
@exp = exp
@context_type = ctype
@col_details = nil
@ruby = nil
end
def valid?(component = exp)
operator = component.keys.first
case operator.downcase
when "and", "or"
component[operator].all?(&method(:valid?))
when "not", "!"
valid?(component[operator])
when "find"
validate_set = Set.new(%w(checkall checkany checkcount search))
validate_keys = component[operator].keys.select { |k| validate_set.include?(k) }
validate_keys.all? { |k| valid?(component[operator][k]) }
else
if component[operator].key?("field")
field = Field.parse(component[operator]["field"])
return false if field && !field.valid?
end
if Field.is_field?(component[operator]["value"])
field = Field.parse(component[operator]["value"])
return false unless field && field.valid?
end
true
end
end
def set_tagged_target(model, associations = [])
each_atom(exp) do |atom|
next unless atom.key?("tag")
tag = Tag.parse(atom["tag"])
tag.model = model
tag.associations = associations
atom["tag"] = tag.to_s
end
end
def self.proto?
return @proto if defined?(@proto)
@proto = ::Settings.product.proto
end
def self.to_human(exp)
if exp.kind_of?(self)
exp.to_human
elsif exp.kind_of?(Hash)
case exp["mode"]
when "tag_expr"
exp["expr"]
when "tag"
tag = [exp["ns"], exp["tag"]].join("/")
if exp["include"] == "none"
return "Not Tagged With #{tag}"
else
return "Tagged With #{tag}"
end
when "script"
if exp["expr"] == "true"
"Always True"
else
exp["expr"]
end
else
new(exp).to_human
end
else
exp.inspect
end
end
def to_human
self.class._to_human(exp)
end
def self._to_human(exp, options = {})
return exp unless exp.kind_of?(Hash) || exp.kind_of?(Array)
keys = exp.keys
keys.delete(:token)
operator = keys.first
case operator.downcase
when "like", "not like", "starts with", "ends with", "includes", "includes any", "includes all", "includes only", "limited to", "regular expression", "regular expression matches", "regular expression does not match", "equal", "=", "<", ">", ">=", "<=", "!=", "before", "after"
operands = operands2humanvalue(exp[operator], options)
clause = operands.join(" #{normalize_operator(operator)} ")
when "and", "or"
clause = "( " + exp[operator].collect { |operand| _to_human(operand) }.join(" #{normalize_operator(operator)} ") + " )"
when "not", "!"
clause = normalize_operator(operator) + " ( " + _to_human(exp[operator]) + " )"
when "is null", "is not null", "is empty", "is not empty"
clause = operands2humanvalue(exp[operator], options).first + " " + operator
when "contains"
operands = operands2humanvalue(exp[operator], options)
clause = operands.join(" #{normalize_operator(operator)} ")
when "find"
# FIND Vm.users-name = 'Administrator' CHECKALL Vm.users-enabled = 1
check = nil
check = "checkall" if exp[operator].include?("checkall")
check = "checkany" if exp[operator].include?("checkany")
check = "checkcount" if exp[operator].include?("checkcount")
raise _("expression malformed, must contain one of 'checkall', 'checkany', 'checkcount'") unless check
check =~ /^check(.*)$/
mode = $1.upcase
clause = "FIND" + " " + _to_human(exp[operator]["search"]) + " CHECK " + mode + " " + _to_human(exp[operator][check], :include_table => false).strip
when "key exists"
clause = "KEY EXISTS #{exp[operator]['regkey']}"
when "value exists"
clause = "VALUE EXISTS #{exp[operator]['regkey']} : #{exp[operator]['regval']}"
when "is"
operands = operands2humanvalue(exp[operator], options)
clause = "#{operands.first} #{operator} #{operands.last}"
when "between dates", "between times"
col_name = exp[operator]["field"]
col_type = parse_field_or_tag(col_name)&.column_type
col_human, _value = operands2humanvalue(exp[operator], options)
vals_human = exp[operator]["value"].collect { |v| quote_human(v, col_type) }
clause = "#{col_human} #{operator} #{vals_human.first} AND #{vals_human.last}"
when "from"
col_name = exp[operator]["field"]
col_type = parse_field_or_tag(col_name)&.column_type
col_human, _value = operands2humanvalue(exp[operator], options)
vals_human = exp[operator]["value"].collect { |v| quote_human(v, col_type) }
clause = "#{col_human} #{operator} #{vals_human.first} THROUGH #{vals_human.last}"
end
# puts "clause: #{clause}"
clause
end
def to_ruby(tz = nil)
return "" unless valid?
tz ||= "UTC"
@ruby ||= self.class._to_ruby(exp.deep_clone, context_type, tz)
@ruby.dup
end
def self._to_ruby(exp, context_type, tz)
return exp unless exp.kind_of?(Hash)
operator = exp.keys.first
op_args = exp[operator]
col_name = op_args["field"] if op_args.kind_of?(Hash)
operator = operator.downcase
case operator
when "equal", "=", "<", ">", ">=", "<=", "!="
operands = operands2rubyvalue(operator, op_args, context_type)
clause = operands.join(" #{normalize_ruby_operator(operator)} ")
when "before"
col_type = parse_field_or_tag(col_name)&.column_type if col_name
col_ruby, _value = operands2rubyvalue(operator, {"field" => col_name}, context_type)
val = op_args["value"]
clause = ruby_for_date_compare(col_ruby, col_type, tz, "<", val)
when "after"
col_type = parse_field_or_tag(col_name)&.column_type if col_name
col_ruby, _value = operands2rubyvalue(operator, {"field" => col_name}, context_type)
val = op_args["value"]
clause = ruby_for_date_compare(col_ruby, col_type, tz, nil, nil, ">", val)
when "includes all"
operands = operands2rubyvalue(operator, op_args, context_type)
clause = "(#{operands[0]} & #{operands[1]}) == #{operands[1]}"
when "includes any"
operands = operands2rubyvalue(operator, op_args, context_type)
clause = "(#{operands[1]} - #{operands[0]}) != #{operands[1]}"
when "includes only", "limited to"
operands = operands2rubyvalue(operator, op_args, context_type)
clause = "(#{operands[0]} - #{operands[1]}) == []"
when "like", "not like", "starts with", "ends with", "includes"
operands = operands2rubyvalue(operator, op_args, context_type)
operands[1] =
case operator
when "starts with"
"/^" + re_escape(operands[1].to_s) + "/"
when "ends with"
"/" + re_escape(operands[1].to_s) + "$/"
else
"/" + re_escape(operands[1].to_s) + "/"
end
clause = operands.join(" #{normalize_ruby_operator(operator)} ")
clause = "!(" + clause + ")" if operator == "not like"
when "regular expression matches", "regular expression does not match"
operands = operands2rubyvalue(operator, op_args, context_type)
# If it looks like a regular expression, sanitize from forward
# slashes and interpolation
#
# Regular expressions with a single option are also supported,
# e.g. "/abc/i"
#
# Otherwise sanitize the whole string and add the delimiters
#
# TODO: support regexes with more than one option
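#
# A sketch of what the operand becomes (illustrative values):
#   "/abc/i" is kept as "/abc/i" (delimiters and the single option preserved),
#   while a bare "a#c" is escaped and wrapped, yielding "/a\#c/"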
if operands[1].starts_with?("/") && operands[1].ends_with?("/")
operands[1][1..-2] = sanitize_regular_expression(operands[1][1..-2])
elsif operands[1].starts_with?("/") && operands[1][-2] == "/"
operands[1][1..-3] = sanitize_regular_expression(operands[1][1..-3])
else
operands[1] = "/" + sanitize_regular_expression(operands[1].to_s) + "/"
end
clause = operands.join(" #{normalize_ruby_operator(operator)} ")
when "and", "or"
clause = "(" + op_args.collect { |operand| _to_ruby(operand, context_type, tz) }.join(" #{normalize_ruby_operator(operator)} ") + ")"
when "not", "!"
clause = normalize_ruby_operator(operator) + "(" + _to_ruby(op_args, context_type, tz) + ")"
when "is null", "is not null", "is empty", "is not empty"
operands = operands2rubyvalue(operator, op_args, context_type)
clause = operands.join(" #{normalize_ruby_operator(operator)} ")
when "contains"
op_args["tag"] ||= col_name
operands = if context_type != "hash"
target = parse_field_or_tag(op_args["tag"])
["<exist ref=#{target.model.to_s.downcase}>#{target.tag_path_with(op_args["value"])}</exist>"]
elsif context_type == "hash"
# This is only for supporting reporting "display filters"
# In the report object the tag value is actually the description and not the raw tag name.
# So we have to trick it by replacing the value with the description.
description = MiqExpression.get_entry_details(op_args["tag"]).inject("") do |s, t|
break(t.first) if t.last == op_args["value"]
s
end
val = op_args["tag"].split(".").last.split("-").join(".")
fld = "<value type=string>#{val}</value>"
[fld, quote(description, "string")]
end
clause = operands.join(" #{normalize_operator(operator)} ")
when "find"
# FIND Vm.users-name = 'Administrator' CHECKALL Vm.users-enabled = 1
check = nil
check = "checkall" if op_args.include?("checkall")
check = "checkany" if op_args.include?("checkany")
if op_args.include?("checkcount")
check = "checkcount"
op = op_args[check].keys.first
op_args[check][op]["field"] = "<count>"
end
raise _("expression malformed, must contain one of 'checkall', 'checkany', 'checkcount'") unless check
check =~ /^check(.*)$/
mode = $1.downcase
clause = "<find><search>" + _to_ruby(op_args["search"], context_type, tz) + "</search>" \
"<check mode=#{mode}>" + _to_ruby(op_args[check], context_type, tz) + "</check></find>"
when "key exists"
clause, = operands2rubyvalue(operator, op_args, context_type)
when "value exists"
clause, = operands2rubyvalue(operator, op_args, context_type)
when "is"
col_ruby, _value = operands2rubyvalue(operator, {"field" => col_name}, context_type)
col_type = parse_field_or_tag(col_name)&.column_type
value = op_args["value"]
clause = if col_type == :date && !RelativeDatetime.relative?(value)
ruby_for_date_compare(col_ruby, col_type, tz, "==", value)
else
ruby_for_date_compare(col_ruby, col_type, tz, ">=", value, "<=", value)
end
when "from"
col_ruby, _value = operands2rubyvalue(operator, {"field" => col_name}, context_type)
col_type = parse_field_or_tag(col_name)&.column_type
start_val, end_val = op_args["value"]
clause = ruby_for_date_compare(col_ruby, col_type, tz, ">=", start_val, "<=", end_val)
else
raise _("operator '%{operator_name}' is not supported") % {:operator_name => operator.upcase}
end
# puts "clause: #{clause}"
clause
end
def to_sql(tz = nil)
tz ||= "UTC"
pexp, attrs = preprocess_for_sql(exp.deep_clone)
sql = to_arel(pexp, tz).to_sql if pexp.present?
incl = includes_for_sql unless sql.blank?
[sql, incl, attrs]
end
def preprocess_for_sql(exp, attrs = nil)
exp.delete(:token)
attrs ||= {:supported_by_sql => true}
operator = exp.keys.first
case operator.downcase
when "and"
exp[operator].dup.each { |atom| preprocess_for_sql(atom, attrs) }
exp[operator].reject!(&:blank?)
exp.delete(operator) if exp[operator].empty?
when "or"
or_attrs = {:supported_by_sql => true}
exp[operator].each { |atom| preprocess_for_sql(atom, or_attrs) }
exp[operator].reject!(&:blank?)
attrs.merge!(or_attrs)
exp.delete(operator) if !or_attrs[:supported_by_sql] || exp[operator].empty? # Clean out unsupported or empty operands
when "not", "!"
preprocess_for_sql(exp[operator], attrs)
exp.delete(operator) if exp[operator].empty? # Clean out empty operands
else
if sql_supports_atom?(exp)
# if the field type is Integer and the value is a String representing a size in units (like "2.megabytes"), then convert
# this string to the correct number using the sub_type mapping defined in db/fixtures/miq_report_formats.yml:sub_types_by_column:
convert_size_in_units_to_integer(exp) if %w[= != <= >= > <].include?(operator)
else
attrs[:supported_by_sql] = false
exp.delete(operator)
end
end
exp.empty? ? [nil, attrs] : [exp, attrs]
end
def sql_supports_atom?(exp)
operator = exp.keys.first
case operator.downcase
when "contains"
if exp[operator].key?("tag")
Tag.parse(exp[operator]["tag"]).reflection_supported_by_sql?
elsif exp[operator].key?("field")
Field.parse(exp[operator]["field"]).attribute_supported_by_sql?
else
return false
end
when "includes"
# Support includes operator using "LIKE" only if first operand is in main table
if exp[operator].key?("field") && (!exp[operator]["field"].include?(".") || (exp[operator]["field"].include?(".") && exp[operator]["field"].split(".").length == 2))
return field_in_sql?(exp[operator]["field"])
else
# TODO: Support includes operator for sub-sub-tables
return false
end
when "includes any", "includes all", "includes only"
# Support this only from the main model (for now)
if exp[operator].keys.include?("field") && exp[operator]["field"].split(".").length == 1
model, field = exp[operator]["field"].split("-")
method = "miq_expression_#{operator.downcase.tr(' ', '_')}_#{field}_arel"
return model.constantize.respond_to?(method)
else
return false
end
when "find", "regular expression matches", "regular expression does not match", "key exists", "value exists"
return false
else
# => false if operand is a tag
return false if exp[operator].keys.include?("tag")
# => false if operand is a registry
return false if exp[operator].keys.include?("regkey")
# => TODO: support count of child relationship
return false if exp[operator].key?("count")
return field_in_sql?(exp[operator]["field"]) && value_in_sql?(exp[operator]["value"])
end
end
def value_in_sql?(value)
!Field.is_field?(value) || Field.parse(value).attribute_supported_by_sql?
end
def field_in_sql?(field)
return false unless attribute_supported_by_sql?(field)
# => false if excluded by special case defined in preprocess options
return false if field_excluded_by_preprocess_options?(field)
true
end
def attribute_supported_by_sql?(field)
return false unless col_details[field]
col_details[field][:sql_support]
end
# private attribute_supported_by_sql? -- tests only
def field_excluded_by_preprocess_options?(field)
col_details[field][:excluded_by_preprocess_options]
end
private :field_excluded_by_preprocess_options?
def col_details
@col_details ||= self.class.get_cols_from_expression(exp, preprocess_options)
end
private :col_details
def includes_for_sql
col_details.values.each_with_object({}) { |v, result| result.deep_merge!(v[:include]) }
end
def self.get_cols_from_expression(exp, options = {})
result = {}
if exp.kind_of?(Hash)
if exp.key?("field")
result[exp["field"]] = get_col_info(exp["field"], options) unless exp["field"] == "<count>"
elsif exp.key?("count")
result[exp["count"]] = get_col_info(exp["count"], options)
elsif exp.key?("tag")
# ignore
else
exp.each_value { |v| result.merge!(get_cols_from_expression(v, options)) }
end
elsif exp.kind_of?(Array)
exp.each { |v| result.merge!(get_cols_from_expression(v, options)) }
end
result
end
def self.get_col_info(field, options = {})
f = parse_field_or_tag(field) or raise ArgumentError
{
:include => f.includes,
:data_type => f.column_type,
:format_sub_type => f.sub_type,
:sql_support => f.attribute_supported_by_sql?,
:excluded_by_preprocess_options => f.exclude_col_by_preprocess_options?(options),
:tag => f.kind_of?(MiqExpression::Tag),
}
rescue ArgumentError
# not thrilled with these values, but making tests pass for now
{
:data_type => nil,
:sql_support => false,
:excluded_by_preprocess_options => false,
:tag => false,
:include => {}
}
end
def lenient_evaluate(obj, tz = nil)
ruby_exp = to_ruby(tz)
ruby_exp.nil? || Condition.subst_matches?(ruby_exp, obj)
end
def evaluate(obj, tz = nil)
ruby_exp = to_ruby(tz)
_log.debug("Expression before substitution: #{ruby_exp}")
subst_expr = Condition.subst(ruby_exp, obj)
_log.debug("Expression after substitution: #{subst_expr}")
result = Condition.do_eval(subst_expr)
_log.debug("Expression evaluation result: [#{result}]")
result
end
def self.evaluate_atoms(exp, obj)
exp = exp.kind_of?(self) ? copy_hash(exp.exp) : exp
exp["result"] = new(exp).evaluate(obj)
operators = exp.keys
operators.each do |k|
if %w(and or).include?(k.to_s.downcase) # and/or atom is an array of atoms
exp[k].each do |atom|
evaluate_atoms(atom, obj)
end
elsif %w(not !).include?(k.to_s.downcase) # not atom is a hash expression
evaluate_atoms(exp[k], obj)
else
next
end
end
exp
end
def self.operands2humanvalue(ops, options = {})
# puts "Enter: operands2humanvalue: ops: #{ops.inspect}"
ret = []
if ops["tag"]
v = nil
ret.push(ops["alias"] || value2human(ops["tag"], options))
MiqExpression.get_entry_details(ops["tag"]).each do |t|
v = "'" + t.first + "'" if t.last == ops["value"]
end
if ops["value"] == :user_input
v = "<user input>"
else
v ||= ops["value"].kind_of?(String) ? "'" + ops["value"] + "'" : ops["value"]
end
ret.push(v)
elsif ops["field"]
ops["value"] ||= ''
if ops["field"] == "<count>"
ret.push(nil)
ret.push(ops["value"])
else
ret.push(ops["alias"] || value2human(ops["field"], options))
if ops["value"] == :user_input
ret.push("<user input>")
else
col_type = parse_field_or_tag(ops["field"])&.column_type || "string"
ret.push(quote_human(ops["value"], col_type.to_s))
end
end
elsif ops["count"]
ret.push("COUNT OF " + (ops["alias"] || value2human(ops["count"], options)).strip)
if ops["value"] == :user_input
ret.push("<user input>")
else
ret.push(ops["value"])
end
elsif ops["regkey"]
ops["value"] ||= ''
ret.push(ops["regkey"] + " : " + ops["regval"])
ret.push(ops["value"].kind_of?(String) ? "'" + ops["value"] + "'" : ops["value"])
elsif ops["value"]
ret.push(nil)
ret.push(ops["value"])
end
ret
end
def self.value2human(val, options = {})
options = {
:include_model => true,
:include_table => true
}.merge(options)
tables, col = val.split("-")
first = true
val_is_a_tag = false
ret = ""
if options[:include_table] == true
friendly = tables.split(".").collect do |t|
if t.downcase == "managed"
val_is_a_tag = true
"#{Tenant.root_tenant.name} Tags"
elsif t.downcase == "user_tag"
"My Tags"
elsif first
first = nil
next unless options[:include_model] == true
Dictionary.gettext(t, :type => :model, :notfound => :titleize)
else
Dictionary.gettext(t, :type => :table, :notfound => :titleize)
end
end.compact
ret = friendly.join(".")
ret << " : " unless ret.blank? || col.blank?
end
if val_is_a_tag
if col
classification = options[:classification] || Classification.lookup_by_name(col)
ret << (classification ? classification.description : col)
end
else
model = tables.blank? ? nil : tables.split(".").last.singularize.camelize
dict_col = model.nil? ? col : [model, col].join(".")
column_human = if col
if col.starts_with?(CustomAttributeMixin::CUSTOM_ATTRIBUTES_PREFIX)
CustomAttributeMixin.to_human(col)
else
Dictionary.gettext(dict_col, :type => :column, :notfound => :titleize)
end
end
ret << column_human if col
end
ret = " #{ret}" unless ret.include?(":")
ret
end
def self.quote_by(operator, value, column_type = nil)
if UNQUOTABLE_OPERATORS.map(&:downcase).include?(operator)
value
else
quote(value, column_type.to_s)
end
end
def self.operands2rubyvalue(operator, ops, context_type)
if ops["field"]
if ops["field"] == "<count>"
["<count>", quote(ops["value"], "integer")]
else
target = parse_field_or_tag(ops["field"])
col_type = target&.column_type || "string"
[if context_type == "hash"
"<value type=#{col_type}>#{ops["field"].split(".").last.split("-").join(".")}</value>"
else
"<value ref=#{target.model.to_s.downcase}, type=#{col_type}>#{target.tag_path_with}</value>"
end, quote_by(operator, ops["value"], col_type)]
end
elsif ops["count"]
target = parse_field_or_tag(ops["count"])
["<count ref=#{target.model.to_s.downcase}>#{target.tag_path_with}</count>", quote(ops["value"], target.column_type)]
elsif ops["regkey"]
if operator == "key exists"
["<registry key_exists=1, type=boolean>#{ops["regkey"].strip}</registry> == 'true'", nil]
elsif operator == "value exists"
["<registry value_exists=1, type=boolean>#{ops["regkey"].strip} : #{ops["regval"]}</registry> == 'true'", nil]
else
["<registry>#{ops["regkey"].strip} : #{ops["regval"]}</registry>", quote_by(operator, ops["value"], "string")]
end
end
end
def self.quote(val, typ)
if Field.is_field?(val)
target = parse_field_or_tag(val)
value = target.tag_path_with
col_type = target&.column_type || "string"
reference_attribute = target ? "ref=#{target.model.to_s.downcase}, " : " "
return "<value #{reference_attribute}type=#{col_type}>#{value}</value>"
end
case typ.to_s
when "string", "text", "boolean", nil
# escape any embedded single quotes, etc. - needs to be able to handle even values with trailing backslash
val.to_s.inspect
when "date"
return "nil" if val.blank? # treat nil value as empty string
"\'#{val}\'.to_date"
when "datetime"
return "nil" if val.blank? # treat nil value as empty string
"\'#{val.iso8601}\'.to_time(:utc)"
when "integer", "decimal", "fixnum"
val.to_s.to_i_with_method
when "float"
val.to_s.to_f_with_method
when "numeric_set"
val = val.split(",") if val.kind_of?(String)
v_arr = Array.wrap(val).flat_map { |v| quote_numeric_set_atom(v) }.compact.uniq.sort
"[#{v_arr.join(",")}]"
when "string_set"
val = val.split(",") if val.kind_of?(String)
v_arr = Array.wrap(val).flat_map { |v| "'#{v.to_s.strip}'" }.uniq.sort
"[#{v_arr.join(",")}]"
else
val
end
end
private_class_method def self.quote_numeric_set_atom(val)
val = val.to_s unless val.kind_of?(Numeric) || val.kind_of?(Range)
if val.kind_of?(String)
val = val.strip
val =
if val.include?("..") # Parse Ranges
b, e = val.split("..", 2).map do |i|
if integer?(i)
i.to_i_with_method
elsif numeric?(i)
i.to_f_with_method
end
end
Range.new(b, e) if b && e
elsif integer?(val) # Parse Integers
val.to_i_with_method
elsif numeric?(val) # Parse Floats
val.to_f_with_method
end
end
val.kind_of?(Range) ? val.to_a : val
end
def self.quote_human(val, typ)
case typ.to_s
when "integer", "decimal", "fixnum", "float"
return val.to_i unless val.to_s.number_with_method? || typ.to_s == "float"
if val =~ /^([0-9\.,]+)\.([a-z]+)$/
val, sfx = $1, $2
if sfx.ends_with?("bytes") && FORMAT_BYTE_SUFFIXES.key?(sfx.to_sym)
"#{val} #{FORMAT_BYTE_SUFFIXES[sfx.to_sym]}"
else
"#{val} #{sfx.titleize}"
end
else
val
end
when "string", "date", "datetime"
"\"#{val}\""
else
quote(val, typ)
end
end
# TODO: update this to use the more nuanced
# .sanitize_regular_expression after performing Regexp.escape. The
# extra substitution is required because, although the result from
# Regexp.escape is fine to pass to Regexp.new, it is not when eval'd
# as we do:
#
# ```ruby
# regexp_string = Regexp.escape("/") # => "/"
# # ...
# eval("/" + regexp_string + "/")
# ```
def self.re_escape(s)
Regexp.escape(s).gsub(/\//, '\/')
end
# Escape any unescaped forward slashes and/or interpolation
def self.sanitize_regular_expression(string)
string.gsub(%r{\\*/}, "\\/").gsub(/\\*#/, "\\\#")
end
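# e.g. (a sketch): sanitize_regular_expression("a/b#c") produces a\/b\#c,
# i.e. the forward slash and the "#" each gain a leading backslash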
def self.escape_virtual_custom_attribute(attribute)
if attribute.include?(CustomAttributeMixin::CUSTOM_ATTRIBUTES_PREFIX)
uri_parser = URI::RFC2396_Parser.new
[uri_parser.escape(attribute, /[^A-Za-z0-9:\-_]/), true]
else
[attribute, false]
end
end
def self.normalize_ruby_operator(str)
case str
when "equal", "="
"=="
when "not"
"!"
when "like", "not like", "starts with", "ends with", "includes", "regular expression matches"
"=~"
when "regular expression does not match"
"!~"
when "is null", "is empty"
"=="
when "is not null", "is not empty"
"!="
when "before"
"<"
when "after"
">"
else
str
end
end
def self.normalize_operator(str)
str = str.upcase
case str
when "EQUAL"
"="
when "!"
"NOT"
when "EXIST"
"CONTAINS"
else
str
end
end
def self.base_tables
BASE_TABLES
end
def self.model_details(model, opts = {:typ => "all", :include_model => true, :include_tags => false, :include_my_tags => false, :include_id_columns => false})
@classifications = nil
model = model.to_s
opts = {:typ => "all", :include_model => true}.merge(opts)
if opts[:typ] == "tag"
tags_for_model = if TAG_CLASSES.include?(model)
tag_details(model, opts)
else
[]
end
result = []
TAG_CLASSES.invert.each do |name, tc|
next if tc.constantize.base_class == model.constantize.base_class
path = [model, name].join(".")
result.concat(tag_details(path, opts))
end
@classifications = nil
return tags_for_model.concat(result.sort! { |a, b| a.to_s <=> b.to_s })
end
relats = get_relats(model)
result = []
unless opts[:typ] == "count" || opts[:typ] == "find"
@column_cache ||= {}
key = "#{model}_#{opts[:interval]}_#{opts[:include_model] || false}"
@column_cache[key] = nil if model == "ChargebackVm"
@column_cache[key] ||= get_column_details(relats[:columns], model, model, opts).sort! { |a, b| a.to_s <=> b.to_s }
result.concat(@column_cache[key])
unless opts[:disallow_loading_virtual_custom_attributes]
custom_details = _custom_details_for(model, opts)
result.concat(custom_details.sort_by(&:to_s)) unless custom_details.empty?
end
result.concat(tag_details(model, opts)) if opts[:include_tags] == true && TAG_CLASSES.include?(model)
end
model_details = _model_details(relats, opts)
model_details.sort_by!(&:to_s)
result.concat(model_details)
@classifications = nil
result
end
def self._custom_details_for(model, options)
klass = model.safe_constantize
return [] unless klass < CustomAttributeMixin
custom_attributes_details = []
klass.custom_keys.each do |custom_key|
custom_detail_column = [options[:model_for_column] || model, CustomAttributeMixin.column_name(custom_key)].join("-")
custom_detail_name = CustomAttributeMixin.to_human(custom_key)
if options[:include_model]
model_name = Dictionary.gettext(model, :type => :model, :notfound => :titleize)
custom_detail_name = [model_name, custom_detail_name].join(" : ")
end
custom_attributes_details.push([custom_detail_name, custom_detail_column])
end
custom_attributes_details
end
def self._model_details(relats, opts)
result = []
relats[:reflections].each do |_assoc, ref|
parent = ref[:parent]
case opts[:typ]
when "count"
result.push(get_table_details(parent[:class_path], parent[:assoc_path])) if parent[:multivalue]
when "find"
result.concat(get_column_details(ref[:columns], parent[:class_path], parent[:assoc_path], opts)) if parent[:multivalue]
else
result.concat(get_column_details(ref[:columns], parent[:class_path], parent[:assoc_path], opts))
if opts[:include_tags] == true && TAG_CLASSES.include?(parent[:assoc_class])
result.concat(tag_details(parent[:class_path], opts))
end
end
result.concat(_model_details(ref, opts))
end
result
end
def self.tag_details(path, opts)
result = []
if opts[:no_cache]
@classifications = nil
end
@classifications ||= categories
@classifications.each do |name, cat|
prefix = path.nil? ? "managed" : [path, "managed"].join(".")
field = [prefix, name].join("-")
result.push([value2human(field, opts.merge(:classification => cat)), field])
end
if opts[:include_my_tags] && opts[:userid] && ::Tag.exists?(["name like ?", "/user/#{opts[:userid]}/%"])
prefix = path.nil? ? "user_tag" : [path, "user_tag"].join(".")
field = [prefix, opts[:userid]].join("_")
result.push([value2human(field, opts), field])
end
result.sort! { |a, b| a.to_s <=> b.to_s }
end
def self.get_relats(model)
@model_relats ||= {}
@model_relats[model] = nil if model == "ChargebackVm"
@model_relats[model] ||= build_relats(model)
end
def self.miq_adv_search_lists(model, what, extra_options = {})
@miq_adv_search_lists ||= {}
@miq_adv_search_lists[model.to_s] ||= {}
options = {:include_model => true}.merge(extra_options)
case what.to_sym
when :exp_available_fields then
@miq_adv_search_lists[model.to_s][:exp_available_fields] ||= MiqExpression.model_details(model, options.merge(:typ => "field", :disallow_loading_virtual_custom_attributes => false))
when :exp_available_counts then @miq_adv_search_lists[model.to_s][:exp_available_counts] ||= MiqExpression.model_details(model, options.merge(:typ => "count"))
when :exp_available_finds then @miq_adv_search_lists[model.to_s][:exp_available_finds] ||= MiqExpression.model_details(model, options.merge(:typ => "find"))
end
end
def self.reporting_available_fields(model, interval = nil)
if model.to_s == "VimPerformanceTrend"
VimPerformanceTrend.trend_model_details(interval.to_s)
elsif model.ends_with?("Performance")
model_details(model, :include_model => false, :include_tags => true, :interval => interval)
elsif Chargeback.db_is_chargeback?(model)
cb_model = Chargeback.report_cb_model(model)
model.constantize.try(:refresh_dynamic_metric_columns)
md = model_details(model, :include_model => false, :include_tags => true).select do |c|
allowed_suffixes = Chargeback::ALLOWED_FIELD_SUFFIXES
allowed_suffixes += Metering::ALLOWED_FIELD_SUFFIXES if model.starts_with?('Metering')
c.last.ends_with?(*allowed_suffixes)
end
td = if TAG_CLASSES.include?(cb_model)
tag_details(model, {})
else
[]
end
md + td + _custom_details_for(cb_model, :model_for_column => model)
else
model_details(model, :include_model => false, :include_tags => true)
end
end
def self.build_relats(model, parent = {}, seen = [])
_log.info("Building relationship tree for: [#{parent[:path]} => #{model}]...")
model = model_class(model)
parent[:class_path] ||= model.name
parent[:assoc_path] ||= model.name
parent[:root] ||= model.name
result = {:columns => model.visible_attribute_names, :parent => parent}
result[:reflections] = {}
model.reflections_with_virtual.each do |assoc, ref|
next unless INCLUDE_TABLES.include?(assoc.to_s.pluralize)
next if assoc.to_s.pluralize == "event_logs" && parent[:root] == "Host" && !proto?
next if assoc.to_s.pluralize == "processes" && parent[:root] == "Host" # Process data not available yet for Host
next if ref.macro == :belongs_to && model.name != parent[:root]
# REMOVE ME: workaround to temporarily exclude certain models from the relationships
next if EXCLUDE_FROM_RELATS[model.name]&.include?(assoc.to_s)
assoc_class = ref.klass.name
new_parent = {
:macro => ref.macro,
:class_path => [parent[:class_path], determine_relat_path(ref)].join("."),
:assoc_path => [parent[:assoc_path], assoc.to_s].join("."),
:assoc => assoc,
:assoc_class => assoc_class,
:root => parent[:root]
}
new_parent[:direction] = new_parent[:macro] == :belongs_to ? :up : :down
new_parent[:multivalue] = [:has_many, :has_and_belongs_to_many].include?(new_parent[:macro])
seen_key = [model.name, assoc].join("_")
next if seen.include?(seen_key) ||
assoc_class == parent[:root] ||
parent[:assoc_path].include?(assoc.to_s) ||
parent[:assoc_path].include?(assoc.to_s.singularize) ||
parent[:direction] == :up ||
parent[:multivalue]
seen.push(seen_key)
result[:reflections][assoc] = build_relats(assoc_class, new_parent, seen)
end
result
end
def self.get_table_details(class_path, assoc_path)
[value2human(class_path), assoc_path]
end
def self.get_column_details(column_names, class_path, assoc_path, opts)
include_model = opts[:include_model]
base_model = class_path.split(".").first
excludes = EXCLUDE_COLUMNS
excludes += EXCLUDE_ID_COLUMNS unless opts[:include_id_columns]
# special case for C&U ad-hoc reporting
if opts[:interval] && opts[:interval] != "daily" && base_model.ends_with?("Performance") && !class_path.include?(".")
excludes += ["^min_.*$", "^max_.*$", "^.*derived_storage_.*$", "created_on"]
elsif opts[:interval] && base_model.ends_with?("Performance") && !class_path.include?(".")
excludes += ["created_on"]
end
excludes += ["logical_cpus"] if class_path == "Vm.hardware"
case base_model
when "VmPerformance"
excludes += ["^.*derived_host_count_off$", "^.*derived_host_count_on$", "^.*derived_vm_count_off$", "^.*derived_vm_count_on$", "^.*derived_storage.*$"]
when "HostPerformance"
excludes += ["^.*derived_host_count_off$", "^.*derived_host_count_on$", "^.*derived_storage.*$", "^abs_.*$"]
when "EmsClusterPerformance"
excludes += ["^.*derived_storage.*$", "sys_uptime_absolute_latest", "^abs_.*$"]
when "StoragePerformance"
includes = ["^.*derived_storage.*$", "^timestamp$", "v_date", "v_time", "resource_name"]
column_names = column_names.collect do |c|
next(c) if includes.include?(c)
c if includes.detect { |incl| c.match(incl) }
end.compact
when base_model.starts_with?("Container")
excludes += ["^.*derived_host_count_off$", "^.*derived_host_count_on$", "^.*derived_vm_count_off$", "^.*derived_vm_count_on$", "^.*derived_storage.*$"]
end
column_names.collect do |c|
# check for direct match first
next if excludes.include?(c) && !EXCLUDE_EXCEPTIONS.include?(c)
# check for regexp match if no direct match
col = c
unless EXCLUDE_EXCEPTIONS.include?(c)
excludes.each do |excl|
if c.match(excl)
col = nil
break
end
end
end
next unless col
field_class_path = "#{class_path}-#{col}"
field_assoc_path = "#{assoc_path}-#{col}"
[value2human(field_class_path, :include_model => include_model), field_assoc_path]
end.compact
end
def self.get_col_operators(field)
col_type =
if field == :count || field == :regkey
field
else
parse_field_or_tag(field.to_s)&.column_type || :string
end
case col_type.to_s.downcase.to_sym
when :string
return STRING_OPERATORS
when :integer, :float, :fixnum, :count
return NUM_OPERATORS
when :numeric_set, :string_set
return SET_OPERATORS
when :regkey
return STRING_OPERATORS + REGKEY_OPERATORS
when :boolean
return BOOLEAN_OPERATORS
when :date, :datetime
return DATE_TIME_OPERATORS
else
return STRING_OPERATORS
end
end
STYLE_OPERATORS_EXCLUDES = config[:style_operators_excludes]
def self.get_col_style_operators(field)
get_col_operators(field) - STYLE_OPERATORS_EXCLUDES
end
def self.get_entry_details(field)
ns = field.split("-").first.split(".").last
if ns == "managed"
cat = field.split("-").last
catobj = Classification.lookup_by_name(cat)
return catobj ? catobj.entries.collect { |e| [e.description, e.name] } : []
elsif ns == "user_tag" || ns == "user"
cat = field.split("-").last
return ::Tag.where("name like ?", "/user/#{cat}%").select(:name).collect do |t|
tag_name = t.name.split("/").last
[tag_name, tag_name]
end
else
return field
end
end
def self.atom_error(field, operator, value)
return false if operator == "DEFAULT" # No validation needed for style DEFAULT operator
value = value.to_s unless value.kind_of?(Array)
dt = case operator.to_s.downcase
when "regular expression matches", "regular expression does not match" # TODO
:regexp
else
if field == :count
:integer
else
col_info = get_col_info(field)
[:bytes, :megabytes].include?(col_info[:format_sub_type]) ? :integer : col_info[:data_type]
end
end
case dt
when :string, :text
return false
when :integer, :fixnum, :decimal, :float
return false if send((dt == :float ? :numeric? : :integer?), value)
dt_human = dt == :float ? "Number" : "Integer"
return _("%{value_name} value must not be blank") % {:value_name => dt_human} if value.delete(',').blank?
if value.include?(".") && (value.split(".").last =~ /([a-z]+)/i)
sfx = $1
sfx = sfx.ends_with?("bytes") && FORMAT_BYTE_SUFFIXES.key?(sfx.to_sym) ? FORMAT_BYTE_SUFFIXES[sfx.to_sym] : sfx.titleize
value = "#{value.split(".")[0..-2].join(".")} #{sfx}"
end
return _("Value '%{value}' is not a valid %{value_name}") % {:value => value, :value_name => dt_human}
when :date, :datetime
return false if operator.downcase.include?("empty")
values = value.kind_of?(String) ? value.lines : Array.wrap(value)
return _("No Date/Time value specified") if values.empty? || values.include?(nil)
return _("Two Date/Time values must be specified") if operator.downcase == "from" && values.length < 2
values_converted = values.collect do |v|
return _("Date/Time value must not be blank") if value.blank?
v_cvt = begin
RelativeDatetime.normalize(v, "UTC")
rescue
nil
end
return _("Value '%{value}' is not valid") % {:value => v} if v_cvt.nil?
v_cvt
end
if values_converted.length > 1 && values_converted[0] > values_converted[1]
return _("Invalid Date/Time range, %{first_value} comes before %{second_value}") % {:first_value => values[1],
:second_value => values[0]}
end
return false
when :boolean
unless operator.downcase.include?("null") || %w(true false).include?(value)
return _("Value must be true or false")
end
return false
when :regexp
begin
Regexp.new(value).match("foo")
rescue => err
return _("Regular expression '%{value}' is invalid, '%{error_message}'") % {:value => value,
:error_message => err.message}
end
return false
else
return false
end
end
def self.categories
classifications = Classification.in_my_region.hash_all_by_type_and_name(:show => true)
categories_with_entries = classifications.reject { |_k, v| !v.key?(:entry) }
categories_with_entries.each_with_object({}) do |(name, hash), categories|
categories[name] = hash[:category]
end
end
def self.model_class(model)
# TODO: the temporary cache should be removed after widget refactoring
@model_class ||= Hash.new do |h, m|
h[m] = if m.kind_of?(Class)
m
else
begin
m.to_s.singularize.camelize.constantize
rescue
nil
end
end
end
@model_class[model]
end
def self.integer?(n)
n = n.to_s
n2 = n.delete(',') # strip out commas
begin
Integer(n2)
return true
rescue
return false unless n.number_with_method?
begin
n2 = n.to_f_with_method
return (n2.to_i == n2)
rescue
return false
end
end
end
def self.numeric?(n)
n = n.to_s
n2 = n.delete(',') # strip out commas
begin
Float(n2)
return true
rescue
return false unless n.number_with_method?
begin
n.to_f_with_method
return true
rescue
return false
end
end
end
# Is an MiqExpression or an expression hash a quick_search
def self.quick_search?(exp)
return exp.quick_search? if exp.kind_of?(self)
_quick_search?(exp)
end
def quick_search?
self.class._quick_search?(exp) # Pass the exp hash
end
# Is an expression hash a quick search?
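# e.g. (a sketch): any atom whose "value" is :user_input counts, so
#   _quick_search?("=" => {"field" => "Vm-name", "value" => :user_input}) # => true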
def self._quick_search?(e)
case e
when Array
e.any? { |e_exp| _quick_search?(e_exp) }
when Hash
return true if e["value"] == :user_input
e.values.any? { |e_exp| _quick_search?(e_exp) }
else
false
end
end
def self.create_field(model, associations, field_name)
model = model_class(model)
Field.new(model, associations, field_name)
end
def self.parse_field_or_tag(str)
# managed.location, Model.x.y.managed-location
MiqExpression::Field.parse(str) || MiqExpression::CountField.parse(str) || MiqExpression::Tag.parse(str)
end
def fields(expression = exp)
case expression
when Array
expression.flat_map { |x| fields(x) }
when Hash
return [] if expression.empty?
if (val = expression["field"] || expression["count"] || expression["tag"])
ret = []
tg = self.class.parse_field_or_tag(val)
ret << tg if tg
tg = self.class.parse_field_or_tag(expression["value"].to_s)
ret << tg if tg
ret
else
fields(expression.values)
end
end
end
private
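# A sketch of the conversion below (illustrative column): for a column whose
# :format_sub_type is :megabytes, an expression value of "2.megabytes" is
# rewritten to the integer 2 before the expression is handed off to SQL.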
def convert_size_in_units_to_integer(exp)
return if (column_details = col_details[exp.values.first["field"]]).nil?
# attempt to do conversion only if db type of column is integer and value to compare to is String
return unless column_details[:data_type] == :integer && (value = exp.values.first["value"]).class == String
sub_type = column_details[:format_sub_type]
return if %i[mhz_avg hours kbps kbps_precision_2 mhz elapsed_time].include?(sub_type)
case sub_type
when :bytes
exp.values.first["value"] = value.to_i_with_method
when :kilobytes
exp.values.first["value"] = value.to_i_with_method / 1_024
when :megabytes, :megabytes_precision_2
exp.values.first["value"] = value.to_i_with_method / 1_048_576
else
_log.warn("No subtype defined for column #{exp.values.first["field"]} in 'miq_report_formats.yml'")
end
end
# example:
# ruby_for_date_compare(:updated_at, :date, tz, "==", Time.now)
# # => "val=update_at; !val.nil? && val.to_date == '2016-10-05'"
#
# ruby_for_date_compare(:updated_at, :time, tz, ">", Time.yesterday, "<", Time.now)
# # => "val=update_at; !val.nil? && val.utc > '2016-10-04T13:08:00-04:00' && val.utc < '2016-10-05T13:08:00-04:00'"
def self.ruby_for_date_compare(col_ruby, col_type, tz, op1, val1, op2 = nil, val2 = nil)
val_with_cast = "val.#{col_type == :date ? "to_date" : "to_time"}"
val1 = RelativeDatetime.normalize(val1, tz, "beginning", col_type == :date) if val1
val2 = RelativeDatetime.normalize(val2, tz, "end", col_type == :date) if val2
[
"val=#{col_ruby}; !val.nil?",
op1 ? "#{val_with_cast} #{op1} #{quote(val1, col_type)}" : nil,
op2 ? "#{val_with_cast} #{op2} #{quote(val2, col_type)}" : nil,
].compact.join(" && ")
end
private_class_method :ruby_for_date_compare
def to_arel(exp, tz)
operator = exp.keys.first
field = Field.parse(exp[operator]["field"]) if exp[operator].kind_of?(Hash) && exp[operator]["field"]
arel_attribute = field&.arel_attribute
if exp[operator].kind_of?(Hash) && exp[operator]["value"] && Field.is_field?(exp[operator]["value"])
field_value = Field.parse(exp[operator]["value"])
parsed_value = field_value.arel_attribute
elsif exp[operator].kind_of?(Hash)
parsed_value = exp[operator]["value"]
end
case operator.downcase
when "equal", "="
arel_attribute.eq(parsed_value)
when ">"
arel_attribute.gt(parsed_value)
when "after"
value = RelativeDatetime.normalize(parsed_value, tz, "end", field.date?)
arel_attribute.gt(value)
when ">="
arel_attribute.gteq(parsed_value)
when "<"
arel_attribute.lt(parsed_value)
when "before"
value = RelativeDatetime.normalize(parsed_value, tz, "beginning", field.date?)
arel_attribute.lt(value)
when "<="
arel_attribute.lteq(parsed_value)
when "!="
arel_attribute.not_eq(parsed_value)
when "like", "includes"
escape = nil
case_sensitive = true
arel_attribute.matches("%#{parsed_value}%", escape, case_sensitive)
when "includes all", "includes any", "includes only"
method = "miq_expression_"
method << "#{operator.downcase.tr(' ', '_')}_"
method << "#{field.column}_arel"
field.model.send(method, parsed_value)
when "starts with"
escape = nil
case_sensitive = true
arel_attribute.matches("#{parsed_value}%", escape, case_sensitive)
when "ends with"
escape = nil
case_sensitive = true
arel_attribute.matches("%#{parsed_value}", escape, case_sensitive)
when "not like"
escape = nil
case_sensitive = true
arel_attribute.does_not_match("%#{parsed_value}%", escape, case_sensitive)
when "and"
operands = exp[operator].each_with_object([]) do |operand, result|
next if operand.blank?
arel = to_arel(operand, tz)
next if arel.blank?
result << arel
end
Arel::Nodes::Grouping.new(Arel::Nodes::And.new(operands))
when "or"
operands = exp[operator].each_with_object([]) do |operand, result|
next if operand.blank?
arel = to_arel(operand, tz)
next if arel.blank?
result << arel
end
first, *rest = operands
rest.inject(first) { |lhs, rhs| lhs.or(rhs) }
when "not", "!"
Arel::Nodes::Not.new(to_arel(exp[operator], tz))
when "is null"
arel_attribute.eq(nil)
when "is not null"
arel_attribute.not_eq(nil)
when "is empty"
arel = arel_attribute.eq(nil)
arel = arel.or(arel_attribute.eq("")) if field.string?
arel
when "is not empty"
arel = arel_attribute.not_eq(nil)
arel = arel.and(arel_attribute.not_eq("")) if field.string?
arel
when "contains"
# Only supported for tags of the main model
if exp[operator].key?("tag")
tag = Tag.parse(exp[operator]["tag"])
ids = tag.target.find_tagged_with(:any => parsed_value, :ns => tag.namespace).pluck(:id)
subquery_for_contains(tag, tag.arel_attribute.in(ids))
else
subquery_for_contains(field, arel_attribute.eq(parsed_value))
end
when "is"
value = parsed_value
start_val = RelativeDatetime.normalize(value, tz, "beginning", field.date?)
end_val = RelativeDatetime.normalize(value, tz, "end", field.date?)
if !field.date? || RelativeDatetime.relative?(value)
arel_attribute.between(start_val..end_val)
else
arel_attribute.eq(start_val)
end
when "from"
start_val, end_val = parsed_value
start_val = RelativeDatetime.normalize(start_val, tz, "beginning", field.date?)
end_val = RelativeDatetime.normalize(end_val, tz, "end", field.date?)
arel_attribute.between(start_val..end_val)
else
raise _("operator '%{operator_name}' is not supported") % {:operator_name => operator}
end
end
def subquery_for_contains(field, limiter_query)
return limiter_query if field.reflections.empty?
# Remove the default scopes via `base_class`. The scope is already in the main query and not needed in the subquery
main_model = field.model.base_class
primary_attribute = main_model.arel_table[main_model.primary_key]
includes_associations = field.reflections.reverse.inject({}) { |i, k| {k.name => i} }
relation_query = main_model.select(primary_attribute)
.joins(includes_associations)
.where(limiter_query)
conn = main_model.connection
sql = conn.unprepared_statement { conn.to_sql(relation_query.arel) }
Arel::Nodes::In.new(primary_attribute, Arel::Nodes::SqlLiteral.new(sql))
end
def self.determine_relat_path(ref)
last_path = ref.name.to_s
class_from_association_name = model_class(last_path)
return last_path unless class_from_association_name
association_class = ref.klass
if association_class < class_from_association_name
last_path = ref.collection? ? association_class.model_name.plural : association_class.model_name.singular
end
last_path
end
private_class_method :determine_relat_path
def each_atom(component, &block)
operator = component.keys.first
case operator.downcase
when "and", "or"
component[operator].each { |sub_component| each_atom(sub_component, &block) }
when "not", "!"
each_atom(component[operator], &block)
when "find"
component[operator].each { |_operator, operands| each_atom(operands, &block) }
else
yield(component[operator])
end
end
end # class MiqExpression
| 36.712912 | 280 | 0.622517 |
f8bbcc3221f6f7a54488c14fbe77ba3862858ff5 | 393 | require "spec_helper"
describe(Shoe) do
it { should have_and_belong_to_many(:stores) }
it("validates presence of a shoe brand") do
new_shoe = Shoe.new({:brand => ""})
expect(new_shoe.save()).to(eq(false))
end
it("capitalizes entry when saved") do
new_shoe = Shoe.create({:brand => "adidas"})
expect(new_shoe.brand()).to(eq("Adidas"))
end
end
| 24.5625 | 50 | 0.62341 |
acb86fb028efe8278a1f9cec65113922bb58636f | 5,679 | module RSpec
module Mocks
# ArgumentMatchers are placeholders that you can include in message
# expectations to match arguments against a broader check than simple
# equality.
#
# With the exception of `any_args` and `no_args`, they all match against
# the arg in same position in the argument list.
module ArgumentMatchers
class AnyArgsMatcher
def description
"any args"
end
end
class AnyArgMatcher
def initialize(ignore)
end
def ==(other)
true
end
end
class NoArgsMatcher
def description
"no args"
end
end
class RegexpMatcher
def initialize(regexp)
@regexp = regexp
end
def ==(value)
Regexp === value ? value == @regexp : value =~ @regexp
end
end
class BooleanMatcher
def initialize(ignore)
end
def ==(value)
[true,false].include?(value)
end
end
class HashIncludingMatcher
def initialize(expected)
@expected = expected
end
def ==(actual)
@expected.all? {|k,v| actual.has_key?(k) && v == actual[k]}
rescue NoMethodError
false
end
def description
"hash_including(#{@expected.inspect.sub(/^\{/,"").sub(/\}$/,"")})"
end
end
class HashExcludingMatcher
def initialize(expected)
@expected = expected
end
def ==(actual)
@expected.none? {|k,v| actual.has_key?(k) && v == actual[k]}
rescue NoMethodError
false
end
def description
"hash_not_including(#{@expected.inspect.sub(/^\{/,"").sub(/\}$/,"")})"
end
end
class DuckTypeMatcher
def initialize(*methods_to_respond_to)
@methods_to_respond_to = methods_to_respond_to
end
def ==(value)
@methods_to_respond_to.all? {|message| value.respond_to?(message)}
end
end
class MatcherMatcher
def initialize(matcher)
@matcher = matcher
end
def ==(value)
@matcher.matches?(value)
end
end
class EqualityProxy
def initialize(given)
@given = given
end
def ==(expected)
@given == expected
end
end
class InstanceOf
def initialize(klass)
@klass = klass
end
def ==(actual)
actual.instance_of?(@klass)
end
end
class KindOf
def initialize(klass)
@klass = klass
end
def ==(actual)
actual.kind_of?(@klass)
end
end
# Passes if object receives `:message` with any args at all. This is
# really a more explicit variation of `object.should_receive(:message)`
#
# @example
#
# object.should_receive(:message).with(any_args())
def any_args
AnyArgsMatcher.new
end
# Passes as long as there is an argument.
#
# @example
#
# object.should_receive(:message).with(anything())
def anything
AnyArgMatcher.new(nil)
end
# Passes if no arguments are passed along with the message
#
# @example
#
# object.should_receive(:message).with(no_args)
def no_args
NoArgsMatcher.new
end
# Passes if the argument responds to the specified messages.
#
# @example
#
# object.should_receive(:message).with(duck_type(:hello))
# object.should_receive(:message).with(duck_type(:hello, :goodbye))
def duck_type(*args)
DuckTypeMatcher.new(*args)
end
# Passes if the argument is boolean.
#
# @example
#
# object.should_receive(:message).with(boolean())
def boolean
BooleanMatcher.new(nil)
end
# Passes if the argument is a hash that includes the specified key(s) or
# key/value pairs. If the hash includes other keys, it will still pass.
#
# @example
#
# object.should_receive(:message).with(hash_including(:key => val))
# object.should_receive(:message).with(hash_including(:key))
# object.should_receive(:message).with(hash_including(:key, :key2 => val2))
def hash_including(*args)
HashIncludingMatcher.new(anythingize_lonely_keys(*args))
end
# Passes if the argument is a hash that doesn't include the specified
# key(s) or key/value pairs.
#
# @example
#
# object.should_receive(:message).with(hash_excluding(:key => val))
# object.should_receive(:message).with(hash_excluding(:key))
# object.should_receive(:message).with(hash_excluding(:key, :key2 => :val2))
def hash_excluding(*args)
HashExcludingMatcher.new(anythingize_lonely_keys(*args))
end
alias_method :hash_not_including, :hash_excluding
# Passes if `arg.instance_of?(klass)`
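#
# @example (illustrative, mirroring the examples above)
#
#   object.should_receive(:message).with(instance_of(String))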
def instance_of(klass)
InstanceOf.new(klass)
end
alias_method :an_instance_of, :instance_of
# Passes if `arg.kind_of?(klass)`
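#
# @example (illustrative, mirroring the examples above)
#
#   object.should_receive(:message).with(kind_of(Numeric))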
def kind_of(klass)
KindOf.new(klass)
end
alias_method :a_kind_of, :kind_of
private
def anythingize_lonely_keys(*args)
hash = args.last.class == Hash ? args.delete_at(-1) : {}
args.each { |arg| hash[arg] = anything }
hash
end
end
end
end
| 24.584416 | 84 | 0.558725 |
6a3e6b8641b2e4fbe1f60ef39802bc9bd89ad8c4 | 4,864 | Shindo.tests('Radosgw::Provisioning | provisioning requests', ['radosgw']) do
current_timestamp = Time.now.to_i
user_format = {
'email' => String,
'display_name' => String,
'user_id' => String,
'suspended' => Integer,
'keys' =>
[
{
'access_key' => String,
'secret_key' => String,
'user' => String,
}
],
}
tests('User creation') do
tests('is successful').returns(String) do
# Create a user.
#
email, name = "successful_user_creation_test_#{current_timestamp}@example.com", "Fog User 0"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
user_id.class
end
tests('fails if duplicate').raises(Fog::Radosgw::Provisioning::UserAlreadyExists) do
2.times do
email, name = "failed_duplicate_user_creation_test_#{current_timestamp}@example.com", "Fog User 1"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
end
end
end
tests('User delete') do
tests('is successful').returns(200) do
# Create a user.
#
email, name = "successful_user_delete_test_#{current_timestamp}@example.com", "Fog User 2"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
Fog::Radosgw[:provisioning].delete_user(user_id).status
end
tests('is successful').returns(404) do
# Create a user.
#
email, name = "successful_user_delete_test_2_#{current_timestamp}@example.com", "Fog User 3"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
Fog::Radosgw[:provisioning].delete_user(user_id).status
Fog::Radosgw[:provisioning].get_user(user_id).status
end
end
tests('User disable') do
tests('is successful').returns(200) do
# Create a user.
#
email, name = "successful_user_disable_test_#{current_timestamp}@example.com", "Fog User 4"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
Fog::Radosgw[:provisioning].disable_user(user_id).status
end
end
tests('User enable') do
tests('is successful').returns(200) do
# Create a user.
#
email, name = "successful_user_disable_enable_test_#{current_timestamp}@example.com", "Fog User 5"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
Fog::Radosgw[:provisioning].disable_user(user_id).status
Fog::Radosgw[:provisioning].enable_user(user_id).status
end
end
tests('User retrieval') do
tests('is successful').formats(user_format) do
# Create a user.
#
email, name = "user_retrieval_test_#{current_timestamp}@example.com", "Fog User 6"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
# Get user details.
#
Fog::Radosgw[:provisioning].get_user(user_id).body
end
end
tests('User listing') do
tests('successfully lists users').formats(user_format) do
# Create a user.
#
email, name = "user_listing_test_#{current_timestamp}@example.com", "Fog User 7"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
# Ensure the list users response contains the user that we just
# created.
#
Fog::Radosgw[:provisioning].list_users.body.select { |x| x['email'] == email }.first
end
tests('successfully lists users containing no disabled users').returns(nil) do
# Create a user.
#
email, name = "user_listing_without_disabled_users_test_#{current_timestamp}@example.com", "Fog User 8"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
# Disable that user.
#
Fog::Radosgw[:provisioning].disable_user(user_id)
# Ensure the list users response does not contain the user that we
# just created and disabled.
#
Fog::Radosgw[:provisioning].list_users(:suspended => 0).body.select { |x| x['email'] == email }.first
end
tests('successfully lists users containing disabled users').formats(user_format) do
# Create a user.
#
email, name = "user_listing_with_disabled_users_test_#{current_timestamp}@example.com", "Fog User 9"
user_id = Fog::Radosgw[:provisioning].create_user(name, name, email).body['user_id']
# Disable that user.
#
Fog::Radosgw[:provisioning].disable_user(user_id)
# Ensure the list users response contains the user that we just
# created and disabled.
#
Fog::Radosgw[:provisioning].list_users.body.select { |x| x['email'] == email }.first
end
end
end
| 28.444444 | 109 | 0.644326 |
f7ef39a846ab355a71f7f521ec8e12f5f31d91e6 | 509 | # frozen_string_literal: true
return unless defined? ::ActiveSupport::CurrentAttributes
require "spec_helper"
RSpec.describe OperatorRecordable::CurrentAttributesStore do
before { described_class::Current.reset }
describe "#[]" do
before { described_class::Current[:foo1] = "bar1" }
it { expect(described_class.new[:foo1]).to eq "bar1" }
end
describe "#[]=" do
before { described_class.new[:foo2] = "bar2" }
it { expect(described_class::Current[:foo2]).to eq "bar2" }
end
end
| 23.136364 | 63 | 0.701375 |
08e792328e0c1298940892dc7b34d358cae31118 | 437 | require 'acts_as_list'
require 'acts_as_markup'
require 'acts_as_published'
require 'friendly_id'
require 'haml-rails'
require 'paperclip'
require 'route_translator'
require 'twitter/bootstrap/components/rails'
require 'ecm/pictures/engine'
require 'ecm/pictures/configuration'
require 'ecm/pictures/version'
module Ecm
module Pictures
extend Configuration
def self.table_name_prefix
'ecm_pictures_'
end
end
end
| 19 | 44 | 0.784897 |
f8eed6887b591d6ba53ecbbb5f0ef5537b71fdae | 1,958 | class Admin::Api::WebHooksFailuresController < Admin::Api::BaseController
representer WebHookFailures
before_action :authorize_web_hooks
# swagger
##~ sapi = source2swagger.namespace("Account Management API")
##~ e = sapi.apis.add
##~ e.path = "/admin/api/webhooks/failures.xml"
##~ e.responseClass = "List[webhook_failures]"
#
##~ op = e.operations.add
##~ op.httpMethod = "GET"
##~ op.summary = "Webhooks List Failed Deliveries"
##~ op.description = "Lists of webhooks that could not be delivered to your end-point after 5 trials. A webhook is considered delivered if your end-point responds with a 200, otherwise it retries 5 times at 60 second intervals."
##~ op.group = "webhooks"
#
##~ op.parameters.add @parameter_access_token
#
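# Illustrative request (host and token are placeholders, not defined in this file):
#
#   curl "https://<admin-domain>/admin/api/webhooks/failures.xml?access_token=<ACCESS_TOKEN>"
#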
def show
respond_with(failures)
end
#
##~ op = e.operations.add
##~ op.httpMethod = "DELETE"
##~ op.summary = "Webhooks Delete Failed Deliveries"
##~ op.description = "Deletes failed delivery records. It is advisible to delete the records past the time of the last webhook failure that was received instead of deleting them all. Between the GET and the DELETE other webhooks failures may have arrived."
##~ op.group = "webhooks"
#
##~ op.parameters.add @parameter_access_token
##~ op.parameters.add :name => "time", :description => "Only failed webhook deliveries whose time is less than or equal to the passed time are destroyed (if used).", :dataType => "time", :required => false, :paramType => "query"
#
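# Suggested flow (illustrative; host, token and timestamp are placeholders):
#
#   1. GET    /admin/api/webhooks/failures.xml?access_token=<ACCESS_TOKEN>    # note the time of the newest failure
#   2. DELETE /admin/api/webhooks/failures.xml?access_token=<ACCESS_TOKEN>&time=<NEWEST_FAILURE_TIME>
#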
def destroy
if params[:time] && !WebHookFailures.valid_time?(params[:time])
render_error 'invalid time', status: :bad_request
else
failures.delete(params[:time])
respond_with(failures, nothing: true)
end
end
private
def authorize_web_hooks
authorize!(:manage, :web_hooks) if current_user
end
def failures
@failures ||= WebHookFailures.new(current_account.id)
end
end
| 36.259259 | 258 | 0.695097 |
1db8e1026be43ef906ca0179e38fd38ce5f28f57 | 2,116 | require_relative './configuration_file_finder'
module Reek
module Configuration
class ConfigFileException < StandardError; end
#
# Reek's singleton configuration instance.
#
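# The file loaded by load_from_file below is plain YAML whose top-level keys
# are smell class names; a minimal sketch (the smell name and option shown are
# illustrative):
#
#   UncommunicativeVariableName:
#     enabled: false
#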
module AppConfiguration
@configuration = {}
@has_been_initialized = false
class << self
attr_reader :configuration
def initialize_with(application)
@has_been_initialized = true
configuration_file_path = ConfigurationFileFinder.find(application)
return unless configuration_file_path
load_from_file configuration_file_path
end
def configure_smell_repository(smell_repository)
# Let users call this method directly without having initialized AppConfiguration before
# and if they do, initialize it without application context
initialize_with(nil) unless @has_been_initialized
@configuration.each do |klass_name, config|
klass = load_smell_type(klass_name)
smell_repository.configure(klass, config) if klass
end
end
def load_from_file(path)
if File.size(path) == 0
report_problem('Empty file', path)
return
end
begin
@configuration = YAML.load_file(path) || {}
rescue => error
raise_error(error.to_s, path)
end
raise_error('Not a hash', path) unless @configuration.is_a? Hash
end
def reset
@configuration.clear
end
private
def load_smell_type(name)
Reek::Smells.const_get(name)
rescue NameError
report_problem("\"#{name}\" is not a code smell")
nil
end
def report_problem(reason, path = nil)
# path is optional: load_smell_type reports problems that are not tied to a configuration file
$stderr.puts "Warning: #{path ? message(reason, path) : reason}"
end
def raise_error(reason, path)
raise ConfigFileException, message(reason, path)
end
def message(reason, path)
"Invalid configuration file \"#{File.basename(path)}\" -- #{reason}"
end
end
end
end
end
| 27.842105 | 98 | 0.614367 |
1d710ed388cd4d0373b9456c8d05560c09064667 | 455 | # encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Network::Mgmt::V2019_12_01
module Models
#
# Defines values for EffectiveRouteSource
#
module EffectiveRouteSource
Unknown = "Unknown"
User = "User"
VirtualNetworkGateway = "VirtualNetworkGateway"
Default = "Default"
end
end
end
| 23.947368 | 70 | 0.701099 |
1dc83f917bc7e2eb04188501dc5a373126cc1889 | 204 | require 'test_helper'
class HaloPaysTest < Minitest::Test
def test_that_it_has_a_version_number
refute_nil ::HaloPays::VERSION
end
def test_it_does_something_useful
assert false
end
end
| 17 | 39 | 0.784314 |
398bb11d3b946845246ddd60486260b1dae6727a | 14,079 | # -*- coding: utf-8 -*-
require 'spec_helper'
require_relative 'shared/block_examples'
# rubocop:disable EmptyLines, LineLength
describe RubyToBlock::Block, 'movement and rotation category', to_blocks: true do
parts = <<-EOS
car1.on(:start) do
move(10)
self.position = [0, 0]
self.x += 10
self.x = 0
self.y += 10
self.y = 0
if x < 300
if y < 300
end
end
end
car1.move(10)
car1.position = [0, 0]
car1.x += 10
car1.x = 0
car1.y += 10
car1.y = 0
EOS
describe compact_source_code(parts), character_new_data: true do
_parts = parts
let(:parts) { _parts }
it 'produces the correct result' do
should eq_block_xml(<<-XML)
<field name="NAME">car1</field>
<statement name="DO">
<block type="events_on_start">
<statement name="DO">
<block type="motion_move" inline="true">
<value name="STEP">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_x_y" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<value name="Y">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="motion_change_x_by" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_x" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="motion_change_y_by" inline="true">
<value name="Y">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_y" inline="true">
<value name="Y">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="control_if" inline="true">
<value name="COND">
<block type="operators_compare_lt" inline="true">
<value name="A">
<block type="motion_self_x" />
</value>
<value name="B">
<block type="math_number">
<field name="NUM">300</field>
</block>
</value>
</block>
</value>
<statement name="THEN">
<block type="control_if" inline="true">
<value name="COND">
<block type="operators_compare_lt" inline="true">
<value name="A">
<block type="motion_self_y" />
</value>
<value name="B">
<block type="math_number">
<field name="NUM">300</field>
</block>
</value>
</block>
</value>
</block>
</statement>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</statement>
<next>
<block type="motion_move" inline="true">
<value name="STEP">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_x_y" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<value name="Y">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="motion_change_x_by" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_x" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="motion_change_y_by" inline="true">
<value name="Y">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_y" inline="true">
<value name="Y">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</statement>
XML
end
end
parts = <<-EOS
car1.move(10)
car1.position = [0, 0]
car1.x += 10
EOS
describe compact_source_code(parts), character_new_data: true do
_parts = parts
let(:parts) { _parts }
it 'produces the correct result' do
should eq_block_xml(<<-XML)
<field name="NAME">car1</field>
<statement name="DO">
<block type="motion_move" inline="true">
<value name="STEP">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
<next>
<block type="motion_set_x_y" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<value name="Y">
<block type="math_number">
<field name="NUM">0</field>
</block>
</value>
<next>
<block type="motion_change_x_by" inline="true">
<value name="X">
<block type="math_number">
<field name="NUM">10</field>
</block>
</value>
</block>
</next>
</block>
</next>
</block>
</statement>
XML
end
end
parts = <<-EOS
car1.on(:start) do
turn_if_reach_wall
if reach_wall?
turn
end
rotate(15)
rotate(-15)
self.angle = 90
point_towards(:mouse)
point_towards(car1)
if angle < 90
end
end
car1.turn_if_reach_wall
car1.turn
car1.rotate(15)
car1.rotate(-15)
car1.angle = 90
car1.point_towards(:mouse)
car1.point_towards(car1)
EOS
describe compact_source_code(parts), character_new_data: true do
_parts = parts
let(:parts) { _parts }
it 'produces the correct result' do
should eq_block_xml(<<-XML)
<field name="NAME">car1</field>
<statement name="DO">
<block type="events_on_start">
<statement name="DO">
<block type="motion_turn_if_reach_wall">
<next>
<block type="control_if" inline="true">
<value name="COND">
<block type="sensing_reach_wall" />
</value>
<statement name="THEN">
<block type="motion_turn" />
</statement>
<next>
<block type="motion_rotate_right" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">15</field>
</block>
</value>
<next>
<block type="motion_rotate_left" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">15</field>
</block>
</value>
<next>
<block type="motion_set_angle" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">90</field>
</block>
</value>
<next>
<block type="motion_point_towards_mouse">
<next>
<block type="motion_point_towards_character">
<field name="CHAR">car1</field>
<next>
<block type="control_if" inline="true">
<value name="COND">
<block type="operators_compare_lt" inline="true">
<value name="A">
<block type="motion_self_angle" />
</value>
<value name="B">
<block type="math_number">
<field name="NUM">90</field>
</block>
</value>
</block>
</value>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</statement>
<next>
<block type="motion_turn_if_reach_wall">
<next>
<block type="motion_turn">
<next>
<block type="motion_rotate_right" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">15</field>
</block>
</value>
<next>
<block type="motion_rotate_left" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">15</field>
</block>
</value>
<next>
<block type="motion_set_angle" inline="true">
<value name="ANGLE">
<block type="math_number">
<field name="NUM">90</field>
</block>
</value>
<next>
<block type="motion_point_towards_mouse">
<next>
<block type="motion_point_towards_character">
<field name="CHAR">car1</field>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</next>
</block>
</statement>
XML
end
end
end
| 36.1 | 91 | 0.335535 |
1a2629ed422a1428d1b9f9fd19bb0d59a8d729c4 | 383 | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe GitlabSchema.types['Subscription'] do
it 'has the expected fields' do
expected_fields = %i[
issuable_assignees_updated
issue_crm_contacts_updated
issuable_title_updated
issuable_labels_updated
]
expect(described_class).to have_graphql_fields(*expected_fields).only
end
end
| 22.529412 | 73 | 0.759791 |
1d3689497d78cd8fe6711601f296b32d2b52a036 | 408 |
module CouchRest
class Database
alias :delete_orig! :delete!
def delete!
clear_model_fresh_cache
delete_orig!
end
# If the database is deleted, ensure that the design docs will be refreshed.
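#
# Illustrative effect (`db` stands for any CouchRest::Database handle):
#
#   db.delete!  # drops the database and forces model design docs to be refreshed on next use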
def clear_model_fresh_cache
::CouchRest::Model::Base.subclasses.each{|klass| klass.req_design_doc_refresh if klass.respond_to?(:req_design_doc_refresh)}
end
end
end
| 20.4 | 130 | 0.720588 |
f77c7cb231ba51e102482b5ff3aff2a0f4414997 | 768 | $START_OPTIONS = { :session_store => "memory" }
require File.join(File.dirname(__FILE__), "..", "..", "spec_helper")
require File.join(File.dirname(__FILE__), "session_spec")
require File.join(File.dirname(__FILE__), "controllers", "sessions")
describe Merb::MemorySession do
before do
@session_class = Merb::MemorySession
@session = @session_class.generate
end
it_should_behave_like "All session-store backends"
it "should have a session_store_type class attribute" do
@session.class.session_store_type.should == :memory
end
end
describe Merb::MemorySession, "mixed into Merb::Controller" do
before(:all) { @session_class = Merb::MemorySession }
it_should_behave_like "All session-stores mixed into Merb::Controller"
end | 27.428571 | 72 | 0.733073 |