hexsha: string (length 40-40) | size: int64 (2 - 1.01M) | content: string (length 2 - 1.01M) | avg_line_length: float64 (1.5 - 100) | max_line_length: int64 (2 - 1k) | alphanum_fraction: float64 (0.25 - 1)
---|---|---|---|---|---
911c590b70a6222a15c46cdc0d483385b767044a
| 1,600 |
require 'test_helper'

class CartTest < ActiveSupport::TestCase
  # test "the truth" do
  #   assert true
  # end

  # Seed a restaurant owner, a customer, two menu items and a cart version.
  setup do
    @restaurant = Restaurant.new(user_id: 1, address: '1893 Berkeley Avenue')
    @restaurant.save
    @user = User.new(id: 1, name: 'rest 1', email: '[email protected]', rest: true, password: '123123123', password_confirmation: '123123123')
    @user.save
    @customer = Customer.new(user_id: 2, phone_number: 0000)
    @customer.save
    @user = User.new(id: 2, name: 'user 1', email: '[email protected]', rest: false, password: '123123123', password_confirmation: '123123123')
    @user.save
    @menu1 = Menu.new(id: 1, rest_id: 1, name: 'test1', price: 2.5)
    @menu1.save
    @menu2 = Menu.new(id: 2, rest_id: 1, name: 'test2', price: 3.5)
    @menu2.save
    @version = Version.new(id: 1, cust_id: 2, rest_id: 1, count: 0)
    @version.save
  end

  test "current cart test" do
    @cart1 = Cart.new(cust_id: 2, rest_id: 1, version: 0, menu_id: 1, qty: 1)
    @cart1.save
    @cart2 = Cart.new(cust_id: 2, rest_id: 1, version: 0, menu_id: 2, qty: 1)
    @cart2.save
    curCart = Cart.current_cart(2)
    # 2.5 + 3.5 — presumably the cart total for the two items
    assert_equal(6, curCart[:cart][1])
  end

  test "find cart test" do
    @cart = Cart.new(id: 1, cust_id: 2, rest_id: 1, version: 0, menu_id: 1, qty: 1)
    @cart.save
    cartFound = Cart.find_cart(2, 1, 0).first
    assert_equal(cartFound, @cart)
  end

  # def test_user_id
  #   cust = Customer.new :user_id => "", :phone_number => 00000
  #   assert_false cust.save
  #   cust = @customer.dup
  #   assert_false cust.save
  # end
end
| 29.62963 | 145 | 0.635 |
d581d5efce6e932557341f6aa51ce2c9423c3dda
| 1,164 |
module HoldsHelper
  def hold_date(event)
    event.date.strftime("%A, %B %-d, between ") + event.times.sub("-", "&")
  end

  def hold_slot_options(slots)
    grouped_slots = slots.group_by(&:date).map { |date, slots|
      [date.strftime("%B %-d, %Y"), slots.map { |s| [hold_date(s), s.id] }]
    }
    grouped_options_for_select(grouped_slots)
  end

  def render_hold_items(items, &block)
    block ||= proc {}
    render layout: "holds/items", locals: {items: items}, &block
  end

  def render_hold_status(hold)
    previous_holds_count = hold.previous_active_holds.count
    appointment = hold.upcoming_appointment

    if appointment
      "Scheduled for pick-up at #{format_date(appointment.starts_at)} " +
        format_time_range(appointment.starts_at, appointment.ends_at)
    elsif previous_holds_count == 0
      "Ready for pickup. Schedule by #{format_date(hold.created_at + 7.days)}"
    else
      "##{previous_holds_count} on wait list"
    end
  end

  private

  def format_date(date)
    date.strftime("%a, %-m/%-d")
  end

  def format_time_range(starts_at, ends_at)
    "#{starts_at.strftime('%l%P')} - #{ends_at.strftime('%l%P')}"
  end
end
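
# Illustrative usage sketch (not part of the original file; the slot object and the
# view context are assumed): hold_slot_options groups slots by day into <optgroup>
# entries, with each option labelled by hold_date, e.g.
#
#   slot = OpenStruct.new(id: 7, date: Date.new(2021, 6, 4), times: "10am-12pm")
#   hold_date(slot)
#   # => "Friday, June 4, between 10am&12pm"
#   hold_slot_options([slot])
#   # => grouped_options_for_select([["June 4, 2021", [["Friday, June 4, between 10am&12pm", 7]]]])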
| 27.714286 | 78 | 0.668385 |
bbed08f504430dad90ec7af9876413aec94c7d88
| 52,172 |
require_relative '../settings'
require_relative '../object_store_settings'
require_relative '../smime_signature_settings'
# Default settings
Settings['shared'] ||= Settingslogic.new({})
Settings.shared['path'] = Settings.absolute(Settings.shared['path'] || "shared")
Settings['encrypted_settings'] ||= Settingslogic.new({})
Settings.encrypted_settings['path'] ||= File.join(Settings.shared['path'], "encrypted_settings")
Settings.encrypted_settings['path'] = Settings.absolute(Settings.encrypted_settings['path'])
Settings['ldap'] ||= Settingslogic.new({})
Settings.ldap['enabled'] = false if Settings.ldap['enabled'].nil?
Settings.ldap['servers'] ||= Settingslogic.new({})
Settings.ldap['prevent_ldap_sign_in'] = false if Settings.ldap['prevent_ldap_sign_in'].blank?
Settings.ldap['secret_file'] = Settings.absolute(Settings.ldap['secret_file'] || File.join(Settings.encrypted_settings['path'], "ldap.yaml.enc"))
Gitlab.ee do
Settings.ldap['sync_time'] = 3600 if Settings.ldap['sync_time'].nil?
Settings.ldap['schedule_sync_daily'] = 1 if Settings.ldap['schedule_sync_daily'].nil?
Settings.ldap['schedule_sync_hour'] = 1 if Settings.ldap['schedule_sync_hour'].nil?
Settings.ldap['schedule_sync_minute'] = 30 if Settings.ldap['schedule_sync_minute'].nil?
end
# Backwards compatibility: older configuration files defined only a single LDAP host
if Settings.ldap['enabled'] || Rails.env.test?
if Settings.ldap['host'].present?
# We detected old LDAP configuration syntax. Update the config to make it
# look like it was entered with the new syntax.
server = Settings.ldap.except('sync_time')
Settings.ldap['servers'] = {
'main' => server
}
end
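# Illustrative sketch of the two syntaxes (assumed gitlab.yml snippets, not taken
# from this file). Legacy, single-host form:
#   ldap:
#     enabled: true
#     host: ldap.example.com
#     port: 636
# Current, multi-server form that the block above converts it into:
#   ldap:
#     enabled: true
#     servers:
#       main:
#         host: ldap.example.com
#         port: 636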
Settings.ldap['servers'].each do |key, server|
server = Settingslogic.new(server)
server['label'] ||= 'LDAP'
server['timeout'] ||= 10.seconds
server['block_auto_created_users'] = false if server['block_auto_created_users'].nil?
server['allow_username_or_email_login'] = false if server['allow_username_or_email_login'].nil?
server['smartcard_auth'] = false unless %w[optional required].include?(server['smartcard_auth'])
server['active_directory'] = true if server['active_directory'].nil?
server['attributes'] = {} if server['attributes'].nil?
server['lowercase_usernames'] = false if server['lowercase_usernames'].nil?
server['provider_name'] ||= "ldap#{key}".downcase
server['provider_class'] = OmniAuth::Utils.camelize(server['provider_name'])
server['external_groups'] = [] if server['external_groups'].nil?
server['sync_ssh_keys'] = 'sshPublicKey' if server['sync_ssh_keys'].to_s == 'true'
# For backwards compatibility
server['encryption'] ||= server['method']
server['encryption'] = 'simple_tls' if server['encryption'] == 'ssl'
server['encryption'] = 'start_tls' if server['encryption'] == 'tls'
# Certificate verification was added in 9.4.2, and defaulted to false for
# backwards-compatibility.
#
# Since GitLab 10.0, verify_certificates defaults to true for security.
server['verify_certificates'] = true if server['verify_certificates'].nil?
# Expose ability to set `tls_options` directly. Deprecate `ca_file` and
# `ssl_version` in favor of `tls_options` hash option.
server['tls_options'] ||= {}
if server['ssl_version'] || server['ca_file']
Rails.logger.warn 'DEPRECATED: LDAP options `ssl_version` and `ca_file` should be nested within `tls_options`' # rubocop:disable Gitlab/RailsLogger
end
if server['ssl_version']
server['tls_options']['ssl_version'] ||= server['ssl_version']
server.delete('ssl_version')
end
if server['ca_file']
server['tls_options']['ca_file'] ||= server['ca_file']
server.delete('ca_file')
end
Settings.ldap['servers'][key] = server
end
end
Settings['omniauth'] ||= Settingslogic.new({})
Settings.omniauth['enabled'] = true if Settings.omniauth['enabled'].nil?
Settings.omniauth['auto_sign_in_with_provider'] = false if Settings.omniauth['auto_sign_in_with_provider'].nil?
Settings.omniauth['allow_single_sign_on'] = false if Settings.omniauth['allow_single_sign_on'].nil?
Settings.omniauth['allow_bypass_two_factor'] = false if Settings.omniauth['allow_bypass_two_factor'].nil?
Settings.omniauth['external_providers'] = [] if Settings.omniauth['external_providers'].nil?
Settings.omniauth['block_auto_created_users'] = true if Settings.omniauth['block_auto_created_users'].nil?
Settings.omniauth['auto_link_ldap_user'] = false if Settings.omniauth['auto_link_ldap_user'].nil?
Settings.omniauth['auto_link_saml_user'] = false if Settings.omniauth['auto_link_saml_user'].nil?
Settings.omniauth['auto_link_user'] = false if Settings.omniauth['auto_link_user'].nil?
Settings.omniauth['sync_profile_from_provider'] = false if Settings.omniauth['sync_profile_from_provider'].nil?
Settings.omniauth['sync_profile_attributes'] = ['email'] if Settings.omniauth['sync_profile_attributes'].nil?
# Handle backwards compatibility with merge request 11268
if Settings.omniauth['sync_email_from_provider']
if Settings.omniauth['sync_profile_from_provider'].is_a?(Array)
Settings.omniauth['sync_profile_from_provider'] |= [Settings.omniauth['sync_email_from_provider']]
elsif !Settings.omniauth['sync_profile_from_provider']
Settings.omniauth['sync_profile_from_provider'] = [Settings.omniauth['sync_email_from_provider']]
end
Settings.omniauth['sync_profile_attributes'] |= ['email'] unless Settings.omniauth['sync_profile_attributes'] == true
end
Settings.omniauth['providers'] ||= []
Settings.omniauth['cas3'] ||= Settingslogic.new({})
Settings.omniauth.cas3['session_duration'] ||= 8.hours
Settings.omniauth['session_tickets'] ||= Settingslogic.new({})
Settings.omniauth.session_tickets['cas3'] = 'ticket'
# Fill out omniauth-gitlab settings. This makes it easy to set up GHE or GH by just specifying the url.
github_default_url = "https://github.com"
github_settings = Settings.omniauth['providers'].find { |provider| provider["name"] == "github" }
if github_settings
# For compatibility with old config files (before 7.8)
# where people don't have a url in their github settings
if github_settings['url'].blank?
github_settings['url'] = github_default_url
end
github_settings["args"] ||= Settingslogic.new({})
github_settings["args"]["client_options"] =
if github_settings["url"].include?(github_default_url)
OmniAuth::Strategies::GitHub.default_options[:client_options]
else
{
"site" => File.join(github_settings["url"], "api/v3"),
"authorize_url" => File.join(github_settings["url"], "login/oauth/authorize"),
"token_url" => File.join(github_settings["url"], "login/oauth/access_token")
}
end
end
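# Illustrative sketch (assumed gitlab.yml snippet, not taken from this file): a
# GitHub / GitHub Enterprise provider only needs its base `url`; the client_options
# above are then derived from it, e.g.
#   omniauth:
#     providers:
#       - { name: 'github',
#           app_id: 'YOUR_APP_ID',
#           app_secret: 'YOUR_APP_SECRET',
#           url: "https://github.example.com/" }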
# SAML should be enabled for the tests automatically, but only for EE.
saml_provider_enabled = Settings.omniauth.providers.any? do |provider|
provider['name'] == 'group_saml'
end
if Gitlab.ee? && Rails.env.test? && !saml_provider_enabled
Settings.omniauth.providers << Settingslogic.new({ 'name' => 'group_saml' })
end
Settings['issues_tracker'] ||= {}
#
# GitLab
#
Settings['gitlab'] ||= Settingslogic.new({})
Settings.gitlab['default_project_creation'] ||= ::Gitlab::Access::DEVELOPER_MAINTAINER_PROJECT_ACCESS
Settings.gitlab['default_project_deletion_protection'] ||= false
Settings.gitlab['default_projects_limit'] ||= 100000
Settings.gitlab['default_branch_protection'] ||= 2
Settings.gitlab['default_can_create_group'] = true if Settings.gitlab['default_can_create_group'].nil?
Settings.gitlab['default_theme'] = Gitlab::Themes::APPLICATION_DEFAULT if Settings.gitlab['default_theme'].nil?
Settings.gitlab['host'] ||= ENV['GITLAB_HOST'] || 'localhost'
Settings.gitlab['ssh_host'] ||= Settings.gitlab.host
Settings.gitlab['https'] = false if Settings.gitlab['https'].nil?
Settings.gitlab['port'] ||= ENV['GITLAB_PORT'] || (Settings.gitlab.https ? 443 : 80)
Settings.gitlab['relative_url_root'] ||= ENV['RAILS_RELATIVE_URL_ROOT'] || ''
# / is not a valid relative URL root
Settings.gitlab['relative_url_root'] = '' if Settings.gitlab['relative_url_root'] == '/'
Settings.gitlab['protocol'] ||= Settings.gitlab.https ? "https" : "http"
Settings.gitlab['email_enabled'] ||= true if Settings.gitlab['email_enabled'].nil?
Settings.gitlab['email_from'] ||= ENV['GITLAB_EMAIL_FROM'] || "gitlab@#{Settings.gitlab.host}"
Settings.gitlab['email_display_name'] ||= ENV['GITLAB_EMAIL_DISPLAY_NAME'] || 'GitLab'
Settings.gitlab['email_reply_to'] ||= ENV['GITLAB_EMAIL_REPLY_TO'] || "noreply@#{Settings.gitlab.host}"
Settings.gitlab['email_subject_suffix'] ||= ENV['GITLAB_EMAIL_SUBJECT_SUFFIX'] || ""
Settings.gitlab['email_smime'] = SmimeSignatureSettings.parse(Settings.gitlab['email_smime'])
Settings.gitlab['base_url'] ||= Settings.__send__(:build_base_gitlab_url)
Settings.gitlab['url'] ||= Settings.__send__(:build_gitlab_url)
Settings.gitlab['user'] ||= 'git'
# External configuration may cause the ssh user to differ from the GitLab user
Settings.gitlab['ssh_user'] ||= Settings.gitlab.user
Settings.gitlab['user_home'] ||= begin
Etc.getpwnam(Settings.gitlab['user']).dir
rescue ArgumentError # no user configured
'/home/' + Settings.gitlab['user']
end
Settings.gitlab['time_zone'] ||= nil
Settings.gitlab['signup_enabled'] ||= true if Settings.gitlab['signup_enabled'].nil?
Settings.gitlab['signin_enabled'] ||= true if Settings.gitlab['signin_enabled'].nil?
Settings.gitlab['restricted_visibility_levels'] = Settings.__send__(:verify_constant_array, Gitlab::VisibilityLevel, Settings.gitlab['restricted_visibility_levels'], [])
Settings.gitlab['username_changing_enabled'] = true if Settings.gitlab['username_changing_enabled'].nil?
Settings.gitlab['issue_closing_pattern'] = '\b((?:[Cc]los(?:e[sd]?|ing)|\b[Ff]ix(?:e[sd]|ing)?|\b[Rr]esolv(?:e[sd]?|ing)|\b[Ii]mplement(?:s|ed|ing)?)(:?) +(?:(?:issues? +)?%{issue_ref}(?:(?: *,? +and +| *,? *)?)|([A-Z][A-Z0-9_]+-\d+))+)' if Settings.gitlab['issue_closing_pattern'].nil?
Settings.gitlab['default_projects_features'] ||= {}
Settings.gitlab['webhook_timeout'] ||= 10
Settings.gitlab['graphql_timeout'] ||= 30
Settings.gitlab['max_attachment_size'] ||= 10
Settings.gitlab['session_expire_delay'] ||= 10080
Settings.gitlab['unauthenticated_session_expire_delay'] ||= 2.hours.to_i
Settings.gitlab.default_projects_features['issues'] = true if Settings.gitlab.default_projects_features['issues'].nil?
Settings.gitlab.default_projects_features['merge_requests'] = true if Settings.gitlab.default_projects_features['merge_requests'].nil?
Settings.gitlab.default_projects_features['wiki'] = true if Settings.gitlab.default_projects_features['wiki'].nil?
Settings.gitlab.default_projects_features['snippets'] = true if Settings.gitlab.default_projects_features['snippets'].nil?
Settings.gitlab.default_projects_features['builds'] = true if Settings.gitlab.default_projects_features['builds'].nil?
Settings.gitlab.default_projects_features['container_registry'] = true if Settings.gitlab.default_projects_features['container_registry'].nil?
Settings.gitlab.default_projects_features['visibility_level'] = Settings.__send__(:verify_constant, Gitlab::VisibilityLevel, Settings.gitlab.default_projects_features['visibility_level'], Gitlab::VisibilityLevel::PRIVATE)
Settings.gitlab['domain_allowlist'] ||= []
Settings.gitlab['import_sources'] ||= Gitlab::ImportSources.values
Settings.gitlab['trusted_proxies'] ||= []
Settings.gitlab['content_security_policy'] ||= Gitlab::ContentSecurityPolicy::ConfigLoader.default_settings_hash
Settings.gitlab['no_todos_messages'] ||= YAML.load_file(Rails.root.join('config', 'no_todos_messages.yml'))
Settings.gitlab['impersonation_enabled'] ||= true if Settings.gitlab['impersonation_enabled'].nil?
Settings.gitlab['usage_ping_enabled'] = true if Settings.gitlab['usage_ping_enabled'].nil?
Settings.gitlab['max_request_duration_seconds'] ||= 57
Gitlab.ee do
Settings.gitlab['mirror_max_delay'] ||= 300
Settings.gitlab['mirror_max_capacity'] ||= 30
Settings.gitlab['mirror_capacity_threshold'] ||= 15
Settings.gitlab['seat_link_enabled'] = true if Settings.gitlab['seat_link_enabled'].nil?
end
#
# Elasticsearch
#
Gitlab.ee do
Settings['elasticsearch'] ||= Settingslogic.new({})
Settings.elasticsearch['enabled'] = false if Settings.elasticsearch['enabled'].nil?
Settings.elasticsearch['url'] = ENV['ELASTIC_URL'] || "http://localhost:9200"
Settings.elasticsearch['indexer_path'] ||= Gitlab::Utils.which('gitlab-elasticsearch-indexer')
end
#
# CI
#
Settings['gitlab_ci'] ||= Settingslogic.new({})
Settings.gitlab_ci['shared_runners_enabled'] = true if Settings.gitlab_ci['shared_runners_enabled'].nil?
Settings.gitlab_ci['all_broken_builds'] = true if Settings.gitlab_ci['all_broken_builds'].nil?
Settings.gitlab_ci['add_pusher'] = false if Settings.gitlab_ci['add_pusher'].nil?
Settings.gitlab_ci['builds_path'] = Settings.absolute(Settings.gitlab_ci['builds_path'] || "builds/")
Settings.gitlab_ci['url'] ||= Settings.__send__(:build_gitlab_ci_url)
#
# Reply by email
#
Settings['incoming_email'] ||= Settingslogic.new({})
Settings.incoming_email['enabled'] = false if Settings.incoming_email['enabled'].nil?
#
# Service desk email
#
Settings['service_desk_email'] ||= Settingslogic.new({})
Settings.service_desk_email['enabled'] = false if Settings.service_desk_email['enabled'].nil?
#
# Build Artifacts
#
Settings['artifacts'] ||= Settingslogic.new({})
Settings.artifacts['enabled'] = true if Settings.artifacts['enabled'].nil?
Settings.artifacts['storage_path'] = Settings.absolute(Settings.artifacts.values_at('path', 'storage_path').compact.first || File.join(Settings.shared['path'], "artifacts"))
# Settings.artifact['path'] is deprecated, use `storage_path` instead
Settings.artifacts['path'] = Settings.artifacts['storage_path']
Settings.artifacts['max_size'] ||= 100 # in megabytes
Settings.artifacts['object_store'] = ObjectStoreSettings.legacy_parse(Settings.artifacts['object_store'])
#
# Registry
#
Settings['registry'] ||= Settingslogic.new({})
Settings.registry['enabled'] ||= false
Settings.registry['host'] ||= "example.com"
Settings.registry['port'] ||= nil
Settings.registry['api_url'] ||= "http://localhost:5000/"
Settings.registry['key'] ||= nil
Settings.registry['issuer'] ||= nil
Settings.registry['host_port'] ||= [Settings.registry['host'], Settings.registry['port']].compact.join(':')
Settings.registry['path'] = Settings.absolute(Settings.registry['path'] || File.join(Settings.shared['path'], 'registry'))
Settings.registry['notifications'] ||= []
#
# Error Reporting and Logging with Sentry
#
Settings['sentry'] ||= Settingslogic.new({})
Settings.sentry['enabled'] ||= false
Settings.sentry['dsn'] ||= nil
Settings.sentry['environment'] ||= nil
Settings.sentry['clientside_dsn'] ||= nil
#
# Pages
#
Settings['pages'] ||= Settingslogic.new({})
Settings['pages'] = ::Gitlab::Pages::Settings.new(Settings.pages) # For path access detection https://gitlab.com/gitlab-org/gitlab/-/issues/230702
Settings.pages['enabled'] = false if Settings.pages['enabled'].nil?
Settings.pages['access_control'] = false if Settings.pages['access_control'].nil?
Settings.pages['path'] = Settings.absolute(Settings.pages['path'] || File.join(Settings.shared['path'], "pages"))
Settings.pages['https'] = false if Settings.pages['https'].nil?
Settings.pages['host'] ||= "example.com"
Settings.pages['port'] ||= Settings.pages.https ? 443 : 80
Settings.pages['protocol'] ||= Settings.pages.https ? "https" : "http"
Settings.pages['url'] ||= Settings.__send__(:build_pages_url)
Settings.pages['external_http'] ||= false unless Settings.pages['external_http'].present?
Settings.pages['external_https'] ||= false unless Settings.pages['external_https'].present?
Settings.pages['artifacts_server'] ||= Settings.pages['enabled'] if Settings.pages['artifacts_server'].nil?
Settings.pages['secret_file'] ||= Rails.root.join('.gitlab_pages_secret')
# We want pages zip archives to be stored in the same directory as the old pages
# hierarchical structure; this will make it easier to migrate existing instances with NFS.
Settings.pages['storage_path'] = Settings.pages['path']
Settings.pages['object_store'] = ObjectStoreSettings.legacy_parse(Settings.pages['object_store'])
#
# Geo
#
Gitlab.ee do
Settings['geo'] ||= Settingslogic.new({})
# For backwards compatibility, default the node name to gitlab_url and, when doing so, ensure it ends with "/"
Settings.geo['node_name'] = Settings.geo['node_name'].presence || Settings.gitlab['url'].chomp('/').concat('/')
#
# Registry replication
#
Settings.geo['registry_replication'] ||= Settingslogic.new({})
Settings.geo.registry_replication['enabled'] ||= false
end
#
# Unleash
#
Settings['feature_flags'] ||= Settingslogic.new({})
Settings.feature_flags['unleash'] ||= Settingslogic.new({})
Settings.feature_flags.unleash['enabled'] = false if Settings.feature_flags.unleash['enabled'].nil?
#
# External merge request diffs
#
Settings['external_diffs'] ||= Settingslogic.new({})
Settings.external_diffs['enabled'] = false if Settings.external_diffs['enabled'].nil?
Settings.external_diffs['when'] = 'always' if Settings.external_diffs['when'].nil?
Settings.external_diffs['storage_path'] = Settings.absolute(Settings.external_diffs['storage_path'] || File.join(Settings.shared['path'], 'external-diffs'))
Settings.external_diffs['object_store'] = ObjectStoreSettings.legacy_parse(Settings.external_diffs['object_store'])
#
# Git LFS
#
Settings['lfs'] ||= Settingslogic.new({})
Settings.lfs['enabled'] = true if Settings.lfs['enabled'].nil?
Settings.lfs['storage_path'] = Settings.absolute(Settings.lfs['storage_path'] || File.join(Settings.shared['path'], "lfs-objects"))
Settings.lfs['object_store'] = ObjectStoreSettings.legacy_parse(Settings.lfs['object_store'])
#
# Uploads
#
Settings['uploads'] ||= Settingslogic.new({})
Settings.uploads['storage_path'] = Settings.absolute(Settings.uploads['storage_path'] || 'public')
Settings.uploads['base_dir'] = Settings.uploads['base_dir'] || 'uploads/-/system'
Settings.uploads['object_store'] = ObjectStoreSettings.legacy_parse(Settings.uploads['object_store'])
Settings.uploads['object_store']['remote_directory'] ||= 'uploads'
#
# Packages
#
Settings['packages'] ||= Settingslogic.new({})
Settings.packages['enabled'] = true if Settings.packages['enabled'].nil?
Settings.packages['dpkg_deb_path'] = '/usr/bin/dpkg-deb' if Settings.packages['dpkg_deb_path'].nil?
Settings.packages['storage_path'] = Settings.absolute(Settings.packages['storage_path'] || File.join(Settings.shared['path'], "packages"))
Settings.packages['object_store'] = ObjectStoreSettings.legacy_parse(Settings.packages['object_store'])
#
# Dependency Proxy
#
Settings['dependency_proxy'] ||= Settingslogic.new({})
Settings.dependency_proxy['enabled'] = true if Settings.dependency_proxy['enabled'].nil?
Settings.dependency_proxy['storage_path'] = Settings.absolute(Settings.dependency_proxy['storage_path'] || File.join(Settings.shared['path'], "dependency_proxy"))
Settings.dependency_proxy['object_store'] = ObjectStoreSettings.legacy_parse(Settings.dependency_proxy['object_store'])
# For the first iteration, the dependency proxy uses the Rails server to download blobs.
# To ensure acceptable performance, we only allow the feature to be used with the
# multithreaded web server Puma. This will be removed once the download logic is moved
# to GitLab Workhorse.
Settings.dependency_proxy['enabled'] = false unless Gitlab::Runtime.puma?
#
# Terraform state
#
Settings['terraform_state'] ||= Settingslogic.new({})
Settings.terraform_state['enabled'] = true if Settings.terraform_state['enabled'].nil?
Settings.terraform_state['storage_path'] = Settings.absolute(Settings.terraform_state['storage_path'] || File.join(Settings.shared['path'], "terraform_state"))
Settings.terraform_state['object_store'] = ObjectStoreSettings.legacy_parse(Settings.terraform_state['object_store'])
#
# Mattermost
#
Settings['mattermost'] ||= Settingslogic.new({})
Settings.mattermost['enabled'] = false if Settings.mattermost['enabled'].nil?
Settings.mattermost['host'] = nil unless Settings.mattermost.enabled
#
# Gravatar
#
Settings['gravatar'] ||= Settingslogic.new({})
Settings.gravatar['enabled'] = true if Settings.gravatar['enabled'].nil?
Settings.gravatar['plain_url'] ||= 'https://www.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon'
Settings.gravatar['ssl_url'] ||= 'https://secure.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon'
Settings.gravatar['host'] = Settings.host_without_www(Settings.gravatar['plain_url'])
#
# Cron Jobs
#
Settings['cron_jobs'] ||= Settingslogic.new({})
if Gitlab.ee? && Settings['ee_cron_jobs']
Settings.cron_jobs.merge!(Settings.ee_cron_jobs)
end
Settings.cron_jobs['stuck_ci_jobs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['stuck_ci_jobs_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['stuck_ci_jobs_worker']['job_class'] = 'StuckCiJobsWorker'
Settings.cron_jobs['pipeline_schedule_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['pipeline_schedule_worker']['cron'] ||= '19 * * * *'
Settings.cron_jobs['pipeline_schedule_worker']['job_class'] = 'PipelineScheduleWorker'
Settings.cron_jobs['expire_build_artifacts_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['expire_build_artifacts_worker']['cron'] ||= '*/7 * * * *'
Settings.cron_jobs['expire_build_artifacts_worker']['job_class'] = 'ExpireBuildArtifactsWorker'
Settings.cron_jobs['ci_pipelines_expire_artifacts_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ci_pipelines_expire_artifacts_worker']['cron'] ||= '*/23 * * * *'
Settings.cron_jobs['ci_pipelines_expire_artifacts_worker']['job_class'] = 'Ci::PipelineArtifacts::ExpireArtifactsWorker'
Settings.cron_jobs['ci_schedule_delete_objects_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ci_schedule_delete_objects_worker']['cron'] ||= '*/16 * * * *'
Settings.cron_jobs['ci_schedule_delete_objects_worker']['job_class'] = 'Ci::ScheduleDeleteObjectsCronWorker'
Settings.cron_jobs['environments_auto_stop_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['environments_auto_stop_cron_worker']['cron'] ||= '24 * * * *'
Settings.cron_jobs['environments_auto_stop_cron_worker']['job_class'] = 'Environments::AutoStopCronWorker'
Settings.cron_jobs['repository_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['repository_check_worker']['cron'] ||= '20 * * * *'
Settings.cron_jobs['repository_check_worker']['job_class'] = 'RepositoryCheck::DispatchWorker'
Settings.cron_jobs['admin_email_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['admin_email_worker']['cron'] ||= '0 0 * * 0'
Settings.cron_jobs['admin_email_worker']['job_class'] = 'AdminEmailWorker'
Settings.cron_jobs['personal_access_tokens_expiring_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['personal_access_tokens_expiring_worker']['cron'] ||= '0 1 * * *'
Settings.cron_jobs['personal_access_tokens_expiring_worker']['job_class'] = 'PersonalAccessTokens::ExpiringWorker'
Settings.cron_jobs['personal_access_tokens_expired_notification_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['personal_access_tokens_expired_notification_worker']['cron'] ||= '0 2 * * *'
Settings.cron_jobs['personal_access_tokens_expired_notification_worker']['job_class'] = 'PersonalAccessTokens::ExpiredNotificationWorker'
Settings.cron_jobs['repository_archive_cache_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['repository_archive_cache_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['repository_archive_cache_worker']['job_class'] = 'RepositoryArchiveCacheWorker'
Settings.cron_jobs['import_export_project_cleanup_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['import_export_project_cleanup_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['import_export_project_cleanup_worker']['job_class'] = 'ImportExportProjectCleanupWorker'
Settings.cron_jobs['ci_archive_traces_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ci_archive_traces_cron_worker']['cron'] ||= '17 * * * *'
Settings.cron_jobs['ci_archive_traces_cron_worker']['job_class'] = 'Ci::ArchiveTracesCronWorker'
Settings.cron_jobs['requests_profiles_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['requests_profiles_worker']['cron'] ||= '0 0 * * *'
Settings.cron_jobs['requests_profiles_worker']['job_class'] = 'RequestsProfilesWorker'
Settings.cron_jobs['remove_expired_members_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['remove_expired_members_worker']['cron'] ||= '10 0 * * *'
Settings.cron_jobs['remove_expired_members_worker']['job_class'] = 'RemoveExpiredMembersWorker'
Settings.cron_jobs['remove_expired_group_links_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['remove_expired_group_links_worker']['cron'] ||= '10 0 * * *'
Settings.cron_jobs['remove_expired_group_links_worker']['job_class'] = 'RemoveExpiredGroupLinksWorker'
Settings.cron_jobs['remove_unaccepted_member_invites_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['remove_unaccepted_member_invites_worker']['cron'] ||= '10 15 * * *'
Settings.cron_jobs['remove_unaccepted_member_invites_worker']['job_class'] = 'RemoveUnacceptedMemberInvitesWorker'
Settings.cron_jobs['prune_old_events_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['prune_old_events_worker']['cron'] ||= '0 */6 * * *'
Settings.cron_jobs['prune_old_events_worker']['job_class'] = 'PruneOldEventsWorker'
Settings.cron_jobs['trending_projects_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['trending_projects_worker']['cron'] = '0 1 * * *'
Settings.cron_jobs['trending_projects_worker']['job_class'] = 'TrendingProjectsWorker'
Settings.cron_jobs['remove_unreferenced_lfs_objects_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['remove_unreferenced_lfs_objects_worker']['cron'] ||= '20 0 * * *'
Settings.cron_jobs['remove_unreferenced_lfs_objects_worker']['job_class'] = 'RemoveUnreferencedLfsObjectsWorker'
Settings.cron_jobs['import_stuck_project_import_jobs'] ||= Settingslogic.new({})
Settings.cron_jobs['import_stuck_project_import_jobs']['cron'] ||= '15 * * * *'
Settings.cron_jobs['import_stuck_project_import_jobs']['job_class'] = 'Gitlab::Import::StuckProjectImportJobsWorker'
Settings.cron_jobs['jira_import_stuck_jira_import_jobs'] ||= Settingslogic.new({})
Settings.cron_jobs['jira_import_stuck_jira_import_jobs']['cron'] ||= '* 0/15 * * *'
Settings.cron_jobs['jira_import_stuck_jira_import_jobs']['job_class'] = 'Gitlab::JiraImport::StuckJiraImportJobsWorker'
Settings.cron_jobs['stuck_export_jobs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['stuck_export_jobs_worker']['cron'] ||= '30 * * * *'
Settings.cron_jobs['stuck_export_jobs_worker']['job_class'] = 'StuckExportJobsWorker'
Settings.cron_jobs['gitlab_usage_ping_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['gitlab_usage_ping_worker']['cron'] ||= nil # This is dynamically loaded in the sidekiq initializer
Settings.cron_jobs['gitlab_usage_ping_worker']['job_class'] = 'GitlabUsagePingWorker'
Settings.cron_jobs['stuck_merge_jobs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['stuck_merge_jobs_worker']['cron'] ||= '0 */2 * * *'
Settings.cron_jobs['stuck_merge_jobs_worker']['job_class'] = 'StuckMergeJobsWorker'
Settings.cron_jobs['pages_domain_verification_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['pages_domain_verification_cron_worker']['cron'] ||= '*/15 * * * *'
Settings.cron_jobs['pages_domain_verification_cron_worker']['job_class'] = 'PagesDomainVerificationCronWorker'
Settings.cron_jobs['pages_domain_removal_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['pages_domain_removal_cron_worker']['cron'] ||= '47 0 * * *'
Settings.cron_jobs['pages_domain_removal_cron_worker']['job_class'] = 'PagesDomainRemovalCronWorker'
Settings.cron_jobs['pages_domain_ssl_renewal_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['pages_domain_ssl_renewal_cron_worker']['cron'] ||= '*/10 * * * *'
Settings.cron_jobs['pages_domain_ssl_renewal_cron_worker']['job_class'] = 'PagesDomainSslRenewalCronWorker'
Settings.cron_jobs['issue_due_scheduler_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['issue_due_scheduler_worker']['cron'] ||= '50 00 * * *'
Settings.cron_jobs['issue_due_scheduler_worker']['job_class'] = 'IssueDueSchedulerWorker'
Settings.cron_jobs['prune_web_hook_logs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['prune_web_hook_logs_worker']['cron'] ||= '0 */1 * * *'
Settings.cron_jobs['prune_web_hook_logs_worker']['job_class'] = 'PruneWebHookLogsWorker'
Settings.cron_jobs['metrics_dashboard_schedule_annotations_prune_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['metrics_dashboard_schedule_annotations_prune_worker']['cron'] ||= '0 1 * * *'
Settings.cron_jobs['metrics_dashboard_schedule_annotations_prune_worker']['job_class'] = 'Metrics::Dashboard::ScheduleAnnotationsPruneWorker'
Settings.cron_jobs['schedule_migrate_external_diffs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['schedule_migrate_external_diffs_worker']['cron'] ||= '15 * * * *'
Settings.cron_jobs['schedule_migrate_external_diffs_worker']['job_class'] = 'ScheduleMigrateExternalDiffsWorker'
Settings.cron_jobs['namespaces_prune_aggregation_schedules_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['namespaces_prune_aggregation_schedules_worker']['cron'] ||= '5 1 * * *'
Settings.cron_jobs['namespaces_prune_aggregation_schedules_worker']['job_class'] = 'Namespaces::PruneAggregationSchedulesWorker'
Settings.cron_jobs['container_expiration_policy_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['container_expiration_policy_worker']['cron'] ||= '50 * * * *'
Settings.cron_jobs['container_expiration_policy_worker']['job_class'] = 'ContainerExpirationPolicyWorker'
Settings.cron_jobs['x509_issuer_crl_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['x509_issuer_crl_check_worker']['cron'] ||= '30 1 * * *'
Settings.cron_jobs['x509_issuer_crl_check_worker']['job_class'] = 'X509IssuerCrlCheckWorker'
Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker']['cron'] ||= '45 1 * * 6'
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker']['job_class'] = 'AuthorizedProjectUpdate::PeriodicRecalculateWorker'
Settings.cron_jobs['update_container_registry_info_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['update_container_registry_info_worker']['cron'] ||= '0 0 * * *'
Settings.cron_jobs['update_container_registry_info_worker']['job_class'] = 'UpdateContainerRegistryInfoWorker'
Settings.cron_jobs['postgres_dynamic_partitions_creator'] ||= Settingslogic.new({})
Settings.cron_jobs['postgres_dynamic_partitions_creator']['cron'] ||= '21 */6 * * *'
Settings.cron_jobs['postgres_dynamic_partitions_creator']['job_class'] ||= 'PartitionCreationWorker'
Settings.cron_jobs['ci_platform_metrics_update_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ci_platform_metrics_update_cron_worker']['cron'] ||= '47 9 * * *'
Settings.cron_jobs['ci_platform_metrics_update_cron_worker']['job_class'] = 'CiPlatformMetricsUpdateCronWorker'
Settings.cron_jobs['analytics_instance_statistics_count_job_trigger_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['analytics_instance_statistics_count_job_trigger_worker']['cron'] ||= '50 23 */1 * *'
Settings.cron_jobs['analytics_instance_statistics_count_job_trigger_worker']['job_class'] ||= 'Analytics::InstanceStatistics::CountJobTriggerWorker'
Settings.cron_jobs['member_invitation_reminder_emails_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['member_invitation_reminder_emails_worker']['cron'] ||= '0 0 * * *'
Settings.cron_jobs['member_invitation_reminder_emails_worker']['job_class'] = 'MemberInvitationReminderEmailsWorker'
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker']['cron'] ||= '* * * * *'
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker']['job_class'] = 'ScheduleMergeRequestCleanupRefsWorker'
Settings.cron_jobs['manage_evidence_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['manage_evidence_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['manage_evidence_worker']['job_class'] = 'Releases::ManageEvidenceWorker'
Gitlab.ee do
Settings.cron_jobs['analytics_devops_adoption_create_all_snapshots_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['analytics_devops_adoption_create_all_snapshots_worker']['cron'] ||= '0 4 * * *'
Settings.cron_jobs['analytics_devops_adoption_create_all_snapshots_worker']['job_class'] = 'Analytics::DevopsAdoption::CreateAllSnapshotsWorker'
Settings.cron_jobs['active_user_count_threshold_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['active_user_count_threshold_worker']['cron'] ||= '0 12 * * *'
Settings.cron_jobs['active_user_count_threshold_worker']['job_class'] = 'ActiveUserCountThresholdWorker'
Settings.cron_jobs['adjourned_group_deletion_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['adjourned_group_deletion_worker']['cron'] ||= '0 3 * * *'
Settings.cron_jobs['adjourned_group_deletion_worker']['job_class'] = 'AdjournedGroupDeletionWorker'
Settings.cron_jobs['clear_shared_runners_minutes_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['clear_shared_runners_minutes_worker']['cron'] ||= '0 0 1 * *'
Settings.cron_jobs['clear_shared_runners_minutes_worker']['job_class'] = 'ClearSharedRunnersMinutesWorker'
Settings.cron_jobs['adjourned_projects_deletion_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['adjourned_projects_deletion_cron_worker']['cron'] ||= '0 4 * * *'
Settings.cron_jobs['adjourned_projects_deletion_cron_worker']['job_class'] = 'AdjournedProjectsDeletionCronWorker'
Settings.cron_jobs['geo_verification_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_verification_cron_worker']['cron'] ||= '* * * * *'
Settings.cron_jobs['geo_verification_cron_worker']['job_class'] ||= 'Geo::VerificationCronWorker'
Settings.cron_jobs['geo_file_download_dispatch_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_file_download_dispatch_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_file_download_dispatch_worker']['job_class'] ||= 'Geo::FileDownloadDispatchWorker'
Settings.cron_jobs['geo_registry_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_registry_sync_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_registry_sync_worker']['job_class'] ||= 'Geo::RegistrySyncWorker'
Settings.cron_jobs['geo_metrics_update_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_metrics_update_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_metrics_update_worker']['job_class'] ||= 'Geo::MetricsUpdateWorker'
Settings.cron_jobs['geo_prune_event_log_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_prune_event_log_worker']['cron'] ||= '*/5 * * * *'
Settings.cron_jobs['geo_prune_event_log_worker']['job_class'] ||= 'Geo::PruneEventLogWorker'
Settings.cron_jobs['geo_repository_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_repository_sync_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_repository_sync_worker']['job_class'] ||= 'Geo::RepositorySyncWorker'
Settings.cron_jobs['geo_secondary_registry_consistency_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_secondary_registry_consistency_worker']['cron'] ||= '* * * * *'
Settings.cron_jobs['geo_secondary_registry_consistency_worker']['job_class'] ||= 'Geo::Secondary::RegistryConsistencyWorker'
Settings.cron_jobs['geo_repository_verification_primary_batch_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_repository_verification_primary_batch_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_repository_verification_primary_batch_worker']['job_class'] ||= 'Geo::RepositoryVerification::Primary::BatchWorker'
Settings.cron_jobs['geo_repository_verification_secondary_scheduler_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_repository_verification_secondary_scheduler_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_repository_verification_secondary_scheduler_worker']['job_class'] ||= 'Geo::RepositoryVerification::Secondary::SchedulerWorker'
Settings.cron_jobs['geo_container_repository_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_container_repository_sync_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_container_repository_sync_worker']['job_class'] ||= 'Geo::ContainerRepositorySyncDispatchWorker'
Settings.cron_jobs['historical_data_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['historical_data_worker']['cron'] ||= '0 12 * * *'
Settings.cron_jobs['historical_data_worker']['job_class'] = 'HistoricalDataWorker'
Settings.cron_jobs['incident_sla_exceeded_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['incident_sla_exceeded_check_worker']['cron'] ||= '*/2 * * * *'
Settings.cron_jobs['incident_sla_exceeded_check_worker']['job_class'] = 'IncidentManagement::IncidentSlaExceededCheckWorker'
Settings.cron_jobs['import_software_licenses_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['import_software_licenses_worker']['cron'] ||= '0 3 * * 0'
Settings.cron_jobs['import_software_licenses_worker']['job_class'] = 'ImportSoftwareLicensesWorker'
Settings.cron_jobs['ldap_group_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ldap_group_sync_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['ldap_group_sync_worker']['job_class'] = 'LdapAllGroupsSyncWorker'
Settings.cron_jobs['ldap_sync_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['ldap_sync_worker']['cron'] ||= '30 1 * * *'
Settings.cron_jobs['ldap_sync_worker']['job_class'] = 'LdapSyncWorker'
Settings.cron_jobs['pseudonymizer_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['pseudonymizer_worker']['cron'] ||= '0 23 * * *'
Settings.cron_jobs['pseudonymizer_worker']['job_class'] ||= 'PseudonymizerWorker'
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['cron'] ||= '0 12 * * *'
Settings.cron_jobs['update_max_seats_used_for_gitlab_com_subscriptions_worker']['job_class'] = 'UpdateMaxSeatsUsedForGitlabComSubscriptionsWorker'
Settings.cron_jobs['elastic_index_bulk_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_index_bulk_cron_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['elastic_index_bulk_cron_worker']['job_class'] ||= 'ElasticIndexBulkCronWorker'
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker']['job_class'] ||= 'ElasticIndexInitialBulkCronWorker'
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['cron'] ||= '*/10 * * * *'
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['job_class'] ||= 'ElasticClusterReindexingCronWorker'
Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker']['cron'] ||= '10 3 * * *'
Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker']['job_class'] ||= 'ElasticRemoveExpiredNamespaceSubscriptionsFromIndexCronWorker'
Settings.cron_jobs['elastic_migration_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_migration_worker']['cron'] ||= '*/30 * * * *'
Settings.cron_jobs['elastic_migration_worker']['job_class'] ||= 'Elastic::MigrationWorker'
Settings.cron_jobs['sync_seat_link_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} 0 * * *"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'
Settings.cron_jobs['web_application_firewall_metrics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['web_application_firewall_metrics_worker']['cron'] ||= '0 1 * * 0'
Settings.cron_jobs['web_application_firewall_metrics_worker']['job_class'] = 'IngressModsecurityCounterMetricsWorker'
Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'
Settings.cron_jobs['network_policy_metrics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['network_policy_metrics_worker']['cron'] ||= '0 3 * * 0'
Settings.cron_jobs['network_policy_metrics_worker']['job_class'] = 'NetworkPolicyMetricsWorker'
Settings.cron_jobs['iterations_update_status_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['iterations_update_status_worker']['cron'] ||= '5 0 * * *'
Settings.cron_jobs['iterations_update_status_worker']['job_class'] = 'IterationsUpdateStatusWorker'
Settings.cron_jobs['vulnerability_statistics_schedule_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['vulnerability_statistics_schedule_worker']['cron'] ||= '15 1 * * *'
Settings.cron_jobs['vulnerability_statistics_schedule_worker']['job_class'] = 'Vulnerabilities::Statistics::ScheduleWorker'
Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker']['cron'] ||= '15 3 * * *'
Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker']['job_class'] = 'Vulnerabilities::HistoricalStatistics::DeletionWorker'
end
#
# Sidekiq
#
Settings['sidekiq'] ||= Settingslogic.new({})
Settings['sidekiq']['log_format'] ||= 'default'
#
# GitLab Shell
#
Settings['gitlab_shell'] ||= Settingslogic.new({})
Settings.gitlab_shell['path'] = Settings.absolute(Settings.gitlab_shell['path'] || Settings.gitlab['user_home'] + '/gitlab-shell/')
Settings.gitlab_shell['hooks_path'] = :deprecated_use_gitlab_shell_path_instead
Settings.gitlab_shell['authorized_keys_file'] ||= File.join(Dir.home, '.ssh', 'authorized_keys')
Settings.gitlab_shell['secret_file'] ||= Rails.root.join('.gitlab_shell_secret')
Settings.gitlab_shell['receive_pack'] = true if Settings.gitlab_shell['receive_pack'].nil?
Settings.gitlab_shell['upload_pack'] = true if Settings.gitlab_shell['upload_pack'].nil?
Settings.gitlab_shell['ssh_host'] ||= Settings.gitlab.ssh_host
Settings.gitlab_shell['ssh_port'] ||= 22
Settings.gitlab_shell['ssh_user'] = Settings.gitlab.ssh_user
Settings.gitlab_shell['owner_group'] ||= Settings.gitlab.user
Settings.gitlab_shell['ssh_path_prefix'] ||= Settings.__send__(:build_gitlab_shell_ssh_path_prefix)
Settings.gitlab_shell['git_timeout'] ||= 10800
# Object storage
ObjectStoreSettings.new(Settings).parse!
#
# Workhorse
#
Settings['workhorse'] ||= Settingslogic.new({})
Settings.workhorse['secret_file'] ||= Rails.root.join('.gitlab_workhorse_secret')
#
# GitLab KAS
#
Settings['gitlab_kas'] ||= Settingslogic.new({})
Settings.gitlab_kas['secret_file'] ||= Rails.root.join('.gitlab_kas_secret')
#
# Repositories
#
Settings['repositories'] ||= Settingslogic.new({})
Settings.repositories['storages'] ||= {}
unless Settings.repositories.storages['default']
Settings.repositories.storages['default'] ||= {}
# We set the path only if the default storage doesn't exist, in case it exists
# but follows the pre-9.0 configuration structure. `6_validations.rb` initializer
# will validate all storages and throw a relevant error to the user if necessary.
Settings.repositories.storages['default']['path'] ||= Settings.gitlab['user_home'] + '/repositories/'
end
Settings.repositories.storages.each do |key, storage|
Settings.repositories.storages[key] = Gitlab::GitalyClient::StorageSettings.new(storage)
end
#
# The repository_downloads_path is used to remove outdated repository
# archives. If someone has it configured incorrectly and it points
# to the path where repositories are stored, this can cause
# data-integrity issues. In this case, we set it to the default
# repository_downloads_path value.
#
repositories_storages = Settings.repositories.storages.values
repository_downloads_path = Settings.gitlab['repository_downloads_path'].to_s.gsub(%r{/$}, '')
repository_downloads_full_path = File.expand_path(repository_downloads_path, Settings.gitlab['user_home'])
# Gitaly migration: https://gitlab.com/gitlab-org/gitaly/issues/1255
Gitlab::GitalyClient::StorageSettings.allow_disk_access do
if repository_downloads_path.blank? || repositories_storages.any? { |rs| [repository_downloads_path, repository_downloads_full_path].include?(rs.legacy_disk_path.gsub(%r{/$}, '')) }
Settings.gitlab['repository_downloads_path'] = File.join(Settings.shared['path'], 'cache/archive')
end
end
#
# Backup
#
Settings['backup'] ||= Settingslogic.new({})
Settings.backup['keep_time'] ||= 0
Settings.backup['pg_schema'] = nil
Settings.backup['path'] = Settings.absolute(Settings.backup['path'] || "tmp/backups/")
Settings.backup['archive_permissions'] ||= 0600
Settings.backup['upload'] ||= Settingslogic.new({ 'remote_directory' => nil, 'connection' => nil })
Settings.backup['upload']['multipart_chunk_size'] ||= 104857600
Settings.backup['upload']['encryption'] ||= nil
Settings.backup['upload']['encryption_key'] ||= ENV['GITLAB_BACKUP_ENCRYPTION_KEY']
Settings.backup['upload']['storage_class'] ||= nil
#
# Pseudonymizer
#
Gitlab.ee do
Settings['pseudonymizer'] ||= Settingslogic.new({})
Settings.pseudonymizer['manifest'] = Settings.absolute(Settings.pseudonymizer['manifest'] || Rails.root.join("config/pseudonymizer.yml"))
Settings.pseudonymizer['upload'] ||= Settingslogic.new({ 'remote_directory' => nil, 'connection' => nil })
# Settings.pseudonymizer['upload']['multipart_chunk_size'] ||= 104857600
end
#
# Git
#
Settings['git'] ||= Settingslogic.new({})
Settings.git['bin_path'] ||= '/usr/bin/git'
# Important: keep the satellites.path setting until GitLab 9.0 at
# least. This setting is fed to 'rm -rf' in
# db/migrate/20151023144219_remove_satellites.rb
Settings['satellites'] ||= Settingslogic.new({})
Settings.satellites['path'] = Settings.absolute(Settings.satellites['path'] || "tmp/repo_satellites/")
#
# Kerberos
#
Gitlab.ee do
Settings['kerberos'] ||= Settingslogic.new({})
Settings.kerberos['enabled'] = false if Settings.kerberos['enabled'].nil?
Settings.kerberos['keytab'] = nil if Settings.kerberos['keytab'].blank? # nil means use default keytab
Settings.kerberos['simple_ldap_linking_allowed_realms'] = [] if Settings.kerberos['simple_ldap_linking_allowed_realms'].blank?
Settings.kerberos['service_principal_name'] = nil if Settings.kerberos['service_principal_name'].blank? # nil means any SPN in keytab
Settings.kerberos['use_dedicated_port'] = false if Settings.kerberos['use_dedicated_port'].nil?
Settings.kerberos['https'] = Settings.gitlab.https if Settings.kerberos['https'].nil?
Settings.kerberos['port'] ||= Settings.kerberos.https ? 8443 : 8088
if Settings.kerberos['enabled'] && !Settings.omniauth.providers.map(&:name).include?('kerberos_spnego')
Settings.omniauth.providers << Settingslogic.new({ 'name' => 'kerberos_spnego' })
end
end
#
# Smartcard
#
Gitlab.ee do
Settings['smartcard'] ||= Settingslogic.new({})
Settings.smartcard['enabled'] = false if Settings.smartcard['enabled'].nil?
Settings.smartcard['client_certificate_required_host'] = Settings.gitlab.host if Settings.smartcard['client_certificate_required_host'].nil?
Settings.smartcard['client_certificate_required_port'] = 3444 if Settings.smartcard['client_certificate_required_port'].nil?
Settings.smartcard['required_for_git_access'] = false if Settings.smartcard['required_for_git_access'].nil?
Settings.smartcard['san_extensions'] = false if Settings.smartcard['san_extensions'].nil?
end
#
# FortiAuthenticator
#
Settings['forti_authenticator'] ||= Settingslogic.new({})
Settings.forti_authenticator['enabled'] = false if Settings.forti_authenticator['enabled'].nil?
Settings.forti_authenticator['port'] = 443 if Settings.forti_authenticator['port'].to_i == 0
#
# FortiToken Cloud
#
Settings['forti_token_cloud'] ||= Settingslogic.new({})
Settings.forti_token_cloud['enabled'] = false if Settings.forti_token_cloud['enabled'].nil?
#
# Extra customization
#
Settings['extra'] ||= Settingslogic.new({})
Settings.extra['matomo_site_id'] ||= Settings.extra['piwik_site_id'] if Settings.extra['piwik_site_id'].present?
Settings.extra['matomo_url'] ||= Settings.extra['piwik_url'] if Settings.extra['piwik_url'].present?
#
# Rack::Attack settings
#
Settings['rack_attack'] ||= Settingslogic.new({})
Settings.rack_attack['git_basic_auth'] ||= Settingslogic.new({})
Settings.rack_attack.git_basic_auth['enabled'] = false if Settings.rack_attack.git_basic_auth['enabled'].nil?
Settings.rack_attack.git_basic_auth['ip_whitelist'] ||= %w{127.0.0.1}
Settings.rack_attack.git_basic_auth['maxretry'] ||= 10
Settings.rack_attack.git_basic_auth['findtime'] ||= 1.minute
Settings.rack_attack.git_basic_auth['bantime'] ||= 1.hour
#
# Gitaly
#
Settings['gitaly'] ||= Settingslogic.new({})
#
# Webpack settings
#
Settings['webpack'] ||= Settingslogic.new({})
Settings.webpack['config_file'] ||= 'config/webpack.config.js'
Settings.webpack['output_dir'] ||= 'public/assets/webpack'
Settings.webpack['public_path'] ||= 'assets/webpack'
Settings.webpack['manifest_filename'] ||= 'manifest.json'
Settings.webpack['dev_server'] ||= Settingslogic.new({})
Settings.webpack.dev_server['enabled'] ||= false
Settings.webpack.dev_server['host'] ||= 'localhost'
Settings.webpack.dev_server['port'] ||= 3808
Settings.webpack.dev_server['https'] ||= false
#
# Monitoring settings
#
Settings['monitoring'] ||= Settingslogic.new({})
Settings.monitoring['ip_whitelist'] ||= ['127.0.0.1/8']
Settings.monitoring['unicorn_sampler_interval'] ||= 10
Settings.monitoring['sidekiq_exporter'] ||= Settingslogic.new({})
Settings.monitoring.sidekiq_exporter['enabled'] ||= false
Settings.monitoring.sidekiq_exporter['log_enabled'] ||= false
Settings.monitoring.sidekiq_exporter['address'] ||= 'localhost'
Settings.monitoring.sidekiq_exporter['port'] ||= 8082
Settings.monitoring['web_exporter'] ||= Settingslogic.new({})
Settings.monitoring.web_exporter['enabled'] ||= false
Settings.monitoring.web_exporter['address'] ||= 'localhost'
Settings.monitoring.web_exporter['port'] ||= 8083
#
# Prometheus settings
#
Settings['prometheus'] ||= Settingslogic.new({})
# TODO: Remove listen_address and enable in GitLab 14.0 and set default value
# of server_address to be nil and enabled to be false -
# https://gitlab.com/gitlab-org/gitlab/-/issues/296022
Settings.prometheus['enable'] ||= false
Settings.prometheus['listen_address'] ||= nil
Settings.prometheus['enabled'] = Settings.prometheus['enable'] if Settings.prometheus['enabled'].nil?
Settings.prometheus['server_address'] ||= Settings.prometheus['listen_address']
#
# Shutdown settings
#
Settings['shutdown'] ||= Settingslogic.new({})
Settings.shutdown['blackout_seconds'] ||= 10
#
# Testing settings
#
if Rails.env.test?
Settings.gitlab['default_projects_limit'] = 42
Settings.gitlab['default_can_create_group'] = true
Settings.gitlab['default_can_create_team'] = false
end
| 59.0181 | 286 | 0.757322 |
187cffcee883ca2f7d9a272be906991cae021c61
| 1,376 |
# frozen_string_literal: true

RSpec.describe 'MeiliSearch::Client - Stats' do
  before(:all) do
    @client = MeiliSearch::Client.new($URL, $MASTER_KEY)
  end

  it 'gets version' do
    response = @client.version
    expect(response).to be_a(Hash)
    expect(response).to have_key('commitSha')
    expect(response).to have_key('buildDate')
    expect(response).to have_key('pkgVersion')
  end

  it 'gets sys-info' do
    response = @client.sysinfo
    expect(response).to be_a(Hash)
    expect(response).to have_key('memoryUsage')
    expect(response).to have_key('processorUsage')
    expect(response).to have_key('global')
    expect(response['global']['totalMemory']).not_to be_a(String)
    expect(response['processorUsage'].first).not_to be_a(String)
  end

  it 'gets pretty sys-info' do
    response = @client.pretty_sysinfo
    expect(response).to be_a(Hash)
    expect(response).to have_key('memoryUsage')
    expect(response).to have_key('processorUsage')
    expect(response).to have_key('global')
    expect(response['global']['totalMemory']).to be_a(String)
    expect(response['global']['totalMemory']).to end_with('B')
    expect(response['processorUsage'].first).to be_a(String)
    expect(response['processorUsage'].first).to end_with('%')
  end

  it 'gets stats' do
    response = @client.stats
    expect(response).to have_key('databaseSize')
  end
end
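
# Illustrative setup sketch (assumed values, not part of the original spec): $URL and
# $MASTER_KEY are globals expected to be defined in the suite's spec helper, e.g.
#
#   $URL = 'http://127.0.0.1:7700'
#   $MASTER_KEY = 'masterKey'
#   client = MeiliSearch::Client.new($URL, $MASTER_KEY)
#   client.version['pkgVersion'] # => a version string such as "0.20.0"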
| 32 | 65 | 0.702035 |
8716129ffa309856cd4042c8f51c026b91766c83
| 3,828 |
require 'spec_helper_integration'

feature 'Authorization Code Flow' do
  background do
    config_is_set(:authenticate_resource_owner) { User.first || redirect_to('/sign_in') }
    client_exists
    create_resource_owner
    sign_in
  end

  scenario 'resource owner authorizes the client' do
    visit authorization_endpoint_url(:client => @client)
    click_on "Authorize"
    access_grant_should_exist_for(@client, @resource_owner)
    i_should_be_on_client_callback(@client)
    url_should_have_param("code", Doorkeeper::AccessGrant.first.token)
    url_should_not_have_param("state")
    url_should_not_have_param("error")
  end

  scenario 'resource owner authorizes using test url' do
    @client.redirect_uri = Doorkeeper.configuration.test_redirect_uri
    @client.save!
    visit authorization_endpoint_url(:client => @client)
    click_on "Authorize"
    access_grant_should_exist_for(@client, @resource_owner)
    i_should_see 'Authorization code:'
    i_should_see Doorkeeper::AccessGrant.first.token
  end

  scenario 'resource owner authorizes the client with state parameter set' do
    visit authorization_endpoint_url(:client => @client, :state => "return-me")
    click_on "Authorize"
    url_should_have_param("code", Doorkeeper::AccessGrant.first.token)
    url_should_have_param("state", "return-me")
  end

  scenario 'resource owner requests an access token with authorization code' do
    visit authorization_endpoint_url(:client => @client)
    click_on "Authorize"
    authorization_code = Doorkeeper::AccessGrant.first.token
    post token_endpoint_url(:code => authorization_code, :client => @client)
    access_token_should_exist_for(@client, @resource_owner)
    should_not_have_json 'error'
    should_have_json 'access_token', Doorkeeper::AccessToken.first.token
    should_have_json 'token_type', "bearer"
    should_have_json_within 'expires_in', Doorkeeper::AccessToken.first.expires_in, 1
  end

  context 'with scopes' do
    background do
      default_scopes_exist :public
      optional_scopes_exist :write
    end

    scenario 'resource owner authorizes the client with default scopes' do
      visit authorization_endpoint_url(:client => @client)
      click_on "Authorize"
      access_grant_should_exist_for(@client, @resource_owner)
      access_grant_should_have_scopes :public
    end

    scenario 'resource owner authorizes the client with required scopes' do
      visit authorization_endpoint_url(:client => @client, :scope => "public write")
      click_on "Authorize"
      access_grant_should_have_scopes :public, :write
    end

    scenario 'resource owner authorizes the client with required scopes (without defaults)' do
      visit authorization_endpoint_url(:client => @client, :scope => "write")
      click_on "Authorize"
      access_grant_should_have_scopes :write
    end

    scenario 'new access token matches required scopes' do
      visit authorization_endpoint_url(:client => @client, :scope => "public write")
      click_on "Authorize"
      authorization_code = Doorkeeper::AccessGrant.first.token
      post token_endpoint_url(:code => authorization_code, :client => @client)
      access_token_should_exist_for(@client, @resource_owner)
      access_token_should_have_scopes :public, :write
    end

    scenario 'returns new token if scopes have changed' do
      client_is_authorized(@client, @resource_owner, :scopes => "public write")
      visit authorization_endpoint_url(:client => @client, :scope => "public")
      click_on "Authorize"
      authorization_code = Doorkeeper::AccessGrant.first.token
      post token_endpoint_url(:code => authorization_code, :client => @client)
      expect(Doorkeeper::AccessToken.count).to be(2)
      should_have_json 'access_token', Doorkeeper::AccessToken.last.token
    end
  end
end
| 35.119266 | 94 | 0.742685 |
abaab6a5e915066d5a3fb6409416595d548aa9dd
| 1,082 |
require 'spec_helper'
include SpecInfra::Helper::Solaris
describe php_config('default_mimetype') do
let(:stdout) { 'text/html' }
its(:value) { should eq 'text/html' }
its(:command) { should eq "php -r 'echo get_cfg_var( \"default_mimetype\" );'" }
end
describe php_config('default_mimetype') do
let(:stdout) { 'text/html' }
its(:value) { should_not eq 'text/plain' }
end
describe php_config('session.cache_expire') do
let(:stdout) { '180' }
its(:value) { should eq 180 }
its(:command) { should eq "php -r 'echo get_cfg_var( \"session.cache_expire\" );'" }
end
describe php_config('session.cache_expire') do
let(:stdout) { '180' }
its(:value) { should_not eq 360 }
end
describe php_config('mbstring.http_output_conv_mimetypes') do
let(:stdout) { 'application' }
its(:value) { should match /application/ }
its(:command) { should eq "php -r 'echo get_cfg_var( \"mbstring.http_output_conv_mimetypes\" );'" }
end
describe php_config('mbstring.http_output_conv_mimetypes') do
let(:stdout) { 'application' }
its(:value) { should_not match /html/ }
end
| 29.243243 | 101 | 0.695933 |
7a7e733732a26eca629d796dbb886d956092774d
| 1,737 |
class Ack < Formula
desc "Search tool like grep, but optimized for programmers"
homepage "https://beyondgrep.com/"
url "https://beyondgrep.com/ack-v3.5.0"
sha256 "6870d3c90691c3c4a9ec2ae69880e85c5188aa57adeeca2a794b477e034b989f"
license "Artistic-2.0"
bottle do
rebuild 1
sha256 cellar: :any_skip_relocation, x86_64_linux: "a838e3fffb37b9064032dd9c1b38323f77bf3b692a119037f490ee1cc22df53a"
end
head do
url "https://github.com/beyondgrep/ack3.git", branch: "dev"
resource "File::Next" do
url "https://cpan.metacpan.org/authors/id/P/PE/PETDANCE/File-Next-1.16.tar.gz"
sha256 "6965f25c2c132d0ba7a6f72b57b8bc6d25cf8c1b7032caa3a9bda8612e41d759"
end
end
depends_on "pod2man" => :build
uses_from_macos "perl"
def install
if build.head?
ENV.prepend_create_path "PERL5LIB", libexec/"lib/perl5"
ENV.prepend_path "PERL5LIB", libexec/"lib"
resource("File::Next").stage do
system "perl", "Makefile.PL", "INSTALL_BASE=#{libexec}"
system "make", "install"
end
system "perl", "Makefile.PL", "DESTDIR=#{buildpath}"
system "make"
libexec.install "ack"
chmod 0755, libexec/"ack"
(libexec/"lib").install "blib/lib/App"
(bin/"ack").write_env_script("#{libexec}/ack", PERL5LIB: ENV["PERL5LIB"])
man1.install "blib/man1/ack.1"
else
bin.install "ack-v#{version.to_s.tr("-", "_")}" => "ack"
system "#{Formula["pod2man"].opt_bin}/pod2man", "#{bin}/ack", "ack.1", "--release=ack v#{version}"
man1.install "ack.1"
end
end
test do
assert_equal "foo bar\n", pipe_output("#{bin}/ack --noenv --nocolor bar -",
"foo\nfoo bar\nbaz")
end
end
| 31.017857 | 121 | 0.651698 |
ac7ea39eaa0da2f96892744a79f5d6a6224a6ce6
| 169 |
puts "Enter X"
x = gets.chomp.to_r
puts "Enter Y (X != Y)"
y = gets.chomp.to_r
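# If X is larger, replace Y with the average of X and Y; otherwise replace X with twice their product.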
if x > y
y = (y + x) / 2
puts "Y = #{y}"
else
x = (y * x) * 2
puts "X = #{x}"
end
| 15.363636 | 23 | 0.47929 |
7af34a2f758e67a8d3873db675df31876c969935
| 2,012 |
# Create 2 Users, one is an admin and the other a regular user (automatically creates user budgets)
admin,user = User.create!(
[
{
name: 'admin',
admin: true,
email: '[email protected]',
password: 'password',
password_confirmation: 'password',
},
{
name: 'user',
admin: false,
email: '[email protected]',
password: 'password',
password_confirmation: 'password',
},
]
)
# Create another budget that is shared between both users
Budget.create!(
:name => "Shared Budget",
:description => "This is a shared Budget between admin and user",
:users => User.all,
:creator => admin
)
# Create 3 categories for every budget
Budget.all.each do |budget|
3.times do |i|
budget.categories.create!(
:name => "Category #{i+1}",
:planned => (i+1)*100
)
end
end
#Add some transactions
admin.budgets.each do |budget|
categories = budget.categories
budget.transactions.create!([
{
:amount => 10,
:comment => "Foo Bar",
:date => Date.today,
:category => categories[0],
:user => admin
},
{
:amount => 69,
:comment => "The quick brown fox...",
:date => Date.today,
:category => categories[1],
:user => admin
},
{
:amount => 486,
:comment => "unexpected costs",
:date => Date.today,
:category => categories[2],
:user => admin
}
])
end
user.budgets.each do |budget|
categories = budget.categories
budget.transactions.create!([
{
:amount => 48,
:comment => "Foo Bar",
:date => Date.today,
:category => categories[0],
:user => user
},
{
:amount => 10,
:comment => "The quick brown fox...",
:date => Date.today,
:category => categories[1],
:user => user
},
{
:amount => 5,
:comment => "Bar Baz",
:date => Date.today,
:category => categories[2],
:user => user
}
])
end
| 21.178947 | 90 | 0.539264 |
bbd4b8aa5cc46e64d5cbf476c51916904407285d
| 164 |
class SpacePolicy < ApplicationPolicy
def show?
user.present? && user.space.present? && user.space == record
end
def index_profiles?
show?
end
end
| 16.4 | 64 | 0.682927 |
e9604ee2c5992db7e1732f515bda7a6ed5e4018e
| 2,727 |
# Challenge name: Roman to Integer
#
# Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
#
# Symbol Value
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
#
# For example, 2 is written as II in Roman numeral, just two one's added together. 12 is written as XII, which is simply X + II. The number 27 is written as XXVII, which is XX + V + II.
#
# Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
#
# I can be placed before V (5) and X (10) to make 4 and 9.
# X can be placed before L (50) and C (100) to make 40 and 90.
# C can be placed before D (500) and M (1000) to make 400 and 900.
# Given a roman numeral, convert it to an integer.
#
#
# Approach 1: Left-to-Right Pass
#
# Complexity Analysis
#
# Let n be the length of the input string (the total number of symbols in it).
#
# Time complexity: O(1).
# As there is a finite set of roman numerals.
#
# Space complexity: O(1).
# Because only a constant number of single-value variables are used, the space complexity is O(1).
ROM_NUMS = {
'I' => 1,
'V' => 5,
'X' => 10,
'L' => 50,
'C' => 100,
'D' => 500,
'M' => 1000
}
# Now, recall that each symbol adds its own value, except for when a smaller
# valued symbol is before a larger valued symbol. In those cases, instead of
# adding both symbols to the total, we need to subtract the large from the
# small, adding that instead.
# Therefore, the simplest algorithm is to scan through the string symbol by
# symbol. The implementation below keeps a running total: when the current
# symbol is smaller than the one that follows (the subtractive case), its value
# is remembered in `temp`; otherwise the current value, minus any remembered
# `temp`, is added to the result.
def roman_to_int(s)
res = 0
temp = 0
s.chars.each_with_index do |el, i|
# subtractive case: if at least 2 symbols remaining AND value of s[i] < value of s[i + 1]
if ROM_NUMS[s[i + 1]] && ROM_NUMS[el] < ROM_NUMS[s[i + 1]]
temp = ROM_NUMS[el]
else
# Else this is NOT the subtractive case.
res += (ROM_NUMS[el] - temp)
temp = 0
end
end
res
end
s = 'III'
puts roman_to_int(s)
# Output: 3
s = 'IV'
puts roman_to_int(s)
# Output: 4
s = 'IX'
puts roman_to_int(s)
# Output: 9
s = 'LVIII'
puts roman_to_int(s)
# Output: 58
# Explanation: L = 50, V= 5, III = 3.
s = 'MCMXCIV'
puts roman_to_int(s)
# Output: 1994
# Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
| 28.113402 | 347 | 0.652365 |
edb08cffc5d30d1c98e05b6894242a5103d25b15
| 2,778 |
class Rethinkdb < Formula
desc "The open-source database for the realtime web"
homepage "https://www.rethinkdb.com/"
url "https://download.rethinkdb.com/dist/rethinkdb-2.3.6.tgz"
sha256 "c42159666910ad01be295a57caf8839ec3a89227d8919be5418e3aa1f0a3dc28"
bottle do
cellar :any
sha256 "eaa4700adc14905f388602c44008cbefcd2ac5c22a4a23e6871058a5f1a2a7ca" => :high_sierra
sha256 "1f936e43b0cb7b321d9a14a2f2de994154162ca5bb656c8583506ca253eadf6b" => :sierra
sha256 "d090123ea89626f60caa5517b1416b669d3cacfd51fcedfdcd6f58020e941190" => :el_capitan
sha256 "a17c6864cef6dfc7f1e8ab7da2fcd640d85a504991c0d61175e2f6c78e1ba6ee" => :yosemite
end
depends_on :macos => :lion
depends_on "boost" => :build
depends_on "openssl"
fails_with :gcc do
build 5666 # GCC 4.2.1
cause "RethinkDB uses C++0x"
end
# Fix error with Xcode 9, patch merged upstream:
# https://github.com/rethinkdb/rethinkdb/pull/6450
if DevelopmentTools.clang_build_version >= 900
patch do
url "https://raw.githubusercontent.com/Homebrew/formula-patches/fb00ee376a/rethinkdb/xcode9.patch"
sha256 "abd50d91a247ee7de988020dd9d405a3d4cd93edb2875b7d5822ba0f513f85a0"
end
end
def install
args = ["--prefix=#{prefix}"]
# rethinkdb requires that protobuf be linked against libc++
# but brew's protobuf is sometimes linked against libstdc++
args += ["--fetch", "protobuf"]
system "./configure", *args
system "make"
system "make", "install-osx"
(var/"log/rethinkdb").mkpath
inreplace "packaging/assets/config/default.conf.sample",
/^# directory=.*/, "directory=#{var}/rethinkdb"
etc.install "packaging/assets/config/default.conf.sample" => "rethinkdb.conf"
end
plist_options :manual => "rethinkdb"
def plist; <<~EOS
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>#{plist_name}</string>
<key>ProgramArguments</key>
<array>
<string>#{opt_bin}/rethinkdb</string>
<string>--config-file</string>
<string>#{etc}/rethinkdb.conf</string>
</array>
<key>WorkingDirectory</key>
<string>#{HOMEBREW_PREFIX}</string>
<key>StandardOutPath</key>
<string>#{var}/log/rethinkdb/rethinkdb.log</string>
<key>StandardErrorPath</key>
<string>#{var}/log/rethinkdb/rethinkdb.log</string>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
</dict>
</plist>
EOS
end
test do
shell_output("#{bin}/rethinkdb create -d test")
assert File.read("test/metadata").start_with?("RethinkDB")
end
end
| 32.302326 | 115 | 0.685385 |
ab8927f00e703f904c0367abf8524b62e8f374b5
| 2,267 |
require 'yaml'
require 'csv'
# MIGRATION STATUS: Done!
raise 'Migration already performed.' # Don't run this. Kept for posterity
def order_of_keys
%w(
CVE
yaml_instructions
curated_instructions
curated
reported_instructions
reported_date
announced_instructions
announced_date
published_instructions
published_date
description_instructions
description
bounty_instructions
bounty
reviews
bugs
repo
fixes_vcc_instructions
fixes
vccs
upvotes_instructions
upvotes
unit_tested
discovered
autodiscoverable
specification
subsystem
interesting_commits
i18n
ipc
lessons
mistakes
CWE_instructions
CWE
CWE_note
nickname_instructions
nickname
)
end
Dir['cves/*.yml'].each do |yml_file|
h = YAML.load(File.open(yml_file, 'r').read)
h['subsystem']['question'] = <<~EOS
What subsystems was the mistake in?
Most systems don't have a formal list of their subsystems, but you can
usually infer them from path names, bug report tags, or other key words
used. A single source file is not what we mean by a subsystem. In Django,
the "Component" field on the bug report is useful. But there may be other
subsystems involved.
Your subsystem name(s) should not have any dots or slashes in them. Only
    alphanumerics, whitespace, _, - and @. Feel free to add multiple using a YAML
array.
In the answer field, explain where you saw these words.
In the name field, a subsystem name (or an array of names)
e.g. clipboard, model, view, controller, mod_dav, ui, authentication
EOS
# Also do discoverable --> autodiscoverable
h['autodiscoverable'] = h['discoverable']
# remove sandbox question
h.delete('sandbox')
# Reconstruct the hash in the order we specify
out_h = {}
order_of_keys.each do |key|
out_h[key] = h[key]
end
# Generate the new YML, clean it up, write it out.
File.open(yml_file, "w+") do |file|
yml_txt = out_h.to_yaml[4..-1] # strip off ---\n
stripped_yml = ""
yml_txt.each_line do |line|
stripped_yml += "#{line.rstrip}\n" # strip trailing whitespace
end
file.write(stripped_yml)
print '.'
end
end
puts 'Done!'
| 23.132653 | 76 | 0.688575 |
6a95a8df707bbc68e75eddbb56e17e3821e30bac
| 5,593 |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
require 'thrift'
require 'thrift/protocol'
require File.dirname(__FILE__) + '/dynomite_types'
module Dynomite
class Client
include ::Thrift::Client
def get(key)
send_get(key)
return recv_get()
end
def send_get(key)
send_message('get', Get_args, :key => key)
end
def recv_get()
result = receive_message(Get_result)
return result.success unless result.success.nil?
raise result.fail unless result.fail.nil?
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get failed: unknown result')
end
def put(key, context, data)
send_put(key, context, data)
return recv_put()
end
def send_put(key, context, data)
send_message('put', Put_args, :key => key, :context => context, :data => data)
end
def recv_put()
result = receive_message(Put_result)
return result.success unless result.success.nil?
raise result.fail unless result.fail.nil?
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'put failed: unknown result')
end
def has(key)
send_has(key)
return recv_has()
end
def send_has(key)
send_message('has', Has_args, :key => key)
end
def recv_has()
result = receive_message(Has_result)
return result.success unless result.success.nil?
raise result.fail unless result.fail.nil?
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'has failed: unknown result')
end
def remove(key)
send_remove(key)
return recv_remove()
end
def send_remove(key)
send_message('remove', Remove_args, :key => key)
end
def recv_remove()
result = receive_message(Remove_result)
return result.success unless result.success.nil?
raise result.fail unless result.fail.nil?
raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'remove failed: unknown result')
end
end
class Processor
include ::Thrift::Processor
def process_get(seqid, iprot, oprot)
args = read_args(iprot, Get_args)
result = Get_result.new()
begin
result.success = @handler.get(args.key)
rescue FailureException => fail
result.fail = fail
end
write_result(result, oprot, 'get', seqid)
end
def process_put(seqid, iprot, oprot)
args = read_args(iprot, Put_args)
result = Put_result.new()
begin
result.success = @handler.put(args.key, args.context, args.data)
rescue FailureException => fail
result.fail = fail
end
write_result(result, oprot, 'put', seqid)
end
def process_has(seqid, iprot, oprot)
args = read_args(iprot, Has_args)
result = Has_result.new()
begin
result.success = @handler.has(args.key)
rescue FailureException => fail
result.fail = fail
end
write_result(result, oprot, 'has', seqid)
end
def process_remove(seqid, iprot, oprot)
args = read_args(iprot, Remove_args)
result = Remove_result.new()
begin
result.success = @handler.remove(args.key)
rescue FailureException => fail
result.fail = fail
end
write_result(result, oprot, 'remove', seqid)
end
end
# HELPER FUNCTIONS AND STRUCTURES
class Get_args
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :key
FIELDS = {
1 => {:type => ::Thrift::Types::STRING, :name => 'key'}
}
end
class Get_result
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :success, :fail
FIELDS = {
0 => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => GetResult},
1 => {:type => ::Thrift::Types::STRUCT, :name => 'fail', :class => FailureException}
}
end
class Put_args
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :key, :context, :data
FIELDS = {
1 => {:type => ::Thrift::Types::STRING, :name => 'key'},
2 => {:type => ::Thrift::Types::STRING, :name => 'context'},
3 => {:type => ::Thrift::Types::STRING, :name => 'data'}
}
end
class Put_result
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :success, :fail
FIELDS = {
0 => {:type => ::Thrift::Types::I32, :name => 'success'},
1 => {:type => ::Thrift::Types::STRUCT, :name => 'fail', :class => FailureException}
}
end
class Has_args
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :key
FIELDS = {
1 => {:type => ::Thrift::Types::STRING, :name => 'key'}
}
end
class Has_result
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :success, :fail
FIELDS = {
0 => {:type => ::Thrift::Types::I32, :name => 'success'},
1 => {:type => ::Thrift::Types::STRUCT, :name => 'fail', :class => FailureException}
}
end
class Remove_args
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :key
FIELDS = {
1 => {:type => ::Thrift::Types::STRING, :name => 'key'}
}
end
class Remove_result
include ::Thrift::Struct
::Thrift::Struct.field_accessor self, :success, :fail
FIELDS = {
0 => {:type => ::Thrift::Types::I32, :name => 'success'},
1 => {:type => ::Thrift::Types::STRUCT, :name => 'fail', :class => FailureException}
}
end
end
| 27.416667 | 127 | 0.623637 |
5d69ad981664f7cca0587ebf09727d351e66e542
| 6,253 |
require 'active_support/hash_with_indifferent_access'
module Pod
class Installer
# Represents the installation options the user can customize via a
# `Podfile`.
#
class InstallationOptions
# Parses installation options from a podfile.
#
# @param [Podfile] podfile the podfile to parse installation options
# from.
#
# @raise [Informative] if `podfile` does not specify a `CocoaPods`
# install.
#
# @return [Self]
#
def self.from_podfile(podfile)
name, options = podfile.installation_method
unless name.downcase == 'cocoapods'
raise Informative, "Currently need to specify a `cocoapods` install, you chose `#{name}`."
end
new(options)
end
# Defines a new installation option.
#
# @param [#to_s] name the name of the option.
#
# @param default the default value for the option.
#
# @param [Boolean] boolean whether the option has a boolean value.
#
# @return [void]
#
# @!macro [attach] option
#
# @note this option defaults to $2.
#
# @return [Boolean] the $1 $0 for installation.
#
def self.option(name, default, boolean: true)
name = name.to_s
raise ArgumentError, "The `#{name}` option is already defined" if defaults.key?(name)
defaults[name] = default
attr_accessor name
alias_method "#{name}?", name if boolean
end
# @return [Hash<Symbol,Object>] all known installation options and their
# default values.
#
def self.defaults
@defaults ||= {}
end
# @return [Array<Symbol>] the names of all known installation options.
#
def self.all_options
defaults.keys
end
# Initializes the installation options with a hash of options from a
# Podfile.
#
# @param [Hash] options the options to parse.
#
# @raise [Informative] if `options` contains any unknown keys.
#
def initialize(options = {})
options = ActiveSupport::HashWithIndifferentAccess.new(options)
unknown_keys = options.keys - self.class.all_options.map(&:to_s)
raise Informative, "Unknown installation options: #{unknown_keys.to_sentence}." unless unknown_keys.empty?
self.class.defaults.each do |key, default|
value = options.fetch(key, default)
send("#{key}=", value)
end
end
# @param [Boolean] include_defaults whether values that match the default
# for their option should be included. Defaults to `true`.
#
# @return [Hash] the options, keyed by option name.
#
def to_h(include_defaults: true)
self.class.defaults.reduce(ActiveSupport::HashWithIndifferentAccess.new) do |hash, (option, default)|
value = send(option)
hash[option] = value if include_defaults || value != default
hash
end
end
def ==(other)
other.is_a?(self.class) && to_h == other.to_h
end
alias_method :eql, :==
def hash
to_h.hash
end
# Whether to clean the sources of the pods during installation
#
# Cleaning removes any files not used by the pod as specified by the podspec and the platforms
# that the project supports
#
# @see {PodSourceInstaller#clean!}
#
option :clean, true
# Whether to deduplicate pod targets
#
# Target deduplication adds suffixes to pod targets for the cases where a pod is included
# in multiple targets that have different requirements. For example, a pod named 'MyPod' with a subspec 'SubA'
# that is included in two targets as follows:
#
# target 'MyTargetA' do
# pod 'MyPod/SubA'
# end
#
# target 'MyTargetB' do
# pod 'MyPod'
# end
#
# will result in two Pod targets: `MyPod` and `MyPod-SubA`
#
option :deduplicate_targets, true
# Whether to generate deterministic UUIDs when creating the Pods project
#
# @see {Xcodeproj#generate_uuid}
#
option :deterministic_uuids, true
# Whether to integrate the installed pods into the user project
#
# If set to false, Pods will be downloaded and installed to the `Pods/` directory
# but they will not be integrated into your project.
#
option :integrate_targets, true
# Whether to lock the source files of pods. Xcode will prompt to unlock the files when attempting to modify
# their contents
#
# @note There is a performance penalty to locking the pods during installation. If this is significantly
# impacting the duration of `pod install` for your project, you can try setting this to `false`
#
option :lock_pod_sources, true
# Whether to emit a warning when multiple sources contain a Pod with the same name and version
#
option :warn_for_multiple_pod_sources, true
# Whether to share Xcode schemes for development pods.
#
# Schemes for development pods are created automatically but are not shared by default.
#
option :share_schemes_for_development_pods, false
# Whether to disable the input & output paths of the CocoaPods script phases (Copy Frameworks & Copy Resources)
#
# @see https://github.com/CocoaPods/CocoaPods/issues/8073
#
option :disable_input_output_paths, false
# Whether to preserve the file structure of all Pods, including externally sourced pods.
#
# By default, the file structure of Pod sources is preserved only for development pods. Setting
# `:preserve_pod_file_structure` to `true` will _always_ preserve the file structure.
#
option :preserve_pod_file_structure, false
# Whether to generate a project per pod target. Instead of creating 1 `Pods.xcodeproj`, this option will generate
# a project for every pod target that will be nested under the `Pods.xcodeproj`.
#
option :generate_multiple_pod_projects, false
end
end
end
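# Minimal usage sketch (illustrative only; a Podfile would normally pass these
# options through `install! 'cocoapods', ...` rather than constructing the
# object directly). Runs only when this file is executed directly.
if $PROGRAM_NAME == __FILE__
  opts = Pod::Installer::InstallationOptions.new('deduplicate_targets' => false)
  puts opts.deduplicate_targets?            # => false
  puts opts.clean?                          # => true (the default)
  puts opts.to_h(include_defaults: false)   # => {"deduplicate_targets"=>false}
end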
| 34.357143 | 119 | 0.631537 |
6a5a223a614117fd08cb3a17e47be6e7d5cd250b
| 152 |
require 'backup/files'
module Backup
class Artifacts < Files
def initialize
super('artifacts', JobArtifactUploader.root)
end
end
end
| 15.2 | 50 | 0.710526 |
01fc2f061da715ac7299bab1eb2a356b4c9cb6cb
| 130 |
When(/^I run `([^`]*)` in bash$/) do |cmd|
cmd = "bash -c '%s'" % unescape_text(unescape_text(cmd))
run_simple(cmd, false)
end
| 26 | 58 | 0.6 |
391b1e26ad1621683e7ace8b1616b5d36895e595
| 157 |
class AddPublishedAtToCostsReports < ActiveRecord::Migration[5.0]
def change
add_column :costs_reports, :published_at, :datetime, null: true
end
end
| 26.166667 | 67 | 0.77707 |
e21f022f2ed36ba3e79d2adf8fba1c5874651010
| 1,998 |
# encoding: utf-8
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::Network::Mgmt::V2020_08_01
module Models
#
# Response for ServiceAssociationLinks_List operation.
#
class ServiceAssociationLinksListResult
include MsRestAzure
# @return [Array<ServiceAssociationLink>] The service association links
# in a subnet.
attr_accessor :value
# @return [String] The URL to get the next set of results.
attr_accessor :next_link
#
# Mapper for ServiceAssociationLinksListResult class as Ruby Hash.
# This will be used for serialization/deserialization.
#
def self.mapper()
{
client_side_validation: true,
required: false,
serialized_name: 'ServiceAssociationLinksListResult',
type: {
name: 'Composite',
class_name: 'ServiceAssociationLinksListResult',
model_properties: {
value: {
client_side_validation: true,
required: false,
serialized_name: 'value',
type: {
name: 'Sequence',
element: {
client_side_validation: true,
required: false,
serialized_name: 'ServiceAssociationLinkElementType',
type: {
name: 'Composite',
class_name: 'ServiceAssociationLink'
}
}
}
},
next_link: {
client_side_validation: true,
required: false,
read_only: true,
serialized_name: 'nextLink',
type: {
name: 'String'
}
}
}
}
}
end
end
end
end
| 28.956522 | 77 | 0.518018 |
f718631b17f7a15f423227a090c7a62bbbabcada
| 831 |
# frozen_string_literal: true
RSpec.describe EditorJs::Blocks::HeaderBlock do
let(:valid_data1) do
{
type: 'header',
data: {
text: 'this is a <b>header</b> <a>hacker</a> by <b>me</b>',
level: 2
}
}
end
let(:invalid_data1) do
{
type: 'header',
data: {
text: 'this is a <b>header</b> <a>hacker</a> by <b>me</b>',
level: 7
}
}
end
context 'with valid data' do
let(:header) { described_class.new(valid_data1) }
it { expect(header).to be_valid }
it { expect(header.render).to eq(%|<h2 class="editor_js--header">this is a by <b>me</b></h2>|) }
it { expect(header.plain).to eq('this is a by <b>me</b>') }
end
it { expect(described_class.new(invalid_data1)).not_to be_valid }
end
| 24.441176 | 114 | 0.5716 |
1aebea2d77dd80322f4b8a1ec5ecfa224fcefba1
| 629 |
# frozen_string_literal: true
require 'test_helper'
class ResponseTest < Test::Unit::TestCase
def setup
@http = mock
@http.stubs(:active?).returns(true)
@api = MatrixSdk::Api.new 'https://example.com'
@api.instance_variable_set :@http, @http
@api.stubs(:print_http)
end
def test_creation
data = { test_key: 'value' }
response = MatrixSdk::Response.new(@api, data)
assert_equal @api, response.api
assert_equal 'value', response.test_key
end
def test_creation_failure
data = 'Something else'
assert_raises(ArgumentError) { MatrixSdk::Response.new(@api, data) }
end
end
| 22.464286 | 72 | 0.689984 |
625983d611560601a87dd4939517b4505a8bcd51
| 1,076 |
#
# Be sure to run `pod lib lint RSDTesting.podspec' to ensure this is a
# valid spec before submitting.
#
# Any lines starting with a # are optional, but their use is encouraged
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
Pod::Spec.new do |s|
s.name = "RSDTesting"
s.version = "0.1.8"
s.summary = "Helper code for tests written in Swift."
s.description = <<-DESC
Testing helpers for asynchronous code, faking, mocking, and swizzling
DESC
s.homepage = "https://github.com/RaviDesai/RSDTesting"
s.license = 'MIT'
s.author = { "RaviDesai" => "[email protected]" }
s.source = { :git => "https://github.com/RaviDesai/RSDTesting.git", :tag => s.version.to_s }
s.platform = :ios, '9.0'
s.requires_arc = true
s.source_files = 'Pod/Classes/**/*'
# s.resource_bundles = {
# 'RSDTesting' => ['Pod/Assets/*.png']
# }
s.frameworks = 'UIKit', 'XCTest'
s.pod_target_xcconfig = { 'ENABLE_BITCODE' => 'NO' }
end
| 31.647059 | 104 | 0.607807 |
9155bdadc352c1bddb9b30036901504a35c7c74d
| 450 |
module Parsers::Xml::Cv
class EmailParser
include HappyMapper
register_namespace "cv", "http://openhbx.org/api/terms/1.0"
tag 'email'
namespace 'cv'
element :type, String, tag: "type"
element :email_address, String, tag: "email_address"
def request_hash
{
        email_type: type.split("#").last,
        email_address: email_address
}
end
def to_hash
request_hash
end
end
end
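# Minimal usage sketch (illustrative only; the XML shape is an assumption
# inferred from the mappings above, and HappyMapper must already be loaded).
if $PROGRAM_NAME == __FILE__
  xml = <<-XML
    <cv:email xmlns:cv="http://openhbx.org/api/terms/1.0">
      <cv:type>urn:openhbx:terms:v1:email_type#home</cv:type>
      <cv:email_address>[email protected]</cv:email_address>
    </cv:email>
  XML
  email = Parsers::Xml::Cv::EmailParser.parse(xml, single: true)
  puts email.request_hash.inspect # => {:email_type=>"home", :email_address=>"[email protected]"}
end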
| 18.75 | 63 | 0.62 |
e2b5f775b2f0b4d8f9bddcb6cad0deb3d2ee8039
| 1,956 |
module PQState
def self.progress_changer
@progress_changer ||= StateMachine.build(
CLOSED,
## Commissioning
Transition(UNASSIGNED, NO_RESPONSE) do |pq|
pq.action_officers_pqs.any?
end,
## Rejecting
Transition(NO_RESPONSE, REJECTED, &:rejected?),
Transition(REJECTED, NO_RESPONSE, &:no_response?),
## Draft Pending
Transition(NO_RESPONSE, DRAFT_PENDING) do |pq|
pq.action_officer_accepted.present?
end,
## With POD
Transition(DRAFT_PENDING, WITH_POD) do |pq|
!!pq.draft_answer_received
end,
## POD Query
Transition(WITH_POD, POD_QUERY) do |pq|
!!pq.pod_query_flag
end,
## POD Clearance
Transition.factory([WITH_POD, POD_QUERY], [POD_CLEARED]) do |pq|
(pq.draft_answer_received || pq.pod_query_flag) && pq.pod_clearance
end,
## With Minister
Transition(POD_CLEARED, WITH_MINISTER) do |pq|
if !pq.policy_minister
!!pq.sent_to_answering_minister
else
!!(pq.sent_to_answering_minister && pq.sent_to_policy_minister)
end
end,
## Minister Query
Transition(WITH_MINISTER, MINISTERIAL_QUERY) do |pq|
pq.answering_minister_query || pq.policy_minister_query
end,
## Minister Cleared
Transition.factory([WITH_MINISTER, MINISTERIAL_QUERY], [MINISTER_CLEARED]) do |pq|
(!pq.policy_minister && pq.cleared_by_answering_minister) ||
(pq.cleared_by_answering_minister && pq.cleared_by_policy_minister)
end,
## Answered
Transition(MINISTER_CLEARED, ANSWERED) do |pq|
pq.pq_withdrawn || pq.answer_submitted
end,
# Transferred out
Transition.factory(ALL - CLOSED, [TRANSFERRED_OUT]) do |pq|
pq.transfer_out_ogd_id && pq.transfer_out_date
end
)
end
def self.Transition(from, to, &block)
Transition.new(from, to, block)
end
end
| 32.6 | 88 | 0.649284 |
e87b6d8bae182017cdc0055251059162d066dbd6
| 1,754 |
module Rppc::Core
require "net/sender"
require "core/engine"
Net = Rppc::Net
    # Class which represents a node in the network
# @author Giuseppe Pagano <[email protected]>
#
# @!attribute [r] ip
# @return [String] The ip of the node.
# @!attribute state
# @return [String] State (or personal message) of the node.
# @!attribute username
# @return [String] Username of the node.
class Node
attr_reader :ip
attr_accessor :state
attr_accessor :username
# Class constructor
#
# @param ip [String] the ip of the node
def initialize(ip)
@ip = ip
@sender = Net::Sender.new Engine::UDP_PORT, Engine::TCP_PORT
end
# Tells if the provided ip refers to this node
#
# @param ip [String] which contains the sender address
# @return [Boolean] Returns true if the address info matches the node's ip
def is_you?(ip)
ip == @ip
end
# Sends a message to the node via tcp
#
# @param message [String] the message to send
def send_tcp message
@sender.send_tcp message, @ip
end
# Sends a message to the node via udp
#
# @param message [String] the message to send
def send_udp message
@sender.send_udp message, @ip
end
# Sends a broadcast message
#
# @param message [String] the message to send
def send_broadcast message
@sender.send_udp_broadcast message
end
def to_s
"<#{@ip} with [#{@usermame}]>"
end
def to_str
to_s
end
end
end
| 26.179104 | 82 | 0.555302 |
03d2b8d96a6641b2d8e29bb7991365923dab55f7
| 151 |
# Example ActiveModel with validations
class ValidatedModel
include Tire::Model::Persistence
property :name
validates_presence_of :name
end
| 12.583333 | 38 | 0.788079 |
262bd6d83bccec94f6bb8428e23daf4eaf463165
| 756 |
include_recipe 'user'
user_account 'hsolo' do
comment 'Han Solo'
ssh_keys ['ssh-rsa AAAA111', 'ssh-ed25519 AAAA222']
home '/opt/hoth/hsolo'
end
user_account 'lando' do
action [:create, :lock]
end
user_account 'obiwan' do
action :remove
end
user_account 'darth.vader' do
uid 4042
non_unique true
end
user_account 'askywalker' do
uid 4042
non_unique true
end
# set up an existing user with an existing key.
test_user = 'leia'
test_user_home = "/home/#{test_user}"
user test_user do
manage_home true
home test_user_home
end
directory "#{test_user_home}/.ssh" do
mode '0700'
owner test_user
group test_user
end
file "#{test_user_home}/.ssh/id_dsa" do
content 'bogus'
end
user_account 'leia'
| 15.75 | 54 | 0.702381 |
e92e14920893a83f87b502696583f4c70d0dcc3f
| 484 |
require 'will_paginate/collection'
# http://www.desimcadam.com/archives/8
Array.class_eval do
def paginate(options = {})
raise ArgumentError, "parameter hash expected (got #{options.inspect})" unless Hash === options
WillPaginate::Collection.create(
options[:page] || 1,
options[:per_page] || 30,
options[:total_entries] || self.length
) { |pager|
pager.replace self[pager.offset, pager.per_page].to_a
}
end
end
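# Minimal usage sketch (illustrative only): paginating a plain Ruby array with
# the monkey patch defined above.
if $PROGRAM_NAME == __FILE__
  letters = ('a'..'e').to_a
  page = letters.paginate(page: 2, per_page: 2)
  puts page.inspect     # => ["c", "d"]
  puts page.total_pages # => 3 (total_entries defaults to the array length, 5)
end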
| 28.470588 | 100 | 0.63843 |
037d8cd5f4d7da83383fbe45d87b16b0b396934f
| 754 |
# frozen_string_literal: true
require 'eac_ruby_utils/core_ext'
require 'eac_ruby_utils/ruby/command'
require 'delegate'
module EacRubyGemsUtils
class Gem
class Command < ::EacRubyUtils::Ruby::Command
attr_reader :gem
def initialize(gem, command_args, extra_options = {})
@gem = gem
super(command_args, extra_options.merge(host_env: gem.host_env))
end
# Changes current directory to the gem's directory.
def chdir_root
chdir(gem.root.to_path)
end
def envvar_gemfile
envvar('BUNDLE_GEMFILE', gem.gemfile_path.to_path)
end
protected
def duplicate(command, extra_options)
self.class.new(gem, command, extra_options)
end
end
end
end
| 22.176471 | 72 | 0.676393 |
bb2873a671ce92915b31129c3a2e317643311f45
| 527 |
$LOAD_PATH.unshift File.expand_path(File.dirname(__FILE__))
require 'lydown/core_ext'
require 'lydown/cache'
module Lydown
require 'yaml'
  DEFAULTS_FILENAME = File.expand_path('lydown/defaults.yml', File.dirname(__FILE__))
  DEFAULTS = YAML.load(IO.read(DEFAULTS_FILENAME)).deep!
end
require 'lydown/errors'
require 'lydown/parsing'
require 'lydown/templates'
require 'lydown/inverso'
require 'lydown/rendering'
require 'lydown/lilypond'
require 'lydown/work_context'
require 'lydown/work'
require 'lydown/translation'
| 25.095238 | 85 | 0.789374 |
b9ca4831b17c081fb7fea77b4f15b22cd361434e
| 1,886 |
class UsersController < ApplicationController
before_action :authenticate_user!
before_action :set_user, only: %i[show]
before_action :find_partners
layout 'application'
# GET /users or /users.json
def index; end
# GET /users/1 or /users/1.json
def show; end
# GET /users/1/edit
def edit; end
# PATCH/PUT /users/1 or /users/1.json
def update
respond_to do |format|
if self_update? && current_user.update(user_params)
format.html { redirect_to user_url(current_user), notice: 'User was successfully updated.' }
format.json { render :show, status: :ok, location: current_user }
else
format.html { render :edit, status: :unprocessable_entity }
format.json { render json: current_user.errors, status: :unprocessable_entity }
end
end
end
private
def set_user
@user = User.find(params[:id])
end
def find_partners
# Show previous partners
@partner_ids = MysteryPair.by_user(current_user)
.before_at(Date.today)
.where.not(partner_id: current_user.id)
.order(:lunch_date).pluck(:partner_id)
@partners = User.where(id: @partner_ids)
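    # Show upcoming partners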
@next_partner_ids = MysteryPair.by_user(current_user)
.after_at(Date.today)
.where.not(partner_id: current_user.id)
.order(:lunch_date).pluck(:partner_id)
@next_partners = User.where(id: @next_partner_ids)
@limit_show = params[:more].present? ? @partners.size : 10
end
# Only allow a list of trusted parameters through.
def user_params
params.require(:user).permit(
:photo,
:username,
:first_name,
:last_name,
:email
)
end
def self_update?
params[:id] == current_user.id.to_s
end
end
| 29.015385 | 100 | 0.61877 |
ed704af977e6e14ccb78809a6f19e6a3c1fad328
| 779 |
class OpenAPI::Loader::Denormalizer
#
# Denormalizes all the 'security' definitions
# by moving them from the root OpenAPI object
# right into the corresponding operation objects.
#
# @private
#
class Security < SimpleDelegator
def call
default = delete "security"
operations.each do |operation|
operation["security"] ||= default if default
end
end
private
def paths
Enumerator.new do |yielder|
fetch("paths", {}).each_value do |path|
yielder << path if path.is_a? Hash
end
end
end
def operations
Enumerator.new do |yielder|
paths.each do |path|
path.each_value { |item| yielder << item if item.is_a? Hash }
end
end
end
end
end
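# Minimal usage sketch (illustrative only; assumes the enclosing gem is loaded
# so the OpenAPI::Loader namespace exists). The root-level `security`
# requirement is copied onto each operation that does not declare its own.
if $PROGRAM_NAME == __FILE__
  spec = {
    "security" => [{ "api_key" => [] }],
    "paths"    => {
      "/pets" => {
        "get"  => { "responses" => {} },
        "post" => { "responses" => {}, "security" => [] } # explicit override is kept
      }
    }
  }
  OpenAPI::Loader::Denormalizer::Security.new(spec).call
  puts spec["paths"]["/pets"]["get"]["security"].inspect  # => [{"api_key"=>[]}]
  puts spec["paths"]["/pets"]["post"]["security"].inspect # => []
end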
| 21.638889 | 71 | 0.61104 |
268674f7d6184dfc9b3a68eb19a2be168f139247
| 8,896 |
=begin
#OpenAPI Petstore
#This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
OpenAPI Generator version: 3.3.2-SNAPSHOT
=end
require 'uri'
module Petstore
class Configuration
# Defines url scheme
attr_accessor :scheme
# Defines url host
attr_accessor :host
# Defines url base path
attr_accessor :base_path
# Defines API keys used with API Key authentications.
#
# @return [Hash] key: parameter name, value: parameter value (API key)
#
# @example parameter name is "api_key", API key is "xxx" (e.g. "api_key=xxx" in query string)
# config.api_key['api_key'] = 'xxx'
attr_accessor :api_key
# Defines API key prefixes used with API Key authentications.
#
# @return [Hash] key: parameter name, value: API key prefix
#
# @example parameter name is "Authorization", API key prefix is "Token" (e.g. "Authorization: Token xxx" in headers)
# config.api_key_prefix['api_key'] = 'Token'
attr_accessor :api_key_prefix
# Defines the username used with HTTP basic authentication.
#
# @return [String]
attr_accessor :username
# Defines the password used with HTTP basic authentication.
#
# @return [String]
attr_accessor :password
# Defines the access token (Bearer) used with OAuth2.
attr_accessor :access_token
# Set this to enable/disable debugging. When enabled (set to true), HTTP request/response
# details will be logged with `logger.debug` (see the `logger` attribute).
# Default to false.
#
# @return [true, false]
attr_accessor :debugging
# Defines the logger used for debugging.
# Default to `Rails.logger` (when in Rails) or logging to STDOUT.
#
# @return [#debug]
attr_accessor :logger
# Defines the temporary folder to store downloaded files
# (for API endpoints that have file response).
# Default to use `Tempfile`.
#
# @return [String]
attr_accessor :temp_folder_path
# The time limit for HTTP request in seconds.
# Default to 0 (never times out).
attr_accessor :timeout
# Set this to false to skip client side validation in the operation.
# Default to true.
# @return [true, false]
attr_accessor :client_side_validation
### TLS/SSL setting
# Set this to false to skip verifying SSL certificate when calling API from https server.
# Default to true.
#
# @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
#
# @return [true, false]
attr_accessor :verify_ssl
### TLS/SSL setting
# Set this to false to skip verifying SSL host name
# Default to true.
#
# @note Do NOT set it to false in production code, otherwise you would face multiple types of cryptographic attacks.
#
# @return [true, false]
attr_accessor :verify_ssl_host
### TLS/SSL setting
# Set this to customize the certificate file to verify the peer.
#
# @return [String] the path to the certificate file
#
# @see The `cainfo` option of Typhoeus, `--cert` option of libcurl. Related source code:
# https://github.com/typhoeus/typhoeus/blob/master/lib/typhoeus/easy_factory.rb#L145
attr_accessor :ssl_ca_cert
### TLS/SSL setting
# Client certificate file (for client certificate)
attr_accessor :cert_file
### TLS/SSL setting
# Client private key file (for client certificate)
attr_accessor :key_file
# Set this to customize parameters encoding of array parameter with multi collectionFormat.
# Default to nil.
#
# @see The params_encoding option of Ethon. Related source code:
# https://github.com/typhoeus/ethon/blob/master/lib/ethon/easy/queryable.rb#L96
attr_accessor :params_encoding
attr_accessor :inject_format
attr_accessor :force_ending_format
def initialize
@scheme = 'http'
@host = 'petstore.swagger.io'
@base_path = '/v2'
@api_key = {}
@api_key_prefix = {}
@timeout = 0
@client_side_validation = true
@verify_ssl = true
@verify_ssl_host = true
@params_encoding = nil
@cert_file = nil
@key_file = nil
@debugging = false
@inject_format = false
@force_ending_format = false
@logger = defined?(Rails) ? Rails.logger : Logger.new(STDOUT)
yield(self) if block_given?
end
# The default Configuration object.
def self.default
@@default ||= Configuration.new
end
def configure
yield(self) if block_given?
end
def scheme=(scheme)
# remove :// from scheme
@scheme = scheme.sub(/:\/\//, '')
end
def host=(host)
# remove http(s):// and anything after a slash
@host = host.sub(/https?:\/\//, '').split('/').first
end
def base_path=(base_path)
# Add leading and trailing slashes to base_path
@base_path = "/#{base_path}".gsub(/\/+/, '/')
@base_path = '' if @base_path == '/'
end
def base_url
url = "#{scheme}://#{[host, base_path].join('/').gsub(/\/+/, '/')}".sub(/\/+\z/, '')
URI.encode(url)
end
# Gets API key (with prefix if set).
# @param [String] param_name the parameter name of API key auth
def api_key_with_prefix(param_name)
if @api_key_prefix[param_name]
"#{@api_key_prefix[param_name]} #{@api_key[param_name]}"
else
@api_key[param_name]
end
end
# Gets Basic Auth token string
def basic_auth_token
'Basic ' + ["#{username}:#{password}"].pack('m').delete("\r\n")
end
# Returns Auth Settings hash for api client.
def auth_settings
{
'api_key' =>
{
type: 'api_key',
in: 'header',
key: 'api_key',
value: api_key_with_prefix('api_key')
},
'api_key_query' =>
{
type: 'api_key',
in: 'query',
key: 'api_key_query',
value: api_key_with_prefix('api_key_query')
},
'http_basic_test' =>
{
type: 'basic',
in: 'header',
key: 'Authorization',
value: basic_auth_token
},
'petstore_auth' =>
{
type: 'oauth2',
in: 'header',
key: 'Authorization',
value: "Bearer #{access_token}"
},
}
end
# Returns an array of Server setting
def server_settings
[
{
url: "http://{server}.swagger.io:{port}/v2",
description: "petstore server",
variables: {
server: {
description: "No descriptoin provided",
default_value: "petstore",
enum_values: [
"petstore",
"qa-petstore",
"dev-petstore"
]
},
port: {
description: "No descriptoin provided",
default_value: "80",
enum_values: [
"80",
"8080"
]
}
}
},
{
url: "https://localhost:8080/{version}",
description: "The local server",
variables: {
version: {
description: "No descriptoin provided",
default_value: "v2",
enum_values: [
"v1",
"v2"
]
}
}
}
]
end
# Returns URL based on server settings
#
# @param index array index of the server settings
# @param variables hash of variable and the corresponding value
def server_url(index, variables = {})
servers = server_settings
# check array index out of bound
      if index < 0 || index >= servers.size
        fail ArgumentError, "Invalid index #{index} when selecting the server. Must be less than #{servers.size}"
end
server = servers[index]
url = server[:url]
# go through variable and assign a value
server[:variables].each do |name, variable|
if variables.key?(name)
if (server[:variables][name][:enum_values].include? variables[name])
url.gsub! "{" + name.to_s + "}", variables[name]
else
fail ArgumentError, "The variable `#{name}` in the server URL has invalid value #{variables[name]}. Must be #{server[:variables][name][:enum_values]}."
end
else
# use default value
url.gsub! "{" + name.to_s + "}", server[:variables][name][:default_value]
end
end
url
end
end
end
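# Minimal usage sketch (illustrative only; the key and server values are
# placeholders): configuring the generated client and resolving one of the
# templated server URLs defined above.
if $PROGRAM_NAME == __FILE__
  Petstore::Configuration.default.configure do |c|
    c.api_key['api_key']        = 'xxx'
    c.api_key_prefix['api_key'] = 'Token'
  end
  config = Petstore::Configuration.default
  puts config.api_key_with_prefix('api_key')                     # => "Token xxx"
  puts config.server_url(0, server: 'qa-petstore', port: '8080') # => "http://qa-petstore.swagger.io:8080/v2"
end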
| 29.071895 | 163 | 0.589366 |
62c1cdfa6dc535253478c7bf8a4a24d690c60231
| 3,728 |
require_relative 'test_helper'
require 'yaml'
class SessionTest < Minitest::Test
describe 'JMS::Session' do
before do
@config, @queue_name, @topic_name = read_config
end
it 'create a session' do
JMS::Connection.session(@config) do |session|
assert session
end
end
it 'create automatic messages' do
JMS::Connection.session(@config) do |session|
assert session
# Create Text Message
assert_equal session.message("Hello").java_kind_of?(JMS::TextMessage), true
# Create Map Message
assert_equal session.message('hello' => 'world').java_kind_of?(JMS::MapMessage), true
end
end
it 'create explicit messages' do
JMS::Connection.session(@config) do |session|
assert session
# Create Text Message
assert_equal session.create_text_message("Hello").java_kind_of?(JMS::TextMessage), true
# Create Map Message
assert_equal session.create_map_message.java_kind_of?(JMS::MapMessage), true
end
end
it 'create temporary destinations in blocks' do
JMS::Connection.session(@config) do |session|
assert session
# Temporary Queue
session.destination(queue_name: :temporary) do |destination|
assert_equal destination.java_kind_of?(JMS::TemporaryQueue), true
end
# Temporary Topic
session.create_destination(topic_name: :temporary) do |destination|
assert_equal destination.java_kind_of?(JMS::TemporaryTopic), true
end
end
end
it 'create temporary destinations' do
JMS::Connection.session(@config) do |session|
assert session
# Temporary Queue
destination = session.create_destination(queue_name: :temporary)
assert_equal destination.java_kind_of?(JMS::TemporaryQueue), true
destination.delete
# Temporary Topic
destination = session.create_destination(topic_name: :temporary)
assert_equal destination.java_kind_of?(JMS::TemporaryTopic), true
destination.delete
end
end
it 'create destinations in blocks' do
JMS::Connection.session(@config) do |session|
assert session
# Temporary Queue
session.destination(queue_name: @queue_name) do |destination|
assert_equal destination.java_kind_of?(JMS::Queue), true
end
# Temporary Topic
session.create_destination(topic_name: @topic_name) do |destination|
assert_equal destination.java_kind_of?(JMS::Topic), true
end
end
end
it 'create destinations' do
JMS::Connection.session(@config) do |session|
assert session
# Queue
queue = session.create_destination(queue_name: @queue_name)
assert_equal queue.java_kind_of?(JMS::Queue), true
# Topic
topic = session.create_destination(topic_name: @topic_name)
assert_equal topic.java_kind_of?(JMS::Topic), true
end
end
it 'create destinations using direct methods' do
JMS::Connection.session(@config) do |session|
assert session
# Queue
queue = session.queue(@queue_name)
assert_equal queue.java_kind_of?(JMS::Queue), true
# Temporary Queue
queue = session.temporary_queue
assert_equal queue.java_kind_of?(JMS::TemporaryQueue), true
queue.delete
# Topic
topic = session.topic(@topic_name)
assert_equal topic.java_kind_of?(JMS::Topic), true
# Temporary Topic
topic = session.temporary_topic
assert_equal topic.java_kind_of?(JMS::TemporaryTopic), true
topic.delete
end
end
end
end
| 29.587302 | 95 | 0.657725 |
e993629f19d7f9bd32b9e8f94357178ab61bc539
| 330 |
class CreatePosts < ActiveRecord::Migration[6.0]
def change
create_table :posts do |t|
t.text :title
t.text :description
t.text :img_url
t.string :price
t.string :quantity
t.text :contact_info
t.references :user, null: false, foreign_key: true
t.timestamps
end
end
end
| 20.625 | 56 | 0.633333 |
1a9422fb743ae20d311ba50bb6b93b94368bde10
| 2,244 |
# frozen_string_literal: true
require_relative 'uprofile/version'
require_relative 'uprofile/bluepages/email'
require_relative 'uprofile/bluepages/uid'
require_relative 'uprofile/bluepages/typeahead'
require_relative 'uprofile/bluepages/skills'
require_relative 'uprofile/bluepages/teams'
require_relative 'uprofile/bluepages/all'
require_relative 'uprofile/arguments'
module RailsIbm
module Uprofile
class Error < StandardError; end
DOC_LINK = "Please refer to 'https://github.com/niightly/rails_ibm-uprofile/blob/master/README.md#usage' for more details.'".freeze
def self.info(options = {})
args = verify(options)
if options.include?(:emails) || options.include?(:email)
RailsIbm::Uprofile::Bluepages::Email.new([options[:emails] || options[:email]].flatten).results(**args.filter(%i[parsed remap]))
elsif options.include?(:uids) || options.include?(:uid)
RailsIbm::Uprofile::Bluepages::Uid.new([options[:uids] || options[:uid]].flatten).results(**args.filter(%i[parsed remap]))
elsif options.include?(:query)
RailsIbm::Uprofile::Bluepages::Typeahead.new(options[:query], **default_params(options)).results(**args.filter(%i[parsed remap detailed array]))
end
end
def self.skills(options = {})
args = verify(options, single: true)
RailsIbm::Uprofile::Bluepages::Skills.new(options[:uid] || options[:email]).results
end
def self.teams(options = {})
args = verify(options, single: true)
RailsIbm::Uprofile::Bluepages::Teams.new(options[:uid] || options[:email]).results
end
def self.all(options = {})
args = verify(options, single: true)
RailsIbm::Uprofile::Bluepages::All.new(options[:uid] || options[:email]).results
end
private
def self.verify(options, single: false)
args = RailsIbm::Uprofile::Arguments.new(params(options, single))
args.validate
args
end
def self.default_params(options)
{ serial: options[:serial] || '000000', limit: options[:limit] || 10, start: options[:start] || 0 }
end
def self.params(options, single)
return options unless single
options.tap { |hs| %i[uids emails].each { |k| hs.delete(k) } }
end
end
end
| 35.619048 | 152 | 0.688057 |
d5d003d2b2768b3497879b53b686961302f2e60f
| 791 |
require 'rspec_api_documentation/dsl/endpoint/set_param'
module RspecApiDocumentation
module DSL
module Endpoint
class Params
attr_reader :example_group, :example
def initialize(example_group, example, extra_params)
@example_group = example_group
@example = example
@extra_params = extra_params
end
def call
set_param = -> hash, param {
SetParam.new(self, hash, param).call
}
example.metadata.fetch(:parameters, {}).inject({}, &set_param)
.deep_merge(
example.metadata.fetch(:attributes, {}).inject({}, &set_param)
).deep_merge(extra_params)
end
private
attr_reader :extra_params
end
end
end
end
| 23.264706 | 76 | 0.599241 |
bb93837a9e118fae6b3bcc4c43cb399ea239d4ce
| 229 |
# -*- coding: UTF-8 -*-
module RubyPersistenceAPI
module Backends
module ActiveMemory
class CatGateway < RubyPersistenceAPI::ActiveMemory::Gateway
entity_class Entities::Cat
end
end
end
end
| 12.722222 | 66 | 0.668122 |
bf230344037b5190e3f98b1d9a8ef0b19640cbf1
| 603 |
Pod::Spec.new do |s|
s.name = 'ElloCerts'
s.version = '1.0.0'
s.summary = 'The private certs for the Ello iOS app.'
s.homepage = 'https://github.com/ello/Ello-iOS-Certs'
s.license = 'Private'
s.authors = 'Ello'
s.source = { git: '[email protected]:ello/Ello-iOS-Certs.git', tag: s.version.to_s }
s.platform = :ios, '8.0'
s.requires_arc = true
s.source_files = 'Pod/Classes/ElloCerts.swift'
s.resources = 'Pod/Assets/*'
s.frameworks = 'UIKit'
s.module_name = 'ElloCerts'
s.dependency "Alamofire", "~> 3.0"
end
| 30.15 | 93 | 0.577114 |
e8283cb107ab1213b6cd69ee03b0c4682e896113
| 251 |
class CreateLikes < ActiveRecord::Migration[5.0]
def change
create_table :likes do |t|
t.references :likable, polymorphic: true, index: true
t.references :profile, foreign_key: true, index: true
t.timestamps
end
end
end
| 22.818182 | 59 | 0.685259 |
03aaeda83cf071a3b98f5da4cf8a1f1d7ed5fd22
| 7,369 |
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
require 'spec_helper'
require 'rbeapi/client'
include FixtureHelpers
describe Rbeapi::Client do
subject { described_class }
let(:node) { double('node') }
def dut_conf
fixture_file('dut.conf')
end
def test_conf
fixture_file('test.conf')
end
let(:dut) do
File.read(dut_conf)
end
let(:test) do
File.read(test_conf)
end
let(:veos01) do
{
'username' => 'eapi',
'password' => 'password',
'transport' => 'http',
'host' => 'veos01'
}
end
# Client class methods
describe '#config_for' do
it 'returns the configuration options for the connection' do
expect(subject.load_config(test_conf)).to eq(nil)
expect(subject.config_for('veos01')).to eq(veos01)
end
end
describe '#connect_to' do
it 'retrieves the node config' do
expect(subject.connect_to('veos01')).to be_truthy
end
end
describe '#load_config' do
it 'overrides the default conf file loaded in the config' do
expect(subject.load_config(test_conf)).to eq(nil)
expect(subject.config_for('dut')).to eq(nil)
expect(subject.config_for('veos01')).to eq(veos01)
end
end
# Config class methods
describe 'config' do
it 'gets the loaded configuration file data' do
expect(subject.load_config(test_conf)).to eq(nil)
expect(subject.config.to_s).to eq(test)
end
end
describe '#read' do
it 'read the specified filename and load it' do
expect(subject.load_config(dut_conf)).to eq(transport: 'socket')
expect(subject.config.read(test_conf)).to eq(nil)
expect(subject.config.to_s).to eq(test)
end
end
describe '#get_connection' do
it 'get connection dut' do
expect(subject.config.get_connection('veos01')).to eq(veos01)
end
end
describe '#reload' do
it 'reloads the configuration file' do
expect(subject.config.get_connection('veos01')).to eq(veos01)
expect(subject.config.reload(filename: [dut_conf]))
.to eq(transport: 'socket')
expect(subject.config.get_connection('veos01')).to eq(nil)
expect(subject.config.get_connection('dut')).not_to be_nil
end
end
describe '#add_connection' do
it 'adds a new connection section' do
expect(subject.config.add_connection('test2',
username: 'test2',
password: 'test',
transport: 'http',
host: 'test2'
)).to eq(username: 'test2',
password: 'test',
transport: 'http',
host: 'test2')
expect(subject.config.get_connection('test2'))
.to eq(username: 'test2',
password: 'test',
transport: 'http',
host: 'test2')
end
end
# Node Class Methods
describe '#running_config' do
it 'gets the nodes running config' do
allow(node).to receive(:running_config).and_return(test)
expect(node).to receive(:running_config)
expect(node.running_config.to_s).to eq(test)
end
end
describe '#startup_config' do
it 'gets the nodes startup-configuration' do
allow(node).to receive(:startup_config).and_return(test)
expect(node).to receive(:startup_config)
expect(node.startup_config).to eq(test)
end
end
describe '#enable_authentication' do
it 'gets the nodes startup-configuration' do
expect(node).to receive(:enable_authentication).with('newpassword')
expect(node.enable_authentication('newpassword')).to eq(nil)
end
end
describe '#config' do
it 'puts switch into config mode' do
expect(node).to receive(:config)
.with(['no ip virtual-router mac-address'])
expect(node.config(['no ip virtual-router mac-address'])).to eq(nil)
end
it 'puts switch into config mode with options' do
expect(node).to receive(:config)
.with(['no ip virtual-router mac-address'],
encoding: 'json',
open_timeout: 27.00,
read_timeout: 27.00)
expect(node.config(['no ip virtual-router mac-address'],
encoding: 'json',
open_timeout: 27.00,
read_timeout: 27.00)).to eq(nil)
end
end
describe '#enable' do
it 'puts the switch into privilege mode' do
expect(node).to receive(:enable).with('show hostname', encoding: 'text')
expect(node.enable('show hostname', encoding: 'text'))
.to eq(nil)
end
end
describe '#run_commands' do
    it 'sends commands to the node' do
expect(node).to receive(:run_commands)
.with('show hostname', encoding: 'text')
expect(node.run_commands('show hostname', encoding: 'text'))
.to eq(nil)
end
end
describe '#get_config' do
it 'will retrieve the specified configuration' do
expect(node).to receive(:get_config)
.with(config: 'running-config')
expect(node.get_config(config: 'running-config'))
.to eq(nil)
end
it 'will retrieve the specified configuration with param' do
expect(node).to receive(:get_config)
.with(config: 'running-config', param: 'all')
expect(node.get_config(config: 'running-config', param: 'all'))
.to eq(nil)
end
end
describe '#api' do
it 'returns api module' do
expect(node).to receive(:api).with('vlans')
expect(node.api('vlans')).to eq(nil)
end
end
describe '#refresh' do
it 'refreshes configs for next call' do
expect(node).to receive(:refresh)
expect(node.refresh).to eq(nil)
end
end
end
| 31.762931 | 78 | 0.638757 |
5d89b6b27eacc89e56d25ad70d91ea0f727550d3
| 3,049 |
class User < ActiveRecord::Base
self.table_name = "users"
self.primary_key = 'id'
has_one :agent, :dependent => :destroy, :inverse_of => :user
has_one :customer, :dependent => :destroy, :inverse_of => :user
has_many :issues, :inverse_of => :user
has_many :suggestions, :inverse_of => :user
has_many :customer_devices, :inverse_of => :user
has_many :customer_location_alerts, :inverse_of => :user
has_many :agent_change_info, :inverse_of => :user
accepts_nested_attributes_for :customer, :agent
#rails_admin do
#configure :customer do
#visible(false)
#end
#configure :agent do
#visible(false)
#end
#end
def name
"#{email}"
end
def role
if self.customer.present?
return I18n.t 'customer'
elsif self.agent.present?
return I18n.t 'agent'
else
return 'admin'
end
end
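  # Returns true when the given device token/type pair is already registered
  # for this user. When the pair is unknown, the customer's phone_verified
  # flag is cleared and false is returned.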
  def valid_device_token(device_token, device_type)
    device_verified = false
    if !device_type.blank? && !device_token.blank?
      devices = customer_devices.where(:device_type => device_type, :device_token => device_token)
      if devices.count > 0
        device_verified = true
      else
        # Unknown device/token pair: clear the customer's phone verification flag
        customer.update_attributes(:phone_verified => false)
        device_verified = false
      end
    end
    device_verified
end
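  # Creates an AWS SNS platform endpoint for push notifications. The platform
  # application is chosen from the user's role (agent/customer) and the device
  # type (Android/iOS). Returns the endpoint ARN, or false if SNS raises.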
def generate_endpoint_arn(token, device_type)
sns = Aws::SNS::Client.new
begin
if self.role =='agent'
if device_type == "Android"
endpoint = sns.create_platform_endpoint(
platform_application_arn:'arn:aws:sns:us-west-2:126524593432:app/GCM/Agento_Floortime',
token:token)
else
endpoint = sns.create_platform_endpoint(
platform_application_arn:'arn:aws:sns:us-west-2:126524593432:app/APNS_SANDBOX/Agento_Floortime_iOS',
token: token)
end
elsif self.role =='customer'
if device_type == "Android"
endpoint = sns.create_platform_endpoint(
platform_application_arn:'arn:aws:sns:us-west-2:126524593432:app/GCM/Agento_Customer',
token:token)
else
endpoint = sns.create_platform_endpoint(
platform_application_arn:'arn:aws:sns:us-west-2:126524593432:app/APNS_SANDBOX/Agento_Customer_iOS',
token: token)
end
else
p "unknown"
end
rescue Exception => e
p e
return false
end
return endpoint.endpoint_arn
end
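  # Registers (or refreshes) the CustomerDevice record identified by the given
  # SNS endpoint ARN for this user. Returns true on success, false on error.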
def add_device(endpoint_arn, device_type, device_token)
begin
customer_device = CustomerDevice.find_or_initialize_by(endpoint_arn: endpoint_arn)
customer_device.endpoint_arn = endpoint_arn
customer_device.device_type = device_type
customer_device.device_token = device_token
customer_device.user_id = self.id
customer_device.save
return true
rescue Exception => e
return false
end
end
end
| 28.231481 | 112 | 0.670712 |
18afaebb8e5bad42da9df12de592017cf2e861d5
| 1,796 |
#-- encoding: UTF-8
#-- copyright
# OpenProject is a project management system.
# Copyright (C) 2012-2015 the OpenProject Foundation (OPF)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2013 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See doc/COPYRIGHT.rdoc for more details.
#++
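# The two patches below let requests for :xml and :json also pick up templates
# registered under the :api format: the resolver adds :api to the searched
# formats, and the responder collector treats `format.api` as matching both
# :xml and :json.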
module ActionView
class Resolver
def find_all(name, prefix = nil, partial = false, details = {}, key = nil, locals = [])
cached(key, [name, prefix, partial], details, locals) do
if details[:formats] & [:xml, :json]
details = details.dup
details[:formats] = details[:formats].dup + [:api]
end
find_templates(name, prefix, partial, details)
end
end
end
end
module ActionController
module MimeResponds
class Collector
def api(&block)
any(:xml, :json, &block)
end
end
end
end
| 33.886792 | 91 | 0.708797 |
18cf205e6eb03f31498690de05edd9bea1c4921e
| 624 |
require 'rails_helper'
RSpec.describe Like, type: :model do
User.delete_all
Post.delete_all
  it '1. FAIL CHECK - Checks that a like without a user and post is invalid' do
test_l = Like.create(post_id: nil, user_id: nil)
expect(test_l).to be_invalid
end
  it '2. SUCCESS CHECK - Checks that a like with a valid user and post can be created' do
User.create(email: '[email protected]', username: 'mateo', password: 'this is a password')
test_u = User.first
Post.create(content: 'This is a test post', user_id: test_u.id)
test_p = Post.first
test_l = Like.create(post_id: test_p.id, user_id: test_u.id)
expect(test_l).to be_valid
end
end
| 29.714286 | 91 | 0.69391 |
5d0c9befe867229dcf4720cc5b2d9295a74ec145
| 811 |
Ezhov hopes to make SKA's starting lineup for the match against Slovan.
Ilya Ezhov of Sergei Zubov's SKA stressed that he hopes to make the Red Army club's starting lineup in the next KHL championship match.
Ezhov replaced Mikko Koskinen on the ice at the 23-minute mark with the score at 0-3 and turned away every shot on his goal; SKA conceded a fourth goal after the goaltender was pulled for a sixth skater.
The team's next match, against Slovan, is on the road.
"I hope to be in the starting lineup for the next match," the report quotes Ezhov as saying.
"I am no expert on goaltenders: I am a goaltender myself; they do not show replays of the goals that were conceded, so it is hard to follow."
"Dynamo played well, and everyone has bad games."
The team still lost.
It would have been better if I had had to concede more goals and the team had won.
| 81.1 | 186 | 0.799014 |
bf1cf912e5dc3dcefb5bdf50d61074b7f8825a49
| 1,937 |
ORDINALS = {
"first" => 0,
"second" => 1
}
def dehumanize(string)
string.gsub(/\W+/,'_').downcase
end
def generate_fabricator_name(model_name)
model_name.singularize.to_sym
end
def get_class(model_name)
fabricator_name = generate_fabricator_name(model_name)
Fabrication.schematics[fabricator_name].klass
end
Then /^that ([^"]*) should be persisted$/ do |object_name|
object_name = dehumanize(object_name)
object = fabrications[object_name]
object.should be_persisted
end
Then /^that ([^"]*) should have "([^"]*)" for a "([^"]*)"$/ do |object_name, value, field|
object_name = dehumanize(object_name)
object = fabrications[object_name]
object.send(dehumanize(field)).to_s.should == value
end
Then /^they should be persisted$/ do
@they.each do |object|
object.should be_persisted
end
end
Then /^they should reference that ([^"]*)$/ do |parent_name|
parent_name = dehumanize(parent_name)
parent = fabrications[parent_name]
parent_class = get_class(parent_name)
parent_class_name = parent_class.to_s.underscore
@they.each do |object|
object.send(parent_class_name).should == parent
end
end
Then /^the ([^"]*) should have "([^"]*)" for a "([^"]*)"$/ do |ordindal, value, field|
object = @they[ORDINALS[ordindal]]
object.send(dehumanize(field)).to_s.should == value
end
Then /^that ([^"]*) should reference that ([^"]*)$/ do |child_name, parent_name|
parent_name = dehumanize(parent_name)
parent = fabrications[parent_name]
parent_class = get_class(parent_name)
parent_class_name = parent_class.to_s.underscore
child_name = dehumanize(child_name)
child = fabrications[child_name]
child.send(parent_class_name).should == parent
end
Then /^that (.*) should have (\d+) (.*)$/ do |parent_name, count, child_name|
parent_name = dehumanize(parent_name)
parent = fabrications[parent_name]
parent.send(dehumanize(child_name).pluralize).count.should == count.to_i
end
| 28.072464 | 90 | 0.71967 |
26e623916c93249ea247e691c3011e47b51732ca
| 46,654 |
# helpers: coerce_to, respond_to
require 'corelib/comparable'
require 'corelib/regexp'
class String < `String`
include Comparable
%x{
Opal.defineProperty(#{self}.$$prototype, '$$is_string', true);
Opal.defineProperty(#{self}.$$prototype, '$$cast', function(string) {
var klass = this.$$class;
if (klass.$$constructor === String) {
return string;
} else {
return new klass.$$constructor(string);
}
});
}
def __id__
`self.toString()`
end
alias object_id __id__
def self.try_convert(what)
Opal.coerce_to?(what, String, :to_str)
end
def self.new(*args)
%x{
var str = args[0] || "";
var opts = args[args.length-1];
str = $coerce_to(str, #{String}, 'to_str');
if (opts && opts.$$is_hash) {
if (opts.$$smap.encoding) str = str.$force_encoding(opts.$$smap.encoding);
}
str = new self.$$constructor(str);
if (!str.$initialize.$$pristine) #{`str`.initialize(*args)};
return str;
}
end
# Our initialize method does nothing, the string value setup is being
# done by String.new. Therefore not all kinds of subclassing will work.
# As a rule of thumb, when subclassing String, either make sure to override
# .new or make sure that the first argument given to a constructor is
# a string we want our subclass-string to hold.
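  # For example (illustrative sketch only, not part of Opal itself):
  #
  #   class Label < String
  #     def self.new(value = '')
  #       super(value.to_s) # make sure String.new receives a plain string
  #     end
  #   end
  #
  #   Label.new(:ok) # => "ok", held by a Label instance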
def initialize(str = undefined, encoding: nil, capacity: nil)
end
def %(data)
if Array === data
format(self, *data)
else
format(self, data)
end
end
def *(count)
%x{
count = $coerce_to(count, #{Integer}, 'to_int');
if (count < 0) {
#{raise ArgumentError, 'negative argument'}
}
if (count === 0) {
return self.$$cast('');
}
var result = '',
string = self.toString();
// All credit for the bit-twiddling magic code below goes to Mozilla
// polyfill implementation of String.prototype.repeat() posted here:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/repeat
if (string.length * count >= 1 << 28) {
#{raise RangeError, 'multiply count must not overflow maximum string size'}
}
for (;;) {
if ((count & 1) === 1) {
result += string;
}
count >>>= 1;
if (count === 0) {
break;
}
string += string;
}
return self.$$cast(result);
}
end
def +(other)
other = `$coerce_to(#{other}, #{String}, 'to_str')`
%x{
if (other == "" && self.$$class === Opal.String) return #{self};
if (self == "" && other.$$class === Opal.String) return #{other};
var out = self + other;
if (self.encoding === out.encoding && other.encoding === out.encoding) return out;
if (self.encoding.name === "UTF-8" || other.encoding.name === "UTF-8") return out;
return Opal.enc(out, self.encoding);
}
end
def <=>(other)
if other.respond_to? :to_str
other = other.to_str.to_s
`self > other ? 1 : (self < other ? -1 : 0)`
else
%x{
var cmp = #{other <=> self};
if (cmp === nil) {
return nil;
}
else {
return cmp > 0 ? -1 : (cmp < 0 ? 1 : 0);
}
}
end
end
def ==(other)
%x{
if (other.$$is_string) {
return self.toString() === other.toString();
}
if ($respond_to(other, '$to_str')) {
return #{other == self};
}
return false;
}
end
alias eql? ==
alias === ==
def =~(other)
%x{
if (other.$$is_string) {
#{raise TypeError, 'type mismatch: String given'};
}
return #{other =~ self};
}
end
def [](index, length = undefined)
%x{
var size = self.length, exclude;
if (index.$$is_range) {
exclude = index.excl;
length = $coerce_to(index.end, #{Integer}, 'to_int');
index = $coerce_to(index.begin, #{Integer}, 'to_int');
if (Math.abs(index) > size) {
return nil;
}
if (index < 0) {
index += size;
}
if (length < 0) {
length += size;
}
if (!exclude) {
length += 1;
}
length = length - index;
if (length < 0) {
length = 0;
}
return self.$$cast(self.substr(index, length));
}
if (index.$$is_string) {
if (length != null) {
#{raise TypeError}
}
return self.indexOf(index) !== -1 ? self.$$cast(index) : nil;
}
if (index.$$is_regexp) {
var match = self.match(index);
if (match === null) {
#{$~ = nil}
return nil;
}
#{$~ = MatchData.new(`index`, `match`)}
if (length == null) {
return self.$$cast(match[0]);
}
length = $coerce_to(length, #{Integer}, 'to_int');
if (length < 0 && -length < match.length) {
return self.$$cast(match[length += match.length]);
}
if (length >= 0 && length < match.length) {
return self.$$cast(match[length]);
}
return nil;
}
index = $coerce_to(index, #{Integer}, 'to_int');
if (index < 0) {
index += size;
}
if (length == null) {
if (index >= size || index < 0) {
return nil;
}
return self.$$cast(self.substr(index, 1));
}
length = $coerce_to(length, #{Integer}, 'to_int');
if (length < 0) {
return nil;
}
if (index > size || index < 0) {
return nil;
}
return self.$$cast(self.substr(index, length));
}
end
alias byteslice []
def b
`new String(#{self})`.force_encoding('binary')
end
def capitalize
`self.$$cast(self.charAt(0).toUpperCase() + self.substr(1).toLowerCase())`
end
def casecmp(other)
return nil unless other.respond_to?(:to_str)
other = `$coerce_to(other, #{String}, 'to_str')`.to_s
%x{
var ascii_only = /^[\x00-\x7F]*$/;
if (ascii_only.test(self) && ascii_only.test(other)) {
self = self.toLowerCase();
other = other.toLowerCase();
}
}
self <=> other
end
def casecmp?(other)
%x{
var cmp = #{casecmp(other)};
if (cmp === nil) {
return nil;
} else {
return cmp === 0;
}
}
end
def center(width, padstr = ' ')
width = `$coerce_to(#{width}, #{Integer}, 'to_int')`
padstr = `$coerce_to(#{padstr}, #{String}, 'to_str')`.to_s
if padstr.empty?
raise ArgumentError, 'zero width padding'
end
return self if `width <= self.length`
%x{
var ljustified = #{ljust ((width + `self.length`) / 2).ceil, padstr},
rjustified = #{rjust ((width + `self.length`) / 2).floor, padstr};
return self.$$cast(rjustified + ljustified.slice(self.length));
}
end
def chomp(separator = $/)
return self if `separator === nil || self.length === 0`
separator = Opal.coerce_to!(separator, String, :to_str).to_s
%x{
var result;
if (separator === "\n") {
result = self.replace(/\r?\n?$/, '');
}
else if (separator === "") {
result = self.replace(/(\r?\n)+$/, '');
}
else if (self.length >= separator.length) {
var tail = self.substr(self.length - separator.length, separator.length);
if (tail === separator) {
result = self.substr(0, self.length - separator.length);
}
}
if (result != null) {
return self.$$cast(result);
}
}
self
end
def chop
%x{
var length = self.length, result;
if (length <= 1) {
result = "";
} else if (self.charAt(length - 1) === "\n" && self.charAt(length - 2) === "\r") {
result = self.substr(0, length - 2);
} else {
result = self.substr(0, length - 1);
}
return self.$$cast(result);
}
end
def chr
`self.charAt(0)`
end
def clone
copy = `new String(self)`
copy.copy_singleton_methods(self)
copy.initialize_clone(self)
copy
end
def dup
copy = `new String(self)`
copy.initialize_dup(self)
copy
end
def count(*sets)
%x{
if (sets.length === 0) {
#{raise ArgumentError, 'ArgumentError: wrong number of arguments (0 for 1+)'}
}
var char_class = char_class_from_char_sets(sets);
if (char_class === null) {
return 0;
}
return self.length - self.replace(new RegExp(char_class, 'g'), '').length;
}
end
def delete(*sets)
%x{
if (sets.length === 0) {
#{raise ArgumentError, 'ArgumentError: wrong number of arguments (0 for 1+)'}
}
var char_class = char_class_from_char_sets(sets);
if (char_class === null) {
return self;
}
return self.$$cast(self.replace(new RegExp(char_class, 'g'), ''));
}
end
def delete_prefix(prefix)
%x{
if (!prefix.$$is_string) {
prefix = $coerce_to(prefix, #{String}, 'to_str');
}
if (self.slice(0, prefix.length) === prefix) {
return self.$$cast(self.slice(prefix.length));
} else {
return self;
}
}
end
def delete_suffix(suffix)
%x{
if (!suffix.$$is_string) {
suffix = $coerce_to(suffix, #{String}, 'to_str');
}
if (self.slice(self.length - suffix.length) === suffix) {
return self.$$cast(self.slice(0, self.length - suffix.length));
} else {
return self;
}
}
end
def downcase
`self.$$cast(self.toLowerCase())`
end
def each_line(separator = $/, &block)
return enum_for :each_line, separator unless block_given?
%x{
if (separator === nil) {
Opal.yield1(block, self);
return self;
}
separator = $coerce_to(separator, #{String}, 'to_str')
var a, i, n, length, chomped, trailing, splitted;
if (separator.length === 0) {
for (a = self.split(/(\n{2,})/), i = 0, n = a.length; i < n; i += 2) {
if (a[i] || a[i + 1]) {
var value = (a[i] || "") + (a[i + 1] || "");
Opal.yield1(block, self.$$cast(value));
}
}
return self;
}
chomped = #{chomp(separator)};
trailing = self.length != chomped.length;
splitted = chomped.split(separator);
for (i = 0, length = splitted.length; i < length; i++) {
if (i < length - 1 || trailing) {
Opal.yield1(block, self.$$cast(splitted[i] + separator));
}
else {
Opal.yield1(block, self.$$cast(splitted[i]));
}
}
}
self
end
def empty?
`self.length === 0`
end
def end_with?(*suffixes)
%x{
for (var i = 0, length = suffixes.length; i < length; i++) {
var suffix = $coerce_to(suffixes[i], #{String}, 'to_str').$to_s();
if (self.length >= suffix.length &&
self.substr(self.length - suffix.length, suffix.length) == suffix) {
return true;
}
}
}
false
end
alias equal? ===
def gsub(pattern, replacement = undefined, &block)
%x{
if (replacement === undefined && block === nil) {
return #{enum_for :gsub, pattern};
}
var result = '', match_data = nil, index = 0, match, _replacement;
if (pattern.$$is_regexp) {
pattern = Opal.global_multiline_regexp(pattern);
} else {
pattern = $coerce_to(pattern, #{String}, 'to_str');
pattern = new RegExp(pattern.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'gm');
}
var lastIndex;
while (true) {
match = pattern.exec(self);
if (match === null) {
#{$~ = nil}
result += self.slice(index);
break;
}
match_data = #{MatchData.new `pattern`, `match`};
if (replacement === undefined) {
lastIndex = pattern.lastIndex;
_replacement = block(match[0]);
pattern.lastIndex = lastIndex; // save and restore lastIndex
}
else if (replacement.$$is_hash) {
_replacement = #{`replacement`[`match[0]`].to_s};
}
else {
if (!replacement.$$is_string) {
replacement = $coerce_to(replacement, #{String}, 'to_str');
}
_replacement = replacement.replace(/([\\]+)([0-9+&`'])/g, function (original, slashes, command) {
if (slashes.length % 2 === 0) {
return original;
}
switch (command) {
case "+":
for (var i = match.length - 1; i > 0; i--) {
if (match[i] !== undefined) {
return slashes.slice(1) + match[i];
}
}
return '';
case "&": return slashes.slice(1) + match[0];
case "`": return slashes.slice(1) + self.slice(0, match.index);
case "'": return slashes.slice(1) + self.slice(match.index + match[0].length);
default: return slashes.slice(1) + (match[command] || '');
}
}).replace(/\\\\/g, '\\');
}
if (pattern.lastIndex === match.index) {
result += (self.slice(index, match.index) + _replacement + (self[match.index] || ""));
pattern.lastIndex += 1;
}
else {
result += (self.slice(index, match.index) + _replacement)
}
index = pattern.lastIndex;
}
#{$~ = `match_data`}
return self.$$cast(result);
}
end
def hash
`self.toString()`
end
def hex
to_i 16
end
def include?(other)
%x{
if (!other.$$is_string) {
other = $coerce_to(other, #{String}, 'to_str');
}
return self.indexOf(other) !== -1;
}
end
def index(search, offset = undefined)
%x{
var index,
match,
regex;
if (offset === undefined) {
offset = 0;
} else {
offset = $coerce_to(offset, #{Integer}, 'to_int');
if (offset < 0) {
offset += self.length;
if (offset < 0) {
return nil;
}
}
}
if (search.$$is_regexp) {
regex = Opal.global_multiline_regexp(search);
while (true) {
match = regex.exec(self);
if (match === null) {
#{$~ = nil};
index = -1;
break;
}
if (match.index >= offset) {
#{$~ = MatchData.new(`regex`, `match`)}
index = match.index;
break;
}
regex.lastIndex = match.index + 1;
}
} else {
search = $coerce_to(search, #{String}, 'to_str');
if (search.length === 0 && offset > self.length) {
index = -1;
} else {
index = self.indexOf(search, offset);
}
}
return index === -1 ? nil : index;
}
end
def inspect
%x{
var escapable = /[\\\"\x00-\x1f\u007F-\u009F\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,
meta = {
'\u0007': '\\a',
'\u001b': '\\e',
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'\v': '\\v',
'"' : '\\"',
'\\': '\\\\'
},
escaped = self.replace(escapable, function (chr) {
if (meta[chr]) return meta[chr];
chr = chr.charCodeAt(0);
if (chr <= 0xff && (self.encoding["$binary?"]() || self.internal_encoding["$binary?"]())) {
return '\\x' + ('00' + chr.toString(16).toUpperCase()).slice(-2);
} else {
return '\\u' + ('0000' + chr.toString(16).toUpperCase()).slice(-4);
}
});
return '"' + escaped.replace(/\#[\$\@\{]/g, '\\$&') + '"';
}
end
def intern
`self.toString()`
end
def lines(separator = $/, &block)
e = each_line(separator, &block)
block ? self : e.to_a
end
def ljust(width, padstr = ' ')
width = `$coerce_to(#{width}, #{Integer}, 'to_int')`
padstr = `$coerce_to(#{padstr}, #{String}, 'to_str')`.to_s
if padstr.empty?
raise ArgumentError, 'zero width padding'
end
return self if `width <= self.length`
%x{
var index = -1,
result = "";
width -= self.length;
while (++index < width) {
result += padstr;
}
return self.$$cast(self + result.slice(0, width));
}
end
def lstrip
`self.replace(/^\s*/, '')`
end
def ascii_only?
# non-ASCII-compatible encoding must return false
%x{
if (!self.encoding.ascii) return false;
return /^[\x00-\x7F]*$/.test(self);
}
end
def match(pattern, pos = undefined, &block)
if String === pattern || pattern.respond_to?(:to_str)
pattern = Regexp.new(pattern.to_str)
end
unless Regexp === pattern
raise TypeError, "wrong argument type #{pattern.class} (expected Regexp)"
end
pattern.match(self, pos, &block)
end
def match?(pattern, pos = undefined)
if String === pattern || pattern.respond_to?(:to_str)
pattern = Regexp.new(pattern.to_str)
end
unless Regexp === pattern
raise TypeError, "wrong argument type #{pattern.class} (expected Regexp)"
end
pattern.match?(self, pos)
end
def next
%x{
var i = self.length;
if (i === 0) {
return self.$$cast('');
}
var result = self;
var first_alphanum_char_index = self.search(/[a-zA-Z0-9]/);
var carry = false;
var code;
while (i--) {
code = self.charCodeAt(i);
if ((code >= 48 && code <= 57) ||
(code >= 65 && code <= 90) ||
(code >= 97 && code <= 122)) {
switch (code) {
case 57:
carry = true;
code = 48;
break;
case 90:
carry = true;
code = 65;
break;
case 122:
carry = true;
code = 97;
break;
default:
carry = false;
code += 1;
}
} else {
if (first_alphanum_char_index === -1) {
if (code === 255) {
carry = true;
code = 0;
} else {
carry = false;
code += 1;
}
} else {
carry = true;
}
}
result = result.slice(0, i) + String.fromCharCode(code) + result.slice(i + 1);
if (carry && (i === 0 || i === first_alphanum_char_index)) {
switch (code) {
case 65:
break;
case 97:
break;
default:
code += 1;
}
if (i === 0) {
result = String.fromCharCode(code) + result;
} else {
result = result.slice(0, i) + String.fromCharCode(code) + result.slice(i);
}
carry = false;
}
if (!carry) {
break;
}
}
return self.$$cast(result);
}
end
def oct
%x{
var result,
string = self,
radix = 8;
if (/^\s*_/.test(string)) {
return 0;
}
string = string.replace(/^(\s*[+-]?)(0[bodx]?)(.+)$/i, function (original, head, flag, tail) {
switch (tail.charAt(0)) {
case '+':
case '-':
return original;
case '0':
if (tail.charAt(1) === 'x' && flag === '0x') {
return original;
}
}
switch (flag) {
case '0b':
radix = 2;
break;
case '0':
case '0o':
radix = 8;
break;
case '0d':
radix = 10;
break;
case '0x':
radix = 16;
break;
}
return head + tail;
});
result = parseInt(string.replace(/_(?!_)/g, ''), radix);
return isNaN(result) ? 0 : result;
}
end
def ord
%x{
if (typeof self.codePointAt === "function") {
return self.codePointAt(0);
}
else {
return self.charCodeAt(0);
}
}
end
def partition(sep)
%x{
var i, m;
if (sep.$$is_regexp) {
m = sep.exec(self);
if (m === null) {
i = -1;
} else {
#{MatchData.new `sep`, `m`};
sep = m[0];
i = m.index;
}
} else {
sep = $coerce_to(sep, #{String}, 'to_str');
i = self.indexOf(sep);
}
if (i === -1) {
return [self, '', ''];
}
return [
self.slice(0, i),
self.slice(i, i + sep.length),
self.slice(i + sep.length)
];
}
end
def reverse
`self.split('').reverse().join('')`
end
def rindex(search, offset = undefined)
%x{
var i, m, r, _m;
if (offset === undefined) {
offset = self.length;
} else {
offset = $coerce_to(offset, #{Integer}, 'to_int');
if (offset < 0) {
offset += self.length;
if (offset < 0) {
return nil;
}
}
}
if (search.$$is_regexp) {
m = null;
r = Opal.global_multiline_regexp(search);
while (true) {
_m = r.exec(self);
if (_m === null || _m.index > offset) {
break;
}
m = _m;
r.lastIndex = m.index + 1;
}
if (m === null) {
#{$~ = nil}
i = -1;
} else {
#{MatchData.new `r`, `m`};
i = m.index;
}
} else {
search = $coerce_to(search, #{String}, 'to_str');
i = self.lastIndexOf(search, offset);
}
return i === -1 ? nil : i;
}
end
def rjust(width, padstr = ' ')
width = `$coerce_to(#{width}, #{Integer}, 'to_int')`
padstr = `$coerce_to(#{padstr}, #{String}, 'to_str')`.to_s
if padstr.empty?
raise ArgumentError, 'zero width padding'
end
return self if `width <= self.length`
%x{
var chars = Math.floor(width - self.length),
patterns = Math.floor(chars / padstr.length),
result = Array(patterns + 1).join(padstr),
remaining = chars - result.length;
return self.$$cast(result + padstr.slice(0, remaining) + self);
}
end
def rpartition(sep)
%x{
var i, m, r, _m;
if (sep.$$is_regexp) {
m = null;
r = Opal.global_multiline_regexp(sep);
while (true) {
_m = r.exec(self);
if (_m === null) {
break;
}
m = _m;
r.lastIndex = m.index + 1;
}
if (m === null) {
i = -1;
} else {
#{MatchData.new `r`, `m`};
sep = m[0];
i = m.index;
}
} else {
sep = $coerce_to(sep, #{String}, 'to_str');
i = self.lastIndexOf(sep);
}
if (i === -1) {
return ['', '', self];
}
return [
self.slice(0, i),
self.slice(i, i + sep.length),
self.slice(i + sep.length)
];
}
end
def rstrip
`self.replace(/[\s\u0000]*$/, '')`
end
def scan(pattern, &block)
%x{
var result = [],
match_data = nil,
match;
if (pattern.$$is_regexp) {
pattern = Opal.global_multiline_regexp(pattern);
} else {
pattern = $coerce_to(pattern, #{String}, 'to_str');
pattern = new RegExp(pattern.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'gm');
}
while ((match = pattern.exec(self)) != null) {
match_data = #{MatchData.new `pattern`, `match`};
if (block === nil) {
match.length == 1 ? result.push(match[0]) : result.push(#{`match_data`.captures});
} else {
match.length == 1 ? block(match[0]) : block.call(self, #{`match_data`.captures});
}
if (pattern.lastIndex === match.index) {
pattern.lastIndex += 1;
}
}
#{$~ = `match_data`}
return (block !== nil ? self : result);
}
end
alias slice []
def split(pattern = undefined, limit = undefined)
%x{
if (self.length === 0) {
return [];
}
if (limit === undefined) {
limit = 0;
} else {
limit = #{Opal.coerce_to!(limit, Integer, :to_int)};
if (limit === 1) {
return [self];
}
}
if (pattern === undefined || pattern === nil) {
pattern = #{$; || ' '};
}
var result = [],
string = self.toString(),
index = 0,
match,
i, ii;
if (pattern.$$is_regexp) {
pattern = Opal.global_multiline_regexp(pattern);
} else {
pattern = $coerce_to(pattern, #{String}, 'to_str').$to_s();
if (pattern === ' ') {
pattern = /\s+/gm;
string = string.replace(/^\s+/, '');
} else {
pattern = new RegExp(pattern.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'gm');
}
}
result = string.split(pattern);
if (result.length === 1 && result[0] === string) {
return [self.$$cast(result[0])];
}
while ((i = result.indexOf(undefined)) !== -1) {
result.splice(i, 1);
}
function castResult() {
for (i = 0; i < result.length; i++) {
result[i] = self.$$cast(result[i]);
}
}
if (limit === 0) {
while (result[result.length - 1] === '') {
result.length -= 1;
}
castResult();
return result;
}
match = pattern.exec(string);
if (limit < 0) {
if (match !== null && match[0] === '' && pattern.source.indexOf('(?=') === -1) {
for (i = 0, ii = match.length; i < ii; i++) {
result.push('');
}
}
castResult();
return result;
}
if (match !== null && match[0] === '') {
result.splice(limit - 1, result.length - 1, result.slice(limit - 1).join(''));
castResult();
return result;
}
if (limit >= result.length) {
castResult();
return result;
}
i = 0;
while (match !== null) {
i++;
index = pattern.lastIndex;
if (i + 1 === limit) {
break;
}
match = pattern.exec(string);
}
result.splice(limit - 1, result.length - 1, string.slice(index));
castResult();
return result;
}
end
def squeeze(*sets)
%x{
if (sets.length === 0) {
return self.$$cast(self.replace(/(.)\1+/g, '$1'));
}
var char_class = char_class_from_char_sets(sets);
if (char_class === null) {
return self;
}
return self.$$cast(self.replace(new RegExp('(' + char_class + ')\\1+', 'g'), '$1'));
}
end
def start_with?(*prefixes)
%x{
for (var i = 0, length = prefixes.length; i < length; i++) {
if (prefixes[i].$$is_regexp) {
var regexp = prefixes[i];
var match = regexp.exec(self);
if (match != null && match.index === 0) {
#{$~ = MatchData.new(`regexp`, `match`)};
return true;
} else {
#{$~ = nil}
}
} else {
var prefix = $coerce_to(prefixes[i], #{String}, 'to_str').$to_s();
if (self.indexOf(prefix) === 0) {
return true;
}
}
}
return false;
}
end
def strip
`self.replace(/^\s*/, '').replace(/[\s\u0000]*$/, '')`
end
def sub(pattern, replacement = undefined, &block)
%x{
if (!pattern.$$is_regexp) {
pattern = $coerce_to(pattern, #{String}, 'to_str');
pattern = new RegExp(pattern.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));
}
var result, match = pattern.exec(self);
if (match === null) {
#{$~ = nil}
result = self.toString();
} else {
#{MatchData.new `pattern`, `match`}
if (replacement === undefined) {
if (block === nil) {
#{raise ArgumentError, 'wrong number of arguments (1 for 2)'}
}
result = self.slice(0, match.index) + block(match[0]) + self.slice(match.index + match[0].length);
} else if (replacement.$$is_hash) {
result = self.slice(0, match.index) + #{`replacement`[`match[0]`].to_s} + self.slice(match.index + match[0].length);
} else {
replacement = $coerce_to(replacement, #{String}, 'to_str');
replacement = replacement.replace(/([\\]+)([0-9+&`'])/g, function (original, slashes, command) {
if (slashes.length % 2 === 0) {
return original;
}
switch (command) {
case "+":
for (var i = match.length - 1; i > 0; i--) {
if (match[i] !== undefined) {
return slashes.slice(1) + match[i];
}
}
return '';
case "&": return slashes.slice(1) + match[0];
case "`": return slashes.slice(1) + self.slice(0, match.index);
case "'": return slashes.slice(1) + self.slice(match.index + match[0].length);
default: return slashes.slice(1) + (match[command] || '');
}
}).replace(/\\\\/g, '\\');
result = self.slice(0, match.index) + replacement + self.slice(match.index + match[0].length);
}
}
return self.$$cast(result);
}
end
alias succ next
def sum(n = 16)
%x{
n = $coerce_to(n, #{Integer}, 'to_int');
var result = 0,
length = self.length,
i = 0;
for (; i < length; i++) {
result += self.charCodeAt(i);
}
if (n <= 0) {
return result;
}
return result & (Math.pow(2, n) - 1);
}
end
def swapcase
%x{
var str = self.replace(/([a-z]+)|([A-Z]+)/g, function($0,$1,$2) {
return $1 ? $0.toUpperCase() : $0.toLowerCase();
});
if (self.constructor === String) {
return str;
}
return #{self.class.new `str`};
}
end
def to_f
%x{
if (self.charAt(0) === '_') {
return 0;
}
var result = parseFloat(self.replace(/_/g, ''));
if (isNaN(result) || result == Infinity || result == -Infinity) {
return 0;
}
else {
return result;
}
}
end
def to_i(base = 10)
%x{
var result,
string = self.toLowerCase(),
radix = $coerce_to(base, #{Integer}, 'to_int');
if (radix === 1 || radix < 0 || radix > 36) {
#{raise ArgumentError, "invalid radix #{`radix`}"}
}
if (/^\s*_/.test(string)) {
return 0;
}
string = string.replace(/^(\s*[+-]?)(0[bodx]?)(.+)$/, function (original, head, flag, tail) {
switch (tail.charAt(0)) {
case '+':
case '-':
return original;
case '0':
if (tail.charAt(1) === 'x' && flag === '0x' && (radix === 0 || radix === 16)) {
return original;
}
}
switch (flag) {
case '0b':
if (radix === 0 || radix === 2) {
radix = 2;
return head + tail;
}
break;
case '0':
case '0o':
if (radix === 0 || radix === 8) {
radix = 8;
return head + tail;
}
break;
case '0d':
if (radix === 0 || radix === 10) {
radix = 10;
return head + tail;
}
break;
case '0x':
if (radix === 0 || radix === 16) {
radix = 16;
return head + tail;
}
break;
}
return original
});
result = parseInt(string.replace(/_(?!_)/g, ''), radix);
return isNaN(result) ? 0 : result;
}
end
def to_proc
method_name = '$' + `self.valueOf()`
proc do |*args, &block|
%x{
if (args.length === 0) {
#{raise ArgumentError, 'no receiver given'}
}
var recv = args[0];
if (recv == null) recv = nil;
var body = recv[#{method_name}];
if (!body) {
return recv.$method_missing.apply(recv, args);
}
if (typeof block === 'function') {
body.$$p = block;
}
if (args.length === 1) {
return body.call(recv);
} else {
return body.apply(recv, args.slice(1));
}
}
end
end
def to_s
`self.toString()`
end
alias to_str to_s
alias to_sym intern
def tr(from, to)
%x{
from = $coerce_to(from, #{String}, 'to_str').$to_s();
to = $coerce_to(to, #{String}, 'to_str').$to_s();
if (from.length == 0 || from === to) {
return self;
}
var i, in_range, c, ch, start, end, length;
var subs = {};
var from_chars = from.split('');
var from_length = from_chars.length;
var to_chars = to.split('');
var to_length = to_chars.length;
var inverse = false;
var global_sub = null;
if (from_chars[0] === '^' && from_chars.length > 1) {
inverse = true;
from_chars.shift();
global_sub = to_chars[to_length - 1]
from_length -= 1;
}
var from_chars_expanded = [];
var last_from = null;
in_range = false;
for (i = 0; i < from_length; i++) {
ch = from_chars[i];
if (last_from == null) {
last_from = ch;
from_chars_expanded.push(ch);
}
else if (ch === '-') {
if (last_from === '-') {
from_chars_expanded.push('-');
from_chars_expanded.push('-');
}
else if (i == from_length - 1) {
from_chars_expanded.push('-');
}
else {
in_range = true;
}
}
else if (in_range) {
start = last_from.charCodeAt(0);
end = ch.charCodeAt(0);
if (start > end) {
#{raise ArgumentError, "invalid range \"#{`String.fromCharCode(start)`}-#{`String.fromCharCode(end)`}\" in string transliteration"}
}
for (c = start + 1; c < end; c++) {
from_chars_expanded.push(String.fromCharCode(c));
}
from_chars_expanded.push(ch);
in_range = null;
last_from = null;
}
else {
from_chars_expanded.push(ch);
}
}
from_chars = from_chars_expanded;
from_length = from_chars.length;
if (inverse) {
for (i = 0; i < from_length; i++) {
subs[from_chars[i]] = true;
}
}
else {
if (to_length > 0) {
var to_chars_expanded = [];
var last_to = null;
in_range = false;
for (i = 0; i < to_length; i++) {
ch = to_chars[i];
if (last_to == null) {
last_to = ch;
to_chars_expanded.push(ch);
}
else if (ch === '-') {
if (last_to === '-') {
to_chars_expanded.push('-');
to_chars_expanded.push('-');
}
else if (i == to_length - 1) {
to_chars_expanded.push('-');
}
else {
in_range = true;
}
}
else if (in_range) {
start = last_to.charCodeAt(0);
end = ch.charCodeAt(0);
if (start > end) {
#{raise ArgumentError, "invalid range \"#{`String.fromCharCode(start)`}-#{`String.fromCharCode(end)`}\" in string transliteration"}
}
for (c = start + 1; c < end; c++) {
to_chars_expanded.push(String.fromCharCode(c));
}
to_chars_expanded.push(ch);
in_range = null;
last_to = null;
}
else {
to_chars_expanded.push(ch);
}
}
to_chars = to_chars_expanded;
to_length = to_chars.length;
}
var length_diff = from_length - to_length;
if (length_diff > 0) {
var pad_char = (to_length > 0 ? to_chars[to_length - 1] : '');
for (i = 0; i < length_diff; i++) {
to_chars.push(pad_char);
}
}
for (i = 0; i < from_length; i++) {
subs[from_chars[i]] = to_chars[i];
}
}
var new_str = ''
for (i = 0, length = self.length; i < length; i++) {
ch = self.charAt(i);
var sub = subs[ch];
if (inverse) {
new_str += (sub == null ? global_sub : ch);
}
else {
new_str += (sub != null ? sub : ch);
}
}
return self.$$cast(new_str);
}
end
def tr_s(from, to)
%x{
from = $coerce_to(from, #{String}, 'to_str').$to_s();
to = $coerce_to(to, #{String}, 'to_str').$to_s();
if (from.length == 0) {
return self;
}
var i, in_range, c, ch, start, end, length;
var subs = {};
var from_chars = from.split('');
var from_length = from_chars.length;
var to_chars = to.split('');
var to_length = to_chars.length;
var inverse = false;
var global_sub = null;
if (from_chars[0] === '^' && from_chars.length > 1) {
inverse = true;
from_chars.shift();
global_sub = to_chars[to_length - 1]
from_length -= 1;
}
var from_chars_expanded = [];
var last_from = null;
in_range = false;
for (i = 0; i < from_length; i++) {
ch = from_chars[i];
if (last_from == null) {
last_from = ch;
from_chars_expanded.push(ch);
}
else if (ch === '-') {
if (last_from === '-') {
from_chars_expanded.push('-');
from_chars_expanded.push('-');
}
else if (i == from_length - 1) {
from_chars_expanded.push('-');
}
else {
in_range = true;
}
}
else if (in_range) {
start = last_from.charCodeAt(0);
end = ch.charCodeAt(0);
if (start > end) {
#{raise ArgumentError, "invalid range \"#{`String.fromCharCode(start)`}-#{`String.fromCharCode(end)`}\" in string transliteration"}
}
for (c = start + 1; c < end; c++) {
from_chars_expanded.push(String.fromCharCode(c));
}
from_chars_expanded.push(ch);
in_range = null;
last_from = null;
}
else {
from_chars_expanded.push(ch);
}
}
from_chars = from_chars_expanded;
from_length = from_chars.length;
if (inverse) {
for (i = 0; i < from_length; i++) {
subs[from_chars[i]] = true;
}
}
else {
if (to_length > 0) {
var to_chars_expanded = [];
var last_to = null;
in_range = false;
for (i = 0; i < to_length; i++) {
ch = to_chars[i];
        if (last_to == null) {
          last_to = ch;
to_chars_expanded.push(ch);
}
else if (ch === '-') {
if (last_to === '-') {
to_chars_expanded.push('-');
to_chars_expanded.push('-');
}
else if (i == to_length - 1) {
to_chars_expanded.push('-');
}
else {
in_range = true;
}
}
else if (in_range) {
          start = last_to.charCodeAt(0);
end = ch.charCodeAt(0);
if (start > end) {
#{raise ArgumentError, "invalid range \"#{`String.fromCharCode(start)`}-#{`String.fromCharCode(end)`}\" in string transliteration"}
}
for (c = start + 1; c < end; c++) {
to_chars_expanded.push(String.fromCharCode(c));
}
to_chars_expanded.push(ch);
in_range = null;
          last_to = null;
}
else {
to_chars_expanded.push(ch);
}
}
to_chars = to_chars_expanded;
to_length = to_chars.length;
}
var length_diff = from_length - to_length;
if (length_diff > 0) {
var pad_char = (to_length > 0 ? to_chars[to_length - 1] : '');
for (i = 0; i < length_diff; i++) {
to_chars.push(pad_char);
}
}
for (i = 0; i < from_length; i++) {
subs[from_chars[i]] = to_chars[i];
}
}
var new_str = ''
var last_substitute = null
for (i = 0, length = self.length; i < length; i++) {
ch = self.charAt(i);
var sub = subs[ch]
if (inverse) {
if (sub == null) {
if (last_substitute == null) {
new_str += global_sub;
last_substitute = true;
}
}
else {
new_str += ch;
last_substitute = null;
}
}
else {
if (sub != null) {
if (last_substitute == null || last_substitute !== sub) {
new_str += sub;
last_substitute = sub;
}
}
else {
new_str += ch;
last_substitute = null;
}
}
}
return self.$$cast(new_str);
}
end
def upcase
`self.$$cast(self.toUpperCase())`
end
def upto(stop, excl = false, &block)
return enum_for :upto, stop, excl unless block_given?
%x{
var a, b, s = self.toString();
stop = $coerce_to(stop, #{String}, 'to_str');
if (s.length === 1 && stop.length === 1) {
a = s.charCodeAt(0);
b = stop.charCodeAt(0);
while (a <= b) {
if (excl && a === b) {
break;
}
block(String.fromCharCode(a));
a += 1;
}
} else if (parseInt(s, 10).toString() === s && parseInt(stop, 10).toString() === stop) {
a = parseInt(s, 10);
b = parseInt(stop, 10);
while (a <= b) {
if (excl && a === b) {
break;
}
block(a.toString());
a += 1;
}
} else {
while (s.length <= stop.length && s <= stop) {
if (excl && s === stop) {
break;
}
block(s);
s = #{`s`.succ};
}
}
return self;
}
end
%x{
function char_class_from_char_sets(sets) {
function explode_sequences_in_character_set(set) {
var result = '',
i, len = set.length,
curr_char,
skip_next_dash,
char_code_from,
char_code_upto,
char_code;
for (i = 0; i < len; i++) {
curr_char = set.charAt(i);
if (curr_char === '-' && i > 0 && i < (len - 1) && !skip_next_dash) {
char_code_from = set.charCodeAt(i - 1);
char_code_upto = set.charCodeAt(i + 1);
if (char_code_from > char_code_upto) {
#{raise ArgumentError, "invalid range \"#{`char_code_from`}-#{`char_code_upto`}\" in string transliteration"}
}
for (char_code = char_code_from + 1; char_code < char_code_upto + 1; char_code++) {
result += String.fromCharCode(char_code);
}
skip_next_dash = true;
i++;
} else {
skip_next_dash = (curr_char === '\\');
result += curr_char;
}
}
return result;
}
function intersection(setA, setB) {
if (setA.length === 0) {
return setB;
}
var result = '',
i, len = setA.length,
chr;
for (i = 0; i < len; i++) {
chr = setA.charAt(i);
if (setB.indexOf(chr) !== -1) {
result += chr;
}
}
return result;
}
var i, len, set, neg, chr, tmp,
pos_intersection = '',
neg_intersection = '';
for (i = 0, len = sets.length; i < len; i++) {
set = $coerce_to(sets[i], #{String}, 'to_str');
neg = (set.charAt(0) === '^' && set.length > 1);
set = explode_sequences_in_character_set(neg ? set.slice(1) : set);
if (neg) {
neg_intersection = intersection(neg_intersection, set);
} else {
pos_intersection = intersection(pos_intersection, set);
}
}
if (pos_intersection.length > 0 && neg_intersection.length > 0) {
tmp = '';
for (i = 0, len = pos_intersection.length; i < len; i++) {
chr = pos_intersection.charAt(i);
if (neg_intersection.indexOf(chr) === -1) {
tmp += chr;
}
}
pos_intersection = tmp;
neg_intersection = '';
}
if (pos_intersection.length > 0) {
return '[' + #{Regexp.escape(`pos_intersection`)} + ']';
}
if (neg_intersection.length > 0) {
return '[^' + #{Regexp.escape(`neg_intersection`)} + ']';
}
return null;
}
}
def instance_variables
[]
end
def self._load(*args)
new(*args)
end
def unicode_normalize(form = :nfc)
raise ArgumentError, "Invalid normalization form #{form}" unless %i[nfc nfd nfkc nfkd].include?(form)
`self.normalize(#{form.upcase})`
end
def unicode_normalized?(form = :nfc)
unicode_normalize(form) == self
end
def unpack(format)
raise "To use String#unpack, you must first require 'corelib/string/unpack'."
end
def unpack1(format)
raise "To use String#unpack1, you must first require 'corelib/string/unpack'."
end
def freeze
%x{
if (typeof self === 'string') return self;
self.$$frozen = true;
return self;
}
end
alias +@ dup
def -@
%x{
if (typeof self === 'string') return self;
if (self.$$frozen === true) return self;
if (self.encoding.name == 'UTF-8' && self.internal_encoding.name == 'UTF-8') return self.toString();
return self.$dup().$freeze();
}
end
def frozen?
`typeof self === 'string' || self.$$frozen === true`
end
Opal.pristine self, :initialize
end
Symbol = String
| 24.802764 | 147 | 0.47012 |
38497340ba6d0f590151c6afbb62256475928cd8
| 12,730 |
=begin
#Tatum API
## Authentication <!-- ReDoc-Inject: <security-definitions> -->
OpenAPI spec version: 3.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 3.0.31
=end
require 'date'
module Tatum
class GenerateCustodialWalletCeloKMS
# Blockchain to work with.
attr_accessor :chain
# Currency to pay for transaction gas
attr_accessor :fee_currency
  # Identifier of the private key associated with the signing application. Either the private key or the signature ID must be present.
attr_accessor :signature_id
# If signatureId is mnemonic-based, this is the index to the specific address from that mnemonic.
attr_accessor :index
# If address should support ERC20 tokens, it should be marked as true.
attr_accessor :enable_fungible_tokens
# If address should support ERC721 tokens, it should be marked as true.
attr_accessor :enable_non_fungible_tokens
# If address should support ERC1155 tokens, it should be marked as true.
attr_accessor :enable_semi_fungible_tokens
# If address should support batch transfers of the assets, it should be marked as true.
attr_accessor :enable_batch_transactions
attr_accessor :fee
# Nonce to be set to the transaction. If not present, last known nonce will be used.
attr_accessor :nonce
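  # Example usage (hypothetical signature id, for illustration only):
  #
  #   wallet = Tatum::GenerateCustodialWalletCeloKMS.new(
  #     chain: 'CELO',
  #     fee_currency: 'CELO',
  #     signature_id: '26d3883e-4e17-48b3-a0ee-09b3e4d9eb03',
  #     enable_fungible_tokens: true,
  #     enable_non_fungible_tokens: true,
  #     enable_semi_fungible_tokens: false,
  #     enable_batch_transactions: true
  #   )
  #   wallet.valid? # => true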
class EnumAttributeValidator
attr_reader :datatype
attr_reader :allowable_values
def initialize(datatype, allowable_values)
@allowable_values = allowable_values.map do |value|
case datatype.to_s
when /Integer/i
value.to_i
when /Float/i
value.to_f
else
value
end
end
end
def valid?(value)
!value || allowable_values.include?(value)
end
end
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'chain' => :'chain',
:'fee_currency' => :'feeCurrency',
:'signature_id' => :'signatureId',
:'index' => :'index',
:'enable_fungible_tokens' => :'enableFungibleTokens',
:'enable_non_fungible_tokens' => :'enableNonFungibleTokens',
:'enable_semi_fungible_tokens' => :'enableSemiFungibleTokens',
:'enable_batch_transactions' => :'enableBatchTransactions',
:'fee' => :'fee',
:'nonce' => :'nonce'
}
end
# Attribute type mapping.
def self.openapi_types
{
:'chain' => :'Object',
:'fee_currency' => :'Object',
:'signature_id' => :'Object',
:'index' => :'Object',
:'enable_fungible_tokens' => :'Object',
:'enable_non_fungible_tokens' => :'Object',
:'enable_semi_fungible_tokens' => :'Object',
:'enable_batch_transactions' => :'Object',
:'fee' => :'Object',
:'nonce' => :'Object'
}
end
# List of attributes with nullable: true
def self.openapi_nullable
Set.new([
])
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
if (!attributes.is_a?(Hash))
fail ArgumentError, "The input argument (attributes) must be a hash in `Tatum::GenerateCustodialWalletCeloKMS` initialize method"
end
# check to see if the attribute exists and convert string to symbol for hash key
attributes = attributes.each_with_object({}) { |(k, v), h|
if (!self.class.attribute_map.key?(k.to_sym))
fail ArgumentError, "`#{k}` is not a valid attribute in `Tatum::GenerateCustodialWalletCeloKMS`. Please check the name to make sure it's valid. List of attributes: " + self.class.attribute_map.keys.inspect
end
h[k.to_sym] = v
}
if attributes.key?(:'chain')
self.chain = attributes[:'chain']
end
if attributes.key?(:'fee_currency')
self.fee_currency = attributes[:'fee_currency']
end
if attributes.key?(:'signature_id')
self.signature_id = attributes[:'signature_id']
end
if attributes.key?(:'index')
self.index = attributes[:'index']
end
if attributes.key?(:'enable_fungible_tokens')
self.enable_fungible_tokens = attributes[:'enable_fungible_tokens']
end
if attributes.key?(:'enable_non_fungible_tokens')
self.enable_non_fungible_tokens = attributes[:'enable_non_fungible_tokens']
end
if attributes.key?(:'enable_semi_fungible_tokens')
self.enable_semi_fungible_tokens = attributes[:'enable_semi_fungible_tokens']
end
if attributes.key?(:'enable_batch_transactions')
self.enable_batch_transactions = attributes[:'enable_batch_transactions']
end
if attributes.key?(:'fee')
self.fee = attributes[:'fee']
end
if attributes.key?(:'nonce')
self.nonce = attributes[:'nonce']
end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
if @chain.nil?
invalid_properties.push('invalid value for "chain", chain cannot be nil.')
end
if @fee_currency.nil?
invalid_properties.push('invalid value for "fee_currency", fee_currency cannot be nil.')
end
if @signature_id.nil?
invalid_properties.push('invalid value for "signature_id", signature_id cannot be nil.')
end
if @enable_fungible_tokens.nil?
invalid_properties.push('invalid value for "enable_fungible_tokens", enable_fungible_tokens cannot be nil.')
end
if @enable_non_fungible_tokens.nil?
invalid_properties.push('invalid value for "enable_non_fungible_tokens", enable_non_fungible_tokens cannot be nil.')
end
if @enable_semi_fungible_tokens.nil?
invalid_properties.push('invalid value for "enable_semi_fungible_tokens", enable_semi_fungible_tokens cannot be nil.')
end
if @enable_batch_transactions.nil?
invalid_properties.push('invalid value for "enable_batch_transactions", enable_batch_transactions cannot be nil.')
end
invalid_properties
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
return false if @chain.nil?
chain_validator = EnumAttributeValidator.new('Object', ['CELO'])
return false unless chain_validator.valid?(@chain)
return false if @fee_currency.nil?
fee_currency_validator = EnumAttributeValidator.new('Object', ['CELO', 'CUSD', 'CEUR'])
return false unless fee_currency_validator.valid?(@fee_currency)
return false if @signature_id.nil?
return false if @enable_fungible_tokens.nil?
return false if @enable_non_fungible_tokens.nil?
return false if @enable_semi_fungible_tokens.nil?
return false if @enable_batch_transactions.nil?
true
end
# Custom attribute writer method checking allowed values (enum).
# @param [Object] chain Object to be assigned
def chain=(chain)
validator = EnumAttributeValidator.new('Object', ['CELO'])
unless validator.valid?(chain)
fail ArgumentError, "invalid value for \"chain\", must be one of #{validator.allowable_values}."
end
@chain = chain
end
# Custom attribute writer method checking allowed values (enum).
# @param [Object] fee_currency Object to be assigned
def fee_currency=(fee_currency)
validator = EnumAttributeValidator.new('Object', ['CELO', 'CUSD', 'CEUR'])
unless validator.valid?(fee_currency)
fail ArgumentError, "invalid value for \"fee_currency\", must be one of #{validator.allowable_values}."
end
@fee_currency = fee_currency
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
chain == o.chain &&
fee_currency == o.fee_currency &&
signature_id == o.signature_id &&
index == o.index &&
enable_fungible_tokens == o.enable_fungible_tokens &&
enable_non_fungible_tokens == o.enable_non_fungible_tokens &&
enable_semi_fungible_tokens == o.enable_semi_fungible_tokens &&
enable_batch_transactions == o.enable_batch_transactions &&
fee == o.fee &&
nonce == o.nonce
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Integer] Hash code
def hash
[chain, fee_currency, signature_id, index, enable_fungible_tokens, enable_non_fungible_tokens, enable_semi_fungible_tokens, enable_batch_transactions, fee, nonce].hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def self.build_from_hash(attributes)
new.build_from_hash(attributes)
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.openapi_types.each_pair do |key, type|
if type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
elsif attributes[self.class.attribute_map[key]].nil? && self.class.openapi_nullable.include?(key)
self.send("#{key}=", nil)
end
end
self
end
# Deserializes the data based on type
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :Boolean
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
Tatum.const_get(type).build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
if value.nil?
is_nullable = self.class.openapi_nullable.include?(attr)
next if !is_nullable || (is_nullable && !instance_variable_defined?(:"@#{attr}"))
end
hash[param] = _to_hash(value)
end
hash
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
    end
  end
end
| 33.677249 | 215 | 0.648625 |
18855ddc23c9896bbcd2717cb906b9b701413271
| 366 |
# frozen_string_literal: true
files = Dir.glob(
File.join('lib', 'scc', '*.rb')
)
files << 'lib/scc.rb'
Gem::Specification.new do |s|
s.name = 'scc'
s.version = '0.0.1'
s.date = '2020-08-06'
  s.summary = 'scc is a simple gem to call the scc Golang program from Ruby'
s.files = files
s.require_paths = ['lib']
s.author = 'Jean Carlos Sales Pantoja'
end
| 20.333333 | 72 | 0.636612 |
1ac63568600111915222c9b884cef10df8a99cf8
| 5,029 |
module ManageIQ::Providers
class Hawkular::MiddlewareManager::AlertProfileManager
require 'hawkular/hawkular_client'
def initialize(alerts_client)
@alerts_client = alerts_client
end
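    # Applies an alert-profile change to Hawkular. +operation+ is either
    # :update_alerts (the alerts belonging to the profile changed) or
    # :update_assignments (the servers assigned to the profile changed);
    # +miq_alert_profile+ carries the profile id and the old/new id lists.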
def process_alert_profile(operation, miq_alert_profile)
profile_id = miq_alert_profile[:id]
old_alerts_ids = miq_alert_profile[:old_alerts_ids]
new_alerts_ids = miq_alert_profile[:new_alerts_ids]
old_assignments_ids = miq_alert_profile[:old_assignments_ids]
new_assignments_ids = miq_alert_profile[:new_assignments_ids]
case operation
when :update_alerts
update_alerts(profile_id, old_alerts_ids, new_alerts_ids, old_assignments_ids)
when :update_assignments
update_assignments(profile_id, old_alerts_ids, old_assignments_ids, new_assignments_ids)
end
end
def update_alerts(profile_id, old_alerts_ids, new_alerts_ids, old_assignments_ids)
unless old_assignments_ids.empty?
to_remove_alerts_ids = old_alerts_ids - new_alerts_ids
to_add_alerts_ids = new_alerts_ids - old_alerts_ids
to_remove_alerts_ids.each do |alert_id|
group_trigger = @alerts_client.get_single_trigger "MiQ-#{alert_id}", true
unassign_members(group_trigger, profile_id, old_assignments_ids)
end
to_add_alerts_ids.each do |alert_id|
group_trigger = @alerts_client.get_single_trigger "MiQ-#{alert_id}", true
assign_members(group_trigger, profile_id, old_assignments_ids)
end
end
end
def update_assignments(profile_id, old_alerts_ids, old_assignments_ids, new_assignments_ids)
to_unassign_ids = old_assignments_ids - new_assignments_ids
to_assign_ids = new_assignments_ids - old_assignments_ids
if to_unassign_ids.any? || to_assign_ids.any?
old_alerts_ids.each do |alert_id|
group_trigger = @alerts_client.get_single_trigger "MiQ-#{alert_id}", true
unassign_members(group_trigger, profile_id, to_unassign_ids) unless to_unassign_ids.empty?
assign_members(group_trigger, profile_id, to_assign_ids) unless to_assign_ids.empty?
end
end
end
def unassign_members(group_trigger, profile_id, members_ids)
context, profiles = unassign_members_context(group_trigger, profile_id)
group_trigger.context = context
@alerts_client.update_group_trigger(group_trigger)
if profiles.empty?
members_ids.each do |member_id|
@alerts_client.orphan_member("#{group_trigger.id}-#{member_id}")
@alerts_client.delete_trigger("#{group_trigger.id}-#{member_id}")
end
end
end
def unassign_members_context(group_trigger, profile_id)
context = group_trigger.context.nil? ? {} : group_trigger.context
profiles = context['miq.alert_profiles'].nil? ? [] : context['miq.alert_profiles'].split(",")
profiles -= [profile_id.to_s]
context['miq.alert_profiles'] = profiles.uniq.join(",")
[context, profiles]
end
def assign_members(group_trigger, profile_id, members_ids)
group_trigger.context = assign_members_context(group_trigger, profile_id)
@alerts_client.update_group_trigger(group_trigger)
members = @alerts_client.list_members group_trigger.id
current_members_ids = members.collect(&:id)
members_ids.each do |member_id|
next if current_members_ids.include?("#{group_trigger.id}-#{member_id}")
create_new_member(group_trigger, member_id)
end
end
def assign_members_context(group_trigger, profile_id)
context = group_trigger.context.nil? ? {} : group_trigger.context
profiles = context['miq.alert_profiles'].nil? ? [] : context['miq.alert_profiles'].split(",")
profiles.push(profile_id.to_s)
context['miq.alert_profiles'] = profiles.uniq.join(",")
context
end
def create_new_member(group_trigger, member_id)
server = MiddlewareServer.find(member_id)
new_member = ::Hawkular::Alerts::Trigger::GroupMemberInfo.new
new_member.group_id = group_trigger.id
new_member.member_id = "#{group_trigger.id}-#{member_id}"
new_member.member_name = "#{group_trigger.name} for #{server.name}"
new_member.member_context = {'resource_path' => server.ems_ref.to_s}
new_member.data_id_map = calculate_member_data_id_map(server, group_trigger)
@alerts_client.create_member_trigger(new_member)
end
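    # Example mapping produced below (hypothetical feed/resource ids): with an empty
    # prefix, a group condition on data_id "WildFly Memory Metrics~Heap Used" for a
    # server with feed "f1" and nativeid "Local~~" maps to
    #   "MI~R~[f1/Local~~]~MT~WildFly Memory Metrics~Heap Used"
    # (prepended with the group trigger's 'dataId.hm.prefix' when one is set).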
def calculate_member_data_id_map(server, group_trigger)
data_id_map = {}
prefix = group_trigger.context['dataId.hm.prefix'].nil? ? '' : group_trigger.context['dataId.hm.prefix']
group_trigger.conditions.each do |condition|
data_id_map[condition.data_id] = "#{prefix}MI~R~[#{server.feed}/#{server.nativeid}]~MT~#{condition.data_id}"
unless condition.data2_id.nil?
data_id_map[condition.data2_id] = "#{prefix}MI~R~[#{server.feed}/#{server.nativeid}]~MT~#{condition.data2_id}"
end
end
data_id_map
end
end
end
| 44.504425 | 120 | 0.720223 |
5dd13882ec1df30ae6b2c1b5289f49eef9e10606
| 2,652 |
require 'rails_helper'
RSpec.feature 'Vendor makes unconditional offer' do
include CandidateHelper
scenario 'A vendor makes an unconditional offer and this is accepted by the candidate' do
FeatureFlag.activate(:unconditional_offers_via_api)
given_a_candidate_has_submitted_their_application
when_i_make_an_unconditional_offer_for_the_application_over_the_api
then_i_can_see_the_offer_was_made_successfully
when_the_candidate_accepts_the_unconditional_offer
then_the_candidate_sees_that_they_have_accepted_the_offer
end
def given_a_candidate_has_submitted_their_application
candidate_completes_application_form
candidate_submits_application
end
def when_i_make_an_unconditional_offer_for_the_application_over_the_api
api_token = VendorAPIToken.create_with_random_token!(provider: @provider)
Capybara.current_session.driver.header('Authorization', "Bearer #{api_token}")
Capybara.current_session.driver.header('Content-Type', 'application/json')
@application_choice = @application.application_choices.first
@course_option = @application_choice.course_option
@provider_user = create(:provider_user, send_notifications: true, providers: [@provider])
uri = "/api/v1/applications/#{@application_choice.id}/offer"
@api_response = page.driver.post(uri, unconditional_offer_payload)
# Unset session headers
Capybara.current_session.driver.header('Authorization', nil)
Capybara.current_session.driver.header('Content-Type', nil)
end
def then_i_can_see_the_offer_was_made_successfully
parsed_response_body = JSON.parse(@api_response.body)
application_attrs = parsed_response_body.dig('data', 'attributes')
expect(@api_response.status).to eq 200
expect(application_attrs['status']).to eq('offer')
expect(application_attrs.dig('offer', 'conditions')).to eq([])
end
def when_the_candidate_accepts_the_unconditional_offer
visit candidate_interface_offer_path(@application_choice)
choose 'Accept offer'
click_button 'Continue'
click_button 'Accept offer'
end
def then_the_candidate_sees_that_they_have_accepted_the_offer
expect(page).to have_content "You have accepted your offer for #{@application_choice.course.name_and_code} at #{@application_choice.provider.name}"
end
def unconditional_offer_payload
{
meta: {
attribution: {
full_name: 'Jane Smith',
email: '[email protected]',
user_id: '12345',
},
timestamp: Time.zone.now.iso8601,
},
data: {
conditions: [],
course: nil,
},
}.to_json
end
end
| 34 | 151 | 0.762821 |
abfac6ec07ff038857e242d0a002573109cc0aef
| 1,495 |
class Xtensor < Formula
desc "C++ tensors with broadcasting and lazy computing"
homepage "https://xtensor.readthedocs.io/en/latest/"
url "https://github.com/xtensor-stack/xtensor/archive/0.21.9.tar.gz"
sha256 "845cd3cc4f4992be7425b5f44a015181415cdb35d10f73ddbc8d433e331dc740"
head "https://github.com/xtensor-stack/xtensor.git"
license "BSD-3-Clause"
depends_on "cmake" => :build
depends_on "xtl"
depends_on "matteosecli/quantstack/xsimd"
def install
mkdir "build" do
system "cmake", "..",
"-Dxtl_DIR=#{Formula["xtl"].lib}/cmake/xtl",
"-DXTENSOR_USE_XSIMD=ON",
*std_cmake_args
system "make", "install"
end
end
test do
(testpath/"test.cpp").write <<~EOS
#include <iostream>
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
#include "xtensor/xview.hpp"
int main()
{
xt::xarray<double> arr1
{{11.0, 12.0, 13.0},
{21.0, 22.0, 23.0},
{31.0, 32.0, 33.0}};
xt::xarray<double> arr2
{100.0, 200.0, 300.0};
xt::xarray<double> result = xt::view(arr1, 1) + arr2;
xt::xarray<double> expected
{121.0, 222.0, 323.0};
std::cout << result(2) << std::endl;
return xt::allclose( result, expected ) ? 0 : 1;
}
EOS
system ENV.cxx, "test.cpp", "-I#{include}", "-std=c++14", "-o", "test"
assert_equal "323", shell_output("./test").chomp
end
end
| 27.685185 | 75 | 0.579933 |
1c8289c812f3fa06567e0902b80bdd223bbb9dde
| 189 |
# Copyright (c) 2019 Danil Pismenny <[email protected]>
# frozen_string_literal: true
module Operations
class LiabilitiesController < ResourcesController
layout 'fluid'
end
end
| 18.9 | 57 | 0.777778 |
08e0d27b7399206ccf5fe998496017536e5d1407
| 556 |
module FedoraMigrate
class RubydoraConnection
attr_accessor :options, :connection
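    # Usage sketch (hypothetical connection details):
    #   conn = FedoraMigrate::RubydoraConnection.new(url: 'http://localhost:8983/fedora',
    #                                                user: 'fedoraAdmin', password: 'fedoraAdmin')
    #   conn.connection # => Rubydora::Repository instance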
def initialize(params = {})
params = params.dup
self.options = params
connect
end
def connect(force = false)
return unless @connection.nil? || force
allowable_options = [:url, :user, :password, :timeout, :open_timeout, :ssl_client_cert, :ssl_client_key, :validateChecksum]
      client_options = options.select { |k, _v| allowable_options.include?(k) }
@connection = Rubydora.connect client_options
end
end
end
| 29.263158 | 129 | 0.68705 |
f8e695de3e2ea467f36cdd5b01acc59b05c08b07
| 393 |
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
module Azure::EventGrid::Mgmt::V2017_09_15_preview
module Models
#
# Defines values for OperationOrigin
#
module OperationOrigin
User = "User"
System = "System"
UserAndSystem = "UserAndSystem"
end
end
end
| 23.117647 | 70 | 0.699746 |
e8c95dd29cf8e8c40871681369f30e2f22ebfcc8
| 342 |
class CreateEmailReminderItems < ActiveRecord::Migration[4.2]
def change
create_table :email_reminder_items do |t|
t.references :email_reminder
t.string :type
t.text :configuration
t.timestamps
end
add_index :email_reminder_items, :email_reminder_id
add_index :email_reminder_items, :type
end
end
| 24.428571 | 61 | 0.72807 |
61e11879d81e7bb7075ad217d6d529da77f47588
| 180 |
class Bar
include Mongoid::Document
field :name, :type => String
field :location, :type => Array
has_one :rating, :as => :ratable
index [[ :location, Mongo::GEO2D ]]
end
| 22.5 | 37 | 0.661111 |
21162bfd614226bca9e3715269aa9b268f52dc33
| 227 |
class CreateUsers < ActiveRecord::Migration[6.0]
def change
create_table :users do |t|
t.string :username
t.string :email
t.string :password_digest
t.timestamps null: false
end
end
end
| 18.916667 | 48 | 0.643172 |
e240766da0d43ac5e88a8e514ba5f34ff41bf067
| 152 |
class AddDescriptionDataToApidaeObjects < ActiveRecord::Migration[5.1]
def change
add_column :apidae_objects, :description_data, :jsonb
end
end
| 25.333333 | 70 | 0.796053 |
621efb16a8b6663930410cae81959a4ef9c6f911
| 219 |
class SponsorSignedEmailOnThresholdEmailJob < EmailJob
self.mailer = SponsorMailer
self.email = :sponsor_signed_email_on_threshold
def perform(signature)
if signature.validated?
super
end
end
end
| 19.909091 | 54 | 0.767123 |
117c59a6a3a511988f6f350c159dac1b254b00c8
| 1,691 |
require 'test_helper'
class UsersControllerTest < ActionDispatch::IntegrationTest
def setup
@user = users(:michael)
@other_user = users(:archer)
end
test "should redirect index when not logged in" do
get users_path
assert_redirected_to login_url
end
test "should get new" do
get signup_path
assert_response :success
end
test "should redirect edit when not logged in" do
get edit_user_path(@user)
assert_not flash.empty?
assert_redirected_to login_url
end
test "should redirect update when not logged in" do
patch user_path(@user), params: { user: { name: @user.name,
email: @user.email } }
assert_not flash.empty?
assert_redirected_to login_url
end
test "should redirect edit when logged in as wrong user" do
log_in_as(@other_user)
get edit_user_path(@user)
assert flash.empty?
assert_redirected_to root_url
end
test "should redirect update when logged in as wrong user" do
log_in_as(@other_user)
patch user_path(@user), params: { user: { name: @user.name,
email: @user.email } }
assert flash.empty?
assert_redirected_to root_url
end
test "should redirect destroy when not logged in" do
assert_no_difference 'User.count' do
delete user_path(@user)
end
assert_redirected_to login_url
end
test "should redirect destroy when logged in as a non-admin" do
log_in_as(@other_user)
assert_no_difference 'User.count' do
delete user_path(@user)
end
assert_redirected_to root_url
end
end
| 29.155172 | 69 | 0.654051 |
1c2d4a4d456aaf9b22574f899b73033e601ff7b2
| 56,595 |
# Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
#
# This program is licensed to you under the Apache License Version 2.0,
# and you may not use this file except in compliance with the Apache License Version 2.0.
# You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Apache License Version 2.0 is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
# Author:: Alex Dean (mailto:[email protected])
# Copyright:: Copyright (c) 2012-2019 Snowplow Analytics Ltd
# License:: Apache License Version 2.0
require 'set'
require 'elasticity'
require 'aws-sdk-s3'
require 'awrence'
require 'json'
require 'base64'
require 'contracts'
require 'iglu-client'
require 'securerandom'
require 'tempfile'
require 'rest-client'
# Ruby class to execute Snowplow's Hive jobs against Amazon EMR
# using Elasticity (https://github.com/rslifka/elasticity).
module Snowplow
module EmrEtlRunner
class EmrJob
include Contracts
# Constants
JAVA_PACKAGE = "com.snowplowanalytics.snowplow"
PARTFILE_REGEXP = ".*part-.*"
PARTFILE_GROUPBY_REGEXP = ".*(part-)\\d+-(.*)"
ATOMIC_EVENTS_PARTFILE_GROUPBY_REGEXP = ".*\/atomic-events\/(part-)\\d+-(.*)"
SHREDDED_TYPES_PARTFILE_GROUPBY_REGEXP = ".*\/shredded-types\/vendor=(.+)\/name=(.+)\/.+\/version=(.+)\/(part-)\\d+-(.*)"
SHREDDED_TSV_TYPES_PARTFILE_GROUPBY_REGEXP = ".*\/shredded-tsv\/vendor=(.+)\/name=(.+)\/.+\/version=(.+)\/(part-)\\d+-(.*)"
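      # Note: the *_GROUPBY_REGEXP patterns above are passed to s3-dist-cp --groupBy;
      # part files whose concatenated capture groups are equal get merged into a single
      # output file, which is how the many small part files are consolidated.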
STREAM_ENRICH_REGEXP = ".*\.gz"
SUCCESS_REGEXP = ".*_SUCCESS"
STANDARD_HOSTED_ASSETS = "s3://snowplow-hosted-assets"
ENRICH_STEP_INPUT = 'hdfs:///local/snowplow/raw-events/'
ENRICH_STEP_OUTPUT = 'hdfs:///local/snowplow/enriched-events/'
SHRED_STEP_OUTPUT = 'hdfs:///local/snowplow/shredded-events/'
SHRED_JOB_WITH_PROCESSING_MANIFEST = Gem::Version.new('0.14.0-rc1')
SHRED_JOB_WITH_TSV_OUTPUT = Gem::Version.new('0.16.0-rc1')
RDB_LOADER_WITH_PROCESSING_MANIFEST = Gem::Version.new('0.15.0-rc4')
AMI_4 = Gem::Version.new("4.0.0")
AMI_5 = Gem::Version.new("5.0.0")
# Need to understand the status of all our jobflow steps
@@running_states = Set.new(%w(WAITING RUNNING PENDING SHUTTING_DOWN))
@@failed_states = Set.new(%w(FAILED CANCELLED))
include Monitoring::Logging
include Snowplow::EmrEtlRunner::Utils
include Snowplow::EmrEtlRunner::S3
include Snowplow::EmrEtlRunner::EMR
# Initializes our wrapper for the Amazon EMR client.
Contract Bool, Bool, Bool, Bool, Bool, Bool, Bool, Bool, ArchiveStep, ArchiveStep, ConfigHash, ArrayOf[String], String, TargetsHash, RdbLoaderSteps, Bool, String => EmrJob
def initialize(debug, staging, enrich, staging_stream_enrich, shred, es, archive_raw, rdb_load, archive_enriched, archive_shredded, config, enrichments_array, resolver, targets, rdbloader_steps, use_persistent_jobflow, persistent_jobflow_duration)
logger.debug "Initializing EMR jobflow"
# Configuration
custom_assets_bucket =
get_hosted_assets_bucket(STANDARD_HOSTED_ASSETS, config[:aws][:s3][:buckets][:assets], config[:aws][:emr][:region])
standard_assets_bucket =
get_hosted_assets_bucket(STANDARD_HOSTED_ASSETS, STANDARD_HOSTED_ASSETS, config[:aws][:emr][:region])
assets = get_assets(
custom_assets_bucket,
config.dig(:enrich, :versions, :spark_enrich),
config[:storage][:versions][:rdb_shredder],
config[:storage][:versions][:hadoop_elasticsearch],
config[:storage][:versions][:rdb_loader])
collector_format = config.dig(:collectors, :format)
@run_tstamp = Time.new
run_id = @run_tstamp.strftime("%Y-%m-%d-%H-%M-%S")
@run_id = run_id
@rdb_loader_log_base = config[:aws][:s3][:buckets][:log] + "rdb-loader/#{@run_id}/"
@rdb_loader_logs = [] # pairs of target name and associated log
etl_tstamp = (@run_tstamp.to_f * 1000).to_i.to_s
output_codec = output_codec_from_compression_format(config.dig(:enrich, :output_compression))
encrypted = config[:aws][:s3][:buckets][:encrypted]
s3 = Aws::S3::Client.new(
:access_key_id => config[:aws][:access_key_id],
:secret_access_key => config[:aws][:secret_access_key],
:region => config[:aws][:s3][:region])
ami_version = Gem::Version.new(config[:aws][:emr][:ami_version])
shredder_version = Gem::Version.new(config[:storage][:versions][:rdb_shredder])
# Configure Elasticity with your AWS credentials
Elasticity.configure do |c|
c.access_key = config[:aws][:access_key_id]
c.secret_key = config[:aws][:secret_access_key]
end
# Create a job flow
@use_persistent_jobflow = use_persistent_jobflow
@persistent_jobflow_duration_s = parse_duration(persistent_jobflow_duration)
found_persistent_jobflow = false
if use_persistent_jobflow
emr = Elasticity::EMR.new(:region => config[:aws][:emr][:region])
emr_jobflow_id = get_emr_jobflow_id(emr, config[:aws][:emr][:jobflow][:job_name])
if emr_jobflow_id.nil?
@jobflow = Elasticity::JobFlow.new
else
@jobflow = Elasticity::JobFlow.from_jobflow_id(emr_jobflow_id, config[:aws][:emr][:region])
found_persistent_jobflow = true
end
@jobflow.action_on_failure = "CANCEL_AND_WAIT"
@jobflow.keep_job_flow_alive_when_no_steps = true
else
@jobflow = Elasticity::JobFlow.new
end
# Configure
@jobflow.name = config[:aws][:emr][:jobflow][:job_name]
if ami_version < AMI_4
@legacy = true
@jobflow.ami_version = config[:aws][:emr][:ami_version]
else
@legacy = false
@jobflow.release_label = "emr-#{config[:aws][:emr][:ami_version]}"
end
@jobflow.tags = config[:monitoring][:tags]
@jobflow.ec2_key_name = config[:aws][:emr][:ec2_key_name]
@jobflow.region = config[:aws][:emr][:region]
@jobflow.job_flow_role = config[:aws][:emr][:jobflow_role] # Note job_flow vs jobflow
@jobflow.service_role = config[:aws][:emr][:service_role]
@jobflow.placement = config[:aws][:emr][:placement]
@jobflow.additional_info = config[:aws][:emr][:additional_info]
unless config[:aws][:emr][:ec2_subnet_id].nil? # Nils placement so do last and conditionally
@jobflow.ec2_subnet_id = config[:aws][:emr][:ec2_subnet_id]
end
unless config[:aws][:emr][:security_configuration].nil?
@jobflow.security_configuration = config[:aws][:emr][:security_configuration]
end
@jobflow.log_uri = config[:aws][:s3][:buckets][:log]
@jobflow.enable_debugging = debug
@jobflow.visible_to_all_users = true
@jobflow.instance_count = config[:aws][:emr][:jobflow][:core_instance_count] + 1 # +1 for the master instance
@jobflow.master_instance_type = config[:aws][:emr][:jobflow][:master_instance_type]
@jobflow.slave_instance_type = config[:aws][:emr][:jobflow][:core_instance_type]
@jobflow.timeout = 120
s3_endpoint = get_s3_endpoint(config[:aws][:s3][:region])
csbr = config[:aws][:s3][:buckets][:raw]
csbe = config[:aws][:s3][:buckets][:enriched]
csbs = config[:aws][:s3][:buckets][:shredded]
@pending_jobflow_steps = []
# Clear HDFS if persistent jobflow has been found
if found_persistent_jobflow
submit_jobflow_step(get_rmr_step([ENRICH_STEP_INPUT, ENRICH_STEP_OUTPUT, SHRED_STEP_OUTPUT], standard_assets_bucket, "Empty Snowplow HDFS"), use_persistent_jobflow)
submit_jobflow_step(get_hdfs_expunge_step, use_persistent_jobflow)
end
# staging
if staging
unless empty?(s3, csbr[:processing])
raise DirectoryNotEmptyError, "Cannot safely add staging step to jobflow, #{csbr[:processing]} is not empty"
end
src_pattern = collector_format == 'clj-tomcat' ? '.*localhost\_access\_log.*\.txt.*' : '.+'
src_pattern_regex = Regexp.new src_pattern
non_empty_locs = csbr[:in].select { |l|
not empty?(s3, l,
lambda { |k| !(k =~ /\/$/) and !(k =~ /\$folder\$$/) and !(k =~ src_pattern_regex).nil? })
}
if non_empty_locs.empty?
raise NoDataToProcessError, "No Snowplow logs to process since last run"
else
non_empty_locs.each { |l|
staging_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
staging_step.arguments = [
"--src", l,
"--dest", csbr[:processing],
"--s3Endpoint", s3_endpoint,
"--srcPattern", src_pattern,
"--deleteOnSuccess"
]
if collector_format == 'clj-tomcat'
staging_step.arguments = staging_step.arguments + [ '--groupBy', '.*/_*(.+)' ]
end
if encrypted
staging_step.arguments = staging_step.arguments + [ '--s3ServerSideEncryption' ]
end
staging_step.name = "[staging] s3-dist-cp: Raw #{l} -> Raw Staging S3"
submit_jobflow_step(staging_step, use_persistent_jobflow)
}
end
end
# EBS
unless config[:aws][:emr][:jobflow][:core_instance_ebs].nil?
ebs_bdc = Elasticity::EbsBlockDeviceConfig.new
ebs_bdc.volume_type = config[:aws][:emr][:jobflow][:core_instance_ebs][:volume_type]
ebs_bdc.size_in_gb = config[:aws][:emr][:jobflow][:core_instance_ebs][:volume_size]
ebs_bdc.volumes_per_instance = 1
if config[:aws][:emr][:jobflow][:core_instance_ebs][:volume_type] == "io1"
ebs_bdc.iops = config[:aws][:emr][:jobflow][:core_instance_ebs][:volume_iops]
end
ebs_c = Elasticity::EbsConfiguration.new
ebs_c.add_ebs_block_device_config(ebs_bdc)
ebs_c.ebs_optimized = true
unless config[:aws][:emr][:jobflow][:core_instance_ebs][:ebs_optimized].nil?
ebs_c.ebs_optimized = config[:aws][:emr][:jobflow][:core_instance_ebs][:ebs_optimized]
end
@jobflow.set_core_ebs_configuration(ebs_c)
end
@jobflow.add_application("Hadoop") unless found_persistent_jobflow
if collector_format == 'thrift'
if @legacy
[
Elasticity::HadoopBootstrapAction.new('-c', 'io.file.buffer.size=65536'),
Elasticity::HadoopBootstrapAction.new('-m', 'mapreduce.user.classpath.first=true')
].each do |action|
@jobflow.add_bootstrap_action(action) unless found_persistent_jobflow
end
else
[{
"Classification" => "core-site",
"Properties" => {
"io.file.buffer.size" => "65536"
}
},
{
"Classification" => "mapred-site",
"Properties" => {
"mapreduce.user.classpath.first" => "true"
}
}].each do |config|
@jobflow.add_configuration(config) unless found_persistent_jobflow
end
end
end
# Add custom bootstrap actions
bootstrap_actions = config[:aws][:emr][:bootstrap]
unless bootstrap_actions.nil?
bootstrap_actions.each do |bootstrap_action|
@jobflow.add_bootstrap_action(Elasticity::BootstrapAction.new(bootstrap_action)) unless found_persistent_jobflow
end
end
# Prepare a bootstrap action based on the AMI version
bootstrap_script_location = if ami_version < AMI_4
"#{standard_assets_bucket}common/emr/snowplow-ami3-bootstrap-0.1.0.sh"
elsif ami_version >= AMI_4 && ami_version < AMI_5
"#{standard_assets_bucket}common/emr/snowplow-ami4-bootstrap-0.2.0.sh"
else
"#{standard_assets_bucket}common/emr/snowplow-ami5-bootstrap-0.1.0.sh"
end
cc_version = get_cc_version(config.dig(:enrich, :versions, :spark_enrich))
@jobflow.add_bootstrap_action(Elasticity::BootstrapAction.new(bootstrap_script_location, cc_version)) unless found_persistent_jobflow
# Install and launch HBase
hbase = config[:aws][:emr][:software][:hbase]
      if hbase
install_hbase_action = Elasticity::BootstrapAction.new("s3://#{config[:aws][:emr][:region]}.elasticmapreduce/bootstrap-actions/setup-hbase")
@jobflow.add_bootstrap_action(install_hbase_action) unless found_persistent_jobflow
start_hbase_step = Elasticity::CustomJarStep.new("/home/hadoop/lib/hbase-#{hbase}.jar")
start_hbase_step.name = "Start HBase #{hbase}"
start_hbase_step.arguments = [ 'emr.hbase.backup.Main', '--start-master' ]
# NOTE: Presumes that HBase will remain available for a persistent cluster
submit_jobflow_step(start_hbase_step, use_persistent_jobflow) unless found_persistent_jobflow
end
# Install Lingual
lingual = config[:aws][:emr][:software][:lingual]
      if lingual
install_lingual_action = Elasticity::BootstrapAction.new("s3://files.concurrentinc.com/lingual/#{lingual}/lingual-client/install-lingual-client.sh")
@jobflow.add_bootstrap_action(install_lingual_action) unless found_persistent_jobflow
end
# EMR configuration: Spark, YARN, etc
configuration = config[:aws][:emr][:configuration]
unless configuration.nil?
configuration.each do |k, h|
@jobflow.add_configuration({"Classification" => k, "Properties" => h}) unless found_persistent_jobflow
end
end
# Now let's add our core group
core_instance_group = Elasticity::InstanceGroup.new.tap { |ig|
ig.type = config[:aws][:emr][:jobflow][:core_instance_type]
# check if bid exists
cib = config[:aws][:emr][:jobflow][:core_instance_bid]
if cib.nil?
ig.set_on_demand_instances
else
ig.set_spot_instances(cib)
end
}
@jobflow.set_core_instance_group(core_instance_group)
# Now let's add our task group if required
tic = config[:aws][:emr][:jobflow][:task_instance_count]
if tic > 0
instance_group = Elasticity::InstanceGroup.new.tap { |ig|
ig.count = tic
ig.type = config[:aws][:emr][:jobflow][:task_instance_type]
tib = config[:aws][:emr][:jobflow][:task_instance_bid]
if tib.nil?
ig.set_on_demand_instances
else
ig.set_spot_instances(tib)
end
}
@jobflow.set_task_instance_group(instance_group)
end
stream_enrich_mode = !csbe[:stream].nil?
      # Get the full run path when we need to move data to enrich_final_output;
      # otherwise (when enriched/good is already non-empty)
      # we can list files within folders using wildcard regexps
enrich_final_output = if enrich || staging_stream_enrich
partition_by_run(csbe[:good], run_id)
else
csbe[:good]
end
if enrich
raw_input = csbr[:processing]
# When resuming from enrich, we need to check for emptiness of the processing bucket
if !staging and empty?(s3, raw_input)
raise NoDataToProcessError, "No Snowplow logs in #{raw_input}, can't resume from enrich"
end
# for ndjson/urbanairship we can group by everything, just aim for the target size
        group_by = is_ua_ndjson(collector_format) ? ".*/(\\w+)/.*" : ".*([0-9]+-[0-9]+-[0-9]+)-[0-9]+.*"
# Create the Hadoop MR step for the file crushing
compact_to_hdfs_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
compact_to_hdfs_step.arguments = [
"--src" , raw_input,
"--dest" , ENRICH_STEP_INPUT,
"--s3Endpoint" , s3_endpoint
] + [
"--groupBy" , group_by,
"--targetSize" , "128",
"--outputCodec" , "lzo"
].select { |el|
is_cloudfront_log(collector_format) || is_ua_ndjson(collector_format)
}
# uncompress events that are gzipped since this format is unsplittable and causes issues
# downstream in the spark enrich job snowplow/snowplow#3525
if collector_format == "clj-tomcat" then
compact_to_hdfs_step.arguments << "--outputCodec" << "none"
end
if encrypted
compact_to_hdfs_step.arguments = compact_to_hdfs_step.arguments + [ '--s3ServerSideEncryption' ]
end
compact_to_hdfs_step.name = "[enrich] s3-dist-cp: Raw S3 -> Raw HDFS"
submit_jobflow_step(compact_to_hdfs_step, use_persistent_jobflow)
# 2. Enrichment
enrich_asset = if assets[:enrich].nil?
raise ConfigError, "Cannot add enrich step as spark_enrich version is not configured"
else
assets[:enrich]
end
enrich_version = config.dig(:enrich, :versions, :spark_enrich)
enrich_step =
if is_spark_enrich(enrich_version) then
@jobflow.add_application("Spark") unless found_persistent_jobflow
build_spark_step(
"[enrich] spark: Enrich Raw Events",
enrich_asset,
"enrich.spark.EnrichJob",
{ :in => glob_path(ENRICH_STEP_INPUT),
:good => ENRICH_STEP_OUTPUT,
:bad => partition_by_run(csbe[:bad], run_id)
},
{ 'input-format' => collector_format,
'etl-timestamp' => etl_tstamp,
'iglu-config' => build_iglu_config_json(resolver),
'enrichments' => build_enrichments_json(enrichments_array)
}
)
else
build_scalding_step(
"[enrich] scalding: Enrich Raw Events",
enrich_asset,
"enrich.hadoop.EtlJob",
{ :in => glob_path(ENRICH_STEP_INPUT),
:good => ENRICH_STEP_OUTPUT,
:bad => partition_by_run(csbe[:bad], run_id),
:errors => partition_by_run(csbe[:errors], run_id, config.dig(:enrich, :continue_on_unexpected_error))
},
{ :input_format => collector_format,
:etl_tstamp => etl_tstamp,
:iglu_config => build_iglu_config_json(resolver),
:enrichments => build_enrichments_json(enrichments_array)
}
)
end
# Late check whether our enrichment directory is empty. We do an early check too
unless empty?(s3, csbe[:good])
raise DirectoryNotEmptyError, "Cannot safely add enrichment step to jobflow, #{csbe[:good]} is not empty"
end
submit_jobflow_step(enrich_step, use_persistent_jobflow)
# We need to copy our enriched events from HDFS back to S3
copy_to_s3_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_to_s3_step.arguments = [
"--src" , ENRICH_STEP_OUTPUT,
"--dest" , enrich_final_output,
"--groupBy" , PARTFILE_GROUPBY_REGEXP,
"--targetSize", "24",
"--s3Endpoint", s3_endpoint
] + output_codec
if encrypted
copy_to_s3_step.arguments = copy_to_s3_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_to_s3_step.name = "[enrich] spark: Enriched HDFS -> S3"
submit_jobflow_step(copy_to_s3_step, use_persistent_jobflow)
copy_success_file_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_success_file_step.arguments = [
"--src" , ENRICH_STEP_OUTPUT,
"--dest" , enrich_final_output,
"--srcPattern" , SUCCESS_REGEXP,
"--s3Endpoint" , s3_endpoint
]
if encrypted
copy_success_file_step.arguments = copy_success_file_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_success_file_step.name = "[enrich] spark: Enriched HDFS _SUCCESS -> S3"
submit_jobflow_step(copy_success_file_step, use_persistent_jobflow)
end
# Staging data produced by Stream Enrich
if staging_stream_enrich
unless empty?(s3, csbe[:good])
raise DirectoryNotEmptyError, "Cannot safely add stream staging step to jobflow, #{csbe[:good]} is not empty"
end
src_pattern_regex = Regexp.new STREAM_ENRICH_REGEXP
if empty?(s3, csbe[:stream], lambda { |k| !(k =~ /\/$/) and !(k =~ /\$folder\$$/) and !(k =~ src_pattern_regex).nil? })
raise NoDataToProcessError, "No Snowplow enriched stream logs to process since last run"
end
staging_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
staging_step.arguments = [
"--src" , csbe[:stream],
"--dest" , enrich_final_output,
"--s3Endpoint" , s3_endpoint,
"--srcPattern" , STREAM_ENRICH_REGEXP,
"--deleteOnSuccess"
]
if encrypted
staging_step.arguments = staging_step.arguments + [ '--s3ServerSideEncryption' ]
end
staging_step.name = "[staging_stream_enrich] s3-dist-cp: Stream Enriched #{csbe[:stream]} -> Enriched Staging S3"
submit_jobflow_step(staging_step, use_persistent_jobflow)
end
if shred
# 3. Shredding
shred_final_output = partition_by_run(csbs[:good], run_id)
# Add processing manifest if available
processing_manifest = get_processing_manifest(targets)
processing_manifest_shred_args =
if not processing_manifest.nil?
if shredder_version >= SHRED_JOB_WITH_PROCESSING_MANIFEST
{ 'processing-manifest-table' => processing_manifest, 'item-id' => shred_final_output }
else
{}
end
else
{}
end
# Add target config JSON if necessary
storage_target_shred_args = get_rdb_shredder_target(config, targets[:ENRICHED_EVENTS])
# If we enriched, we free some space on HDFS by deleting the raw events
# otherwise we need to copy the enriched events back to HDFS
if enrich
submit_jobflow_step(get_rmr_step([ENRICH_STEP_INPUT], standard_assets_bucket, "Empty Raw HDFS"), use_persistent_jobflow)
else
src_pattern = if stream_enrich_mode then STREAM_ENRICH_REGEXP else PARTFILE_REGEXP end
copy_to_hdfs_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_to_hdfs_step.arguments = [
"--src" , enrich_final_output, # Opposite way round to normal
"--dest" , ENRICH_STEP_OUTPUT,
"--srcPattern" , src_pattern,
"--outputCodec", "none",
"--s3Endpoint" , s3_endpoint
]
if encrypted
copy_to_hdfs_step.arguments = copy_to_hdfs_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_to_hdfs_step.name = "[shred] s3-dist-cp: Enriched S3 -> HDFS"
submit_jobflow_step(copy_to_hdfs_step, use_persistent_jobflow)
end
shred_step =
if is_rdb_shredder(config[:storage][:versions][:rdb_shredder]) then
@jobflow.add_application("Spark") unless found_persistent_jobflow
duplicate_storage_config = build_duplicate_storage_json(targets[:DUPLICATE_TRACKING], false)
build_spark_step(
"[shred] spark: Shred Enriched Events",
assets[:shred],
"storage.spark.ShredJob",
{ :in => glob_path(ENRICH_STEP_OUTPUT),
:good => SHRED_STEP_OUTPUT,
:bad => partition_by_run(csbs[:bad], run_id)
},
{
'iglu-config' => build_iglu_config_json(resolver)
}.merge(duplicate_storage_config).merge(processing_manifest_shred_args).merge(storage_target_shred_args)
)
else
duplicate_storage_config = build_duplicate_storage_json(targets[:DUPLICATE_TRACKING])
build_scalding_step(
"[shred] scalding: Shred Enriched Events",
assets[:shred],
"enrich.hadoop.ShredJob",
{ :in => glob_path(ENRICH_STEP_OUTPUT),
:good => SHRED_STEP_OUTPUT,
:bad => partition_by_run(csbs[:bad], run_id),
:errors => partition_by_run(csbs[:errors], run_id, config.dig(:enrich, :continue_on_unexpected_error))
},
{
:iglu_config => build_iglu_config_json(resolver)
}.merge(duplicate_storage_config)
)
end
# Late check whether our target directory is empty
unless empty?(s3, csbs[:good])
raise DirectoryNotEmptyError, "Cannot safely add shredding step to jobflow, #{csbs[:good]} is not empty"
end
submit_jobflow_step(shred_step, use_persistent_jobflow)
# We need to copy our shredded types from HDFS back to S3
# Whether to combine the files outputted by the shred step
consolidate_shredded_output = config[:aws][:s3][:consolidate_shredded_output]
if consolidate_shredded_output
copy_atomic_events_to_s3_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_atomic_events_to_s3_step.arguments = [
"--src" , SHRED_STEP_OUTPUT,
"--dest" , shred_final_output,
"--groupBy" , ATOMIC_EVENTS_PARTFILE_GROUPBY_REGEXP,
"--targetSize", "24",
"--s3Endpoint", s3_endpoint
] + output_codec
if encrypted
copy_atomic_events_to_s3_step.arguments = copy_atomic_events_to_s3_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_atomic_events_to_s3_step.name = "[shred] s3-dist-cp: Shredded atomic events HDFS -> S3"
submit_jobflow_step(copy_atomic_events_to_s3_step, use_persistent_jobflow)
# Copy shredded JSONs (pre-R32)
copy_shredded_types_to_s3_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_shredded_types_to_s3_step.arguments = [
"--src" , SHRED_STEP_OUTPUT,
"--dest" , shred_final_output,
"--groupBy" , SHREDDED_TYPES_PARTFILE_GROUPBY_REGEXP,
"--targetSize", "24",
"--s3Endpoint", s3_endpoint
] + output_codec
if encrypted
copy_shredded_types_to_s3_step.arguments = copy_shredded_types_to_s3_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_shredded_types_to_s3_step.name = "[shred] s3-dist-cp: Shredded JSON types HDFS -> S3"
submit_jobflow_step(copy_shredded_types_to_s3_step, use_persistent_jobflow)
# Copy shredded TSVs (R32+)
if shredder_version >= SHRED_JOB_WITH_TSV_OUTPUT
copy_shredded_tsv_types_to_s3_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_shredded_tsv_types_to_s3_step.arguments = [
"--src" , SHRED_STEP_OUTPUT,
"--dest" , shred_final_output,
"--groupBy" , SHREDDED_TSV_TYPES_PARTFILE_GROUPBY_REGEXP,
"--targetSize", "24",
"--s3Endpoint", s3_endpoint
] + output_codec
if encrypted
copy_shredded_tsv_types_to_s3_step.arguments = copy_shredded_tsv_types_to_s3_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_shredded_tsv_types_to_s3_step.name = "[shred] s3-dist-cp: Shredded TSV types HDFS -> S3"
submit_jobflow_step(copy_shredded_tsv_types_to_s3_step, use_persistent_jobflow)
end
else
copy_to_s3_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_to_s3_step.arguments = [
"--src" , SHRED_STEP_OUTPUT,
"--dest" , shred_final_output,
"--srcPattern", PARTFILE_REGEXP,
"--s3Endpoint", s3_endpoint
] + output_codec
if encrypted
copy_to_s3_step.arguments = copy_to_s3_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_to_s3_step.name = "[shred] s3-dist-cp: Shredded HDFS -> S3"
submit_jobflow_step(copy_to_s3_step, use_persistent_jobflow)
end
copy_success_file_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
copy_success_file_step.arguments = [
"--src" , SHRED_STEP_OUTPUT,
"--dest" , shred_final_output,
"--srcPattern" , SUCCESS_REGEXP,
"--s3Endpoint" , s3_endpoint
]
if encrypted
copy_success_file_step.arguments = copy_success_file_step.arguments + [ '--s3ServerSideEncryption' ]
end
copy_success_file_step.name = "[shred] s3-dist-cp: Shredded HDFS _SUCCESS -> S3"
submit_jobflow_step(copy_success_file_step, use_persistent_jobflow)
end
if es
get_elasticsearch_steps(config, assets, enrich, shred, targets[:FAILED_EVENTS]).each do |step|
submit_jobflow_step(step, use_persistent_jobflow)
end
end
if archive_raw
# We need to copy our enriched events from HDFS back to S3
archive_raw_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
archive_raw_step.arguments = [
"--src" , csbr[:processing],
"--dest" , partition_by_run(csbr[:archive], run_id),
"--s3Endpoint" , s3_endpoint,
"--deleteOnSuccess"
]
if encrypted
archive_raw_step.arguments = archive_raw_step.arguments + [ '--s3ServerSideEncryption' ]
end
archive_raw_step.name = "[archive_raw] s3-dist-cp: Raw Staging S3 -> Raw Archive S3"
submit_jobflow_step(archive_raw_step, use_persistent_jobflow)
end
if rdb_load
rdb_loader_version = Gem::Version.new(config[:storage][:versions][:rdb_loader])
skip_manifest = stream_enrich_mode && rdb_loader_version > RDB_LOADER_WITH_PROCESSING_MANIFEST
get_rdb_loader_steps(config, targets[:ENRICHED_EVENTS], resolver, assets[:loader], rdbloader_steps, skip_manifest).each do |step|
submit_jobflow_step(step, use_persistent_jobflow)
end
end
if archive_enriched == 'pipeline'
archive_enriched_step = get_archive_step(csbe[:good], csbe[:archive], run_id, s3_endpoint, "[archive_enriched] s3-dist-cp: Enriched S3 -> Enriched Archive S3", encrypted)
submit_jobflow_step(archive_enriched_step, use_persistent_jobflow)
elsif archive_enriched == 'recover'
latest_run_id = get_latest_run_id(s3, csbe[:good])
archive_enriched_step = get_archive_step(csbe[:good], csbe[:archive], latest_run_id, s3_endpoint, '[archive_enriched] s3-dist-cp: Enriched S3 -> S3 Enriched Archive', encrypted)
submit_jobflow_step(archive_enriched_step, use_persistent_jobflow)
else # skip
nil
end
if archive_shredded == 'pipeline'
archive_shredded_step = get_archive_step(csbs[:good], csbs[:archive], run_id, s3_endpoint, "[archive_shredded] s3-dist-cp: Shredded S3 -> Shredded Archive S3", encrypted)
submit_jobflow_step(archive_shredded_step, use_persistent_jobflow)
elsif archive_shredded == 'recover'
latest_run_id = get_latest_run_id(s3, csbs[:good], 'atomic-events')
archive_shredded_step = get_archive_step(csbs[:good], csbs[:archive], latest_run_id, s3_endpoint, "[archive_shredded] s3-dist-cp: Shredded S3 -> S3 Shredded Archive", encrypted)
submit_jobflow_step(archive_shredded_step, use_persistent_jobflow)
else # skip
nil
end
self
end
# Create one step for each Elasticsearch target for each source for that target
#
Contract ConfigHash, Hash, Bool, Bool, ArrayOf[Iglu::SelfDescribingJson] => ArrayOf[Elasticity::ScaldingStep]
def get_elasticsearch_steps(config, assets, enrich, shred, failure_storages)
# The default sources are the enriched and shredded errors generated for this run
sources = []
sources << partition_by_run(config[:aws][:s3][:buckets][:enriched][:bad], @run_id) if enrich
sources << partition_by_run(config[:aws][:s3][:buckets][:shredded][:bad], @run_id) if shred
steps = failure_storages.flat_map { |target|
sources.map { |source|
step = Elasticity::ScaldingStep.new(
assets[:elasticsearch],
"com.snowplowanalytics.snowplow.storage.hadoop.ElasticsearchJob",
({
:input => source,
:host => target.data[:host],
:port => target.data[:port].to_s,
:index => target.data[:index],
:type => target.data[:type],
:es_nodes_wan_only => target.data[:nodesWanOnly] ? "true" : "false"
}).reject { |k, v| v.nil? }
)
step.name = "Errors in #{source} -> Elasticsearch: #{target.data[:name]}"
step
}
}
# Wait 60 seconds before starting the first step so S3 can become consistent
if (enrich || shred) && steps.any?
steps[0].arguments << '--delay' << '60'
end
steps
end
# Run (and wait for) the daily ETL job.
#
# Throws a BootstrapFailureError if the job fails due to a bootstrap failure.
# Throws an EmrExecutionError if the jobflow fails for any other reason.
Contract ConfigHash => nil
def run(config)
snowplow_tracking_enabled = ! config[:monitoring][:snowplow].nil?
if snowplow_tracking_enabled
Monitoring::Snowplow.parameterize(config)
end
@pending_jobflow_steps.each do |jobflow_step|
begin
retries ||= 0
# if the job flow is already running this triggers an HTTP call
@jobflow.add_step(jobflow_step)
rescue Elasticity::ThrottlingException, RestClient::RequestTimeout, RestClient::InternalServerError, RestClient::ServiceUnavailable, RestClient::SSLCertificateNotVerified => e
if retries < 3
retries += 1
delay = 2 ** retries + 30
logger.warn "Got error [#{e.message}] while trying to submit jobflow step [#{jobflow_step.name}] to jobflow [#{@jobflow.jobflow_id}]. Retrying in #{delay} seconds"
sleep(delay)
retry
else
if snowplow_tracking_enabled
step_status = Elasticity::ClusterStepStatus.new
step_status.name = "Add step [#{jobflow_step.name}] to jobflow [#{@jobflow.jobflow_id}]. (Error: [#{e.message}])"
step_status.state = "FAILED"
Monitoring::Snowplow.instance.track_single_step(step_status)
end
raise EmrExecutionError, "Can't add step [#{jobflow_step.name}] to jobflow [#{@jobflow.jobflow_id}] (retried 3 times). Error: [#{e.message}]."
end
end
end
jobflow_id = @jobflow.jobflow_id
if jobflow_id.nil?
begin
retries ||= 0
jobflow_id = @jobflow.run
rescue Elasticity::ThrottlingException, RestClient::RequestTimeout, RestClient::InternalServerError, RestClient::ServiceUnavailable, RestClient::SSLCertificateNotVerified
logger.warn "Got an error while trying to submit the jobflow"
retries += 1
sleep(2 ** retries + 30)
retry if retries < 3
end
end
logger.debug "EMR jobflow #{jobflow_id} started, waiting for jobflow to complete..."
if snowplow_tracking_enabled
Monitoring::Snowplow.instance.track_job_started(jobflow_id, cluster_status(@jobflow), cluster_step_status_for_run(@jobflow))
end
status = wait_for
if status.successful or status.rdb_loader_failure or status.rdb_loader_cancellation
log_level = if status.successful
'info'
elsif status.rdb_loader_cancellation
'warn'
else
'error'
end
output_rdb_loader_logs(config[:aws][:s3][:region], config[:aws][:access_key_id],
config[:aws][:secret_access_key], log_level)
end
cluster_status = cluster_status(@jobflow)
cluster_step_status_for_run = cluster_step_status_for_run(@jobflow)
if status.successful
logger.debug "EMR jobflow #{jobflow_id} completed successfully."
if snowplow_tracking_enabled
Monitoring::Snowplow.instance.track_job_succeeded(jobflow_id, cluster_status, cluster_step_status_for_run)
end
elsif status.bootstrap_failure
if snowplow_tracking_enabled
Monitoring::Snowplow.instance.track_job_failed(jobflow_id, cluster_status, cluster_step_status_for_run)
end
raise BootstrapFailureError, get_failure_details(jobflow_id, cluster_status, cluster_step_status_for_run)
else
if snowplow_tracking_enabled
Monitoring::Snowplow.instance.track_job_failed(jobflow_id, cluster_status, cluster_step_status_for_run)
end
raise EmrExecutionError, get_failure_details(jobflow_id, cluster_status, cluster_step_status_for_run)
end
if @use_persistent_jobflow and
@persistent_jobflow_duration_s > 0 and
cluster_status.created_at + @persistent_jobflow_duration_s < @run_tstamp
logger.debug "EMR jobflow has expired and will be shutdown."
begin
retries ||= 0
@jobflow.shutdown
rescue Elasticity::ThrottlingException, RestClient::RequestTimeout, RestClient::InternalServerError, RestClient::ServiceUnavailable, RestClient::SSLCertificateNotVerified
retries += 1
sleep(2 ** retries + 30)
retry if retries < 3
end
end
nil
end
      # Fetch logs from S3 left by RDB Loader steps
      #
      # Parameters:
      # +region+:: region of the logs bucket
      # +aws_access_key_id+:: AWS access key id used to read the logs
      # +aws_secret_key+:: AWS secret access key used to read the logs
      # +log_level+:: level ('info', 'warn' or 'error') at which to output the downloaded logs
Contract String, String, String, String => nil
def output_rdb_loader_logs(region, aws_access_key_id, aws_secret_key, log_level)
s3 = Aws::S3::Client.new(
:access_key_id => aws_access_key_id,
:secret_access_key => aws_secret_key,
:region => region)
if @rdb_loader_logs.empty? or empty?(s3, @rdb_loader_log_base)
logger.info "No RDB Loader logs"
else
logger.info "RDB Loader logs"
@rdb_loader_logs.each do |l|
tmp = Tempfile.new("rdbloader")
bucket, key = parse_bucket_prefix(l[1])
logger.debug "Downloading #{l[1]} to #{tmp.path}"
begin
s3.get_object({
response_target: tmp,
bucket: bucket,
key: key,
})
if log_level == 'info'
logger.info l[0]
logger.info tmp.read
elsif log_level == 'warn'
logger.warn l[0]
logger.warn tmp.read
else
logger.error l[0]
logger.error tmp.read
end
rescue Exception => e
logger.error "Error while downloading RDB log #{l[1]}"
logger.error e.message
ensure
tmp.close
tmp.unlink
end
end
end
nil
end
private
# Adds a step to the jobflow according to whether or not
# we are using a persistent cluster.
#
# Parameters:
# +jobflow_step+:: the step to add
# +use_persistent_jobflow+:: whether a persistent jobflow should be used
def submit_jobflow_step(jobflow_step, use_persistent_jobflow = false)
if use_persistent_jobflow
jobflow_step.action_on_failure = "CANCEL_AND_WAIT"
end
@pending_jobflow_steps << jobflow_step
end
# Build an Elasticity RDB Loader step.
#
# Parameters:
# +config+:: main Snowplow config.yml
# +targets+:: list of Storage target config hashes
# +resolver+:: base64-encoded Iglu resolver JSON
# +jar+:: s3 object with RDB Loader jar
# +skip_manifest+:: whether load_manifest RDB Loader step should be skipped
Contract ConfigHash, ArrayOf[Iglu::SelfDescribingJson], String, String, RdbLoaderSteps, Bool => ArrayOf[Elasticity::CustomJarStep]
def get_rdb_loader_steps(config, targets, resolver, jar, rdbloader_steps, skip_manifest)
# Remove credentials from config
clean_config = deep_copy(config)
clean_config[:aws][:access_key_id] = ""
clean_config[:aws][:secret_access_key] = ""
default_arguments = {
:config => Base64.strict_encode64(recursive_stringify_keys(clean_config).to_yaml),
:resolver => build_iglu_config_json(resolver)
}
skip_steps = if skip_manifest then rdbloader_steps[:skip] + ["load_manifest"] else rdbloader_steps[:skip] end
targets.map { |target|
name = target.data[:name]
log_key = @rdb_loader_log_base + SecureRandom.uuid
@rdb_loader_logs << [name, log_key]
encoded_target = Base64.strict_encode64(target.to_json.to_json)
arguments = [
"--config", default_arguments[:config],
"--resolver", default_arguments[:resolver],
"--logkey", log_key,
"--target", encoded_target
] + unless skip_steps.empty?
["--skip", skip_steps.join(",")]
else
[]
end + unless rdbloader_steps[:include].empty?
["--include", rdbloader_steps[:include].join(",")]
else
[]
end
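          # Resulting CLI (values elided), assuming both optional lists are non-empty:
          #   --config <base64 yaml> --resolver <base64 json> --logkey <s3 log key>
          #   --target <base64 target json> --skip <steps> --include <steps>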
rdb_loader_step = Elasticity::CustomJarStep.new(jar)
rdb_loader_step.arguments = arguments
rdb_loader_step.name = "[rdb_load] Load #{name} Storage Target"
rdb_loader_step
}
end
# List bucket (enriched:good or shredded:good) and return latest run folder
#
# Parameters:
# +s3+:: AWS S3 client
# +s3_path+:: Full S3 path to folder
# +suffix+:: Suffix to check for emptiness, atomic-events in case of shredded:good
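      #
      # Example: for s3://bucket/shredded/good/run=2017-05-10-02-45-30/atomic-events/...
      # this returns "2017-05-10-02-45-30" (and raises if that run folder is empty).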
def get_latest_run_id(s3, s3_path, suffix = '')
run_id_regex = /.*\/run=((\d|-)+)\/.*/
folder = last_object_name(s3, s3_path,
lambda { |k| !(k =~ /\$folder\$$/) and !k[run_id_regex, 1].nil? })
run_id = folder[run_id_regex, 1]
if run_id.nil?
logger.error "No run folders in [#{s3_path}] found"
raise UnexpectedStateError, "No run folders in [#{s3_path}] found"
else
path = File.join(s3_path, "run=#{run_id}", suffix)
if empty?(s3, path)
raise NoDataToProcessError, "Cannot archive #{path}, no data found"
else
run_id
end
end
end
# Defines a S3DistCp step for archiving enriched or shred folder
#
# Parameters:
# +good_path+:: shredded:good or enriched:good full S3 path
# +archive_path+:: enriched:archive or shredded:archive full S3 path
      # +run_id_folder+:: run id folder name (2017-05-10-02-45-30, without the `run=` prefix)
      # +s3_endpoint+:: S3 endpoint to use for the copy
      # +name+:: step description to show in EMR console
# +encrypted+:: whether the destination bucket is encrypted
#
# Returns a step ready for adding to the Elasticity Jobflow.
Contract String, String, String, String, String, Bool => Elasticity::S3DistCpStep
def get_archive_step(good_path, archive_path, run_id_folder, s3_endpoint, name, encrypted)
archive_step = Elasticity::S3DistCpStep.new(legacy = @legacy)
archive_step.arguments = [
"--src" , partition_by_run(good_path, run_id_folder),
"--dest" , partition_by_run(archive_path, run_id_folder),
"--s3Endpoint" , s3_endpoint,
"--deleteOnSuccess"
]
if encrypted
archive_step.arguments = archive_step.arguments + [ '--s3ServerSideEncryption' ]
end
archive_step.name = name
archive_step
end
# Defines an Elasticity Scalding step.
#
# Parameters:
      # +step_name+:: name of step
      # +jar+:: s3 path to the Scalding fatjar
      # +main_class+:: Java main class to run
      # +folders+:: hash of in, good, bad, errors S3/HDFS folders
      # +extra_step_args+:: additional arguments to pass to the step
#
# Returns a step ready for adding to the Elasticity Jobflow.
Contract String, String, String, Hash, Hash => Elasticity::ScaldingStep
def build_scalding_step(step_name, jar, main_class, folders, extra_step_args={})
# Build our argument hash
arguments = extra_step_args
.merge({
:input_folder => folders[:in],
:output_folder => folders[:good],
:bad_rows_folder => folders[:bad],
:exceptions_folder => folders[:errors]
})
.reject { |k, v| v.nil? } # Because folders[:errors] may be empty
arguments['tool.partialok'] = ''
# Now create the Hadoop MR step for the jobflow
scalding_step = Elasticity::ScaldingStep.new(jar, "#{JAVA_PACKAGE}.#{main_class}", arguments)
scalding_step.name = step_name
scalding_step
end
# Defines an Elasticity Spark step.
#
# Parameters:
      # +step_name+:: name of the step
      # +jar+:: s3 path to the Spark fatjar
      # +main_class+:: class to run
      # +folders+:: hash of input, output, bad S3/HDFS folders
      # +extra_step_args+:: additional command line arguments to pass to the step
#
      # Returns a step ready to be added to the Elasticity Jobflow.
Contract String, String, String, Hash, Hash => Elasticity::SparkStep
def build_spark_step(step_name, jar, main_class, folders, extra_step_args={})
arguments = extra_step_args
.merge({
'input-folder' => folders[:in],
'output-folder' => folders[:good],
'bad-folder' => folders[:bad],
})
spark_step = Elasticity::SparkStep.new(jar, "#{JAVA_PACKAGE}.#{main_class}")
spark_step.app_arguments = arguments
spark_step.spark_arguments = {
'master' => 'yarn',
'deploy-mode' => 'cluster'
}
spark_step.name = step_name
spark_step
end
# Wait for a jobflow.
# Check its status every 5 minutes till it completes.
#
# Returns true if the jobflow completed without error,
# false otherwise.
Contract None => JobResult
def wait_for
success = false
bootstrap_failure = false
rdb_loader_failure = false
rdb_loader_cancellation = false
# Loop until we can quit...
while true do
begin
cluster_step_status_for_run = cluster_step_status_for_run(@jobflow)
if cluster_step_status_for_run.nil?
logger.warn "Could not retrieve cluster status, waiting 5 minutes before checking jobflow again"
sleep(300)
else
# Count up running tasks and failures
statuses = cluster_step_status_for_run.map(&:state).inject([0, 0]) do |sum, state|
[ sum[0] + (@@running_states.include?(state) ? 1 : 0), sum[1] + (@@failed_states.include?(state) ? 1 : 0) ]
end
# If no step is still running, then quit
if statuses[0] == 0
success = statuses[1] == 0 # True if no failures
bootstrap_failure = EmrJob.bootstrap_failure?(@jobflow, cluster_step_status_for_run)
rdb_loader_failure = EmrJob.rdb_loader_failure?(cluster_step_status_for_run)
rdb_loader_cancellation = EmrJob.rdb_loader_cancellation?(cluster_step_status_for_run)
break
else
# Sleep a while before we check again
sleep(60)
end
end
rescue SocketError => se
logger.warn "Got socket error #{se}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue Errno::ECONNREFUSED => ref
logger.warn "Got connection refused #{ref}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue Errno::ECONNRESET => res
logger.warn "Got connection reset #{res}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue Errno::ETIMEDOUT => to
logger.warn "Got connection timeout #{to}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue RestClient::InternalServerError => ise
logger.warn "Got internal server error #{ise}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue Elasticity::ThrottlingException => te
logger.warn "Got Elasticity throttling exception #{te}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue ArgumentError => ae
logger.warn "Got Elasticity argument error #{ae}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue IOError => ioe
logger.warn "Got IOError #{ioe}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue RestClient::SSLCertificateNotVerified => sce
logger.warn "Got RestClient::SSLCertificateNotVerified #{sce}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue RestClient::RequestTimeout => rt
logger.warn "Got RestClient::RequestTimeout #{rt}, waiting 5 minutes before checking jobflow again"
sleep(300)
rescue RestClient::ServiceUnavailable => su
logger.warn "Got RestClient::ServiceUnavailable #{su}, waiting 5 minutes before checking jobflow again"
sleep(300)
end
end
JobResult.new(success, bootstrap_failure, rdb_loader_failure, rdb_loader_cancellation)
end
# Spaceship operator supporting nils
#
# Parameters:
# +a+:: First argument
# +b+:: Second argument
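      #
      # Examples:
      #   nilable_spaceship(nil, nil)       #=> 0
      #   nilable_spaceship(nil, Time.now)  #=> 1   (nil sorts after a value)
      #   nilable_spaceship(Time.now, nil)  #=> -1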
Contract Maybe[Time], Maybe[Time] => Num
def self.nilable_spaceship(a, b)
case
when (a.nil? and b.nil?)
0
when a.nil?
1
when b.nil?
-1
else
a <=> b
end
end
# Recursively change the keys of a YAML from symbols to strings
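      # e.g. recursive_stringify_keys({ aws: { region: 'eu-west-1' } })
      #      #=> { "aws" => { "region" => "eu-west-1" } }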
def recursive_stringify_keys(h)
if h.class == [].class
h.map {|key| recursive_stringify_keys(key)}
elsif h.class == {}.class
Hash[h.map {|k,v| [k.to_s, recursive_stringify_keys(v)]}]
else
h
end
end
def deep_copy(o)
Marshal.load(Marshal.dump(o))
end
# Ensures we only look at the steps submitted in this run
# and not within prior persistent runs
#
# Parameters:
# +jobflow+:: The jobflow to extract steps from
Contract Elasticity::JobFlow => ArrayOf[Elasticity::ClusterStepStatus]
def cluster_step_status_for_run(jobflow)
begin
retries ||= 0
jobflow.cluster_step_status
.select { |a| a.created_at >= @run_tstamp }
.sort_by { |a| a.created_at }
rescue Elasticity::ThrottlingException, RestClient::RequestTimeout, RestClient::InternalServerError, RestClient::ServiceUnavailable, RestClient::SSLCertificateNotVerified
retries += 1
sleep(2 ** retries + 30)
retry if retries < 3
end
end
Contract Elasticity::JobFlow => Elasticity::ClusterStatus
def cluster_status(jobflow)
begin
retries ||= 0
jobflow.cluster_status
rescue Elasticity::ThrottlingException, RestClient::RequestTimeout, RestClient::InternalServerError, RestClient::ServiceUnavailable, RestClient::SSLCertificateNotVerified
retries += 1
sleep(2 ** retries + 30)
retry if retries < 3
end
end
# Returns true if the jobflow failed at a rdb loader step
Contract ArrayOf[Elasticity::ClusterStepStatus] => Bool
def self.rdb_loader_failure?(cluster_step_statuses)
rdb_loader_failure_indicator = /Storage Target/
cluster_step_statuses.any? { |s| s.state == 'FAILED' && !(s.name =~ rdb_loader_failure_indicator).nil? }
end
# Returns true if the rdb loader step was cancelled
Contract ArrayOf[Elasticity::ClusterStepStatus] => Bool
def self.rdb_loader_cancellation?(cluster_step_statuses)
rdb_loader_failure_indicator = /Storage Target/
cluster_step_statuses.any? { |s| s.state == 'CANCELLED' && !(s.name =~ rdb_loader_failure_indicator).nil? }
end
# Returns true if the jobflow seems to have failed due to a bootstrap failure
Contract Elasticity::JobFlow, ArrayOf[Elasticity::ClusterStepStatus] => Bool
def self.bootstrap_failure?(jobflow, cluster_step_statuses)
bootstrap_failure_indicator = /BOOTSTRAP_FAILURE|bootstrap action|Master instance startup failed/
cluster_step_statuses.all? { |s| s.state == 'CANCELLED' } &&
(!(jobflow.cluster_status.last_state_change_reason =~ bootstrap_failure_indicator).nil?)
end
Contract ArrayOf[String], String, String => Elasticity::CustomJarStep
def get_rmr_step(locations, bucket, description)
step = Elasticity::CustomJarStep.new("s3://#{@jobflow.region}.elasticmapreduce/libs/script-runner/script-runner.jar")
step.arguments = ["#{bucket}common/emr/snowplow-hadoop-fs-rmr-0.2.0.sh"] + locations
step.name = "[cleanup] #{description}"
step
end
def get_hdfs_expunge_step
step = Elasticity::CustomJarStep.new("command-runner.jar")
step.arguments = %W(hdfs dfs -expunge)
step.name = "[cleanup] Empty HDFS trash"
step
end
Contract TargetsHash => Maybe[String]
def get_processing_manifest(targets)
targets[:ENRICHED_EVENTS].select { |t| not t.data[:processingManifest].nil? }.map { |t| t.data.dig(:processingManifest, :amazonDynamoDb, :tableName) }.first
end
Contract ConfigHash, ArrayOf[Iglu::SelfDescribingJson] => Hash
def get_rdb_shredder_target(config, targets)
supported_targets = targets.select { |target_config|
target_config.schema.name == 'redshift_config' && target_config.schema.version.model >= 4
}
if Gem::Version.new(config[:storage][:versions][:rdb_shredder]) >= SHRED_JOB_WITH_TSV_OUTPUT && !supported_targets.empty?
{ 'target' => Base64.strict_encode64(supported_targets.first.to_json.to_json) }
else
{}
end
end
end
end
end
| 44.111458 | 253 | 0.616044 |
b9232d19f8e5cd1901916580f1629a9a5081dca1
| 891 |
# See http://docs.chef.io/config_rb_knife.html for more information on knife configuration options
#current_dir = File.dirname(__FILE__)
#log_level :info
#log_location STDOUT
#node_name "nitinjain0506"
#client_key "#{current_dir}/nitinjain0506.pem"
#chef_server_url "https://api.chef.io/organizations/tcsbby"
#cookbook_path ["#{current_dir}/cookbooks"]
log_level :info
log_location STDOUT
node_name 'nitinjain0506'
client_key '/root/chef-repo/.chef/nitinjain0506.pem'
validation_client_name 'tcsbby-validator'
validation_key '/root/chef-repo/.chef/tcsbby-validator.pem'
chef_server_url 'https://api.chef.io/organizations/tcsbby'
syntax_check_cache_path '/root/chef-repo/syntax_check_cache'
cookbook_path [ '/root/chef-repo/cookbooks' ]
| 46.894737 | 98 | 0.670034 |
6223a941047427633ec5e266d3c05d1fbe57c882
| 1,144 |
Rails.application.routes.draw do
devise_for :users
  root to: 'homes#top' # top page (shown at localhost:3000)
get 'home/about' => 'homes#about'
get 'users/mypage' => 'users#mypage'
get 'users/animal/:id' => 'users#animal', as: 'user_animal'
  get '/blog_genre/:id' => 'blogs#genre', as: 'blog_genre' # id is required for genre search
  get 'search' => 'search#search'
  post 'animals/:animal_id/tasks/:id/change' => 'tasks#change', as: 'task_change'
resources :users do
    get 'follows' => 'relationships#follows', as: 'follows' # list of users being followed
    get 'followers' => 'relationships#followers', as: 'followers' # list of followers
resource :relationships, only: [:create, :destroy]
resource :animal_permits, only: [:create, :destroy] do
      patch :status_change # changes the status; behaviour defined in animal_permit.rb
end
end
resources :blogs do
resource :blog_comments, only: [:create, :destroy]
resource :favorites, only: [:create, :destroy]
end
resources :animals do
resource :animal_comments, only: [:create, :destroy]
resource :tasks, only: [:create, :destroy, :update]
end
# For details on the DSL available within this file, see http://guides.rubyonrails.org/routing.html
end
| 38.133333 | 101 | 0.698427 |
d5c8331851e8273b1ed9bc44a47408dbe50ff52a
| 743 |
module Imgrb::Headers
##
#Header for animated png files
class ApngHeader < PngHeader
attr_reader :filter_method, :interlace_method, :default_image,
:number_of_frames, :number_of_plays
def initialize(width, height, bit_depth, compression_method, image_type,
filter_method, interlace_method, number_of_frames,
number_of_plays, default_image)
super(width, height, bit_depth, compression_method, image_type,
filter_method, interlace_method)
@default_image = default_image
@number_of_frames = number_of_frames
@number_of_plays = number_of_plays
end
def image_format
:apng
end
def animated?
true
end
end
end
| 27.518519 | 76 | 0.678331 |
0828e1d0424d1617c0bf5714bf3ad51275749101
| 324 |
class FontTulpenOne < Formula
head "https://github.com/google/fonts/raw/main/ofl/tulpenone/TulpenOne-Regular.ttf", verified: "github.com/google/fonts/"
desc "Tulpen One"
homepage "https://fonts.google.com/specimen/Tulpen+One"
def install
(share/"fonts").install "TulpenOne-Regular.ttf"
end
test do
end
end
| 29.454545 | 123 | 0.734568 |
acdc0f3819fe566c74242f2df8955fdf8a98e7bb
| 1,213 |
class PdftkJava < Formula
desc "Port of pdftk in java"
homepage "https://gitlab.com/pdftk-java/pdftk"
url "https://gitlab.com/pdftk-java/pdftk/-/archive/v3.0.9/pdftk-v3.0.9.tar.gz"
sha256 "8210167286849552eff08199e7734223c6ae9b7f1875e4e2b5b6e7996514dd10"
head "https://gitlab.com/pdftk-java/pdftk.git"
bottle do
cellar :any_skip_relocation
rebuild 1
sha256 "3162c6a5dfc854143c476838c31e671cf02163bb8475b3a046552c1c47fb54ef" => :catalina
sha256 "bbbadd1e4476c47832c87ca1c8b786a3e50bb4dc8856fb421ba349d874d68b74" => :mojave
sha256 "9167487c4fa0180827b361089f7e5cac58b3b359c71e6f283172910fcb80952f" => :high_sierra
sha256 "0a3efa6565500b9fb85613b8e66853790061ce133bb759bc01c54ecaf25f13a6" => :x86_64_linux
end
depends_on "gradle" => :build
depends_on :java => "1.8"
def install
system "gradle", "shadowJar", "--no-daemon"
libexec.install "build/libs/pdftk-all.jar"
bin.write_jar_script libexec/"pdftk-all.jar", "pdftk", :java_version => "1.8"
end
test do
pdf = test_fixtures("test.pdf")
output_path = testpath/"output.pdf"
system bin/"pdftk", pdf, pdf, "cat", "output", output_path
assert output_path.read.start_with?("%PDF")
end
end
| 36.757576 | 94 | 0.746908 |
b9819894081a7319c8366f5ef0ad1889b8faf339
| 1,116 |
class ApplicationController < Sinatra::Base
## control app startup and general logic ##
# configuration
configure do
set :views, 'app/views'
# A session is used to keep state during requests. If activated, you have one session hash per user session: #
enable :sessions
# Throws an exception if SESSION_SECRET does not exist in the current ENV. Set random hex for session secret.
# Solution found from sinatra documentation and user grempe in this thread: https://github.com/sinatra/sinatra/issues/1187
set :session_secret, 'password'
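    # A minimal sketch (left as a comment, not enabled here) of the ENV-based
    # approach described above; ENV.fetch raises a KeyError when SESSION_SECRET
    # is unset, which matches the "throws an exception" behaviour:
    #
    #   set :session_secret, ENV.fetch('SESSION_SECRET')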
end
# index
get '/' do
@userid=Product.all.map(&:user_id).uniq
erb :index
end
# define helper methods for use in route handlers and templates: #
helpers do
def logged_in?
!!session[:user_id]
end
def redirect_to_index
redirect '/'
end
def redirect_if_not_logged_in
if !logged_in?
redirect "/login"
erb :login
end
end
def is_unique?
User.find_by_username(params[:username]) == nil
end
def current_user
User.find(session[:user_id])
end
end
def current_project
Project.get_current_product
end
end
| 26.571429 | 124 | 0.723118 |
2685baac1015f65ef0d786220ec19646a647da53
| 1,983 |
#!/usr/bin/env ruby
# encoding: utf-8
require "rubygems"
require "amqp"
class Consumer
#
# API
#
def handle_message(metadata, payload)
puts "Received a message: #{payload}, content_type = #{metadata.content_type}"
end # handle_message(metadata, payload)
end
class Worker
#
# API
#
def initialize(channel, queue_name = AMQ::Protocol::EMPTY_STRING, consumer = Consumer.new)
@queue_name = queue_name
@channel = channel
@channel.on_error(&method(:handle_channel_exception))
@consumer = consumer
end # initialize
def start
@queue = @channel.queue(@queue_name, :exclusive => true)
@queue.subscribe(&@consumer.method(:handle_message))
end # start
#
# Implementation
#
def handle_channel_exception(channel, channel_close)
puts "Oops... a channel-level exception: code = #{channel_close.reply_code}, message = #{channel_close.reply_text}"
end # handle_channel_exception(channel, channel_close)
end
class Producer
#
# API
#
def initialize(channel, exchange)
@channel = channel
@exchange = exchange
end # initialize(channel, exchange)
def publish(message, options = {})
@exchange.publish(message, options)
end # publish(message, options = {})
#
# Implementation
#
def handle_channel_exception(channel, channel_close)
puts "Oops... a channel-level exception: code = #{channel_close.reply_code}, message = #{channel_close.reply_text}"
end # handle_channel_exception(channel, channel_close)
end
AMQP.start("amqp://guest:[email protected]") do |connection, open_ok|
channel = AMQP::Channel.new(connection)
worker = Worker.new(channel, "amqpgem.objects.integration")
worker.start
producer = Producer.new(channel, channel.default_exchange)
puts "Publishing..."
producer.publish("Hello, world", :routing_key => "amqpgem.objects.integration")
# stop in 2 seconds
EventMachine.add_timer(2.0) { connection.close { EventMachine.stop } }
end
| 22.033333 | 119 | 0.706001 |
870e39600bbac087829251c3d22473dd9584d13c
| 1,344 |
require File.dirname(__FILE__) + "/../../../spec_helper"
module FixtureReplacementController
describe MethodGenerator, "Evaluation loading" do
before :each do
@module = Module.new
extend @module
item_attributes = lambda do |o|
o.category = default_category
end
writing_attributes = lambda do |w|
w.name = "foo"
end
ClassFactory.stub!(:fixture_replacement_module).and_return @module
@item_attributes = AttributeCollection.new(:item, :attributes => item_attributes)
@writing_attributes = AttributeCollection.new(:writing, :from => :item, :attributes => writing_attributes, :class => Writing)
AttributeCollection.new(:category)
end
it "should not raise an error if the a default_* method is referenced before it is defined" do
lambda {
MethodGenerator.generate_methods
}.should_not raise_error
end
it "should merge the hash with item and writing when new_writing is called" do
MethodGenerator.generate_methods
@writing_attributes.should_receive(:merge!)
new_writing
end
it "should merge the has with item and writing when create_writing is called" do
MethodGenerator.generate_methods
@writing_attributes.should_receive(:merge!)
create_writing
end
end
end
| 32.780488 | 131 | 0.691964 |
3368ee7e7a734b0797e564bc1a1690fe008a7971
| 1,252 |
#-- encoding: UTF-8
#-- copyright
# OpenProject is a project management system.
# Copyright (C) 2012-2014 the OpenProject Foundation (OPF)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3.
#
# OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
# Copyright (C) 2006-2013 Jean-Philippe Lang
# Copyright (C) 2010-2013 the ChiliProject Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# See doc/COPYRIGHT.rdoc for more details.
#++
module MailHandlerHelper
end
| 39.125 | 91 | 0.76278 |
e2cfa252563b98c25ccb51b87fd4caa41ff2a357
| 574 |
require 'spec_helper'
describe package('zookeeper-server') do
it { should be_installed }
end
describe user('zookeeper') do
  it { should exist }
end
describe group('zookeeper') do
  it { should exist }
end
describe service('zookeeper-server') do
it { should be_running }
end
describe process("zookeeper") do
it { should be_running }
its(:user) { should eq "zookeeper" }
its(:group) { should eq "zookeeper" }
end
describe port(2181) do
  it { should_not be_listening.on('127.0.0.1').with('tcp') }
it { should be_listening.on(o['ipaddress']).with('tcp') }
end
| 19.793103 | 60 | 0.691638 |
e89d388d63a17925b94228a1791ea9ca2d71ce26
| 7,250 |
# frozen_string_literal: true
#
# Author:: Ashique Saidalavi (<[email protected]>)
# Copyright:: Copyright (c) 2022 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "spec_helper"
describe Vra::DeploymentRequest do
let(:client) do
Vra::Client.new(
username: "[email protected]",
password: "password",
tenant: "tenant",
base_url: "https://vra.corp.local"
)
end
let(:catalog_id) { "cat-123456" }
let(:request_payload) do
{
image_mapping: "Centos Image",
name: "test deployment",
flavor_mapping: "Small",
version: "1",
project_id: "pro-123",
}
end
describe "#initialize" do
it "should raise errors for missing arguments" do
request = described_class.new(
client,
catalog_id,
request_payload
)
expect(request.name).to eq("test deployment")
expect(request.image_mapping).to eq("Centos Image")
expect(request.flavor_mapping).to eq("Small")
expect(request.version).to eq("1")
expect(request.count).to eq(1)
end
end
describe "#validate!" do
it "should return error if params are missing" do
request = described_class.new(client, catalog_id)
expect { request.send(:validate!) }.to raise_error(ArgumentError)
request.image_mapping = "Centos Image"
request.name = "test deployment"
request.flavor_mapping = "Small"
request.version = "1"
request.project_id = "pro-123"
      expect { request.send(:validate!) }.not_to raise_error
end
context "versions" do
let(:dep_request) do
described_class.new(
client,
catalog_id,
image_mapping: "centos",
name: "sample dep",
flavor_mapping: "small",
project_id: "pro-123"
)
end
before do
allow(client).to receive(:authorized?).and_return(true)
end
it "should not call the api to fetch versions if provided in the params" do
expect(client).not_to receive(:http_get_paginated_array!)
dep_request.version = "1"
dep_request.send(:validate!)
end
it "should fetch version from api if version is blank" do
expect(client).to receive(:http_get_paginated_array!).and_return([{ "id" => "2", "description" => "v2.0" }])
dep_request.send(:validate!)
expect(dep_request.version).to eq("2")
end
it "should raise an exception if no valid versions found" do
expect(client).to receive(:http_get_paginated_array!).and_return([])
expect { dep_request.send(:validate!) }
.to raise_error(ArgumentError)
.with_message("Unable to fetch a valid catalog version")
end
end
end
describe "#additional parameters" do
let(:request) do
described_class.new(client, catalog_id, request_payload)
end
context "set_parameter" do
it "should set the parameter" do
request.set_parameter("hardware-config", "stirng", "Small")
expect(request.parameters).to eq({ inputs: { "hardware-config" => "Small" } })
expect(request.parameters[:inputs].count).to be(1)
end
end
context "set_parameters" do
it "should be able to set multiple parameters" do
request.set_parameters("test-parent", { "param1" => { type: "string", value: 1234 } })
expect(request.parameters)
.to eq({ inputs: { "test-parent" => { "inputs" => { "param1" => 1234 } } } })
end
it "should set multiple parameters with different data types" do
request.set_parameters("param1", { key1: { type: "string", value: "data" } })
request.set_parameters("param2", { key2: { type: "boolean", value: false } })
request.set_parameters("param3", { key3: { type: "integer", value: 100 } })
expect(request.parameters[:inputs].count).to be 3
end
end
context "delete_parameter" do
before(:each) do
request.set_parameter("hardware-config", "string", "small")
end
it "should delete the existing parameter" do
expect(request.parameters[:inputs].count).to be(1)
request.delete_parameter("hardware-config")
expect(request.parameters[:inputs].count).to be(0)
end
end
context "#hash_parameters" do
it "should have the correct representation" do
request.set_parameters(:param1, { key1: { type: "string", value: "data" } })
expect(request.hash_parameters).to eq({ param1: { key1: "data" } })
end
end
end
describe "#submit" do
let(:request) do
described_class.new(client, catalog_id, request_payload)
end
before(:each) do
allow(client).to receive(:authorized?).and_return(true)
end
it "should call the validate before submit" do
expect(request).to receive(:validate!)
stub_request(:post, client.full_url("/catalog/api/items/cat-123456/request"))
.to_return(status: 200, body: '[{"deploymentId": "123"}]', headers: {})
allow(Vra::Deployment).to receive(:new)
request.submit
end
it "should call the api to submit the deployment request" do
response = double("response", body: '[{"deploymentId": "123"}]', success?: true)
allow(client)
.to receive(:http_post)
.with(
"/catalog/api/items/#{catalog_id}/request",
{
deploymentName: "test deployment",
projectId: "pro-123",
version: "1",
inputs: {
count: 1,
image: "Centos Image",
flavor: "Small",
},
}.to_json
)
.and_return(response)
allow(Vra::Deployment).to receive(:new)
request.submit
end
it "should return a deployment object" do
response = double("response", body: '[{"deploymentId": "123"}]', success?: true)
allow(client).to receive(:http_post).and_return(response)
allow(client)
.to receive(:get_parsed)
.and_return(JSON.parse(File.read("spec/fixtures/resource/sample_deployment.json")))
dep = request.submit
expect(dep).to be_an_instance_of(Vra::Deployment)
expect(dep.id).to eq("123")
end
it "should handle the VRA Errors" do
allow(request).to receive(:send_request!).and_raise(Vra::Exception::HTTPError)
expect { request.submit }.to raise_error(Vra::Exception::RequestError)
end
it "should handle the generic errors" do
allow(request).to receive(:send_request!).and_raise(ArgumentError)
expect { request.submit }.to raise_error(ArgumentError)
end
end
end
| 31.25 | 116 | 0.63131 |
08fe40c1cb9c27b0c3276a17cee8ff6828e3ef4e
| 21,360 |
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/master/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
module Aws::AutoScalingPlans
# @api private
module ClientApi
include Seahorse::Model
ApplicationSource = Shapes::StructureShape.new(name: 'ApplicationSource')
ApplicationSources = Shapes::ListShape.new(name: 'ApplicationSources')
ConcurrentUpdateException = Shapes::StructureShape.new(name: 'ConcurrentUpdateException')
Cooldown = Shapes::IntegerShape.new(name: 'Cooldown')
CreateScalingPlanRequest = Shapes::StructureShape.new(name: 'CreateScalingPlanRequest')
CreateScalingPlanResponse = Shapes::StructureShape.new(name: 'CreateScalingPlanResponse')
CustomizedScalingMetricSpecification = Shapes::StructureShape.new(name: 'CustomizedScalingMetricSpecification')
DeleteScalingPlanRequest = Shapes::StructureShape.new(name: 'DeleteScalingPlanRequest')
DeleteScalingPlanResponse = Shapes::StructureShape.new(name: 'DeleteScalingPlanResponse')
DescribeScalingPlanResourcesRequest = Shapes::StructureShape.new(name: 'DescribeScalingPlanResourcesRequest')
DescribeScalingPlanResourcesResponse = Shapes::StructureShape.new(name: 'DescribeScalingPlanResourcesResponse')
DescribeScalingPlansRequest = Shapes::StructureShape.new(name: 'DescribeScalingPlansRequest')
DescribeScalingPlansResponse = Shapes::StructureShape.new(name: 'DescribeScalingPlansResponse')
DisableScaleIn = Shapes::BooleanShape.new(name: 'DisableScaleIn')
ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage')
InternalServiceException = Shapes::StructureShape.new(name: 'InternalServiceException')
InvalidNextTokenException = Shapes::StructureShape.new(name: 'InvalidNextTokenException')
LimitExceededException = Shapes::StructureShape.new(name: 'LimitExceededException')
MaxResults = Shapes::IntegerShape.new(name: 'MaxResults')
MetricDimension = Shapes::StructureShape.new(name: 'MetricDimension')
MetricDimensionName = Shapes::StringShape.new(name: 'MetricDimensionName')
MetricDimensionValue = Shapes::StringShape.new(name: 'MetricDimensionValue')
MetricDimensions = Shapes::ListShape.new(name: 'MetricDimensions')
MetricName = Shapes::StringShape.new(name: 'MetricName')
MetricNamespace = Shapes::StringShape.new(name: 'MetricNamespace')
MetricScale = Shapes::FloatShape.new(name: 'MetricScale')
MetricStatistic = Shapes::StringShape.new(name: 'MetricStatistic')
MetricUnit = Shapes::StringShape.new(name: 'MetricUnit')
NextToken = Shapes::StringShape.new(name: 'NextToken')
ObjectNotFoundException = Shapes::StructureShape.new(name: 'ObjectNotFoundException')
PolicyName = Shapes::StringShape.new(name: 'PolicyName')
PolicyType = Shapes::StringShape.new(name: 'PolicyType')
PredefinedScalingMetricSpecification = Shapes::StructureShape.new(name: 'PredefinedScalingMetricSpecification')
ResourceCapacity = Shapes::IntegerShape.new(name: 'ResourceCapacity')
ResourceIdMaxLen1600 = Shapes::StringShape.new(name: 'ResourceIdMaxLen1600')
ResourceLabel = Shapes::StringShape.new(name: 'ResourceLabel')
ScalableDimension = Shapes::StringShape.new(name: 'ScalableDimension')
ScalingInstruction = Shapes::StructureShape.new(name: 'ScalingInstruction')
ScalingInstructions = Shapes::ListShape.new(name: 'ScalingInstructions')
ScalingMetricType = Shapes::StringShape.new(name: 'ScalingMetricType')
ScalingPlan = Shapes::StructureShape.new(name: 'ScalingPlan')
ScalingPlanName = Shapes::StringShape.new(name: 'ScalingPlanName')
ScalingPlanNames = Shapes::ListShape.new(name: 'ScalingPlanNames')
ScalingPlanResource = Shapes::StructureShape.new(name: 'ScalingPlanResource')
ScalingPlanResources = Shapes::ListShape.new(name: 'ScalingPlanResources')
ScalingPlanStatusCode = Shapes::StringShape.new(name: 'ScalingPlanStatusCode')
ScalingPlanVersion = Shapes::IntegerShape.new(name: 'ScalingPlanVersion')
ScalingPlans = Shapes::ListShape.new(name: 'ScalingPlans')
ScalingPolicies = Shapes::ListShape.new(name: 'ScalingPolicies')
ScalingPolicy = Shapes::StructureShape.new(name: 'ScalingPolicy')
ScalingStatusCode = Shapes::StringShape.new(name: 'ScalingStatusCode')
ServiceNamespace = Shapes::StringShape.new(name: 'ServiceNamespace')
TagFilter = Shapes::StructureShape.new(name: 'TagFilter')
TagFilters = Shapes::ListShape.new(name: 'TagFilters')
TagValues = Shapes::ListShape.new(name: 'TagValues')
TargetTrackingConfiguration = Shapes::StructureShape.new(name: 'TargetTrackingConfiguration')
TargetTrackingConfigurations = Shapes::ListShape.new(name: 'TargetTrackingConfigurations')
TimestampType = Shapes::TimestampShape.new(name: 'TimestampType')
UpdateScalingPlanRequest = Shapes::StructureShape.new(name: 'UpdateScalingPlanRequest')
UpdateScalingPlanResponse = Shapes::StructureShape.new(name: 'UpdateScalingPlanResponse')
ValidationException = Shapes::StructureShape.new(name: 'ValidationException')
XmlString = Shapes::StringShape.new(name: 'XmlString')
XmlStringMaxLen128 = Shapes::StringShape.new(name: 'XmlStringMaxLen128')
XmlStringMaxLen256 = Shapes::StringShape.new(name: 'XmlStringMaxLen256')
ApplicationSource.add_member(:cloud_formation_stack_arn, Shapes::ShapeRef.new(shape: XmlString, location_name: "CloudFormationStackARN"))
ApplicationSource.add_member(:tag_filters, Shapes::ShapeRef.new(shape: TagFilters, location_name: "TagFilters"))
ApplicationSource.struct_class = Types::ApplicationSource
ApplicationSources.member = Shapes::ShapeRef.new(shape: ApplicationSource)
CreateScalingPlanRequest.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
CreateScalingPlanRequest.add_member(:application_source, Shapes::ShapeRef.new(shape: ApplicationSource, required: true, location_name: "ApplicationSource"))
CreateScalingPlanRequest.add_member(:scaling_instructions, Shapes::ShapeRef.new(shape: ScalingInstructions, required: true, location_name: "ScalingInstructions"))
CreateScalingPlanRequest.struct_class = Types::CreateScalingPlanRequest
CreateScalingPlanResponse.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
CreateScalingPlanResponse.struct_class = Types::CreateScalingPlanResponse
CustomizedScalingMetricSpecification.add_member(:metric_name, Shapes::ShapeRef.new(shape: MetricName, required: true, location_name: "MetricName"))
CustomizedScalingMetricSpecification.add_member(:namespace, Shapes::ShapeRef.new(shape: MetricNamespace, required: true, location_name: "Namespace"))
CustomizedScalingMetricSpecification.add_member(:dimensions, Shapes::ShapeRef.new(shape: MetricDimensions, location_name: "Dimensions"))
CustomizedScalingMetricSpecification.add_member(:statistic, Shapes::ShapeRef.new(shape: MetricStatistic, required: true, location_name: "Statistic"))
CustomizedScalingMetricSpecification.add_member(:unit, Shapes::ShapeRef.new(shape: MetricUnit, location_name: "Unit"))
CustomizedScalingMetricSpecification.struct_class = Types::CustomizedScalingMetricSpecification
DeleteScalingPlanRequest.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
DeleteScalingPlanRequest.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
DeleteScalingPlanRequest.struct_class = Types::DeleteScalingPlanRequest
DeleteScalingPlanResponse.struct_class = Types::DeleteScalingPlanResponse
DescribeScalingPlanResourcesRequest.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
DescribeScalingPlanResourcesRequest.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
DescribeScalingPlanResourcesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
DescribeScalingPlanResourcesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
DescribeScalingPlanResourcesRequest.struct_class = Types::DescribeScalingPlanResourcesRequest
DescribeScalingPlanResourcesResponse.add_member(:scaling_plan_resources, Shapes::ShapeRef.new(shape: ScalingPlanResources, location_name: "ScalingPlanResources"))
DescribeScalingPlanResourcesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
DescribeScalingPlanResourcesResponse.struct_class = Types::DescribeScalingPlanResourcesResponse
DescribeScalingPlansRequest.add_member(:scaling_plan_names, Shapes::ShapeRef.new(shape: ScalingPlanNames, location_name: "ScalingPlanNames"))
DescribeScalingPlansRequest.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, location_name: "ScalingPlanVersion"))
DescribeScalingPlansRequest.add_member(:application_sources, Shapes::ShapeRef.new(shape: ApplicationSources, location_name: "ApplicationSources"))
DescribeScalingPlansRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
DescribeScalingPlansRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
DescribeScalingPlansRequest.struct_class = Types::DescribeScalingPlansRequest
DescribeScalingPlansResponse.add_member(:scaling_plans, Shapes::ShapeRef.new(shape: ScalingPlans, location_name: "ScalingPlans"))
DescribeScalingPlansResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken"))
DescribeScalingPlansResponse.struct_class = Types::DescribeScalingPlansResponse
MetricDimension.add_member(:name, Shapes::ShapeRef.new(shape: MetricDimensionName, required: true, location_name: "Name"))
MetricDimension.add_member(:value, Shapes::ShapeRef.new(shape: MetricDimensionValue, required: true, location_name: "Value"))
MetricDimension.struct_class = Types::MetricDimension
MetricDimensions.member = Shapes::ShapeRef.new(shape: MetricDimension)
PredefinedScalingMetricSpecification.add_member(:predefined_scaling_metric_type, Shapes::ShapeRef.new(shape: ScalingMetricType, required: true, location_name: "PredefinedScalingMetricType"))
PredefinedScalingMetricSpecification.add_member(:resource_label, Shapes::ShapeRef.new(shape: ResourceLabel, location_name: "ResourceLabel"))
PredefinedScalingMetricSpecification.struct_class = Types::PredefinedScalingMetricSpecification
ScalingInstruction.add_member(:service_namespace, Shapes::ShapeRef.new(shape: ServiceNamespace, required: true, location_name: "ServiceNamespace"))
ScalingInstruction.add_member(:resource_id, Shapes::ShapeRef.new(shape: ResourceIdMaxLen1600, required: true, location_name: "ResourceId"))
ScalingInstruction.add_member(:scalable_dimension, Shapes::ShapeRef.new(shape: ScalableDimension, required: true, location_name: "ScalableDimension"))
ScalingInstruction.add_member(:min_capacity, Shapes::ShapeRef.new(shape: ResourceCapacity, required: true, location_name: "MinCapacity"))
ScalingInstruction.add_member(:max_capacity, Shapes::ShapeRef.new(shape: ResourceCapacity, required: true, location_name: "MaxCapacity"))
ScalingInstruction.add_member(:target_tracking_configurations, Shapes::ShapeRef.new(shape: TargetTrackingConfigurations, required: true, location_name: "TargetTrackingConfigurations"))
ScalingInstruction.struct_class = Types::ScalingInstruction
ScalingInstructions.member = Shapes::ShapeRef.new(shape: ScalingInstruction)
ScalingPlan.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
ScalingPlan.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
ScalingPlan.add_member(:application_source, Shapes::ShapeRef.new(shape: ApplicationSource, required: true, location_name: "ApplicationSource"))
ScalingPlan.add_member(:scaling_instructions, Shapes::ShapeRef.new(shape: ScalingInstructions, required: true, location_name: "ScalingInstructions"))
ScalingPlan.add_member(:status_code, Shapes::ShapeRef.new(shape: ScalingPlanStatusCode, required: true, location_name: "StatusCode"))
ScalingPlan.add_member(:status_message, Shapes::ShapeRef.new(shape: XmlString, location_name: "StatusMessage"))
ScalingPlan.add_member(:status_start_time, Shapes::ShapeRef.new(shape: TimestampType, location_name: "StatusStartTime"))
ScalingPlan.add_member(:creation_time, Shapes::ShapeRef.new(shape: TimestampType, location_name: "CreationTime"))
ScalingPlan.struct_class = Types::ScalingPlan
ScalingPlanNames.member = Shapes::ShapeRef.new(shape: ScalingPlanName)
ScalingPlanResource.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
ScalingPlanResource.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
ScalingPlanResource.add_member(:service_namespace, Shapes::ShapeRef.new(shape: ServiceNamespace, required: true, location_name: "ServiceNamespace"))
ScalingPlanResource.add_member(:resource_id, Shapes::ShapeRef.new(shape: ResourceIdMaxLen1600, required: true, location_name: "ResourceId"))
ScalingPlanResource.add_member(:scalable_dimension, Shapes::ShapeRef.new(shape: ScalableDimension, required: true, location_name: "ScalableDimension"))
ScalingPlanResource.add_member(:scaling_policies, Shapes::ShapeRef.new(shape: ScalingPolicies, location_name: "ScalingPolicies"))
ScalingPlanResource.add_member(:scaling_status_code, Shapes::ShapeRef.new(shape: ScalingStatusCode, required: true, location_name: "ScalingStatusCode"))
ScalingPlanResource.add_member(:scaling_status_message, Shapes::ShapeRef.new(shape: XmlString, location_name: "ScalingStatusMessage"))
ScalingPlanResource.struct_class = Types::ScalingPlanResource
ScalingPlanResources.member = Shapes::ShapeRef.new(shape: ScalingPlanResource)
ScalingPlans.member = Shapes::ShapeRef.new(shape: ScalingPlan)
ScalingPolicies.member = Shapes::ShapeRef.new(shape: ScalingPolicy)
ScalingPolicy.add_member(:policy_name, Shapes::ShapeRef.new(shape: PolicyName, required: true, location_name: "PolicyName"))
ScalingPolicy.add_member(:policy_type, Shapes::ShapeRef.new(shape: PolicyType, required: true, location_name: "PolicyType"))
ScalingPolicy.add_member(:target_tracking_configuration, Shapes::ShapeRef.new(shape: TargetTrackingConfiguration, location_name: "TargetTrackingConfiguration"))
ScalingPolicy.struct_class = Types::ScalingPolicy
TagFilter.add_member(:key, Shapes::ShapeRef.new(shape: XmlStringMaxLen128, location_name: "Key"))
TagFilter.add_member(:values, Shapes::ShapeRef.new(shape: TagValues, location_name: "Values"))
TagFilter.struct_class = Types::TagFilter
TagFilters.member = Shapes::ShapeRef.new(shape: TagFilter)
TagValues.member = Shapes::ShapeRef.new(shape: XmlStringMaxLen256)
TargetTrackingConfiguration.add_member(:predefined_scaling_metric_specification, Shapes::ShapeRef.new(shape: PredefinedScalingMetricSpecification, location_name: "PredefinedScalingMetricSpecification"))
TargetTrackingConfiguration.add_member(:customized_scaling_metric_specification, Shapes::ShapeRef.new(shape: CustomizedScalingMetricSpecification, location_name: "CustomizedScalingMetricSpecification"))
TargetTrackingConfiguration.add_member(:target_value, Shapes::ShapeRef.new(shape: MetricScale, required: true, location_name: "TargetValue"))
TargetTrackingConfiguration.add_member(:disable_scale_in, Shapes::ShapeRef.new(shape: DisableScaleIn, location_name: "DisableScaleIn"))
TargetTrackingConfiguration.add_member(:scale_out_cooldown, Shapes::ShapeRef.new(shape: Cooldown, location_name: "ScaleOutCooldown"))
TargetTrackingConfiguration.add_member(:scale_in_cooldown, Shapes::ShapeRef.new(shape: Cooldown, location_name: "ScaleInCooldown"))
TargetTrackingConfiguration.add_member(:estimated_instance_warmup, Shapes::ShapeRef.new(shape: Cooldown, location_name: "EstimatedInstanceWarmup"))
TargetTrackingConfiguration.struct_class = Types::TargetTrackingConfiguration
TargetTrackingConfigurations.member = Shapes::ShapeRef.new(shape: TargetTrackingConfiguration)
UpdateScalingPlanRequest.add_member(:application_source, Shapes::ShapeRef.new(shape: ApplicationSource, location_name: "ApplicationSource"))
UpdateScalingPlanRequest.add_member(:scaling_plan_name, Shapes::ShapeRef.new(shape: ScalingPlanName, required: true, location_name: "ScalingPlanName"))
UpdateScalingPlanRequest.add_member(:scaling_instructions, Shapes::ShapeRef.new(shape: ScalingInstructions, location_name: "ScalingInstructions"))
UpdateScalingPlanRequest.add_member(:scaling_plan_version, Shapes::ShapeRef.new(shape: ScalingPlanVersion, required: true, location_name: "ScalingPlanVersion"))
UpdateScalingPlanRequest.struct_class = Types::UpdateScalingPlanRequest
UpdateScalingPlanResponse.struct_class = Types::UpdateScalingPlanResponse
# @api private
API = Seahorse::Model::Api.new.tap do |api|
api.version = "2018-01-06"
api.metadata = {
"endpointPrefix" => "autoscaling",
"jsonVersion" => "1.1",
"protocol" => "json",
"serviceFullName" => "AWS Auto Scaling Plans",
"signatureVersion" => "v4",
"signingName" => "autoscaling-plans",
"targetPrefix" => "AnyScaleScalingPlannerFrontendService",
}
api.add_operation(:create_scaling_plan, Seahorse::Model::Operation.new.tap do |o|
o.name = "CreateScalingPlan"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: CreateScalingPlanRequest)
o.output = Shapes::ShapeRef.new(shape: CreateScalingPlanResponse)
o.errors << Shapes::ShapeRef.new(shape: ValidationException)
o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
o.errors << Shapes::ShapeRef.new(shape: ConcurrentUpdateException)
o.errors << Shapes::ShapeRef.new(shape: InternalServiceException)
end)
api.add_operation(:delete_scaling_plan, Seahorse::Model::Operation.new.tap do |o|
o.name = "DeleteScalingPlan"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DeleteScalingPlanRequest)
o.output = Shapes::ShapeRef.new(shape: DeleteScalingPlanResponse)
o.errors << Shapes::ShapeRef.new(shape: ValidationException)
o.errors << Shapes::ShapeRef.new(shape: ObjectNotFoundException)
o.errors << Shapes::ShapeRef.new(shape: ConcurrentUpdateException)
o.errors << Shapes::ShapeRef.new(shape: InternalServiceException)
end)
api.add_operation(:describe_scaling_plan_resources, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeScalingPlanResources"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeScalingPlanResourcesRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeScalingPlanResourcesResponse)
o.errors << Shapes::ShapeRef.new(shape: ValidationException)
o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException)
o.errors << Shapes::ShapeRef.new(shape: ConcurrentUpdateException)
o.errors << Shapes::ShapeRef.new(shape: InternalServiceException)
end)
api.add_operation(:describe_scaling_plans, Seahorse::Model::Operation.new.tap do |o|
o.name = "DescribeScalingPlans"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: DescribeScalingPlansRequest)
o.output = Shapes::ShapeRef.new(shape: DescribeScalingPlansResponse)
o.errors << Shapes::ShapeRef.new(shape: ValidationException)
o.errors << Shapes::ShapeRef.new(shape: InvalidNextTokenException)
o.errors << Shapes::ShapeRef.new(shape: ConcurrentUpdateException)
o.errors << Shapes::ShapeRef.new(shape: InternalServiceException)
end)
api.add_operation(:update_scaling_plan, Seahorse::Model::Operation.new.tap do |o|
o.name = "UpdateScalingPlan"
o.http_method = "POST"
o.http_request_uri = "/"
o.input = Shapes::ShapeRef.new(shape: UpdateScalingPlanRequest)
o.output = Shapes::ShapeRef.new(shape: UpdateScalingPlanResponse)
o.errors << Shapes::ShapeRef.new(shape: ValidationException)
o.errors << Shapes::ShapeRef.new(shape: ConcurrentUpdateException)
o.errors << Shapes::ShapeRef.new(shape: InternalServiceException)
o.errors << Shapes::ShapeRef.new(shape: ObjectNotFoundException)
end)
end
end
end
| 74.685315 | 206 | 0.78661 |
037f9a8446d7f8686ac309b7942416c5edd495a9
| 1,001 |
#
# Author:: John Keiser (<[email protected]>)
# Copyright:: Copyright (c) 2012 Opscode, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "chef/chef_fs/file_system/repository/file_system_entry"
class Chef
module ChefFS
module FileSystem
module Repository
class FileSystemRootDir < FileSystemEntry
def initialize(file_path)
super("", nil, file_path)
end
end
end
end
end
end
| 29.441176 | 74 | 0.713287 |
f851680183d8fe240ba511437b85b23c4be301e6
| 725 |
$LOAD_PATH.unshift(File.expand_path('../lib/redi-limit', __FILE__))
require 'version'
Gem::Specification.new do |s|
s.name = "redi-limit"
s.version = RediLimit::VERSION
s.authors = ['Jay Rapson']
s.email = ['[email protected]']
s.summary = 'Rack based middleware rate limiting using lua scripts in redis'
s.homepage = 'https://github.com/jayrapson/redi-limit'
s.files = Dir['lib/**/*.{rb,lua}']
s.require_paths = ['lib']
s.add_development_dependency "rake", "~> 13.0"
s.add_development_dependency "rspec", "~> 3.5"
s.add_dependency 'contracts', '~> 0.16.0'
s.add_dependency 'rack', '~> 2.0'
s.add_dependency 'redis', '~> 4.0'
s.licenses = ['MIT']
end
| 30.208333 | 84 | 0.623448 |
625e158ac6e64187d45b259c755c13cd7e51f1e4
| 749 |
require_relative "boot"
require "rails/all"
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
module TravelPhotos
class Application < Rails::Application
# Initialize configuration defaults for originally generated Rails version.
config.load_defaults 6.1
    config.public_file_server.enabled = true # serve_static_assets was removed in Rails 5; this is the current equivalent
# Configuration for the application, engines, and railties goes here.
#
# These settings can be overridden in specific environments using the files
# in config/environments, which are processed later.
#
# config.time_zone = "Central Time (US & Canada)"
# config.eager_load_paths << Rails.root.join("extras")
end
end
| 29.96 | 79 | 0.738318 |
083465a46054d59ed4fdac0d7e3689f2ffe64354
| 7,635 |
=begin
#Ory Kratos API
#Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests.
The version of the OpenAPI document: v0.8.0-alpha.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
OpenAPI Generator version: 5.2.1
=end
require 'date'
require 'time'
module OryKratosClient
class SelfServiceLogoutUrl
# LogoutToken can be used to perform logout using AJAX.
attr_accessor :logout_token
# LogoutURL can be opened in a browser to sign the user out. format: uri
attr_accessor :logout_url
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'logout_token' => :'logout_token',
:'logout_url' => :'logout_url'
}
end
# Returns all the JSON keys this model knows about
def self.acceptable_attributes
attribute_map.values
end
# Attribute type mapping.
def self.openapi_types
{
:'logout_token' => :'String',
:'logout_url' => :'String'
}
end
# List of attributes with nullable: true
def self.openapi_nullable
Set.new([
])
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
if (!attributes.is_a?(Hash))
fail ArgumentError, "The input argument (attributes) must be a hash in `OryKratosClient::SelfServiceLogoutUrl` initialize method"
end
# check to see if the attribute exists and convert string to symbol for hash key
attributes = attributes.each_with_object({}) { |(k, v), h|
if (!self.class.attribute_map.key?(k.to_sym))
fail ArgumentError, "`#{k}` is not a valid attribute in `OryKratosClient::SelfServiceLogoutUrl`. Please check the name to make sure it's valid. List of attributes: " + self.class.attribute_map.keys.inspect
end
h[k.to_sym] = v
}
if attributes.key?(:'logout_token')
self.logout_token = attributes[:'logout_token']
end
if attributes.key?(:'logout_url')
self.logout_url = attributes[:'logout_url']
end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
if @logout_token.nil?
invalid_properties.push('invalid value for "logout_token", logout_token cannot be nil.')
end
if @logout_url.nil?
invalid_properties.push('invalid value for "logout_url", logout_url cannot be nil.')
end
invalid_properties
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
return false if @logout_token.nil?
return false if @logout_url.nil?
true
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
logout_token == o.logout_token &&
logout_url == o.logout_url
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Integer] Hash code
def hash
[logout_token, logout_url].hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def self.build_from_hash(attributes)
new.build_from_hash(attributes)
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.openapi_types.each_pair do |key, type|
if attributes[self.class.attribute_map[key]].nil? && self.class.openapi_nullable.include?(key)
self.send("#{key}=", nil)
elsif type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
end
end
self
end
# Deserializes the data based on type
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :Time
Time.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :Boolean
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
# models (e.g. Pet) or oneOf
klass = OryKratosClient.const_get(type)
klass.respond_to?(:openapi_one_of) ? klass.build(value) : klass.build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
if value.nil?
is_nullable = self.class.openapi_nullable.include?(attr)
next if !is_nullable || (is_nullable && !instance_variable_defined?(:"@#{attr}"))
end
hash[param] = _to_hash(value)
end
hash
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 31.8125 | 429 | 0.641257 |
ace9ba39166379219475c6a31cd424cf247a893b
| 1,985 |
class Patient < ApplicationRecord
belongs_to :user
has_many :prescriptions, dependent: :destroy
has_many :medications, through: :prescriptions
validates :first_name, :last_name, presence: true
validates :birthdate, format: {
with: /\d{4}-\d{2}-\d{2}/,
message: "must be in format YYYY-MM-DD"
}
validates :first_name, uniqueness: {
scope: [:last_name, :birthdate],
message: ", last name, and birthdate combination must not already exist in patient database"
}
scope :provider_scope, -> (id){where("user_id = ?", id)}
def age
now = Time.now.utc.to_date
dob = self.birthdate
now.year - dob.year - ((now.month > dob.month || (now.month == dob.month && now.day >= dob.day)) ? 0 : 1)
end
def display_name
self.first_name + ' ' + self.last_name
end
# returns array of all pairings of currently prescribed medication ids to be compared against Interaction pairings
def prescription_pairs
seconds = self.medications.ids
self.medications.ids.collect do |first|
seconds.shift
seconds.collect { |second| [first,second] }
end
.flatten(1)
end
# returns array of Interactions where the pair of medications in the Interaction match a pair in the patient's prescription_pairs
def interaction_matches
pairs = self.prescription_pairs
Interaction.all.select do |i|
pairs.include?([i.medication_1_id, i.medication_2_id]) || pairs.include?([i.medication_2_id, i.medication_1_id])
end
end
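  # Illustrative sketch (hypothetical ids) of how the two methods above work
  # together: with medication ids [1, 2, 3], prescription_pairs yields
  # [[1, 2], [1, 3], [2, 3]], and interaction_matches keeps only those
  # Interaction records whose medication_1_id/medication_2_id pair appears in
  # that list, in either order.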
end
# removed "variable sandwich" for "pairs" and shorthanded the nested loop, old version below. Was also able to skip the patient_prescriptions_medication_ids method entirely, since it's simply self.medications.ids
# def patient_prescription_pairs
# pairs = []
# seconds = patient_prescriptions_medication_ids
# patient_prescriptions_medication_ids.each do |first|
# seconds.shift
# seconds.each do |second|
# pairs.push([first,second])
# end
# end
# pairs
# end
| 32.016129 | 212 | 0.708312 |
9160911f3a1b5d5b5cfa2fd64b53019f69e1e659
| 2,448 |
require 'spec_helper'
provider_class = Puppet::Type.type(:snapshot).provider(:v1)
describe provider_class do
context 'snapshot operations' do
before(:all) do
VCR.use_cassette('snapshot_prepare') do
@datacenter_name = 'puppet_module_test04cacda602e04de3b7c41bd099c46550'
@volume_name = 'puppet_module_test04cacda602e04de3b7c41bd099c46553'
create_datacenter(@datacenter_name)
create_volume(@datacenter_name, @volume_name)
@snapshot_name = 'puppet_module_test04cacda602e04de3b7c41bd099c46558'
@resource = Puppet::Type.type(:snapshot).new(
name: @snapshot_name,
description: 'Puppet Module test snapshot',
volume: @volume_name,
datacenter: @datacenter_name,
)
@provider = provider_class.new(@resource)
end
end
after(:all) do
VCR.use_cassette('snapshot_cleanup') do
delete_datacenter(@datacenter_name)
end
end
it 'is an instance of the ProviderV1' do
expect(@provider).to be_an_instance_of Puppet::Type::Snapshot::ProviderV1
expect(@provider.name).to eq(@snapshot_name)
end
it 'creates snapshot' do
VCR.use_cassette('snapshot_create') do
expect(@provider.create).to be_truthy
expect(@provider.exists?).to be true
expect(@provider.name).to eq(@snapshot_name)
end
end
it 'lists snapshot instances' do
VCR.use_cassette('snapshot_list') do
instances = provider_class.instances
expect(instances.length).to be > 0
expect(instances[0]).to be_an_instance_of Puppet::Type::Snapshot::ProviderV1
end
end
it 'updates snapshot' do
VCR.use_cassette('snapshot_update') do
new_desc = 'Puppet Module test snapshot - RENAME'
@provider.description = new_desc
@provider.flush
updated_instance = nil
provider_class.instances.each do |instance|
updated_instance = instance if instance.name == @snapshot_name
end
expect(updated_instance.description).to eq(new_desc)
end
end
it 'restores snapshot' do
VCR.use_cassette('snapshot_restore') do
expect(@provider.restore = true).to be_truthy
end
end
it 'deletes snapshot' do
VCR.use_cassette('snapshot_delete') do
expect(@provider.destroy).to be_truthy
expect(@provider.exists?).to be false
end
end
end
end
| 30.6 | 84 | 0.669526 |
4a79ee8792b6ffcf82bd4f587e6d89fd604f4e8d
| 334 |
trace_point = TracePoint.new(:call, :return, :line, :c_call, :c_return, :b_call, :b_return) do |trace|
puts [trace.path, trace.lineno].join(":")
end
trace_point.enable
values = {foo: 10}
def shell_escape(x)
x
end
values.map{|key, value| [
key.to_s.upcase,
shell_escape(value) # TracePoint is never triggered for this line.
]}
| 19.647059 | 102 | 0.706587 |
262b4a2853f4cc166d9e397525ffc0d2cae133b3
| 22,280 |
require 'tempfile'
module Git
class GitExecuteError < StandardError
end
class Lib
def initialize(base = nil, logger = nil)
@git_dir = nil
@git_index_file = nil
@git_work_dir = nil
@path = nil
if base.is_a?(Git::Base)
@git_dir = base.repo.path
@git_index_file = base.index.path if base.index
@git_work_dir = base.dir.path if base.dir
elsif base.is_a?(Hash)
@git_dir = base[:repository]
@git_index_file = base[:index]
@git_work_dir = base[:working_directory]
end
@logger = logger
end
# creates or reinitializes the repository
#
# options:
# :bare
# :working_directory
#
def init(opts={})
arr_opts = []
arr_opts << '--bare' if opts[:bare]
command('init', arr_opts, false)
end
# tries to clone the given repo
#
# returns {:repository} (if bare)
# {:working_directory} otherwise
#
# accepts options:
# :remote:: name of remote (rather than 'origin')
# :bare:: no working directory
# :recursive:: after the clone is created, initialize all submodules within, using their default settings.
# :depth:: the number of commits back to pull
# :branch:: name of branch
#
# TODO - make this work with SSH password or auth_key
#
def clone(repository, name, opts = {})
@path = opts[:path] || '.'
clone_dir = opts[:path] ? File.join(@path, name) : name
arr_opts = []
arr_opts << "--bare" if opts[:bare]
arr_opts << "--recursive" if opts[:recursive]
arr_opts << "-o" << opts[:remote] if opts[:remote]
arr_opts << "--depth" << opts[:depth].to_i if opts[:depth] && opts[:depth].to_i > 0
arr_opts << "--config" << opts[:config] if opts[:config]
arr_opts << "--branch" << opts[:branch] if opts[:branch]
arr_opts << '--'
arr_opts << repository
arr_opts << clone_dir
command('clone', arr_opts)
opts[:bare] ? {:repository => clone_dir} : {:working_directory => clone_dir}
end
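    # Example usage (hypothetical repository URL and paths), following the
    # option list documented above:
    #
    #   lib.clone('git://example.com/project.git', 'project',
    #             :path => '/tmp', :depth => 1, :branch => 'master')
    #   # => {:working_directory => '/tmp/project'}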
## READ COMMANDS ##
def log_commits(opts={})
arr_opts = log_common_options(opts)
arr_opts << '--pretty=oneline'
arr_opts += log_path_options(opts)
command_lines('log', arr_opts, true).map { |l| l.split.first }
end
def full_log_commits(opts={})
arr_opts = log_common_options(opts)
arr_opts << '--pretty=raw'
arr_opts << "--skip=#{opts[:skip]}" if opts[:skip]
arr_opts += log_path_options(opts)
full_log = command_lines('log', arr_opts, true)
process_commit_log_data(full_log)
end
def revparse(string)
return string if string =~ /[A-Fa-f0-9]{40}/ # passing in a sha - just no-op it
rev = ['head', 'remotes', 'tags'].map do |d|
File.join(@git_dir, 'refs', d, string)
end.find do |path|
File.file?(path)
end
return File.read(rev).chomp if rev
command('rev-parse', string)
end
def namerev(string)
command('name-rev', string).split[1]
end
def object_type(sha)
command('cat-file', ['-t', sha])
end
def object_size(sha)
command('cat-file', ['-s', sha]).to_i
end
    # returns a hash of raw commit object data
def commit_data(sha)
sha = sha.to_s
cdata = command_lines('cat-file', ['commit', sha])
process_commit_data(cdata, sha, 0)
end
def process_commit_data(data, sha = nil, indent = 4)
hsh = {
'sha' => sha,
'message' => '',
'parent' => []
}
loop do
key, *value = data.shift.split
break if key.nil?
if key == 'parent'
hsh['parent'] << value.join(' ')
else
hsh[key] = value.join(' ')
end
end
hsh['message'] = data.collect {|line| line[indent..-1]}.join("\n") + "\n"
return hsh
end
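    # Sketch of the hash shape produced above (field values are illustrative):
    #
    #   {
    #     'sha'       => 'abc123...',
    #     'tree'      => 'def456...',
    #     'parent'    => ['789abc...'],
    #     'author'    => 'Jane Doe <[email protected]> 1194599584 -0800',
    #     'committer' => 'Jane Doe <[email protected]> 1194599584 -0800',
    #     'message'   => "commit message\n"
    #   }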
def process_commit_log_data(data)
in_message = false
hsh_array = []
hsh = nil
data.each do |line|
line = line.chomp
if line[0].nil?
in_message = !in_message
next
end
if in_message
hsh['message'] << "#{line[4..-1]}\n"
next
end
key, *value = line.split
value = value.join(' ')
case key
when 'commit'
hsh_array << hsh if hsh
hsh = {'sha' => value, 'message' => '', 'parent' => []}
when 'parent'
hsh['parent'] << value
else
hsh[key] = value
end
end
hsh_array << hsh if hsh
return hsh_array
end
def object_contents(sha, &block)
command('cat-file', ['-p', sha], &block)
end
def ls_tree(sha)
data = {'blob' => {}, 'tree' => {}}
command_lines('ls-tree', sha).each do |line|
(info, filenm) = line.split("\t")
(mode, type, sha) = info.split
data[type][filenm] = {:mode => mode, :sha => sha}
end
data
end
def mv(file1, file2)
command_lines('mv', ['--', file1, file2])
end
def full_tree(sha)
command_lines('ls-tree', ['-r', sha])
end
def tree_depth(sha)
full_tree(sha).size
end
def change_head_branch(branch_name)
command('symbolic-ref', ['HEAD', "refs/heads/#{branch_name}"])
end
def branches_all
arr = []
command_lines('branch', '-a').each do |b|
current = (b[0, 2] == '* ')
arr << [b.gsub('* ', '').strip, current]
end
arr
end
def list_files(ref_dir)
dir = File.join(@git_dir, 'refs', ref_dir)
files = []
Dir.chdir(dir) { files = Dir.glob('**/*').select { |f| File.file?(f) } } rescue nil
files
end
def branch_current
branches_all.select { |b| b[1] }.first[0] rescue nil
end
# returns hash
# [tree-ish] = [[line_no, match], [line_no, match2]]
# [tree-ish] = [[line_no, match], [line_no, match2]]
def grep(string, opts = {})
opts[:object] ||= 'HEAD'
grep_opts = ['-n']
grep_opts << '-i' if opts[:ignore_case]
grep_opts << '-v' if opts[:invert_match]
grep_opts << '-e'
grep_opts << string
grep_opts << opts[:object] if opts[:object].is_a?(String)
grep_opts << '--' << opts[:path_limiter] if opts[:path_limiter].is_a? String
hsh = {}
command_lines('grep', grep_opts).each do |line|
if m = /(.*)\:(\d+)\:(.*)/.match(line)
hsh[m[1]] ||= []
hsh[m[1]] << [m[2].to_i, m[3]]
end
end
hsh
end
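    # Example usage (hypothetical search string and paths), matching the
    # return shape documented above:
    #
    #   lib.grep('TODO', :object => 'HEAD', :ignore_case => true)
    #   # => {'HEAD:lib/foo.rb' => [[12, '# TODO: fix this']]}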
def diff_full(obj1 = 'HEAD', obj2 = nil, opts = {})
diff_opts = ['-p']
diff_opts << obj1
diff_opts << obj2 if obj2.is_a?(String)
diff_opts << '--' << opts[:path_limiter] if opts[:path_limiter].is_a? String
command('diff', diff_opts)
end
def diff_stats(obj1 = 'HEAD', obj2 = nil, opts = {})
diff_opts = ['--numstat']
diff_opts << obj1
diff_opts << obj2 if obj2.is_a?(String)
diff_opts << '--' << opts[:path_limiter] if opts[:path_limiter].is_a? String
hsh = {:total => {:insertions => 0, :deletions => 0, :lines => 0, :files => 0}, :files => {}}
command_lines('diff', diff_opts).each do |file|
(insertions, deletions, filename) = file.split("\t")
hsh[:total][:insertions] += insertions.to_i
hsh[:total][:deletions] += deletions.to_i
hsh[:total][:lines] = (hsh[:total][:deletions] + hsh[:total][:insertions])
hsh[:total][:files] += 1
hsh[:files][filename] = {:insertions => insertions.to_i, :deletions => deletions.to_i}
end
hsh
end
# compares the index and the working directory
def diff_files
diff_as_hash('diff-files')
end
# compares the index and the repository
def diff_index(treeish)
diff_as_hash('diff-index', treeish)
end
def ls_files(location=nil)
hsh = {}
command_lines('ls-files', ['--stage', location]).each do |line|
(info, file) = line.split("\t")
(mode, sha, stage) = info.split
file = eval(file) if file =~ /^\".*\"$/ # This takes care of quoted strings returned from git
hsh[file] = {:path => file, :mode_index => mode, :sha_index => sha, :stage => stage}
end
hsh
end
def ignored_files
command_lines('ls-files', ['--others', '-i', '--exclude-standard'])
end
def config_remote(name)
hsh = {}
config_list.each do |key, value|
if /remote.#{name}/.match(key)
hsh[key.gsub("remote.#{name}.", '')] = value
end
end
hsh
end
def config_get(name)
do_get = lambda do |path|
command('config', ['--get', name])
end
if @git_dir
Dir.chdir(@git_dir, &do_get)
else
        do_get.call
end
end
def global_config_get(name)
command('config', ['--global', '--get', name], false)
end
def config_list
build_list = lambda do |path|
parse_config_list command_lines('config', ['--list'])
end
if @git_dir
Dir.chdir(@git_dir, &build_list)
else
build_list.call
end
end
def global_config_list
parse_config_list command_lines('config', ['--global', '--list'], false)
end
def parse_config_list(lines)
hsh = {}
lines.each do |line|
(key, *values) = line.split('=')
hsh[key] = values.join('=')
end
hsh
end
def parse_config(file)
parse_config_list command_lines('config', ['--list', '--file', file], false)
end
## WRITE COMMANDS ##
def config_set(name, value)
command('config', [name, value])
end
def global_config_set(name, value)
command('config', ['--global', name, value], false)
end
    # updates the repository index using the working directory content
#
# lib.add('path/to/file')
# lib.add(['path/to/file1','path/to/file2'])
# lib.add(:all => true)
#
# options:
# :all => true
# :force => true
#
# @param [String,Array] paths files paths to be added to the repository
# @param [Hash] options
def add(paths='.',options={})
arr_opts = []
arr_opts << '--all' if options[:all]
arr_opts << '--force' if options[:force]
arr_opts << '--'
arr_opts << paths
arr_opts.flatten!
command('add', arr_opts)
end
def remove(path = '.', opts = {})
arr_opts = ['-f'] # overrides the up-to-date check by default
arr_opts << ['-r'] if opts[:recursive]
arr_opts << '--'
if path.is_a?(Array)
arr_opts += path
else
arr_opts << path
end
command('rm', arr_opts)
end
def commit(message, opts = {})
arr_opts = []
arr_opts << "--message=#{message}" if message
arr_opts << '--amend' << '--no-edit' if opts[:amend]
arr_opts << '--all' if opts[:add_all] || opts[:all]
arr_opts << '--allow-empty' if opts[:allow_empty]
arr_opts << "--author=#{opts[:author]}" if opts[:author]
command('commit', arr_opts)
end
def reset(commit, opts = {})
arr_opts = []
arr_opts << '--hard' if opts[:hard]
arr_opts << commit if commit
command('reset', arr_opts)
end
def clean(opts = {})
arr_opts = []
arr_opts << '--force' if opts[:force]
arr_opts << '-d' if opts[:d]
arr_opts << '-x' if opts[:x]
command('clean', arr_opts)
end
def revert(commitish, opts = {})
# Forcing --no-edit as default since it's not an interactive session.
opts = {:no_edit => true}.merge(opts)
arr_opts = []
arr_opts << '--no-edit' if opts[:no_edit]
arr_opts << commitish
command('revert', arr_opts)
end
def apply(patch_file)
arr_opts = []
arr_opts << '--' << patch_file if patch_file
command('apply', arr_opts)
end
def apply_mail(patch_file)
arr_opts = []
arr_opts << '--' << patch_file if patch_file
command('am', arr_opts)
end
def stashes_all
arr = []
filename = File.join(@git_dir, 'logs/refs/stash')
if File.exist?(filename)
File.open(filename).each_with_index { |line, i|
m = line.match(/:(.*)$/)
arr << [i, m[1].strip]
}
end
arr
end
def stash_save(message)
output = command('stash save', ['--', message])
output =~ /HEAD is now at/
end
def stash_apply(id = nil)
if id
command('stash apply', [id])
else
command('stash apply')
end
end
def stash_clear
command('stash clear')
end
def stash_list
command('stash list')
end
def branch_new(branch)
command('branch', branch)
end
def branch_delete(branch)
command('branch', ['-D', branch])
end
def checkout(branch, opts = {})
arr_opts = []
arr_opts << '-f' if opts[:force]
arr_opts << '-b' << opts[:new_branch] if opts[:new_branch]
arr_opts << branch
command('checkout', arr_opts)
end
def checkout_file(version, file)
arr_opts = []
arr_opts << version
arr_opts << file
command('checkout', arr_opts)
end
def merge(branch, message = nil)
arr_opts = []
arr_opts << '-m' << message if message
arr_opts += [branch]
command('merge', arr_opts)
end
def unmerged
unmerged = []
command_lines('diff', ["--cached"]).each do |line|
unmerged << $1 if line =~ /^\* Unmerged path (.*)/
end
unmerged
end
def conflicts # :yields: file, your, their
self.unmerged.each do |f|
your = Tempfile.new("YOUR-#{File.basename(f)}").path
command('show', ":2:#{f}", true, "> #{escape your}")
their = Tempfile.new("THEIR-#{File.basename(f)}").path
command('show', ":3:#{f}", true, "> #{escape their}")
yield(f, your, their)
end
end
def remote_add(name, url, opts = {})
arr_opts = ['add']
arr_opts << '-f' if opts[:with_fetch] || opts[:fetch]
arr_opts << '-t' << opts[:track] if opts[:track]
arr_opts << '--'
arr_opts << name
arr_opts << url
command('remote', arr_opts)
end
def remote_remove(name)
command('remote', ['rm', name])
end
def remotes
command_lines('remote')
end
def tags
command_lines('tag')
end
def tag(name, *opts)
target = opts[0].instance_of?(String) ? opts[0] : nil
opts = opts.last.instance_of?(Hash) ? opts.last : {}
if (opts[:a] || opts[:annotate]) && !(opts[:m] || opts[:message])
raise "Can not create an [:a|:annotate] tag without the precense of [:m|:message]."
end
arr_opts = []
arr_opts << '-f' if opts[:force] || opts[:f]
arr_opts << '-a' if opts[:a] || opts[:annotate]
arr_opts << '-s' if opts[:s] || opts[:sign]
arr_opts << '-d' if opts[:d] || opts[:delete]
arr_opts << name
arr_opts << target if target
arr_opts << "-m #{opts[:m] || opts[:message]}" if opts[:m] || opts[:message]
command('tag', arr_opts)
end
def fetch(remote, opts)
arr_opts = [remote]
arr_opts << '--tags' if opts[:t] || opts[:tags]
command('fetch', arr_opts)
end
def push(remote, branch = 'master', opts = {})
# Small hack to keep backwards compatibility with the 'push(remote, branch, tags)' method signature.
opts = {:tags => opts} if [true, false].include?(opts)
arr_opts = []
arr_opts << '--force' if opts[:force] || opts[:f]
arr_opts << remote
command('push', arr_opts + [branch])
command('push', ['--tags'] + arr_opts) if opts[:tags]
end
def pull(remote='origin', branch='master')
command('pull', [remote, branch])
end
def tag_sha(tag_name)
head = File.join(@git_dir, 'refs', 'tags', tag_name)
return File.read(head).chomp if File.exist?(head)
command('show-ref', ['--tags', '-s', tag_name])
end
def repack
command('repack', ['-a', '-d'])
end
def gc
command('gc', ['--prune', '--aggressive', '--auto'])
end
# reads a tree into the current index file
def read_tree(treeish, opts = {})
arr_opts = []
arr_opts << "--prefix=#{opts[:prefix]}" if opts[:prefix]
arr_opts += [treeish]
command('read-tree', arr_opts)
end
def write_tree
command('write-tree')
end
def commit_tree(tree, opts = {})
opts[:message] ||= "commit tree #{tree}"
t = Tempfile.new('commit-message')
t.write(opts[:message])
t.close
arr_opts = []
arr_opts << tree
arr_opts << '-p' << opts[:parent] if opts[:parent]
arr_opts += [opts[:parents]].map { |p| ['-p', p] }.flatten if opts[:parents]
command('commit-tree', arr_opts, true, "< #{escape t.path}")
end
def update_ref(branch, commit)
command('update-ref', [branch, commit])
end
def checkout_index(opts = {})
arr_opts = []
arr_opts << "--prefix=#{opts[:prefix]}" if opts[:prefix]
arr_opts << "--force" if opts[:force]
arr_opts << "--all" if opts[:all]
arr_opts << '--' << opts[:path_limiter] if opts[:path_limiter].is_a? String
command('checkout-index', arr_opts)
end
# creates an archive file
#
# options
# :format (zip, tar)
# :prefix
# :remote
# :path
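# illustrative example (treeish, file path and prefix are placeholders):
#   lib.archive('master', '/tmp/project.zip', :format => 'zip', :prefix => 'project/')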
def archive(sha, file = nil, opts = {})
opts[:format] ||= 'zip'
if opts[:format] == 'tgz'
opts[:format] = 'tar'
opts[:add_gzip] = true
end
file ||= Tempfile.new('archive').path
arr_opts = []
arr_opts << "--format=#{opts[:format]}" if opts[:format]
arr_opts << "--prefix=#{opts[:prefix]}" if opts[:prefix]
arr_opts << "--remote=#{opts[:remote]}" if opts[:remote]
arr_opts << sha
arr_opts << '--' << opts[:path] if opts[:path]
command('archive', arr_opts, true, (opts[:add_gzip] ? '| gzip' : '') + " > #{escape file}")
return file
end
# returns the current version of git, as an Array of Integers (e.g. [2, 39, 2]).
def current_command_version
output = command('version', [], false)
version = output[/\d+\.\d+(\.\d+)+/]
version.split('.').collect {|i| i.to_i}
end
def required_command_version
[1, 6]
end
def meets_required_version?
(self.current_command_version <=> self.required_command_version) >= 0
end
private
def command_lines(cmd, opts = [], chdir = true, redirect = '')
command(cmd, opts, chdir).split("\n")
end
def command(cmd, opts = [], chdir = true, redirect = '', &block)
ENV['GIT_DIR'] = @git_dir
ENV['GIT_WORK_TREE'] = @git_work_dir
ENV['GIT_INDEX_FILE'] = @git_index_file
path = @git_work_dir || @git_dir || @path
opts = [opts].flatten.map {|s| escape(s) }.join(' ')
git_cmd = "git #{cmd} #{opts} #{redirect} 2>&1"
out = nil
if chdir && (Dir.getwd != path)
Dir.chdir(path) { out = run_command(git_cmd, &block) }
else
out = run_command(git_cmd, &block)
end
if @logger
@logger.info(git_cmd)
@logger.debug(out)
end
if $?.exitstatus > 0
if $?.exitstatus == 1 && out == ''
return ''
end
raise Git::GitExecuteError.new(git_cmd + ':' + out.to_s)
end
out
end
# Takes the diff command line output (as Array) and parses it into a Hash
#
# @param [String] diff_command the diff command to be used
# @param [Array] opts the diff options to be used
# @return [Hash] the diff as Hash
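# illustrative shape of the returned hash (values are placeholders):
#   { "lib/foo.rb" => { :mode_index => "100644", :mode_repo => "100644", :path => "lib/foo.rb",
#                       :sha_repo => "abc123...", :sha_index => "def456...", :type => "M" } }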
def diff_as_hash(diff_command, opts=[])
command_lines(diff_command, opts).inject({}) do |memo, line|
info, file = line.split("\t")
mode_src, mode_dest, sha_src, sha_dest, type = info.split
memo[file] = {
:mode_index => mode_dest,
:mode_repo => mode_src.to_s[1, 7],
:path => file,
:sha_repo => sha_src,
:sha_index => sha_dest,
:type => type
}
memo
end
end
# Returns an array holding the common options for the log commands
#
# @param [Hash] opts the given options
# @return [Array] the set of common options that the log command will use
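# illustrative example of the option-to-flag mapping (argument values are placeholders):
#   log_common_options(:count => 10, :author => 'scott', :between => ['v1.0', 'v1.1'])
#   #=> ["-10", "--no-color", "--author=scott", "v1.0..v1.1"]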
def log_common_options(opts)
arr_opts = []
arr_opts << "-#{opts[:count]}" if opts[:count]
arr_opts << "--no-color"
arr_opts << "--since=#{opts[:since]}" if opts[:since].is_a? String
arr_opts << "--until=#{opts[:until]}" if opts[:until].is_a? String
arr_opts << "--grep=#{opts[:grep]}" if opts[:grep].is_a? String
arr_opts << "--author=#{opts[:author]}" if opts[:author].is_a? String
arr_opts << "#{opts[:between][0].to_s}..#{opts[:between][1].to_s}" if (opts[:between] && opts[:between].size == 2)
arr_opts
end
# Returns an array holding path options for the log commands
#
# @param [Hash] opts the given options
# @return [Array] the set of path options that the log command will use
def log_path_options(opts)
arr_opts = []
arr_opts << opts[:object] if opts[:object].is_a? String
arr_opts << '--' << opts[:path_limiter] if opts[:path_limiter]
arr_opts
end
def run_command(git_cmd, &block)
if block_given?
IO.popen(git_cmd, &block)
else
`#{git_cmd}`.chomp
end
end
def escape(s)
"'#{s && s.to_s.gsub('\'','\'"\'"\'')}'"
end
end
end
| 26.555423 | 120 | 0.535682 |
38e1549e3110d0ad4437a2bed5b0422b5dce0cbe
| 677 |
require 'rails_helper'
RSpec.describe 'Create event', type: :feature do
before(:each) do
User.create(username: 'User')
visit '/session/new'
page.fill_in 'username', with: 'User'
click_button 'Sign In'
end
it 'should create a new event' do
visit root_path
click_on('Create an event')
expect(page).to have_current_path(new_event_path)
page.fill_in 'Title', with: 'Final'
page.fill_in 'Description', with: 'UEFA Champions League final game'
page.fill_in 'Date', with: '08/25/2020'
click_on('Create Event')
expect(page).to have_text('Congrats! A new event created!')
end
end
| 30.772727 | 81 | 0.695716 |
01494ef2b2d7e2d69c9267553b942eb4cde829b3
| 12,631 |
#
# Copyright:: Copyright (c) 2014 Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'pathname'
require 'fileutils'
require 'tmpdir'
require 'zlib'
require 'archive/tar/minitar'
require 'chef/cookbook/chefignore'
require 'chef-dk/service_exceptions'
require 'chef-dk/policyfile_lock'
require 'chef-dk/policyfile/storage_config'
module ChefDK
module PolicyfileServices
class ExportRepo
# Policy groups provide namespaces for policies so that a Chef Server can
# have multiple active iterations of a policy at once, but we don't need
# this when serving a single exported policy via Chef Zero, so hardcode
# it to a "well known" value:
POLICY_GROUP = 'local'.freeze
include Policyfile::StorageConfigDelegation
attr_reader :storage_config
attr_reader :root_dir
attr_reader :export_dir
def initialize(policyfile: nil, export_dir: nil, root_dir: nil, archive: false, force: false)
@root_dir = root_dir
@export_dir = File.expand_path(export_dir)
@archive = archive
@force_export = force
@policy_data = nil
@policyfile_lock = nil
policyfile_rel_path = policyfile || "Policyfile.rb"
policyfile_full_path = File.expand_path(policyfile_rel_path, root_dir)
@storage_config = Policyfile::StorageConfig.new.use_policyfile(policyfile_full_path)
@staging_dir = nil
end
def archive?
@archive
end
def policy_name
policyfile_lock.name
end
def run
assert_lockfile_exists!
assert_export_dir_clean!
validate_lockfile
write_updated_lockfile
export
end
def policy_data
@policy_data ||= FFI_Yajl::Parser.parse(IO.read(policyfile_lock_expanded_path))
rescue => error
raise PolicyfileExportRepoError.new("Error reading lockfile #{policyfile_lock_expanded_path}", error)
end
def policyfile_lock
@policyfile_lock || validate_lockfile
end
def archive_file_location
return nil unless archive?
filename = "#{policyfile_lock.name}-#{policyfile_lock.revision_id}.tgz"
File.join(export_dir, filename)
end
def export
with_staging_dir do
create_repo_structure
copy_cookbooks
create_policyfile_repo_item
create_policy_group_repo_item
copy_policyfile_lock
create_client_rb
create_readme_md
if archive?
create_archive
else
mv_staged_repo
end
end
rescue => error
msg = "Failed to export policy (in #{policyfile_filename}) to #{export_dir}"
raise PolicyfileExportRepoError.new(msg, error)
end
private
def with_staging_dir
p = Process.pid
t = Time.new.utc.strftime("%Y%m%d%H%M%S")
Dir.mktmpdir("chefdk-export-#{p}-#{t}") do |d|
begin
@staging_dir = d
yield
ensure
@staging_dir = nil
end
end
end
def create_archive
Zlib::GzipWriter.open(archive_file_location) do |gz_file|
Dir.chdir(staging_dir) do
Archive::Tar::Minitar.pack(".", gz_file)
end
end
end
def staging_dir
@staging_dir
end
def create_repo_structure
FileUtils.mkdir_p(export_dir)
FileUtils.mkdir_p(dot_chef_staging_dir)
FileUtils.mkdir_p(cookbook_artifacts_staging_dir)
FileUtils.mkdir_p(policies_staging_dir)
FileUtils.mkdir_p(policy_groups_staging_dir)
end
def copy_cookbooks
policyfile_lock.cookbook_locks.each do |name, lock|
copy_cookbook(lock)
end
end
def copy_cookbook(lock)
dirname = "#{lock.name}-#{lock.identifier}"
export_path = File.join(staging_dir, "cookbook_artifacts", dirname)
metadata_rb_path = File.join(export_path, "metadata.rb")
FileUtils.mkdir(export_path) unless File.directory?(export_path)
copy_unignored_cookbook_files(lock, export_path)
FileUtils.rm_f(metadata_rb_path)
metadata = lock.cookbook_version.metadata
metadata_json_path = File.join(export_path, "metadata.json")
File.open(metadata_json_path, "wb+") do |f|
f.print(FFI_Yajl::Encoder.encode(metadata.to_hash, pretty: true ))
end
end
def copy_unignored_cookbook_files(lock, export_path)
cookbook_files_to_copy(lock.cookbook_path).each do |rel_path|
full_source_path = File.join(lock.cookbook_path, rel_path)
full_dest_path = File.join(export_path, rel_path)
dest_dirname = File.dirname(full_dest_path)
FileUtils.mkdir_p(dest_dirname) unless File.directory?(dest_dirname)
FileUtils.cp(full_source_path, full_dest_path)
end
end
def cookbook_files_to_copy(cookbook_path)
cookbook = cookbook_loader_for(cookbook_path).cookbook_version
root = Pathname.new(cookbook.root_dir)
cookbook.all_files.map do |full_path|
Pathname.new(full_path).relative_path_from(root).to_s
end
end
def cookbook_loader_for(cookbook_path)
loader = Chef::Cookbook::CookbookVersionLoader.new(cookbook_path, chefignore_for(cookbook_path))
loader.load!
loader
end
def chefignore_for(cookbook_path)
Chef::Cookbook::Chefignore.new(File.join(cookbook_path, "chefignore"))
end
def create_policyfile_repo_item
File.open(policyfile_repo_item_path, "wb+") do |f|
f.print(FFI_Yajl::Encoder.encode(policyfile_lock.to_lock, pretty: true ))
end
end
def create_policy_group_repo_item
data = {
"policies" => {
policyfile_lock.name => {
"revision_id" => policyfile_lock.revision_id
}
}
}
File.open(policy_group_repo_item_path, "wb+") do |f|
f.print(FFI_Yajl::Encoder.encode(data, pretty: true ))
end
end
def copy_policyfile_lock
File.open(lockfile_staging_path, "wb+") do |f|
f.print(FFI_Yajl::Encoder.encode(policyfile_lock.to_lock, pretty: true ))
end
end
def create_client_rb
File.open(client_rb_staging_path, "wb+") do |f|
f.print( <<-CONFIG )
### Chef Client Configuration ###
# The settings in this file will configure chef to apply the exported policy in
# this directory. To use it, run:
#
# chef-client -z
#
policy_name '#{policy_name}'
policy_group 'local'
use_policyfile true
policy_document_native_api true
# In order to use this repo, you need a version of Chef Client and Chef Zero
# that supports policyfile "native mode" APIs:
current_version = Gem::Version.new(Chef::VERSION)
unless Gem::Requirement.new(">= 12.7").satisfied_by?(current_version)
puts("!" * 80)
puts(<<-MESSAGE)
This Chef Repo requires features introduced in Chef 12.7, but you are using
Chef \#{Chef::VERSION}. Please upgrade to Chef 12.7 or later.
MESSAGE
puts("!" * 80)
exit!(1)
end
CONFIG
end
end
def create_readme_md
File.open(readme_staging_path, "wb+") do |f|
f.print( <<-README )
# Exported Chef Repository for Policy '#{policy_name}'
Policy revision: #{policyfile_lock.revision_id}
This directory contains all the cookbooks and configuration necessary for Chef
to converge a system using this exported policy. To converge a system with the
exported policy, use a privileged account to run `chef-client -z` from the
directory containing the exported policy.
## Contents:
### Policyfile.lock.json
A copy of the exported policy, used by the `chef push-archive` command.
### .chef/config.rb
A configuration file for Chef Client. This file configures Chef Client to use
the correct `policy_name` and `policy_group` for this exported repository. Chef
Client will use this configuration automatically if you've set your working
directory properly.
### cookbook_artifacts/
All of the cookbooks required by the policy will be stored in this directory.
### policies/
A different copy of the exported policy, used by the `chef-client` command.
### policy_groups/
Policy groups are used by Chef Server to manage multiple revisions of the same
policy. However, exported policies contain only a single policy revision, so
this policy group name is hardcoded to "local" and should not be changed.
README
end
end
def mv_staged_repo
# If we got here, either these dirs are empty/don't exist or force is
# set to true.
FileUtils.rm_rf(cookbook_artifacts_dir)
FileUtils.rm_rf(policies_dir)
FileUtils.rm_rf(policy_groups_dir)
FileUtils.rm_rf(dot_chef_dir)
FileUtils.mv(cookbook_artifacts_staging_dir, export_dir)
FileUtils.mv(policies_staging_dir, export_dir)
FileUtils.mv(policy_groups_staging_dir, export_dir)
FileUtils.mv(lockfile_staging_path, export_dir)
FileUtils.mv(dot_chef_staging_dir, export_dir)
FileUtils.mv(readme_staging_path, export_dir)
end
def validate_lockfile
return @policyfile_lock if @policyfile_lock
@policyfile_lock = ChefDK::PolicyfileLock.new(storage_config).build_from_lock_data(policy_data)
# TODO: enumerate any cookbooks that have been updated
@policyfile_lock.validate_cookbooks!
@policyfile_lock
rescue PolicyfileExportRepoError
raise
rescue => error
raise PolicyfileExportRepoError.new("Invalid lockfile data", error)
end
def write_updated_lockfile
File.open(policyfile_lock_expanded_path, "wb+") do |f|
f.print(FFI_Yajl::Encoder.encode(policyfile_lock.to_lock, pretty: true ))
end
end
def assert_lockfile_exists!
unless File.exist?(policyfile_lock_expanded_path)
raise LockfileNotFound, "No lockfile at #{policyfile_lock_expanded_path} - you need to run `install` before `push`"
end
end
def assert_export_dir_clean!
if !force_export? && !conflicting_fs_entries.empty? && !archive?
msg = "Export dir (#{export_dir}) not clean. Refusing to export. (Conflicting files: #{conflicting_fs_entries.join(', ')})"
raise ExportDirNotEmpty, msg
end
end
def force_export?
@force_export
end
def conflicting_fs_entries
Dir.glob(File.join(cookbook_artifacts_dir, "*")) +
Dir.glob(File.join(policies_dir, "*")) +
Dir.glob(File.join(policy_groups_dir, "*")) +
Dir.glob(File.join(export_dir, "Policyfile.lock.json"))
end
def cookbook_artifacts_dir
File.join(export_dir, "cookbook_artifacts")
end
def policies_dir
File.join(export_dir, "policies")
end
def policy_groups_dir
File.join(export_dir, "policy_groups")
end
def dot_chef_dir
File.join(export_dir, ".chef")
end
def policyfile_repo_item_path
basename = "#{policyfile_lock.name}-#{policyfile_lock.revision_id}"
File.join(staging_dir, "policies", "#{basename}.json")
end
def policy_group_repo_item_path
File.join(staging_dir, "policy_groups", "local.json")
end
def dot_chef_staging_dir
File.join(staging_dir, ".chef")
end
def cookbook_artifacts_staging_dir
File.join(staging_dir, "cookbook_artifacts")
end
def policies_staging_dir
File.join(staging_dir, "policies")
end
def policy_groups_staging_dir
File.join(staging_dir, "policy_groups")
end
def lockfile_staging_path
File.join(staging_dir, "Policyfile.lock.json")
end
def client_rb_staging_path
File.join(dot_chef_staging_dir, "config.rb")
end
def readme_staging_path
File.join(staging_dir, "README.md")
end
end
end
end
| 29.93128 | 133 | 0.672314 |
fffb8856784dcd5c71b6d753829d61b260877d47
| 264 |
module Event::Addon::Category
module Setting
extend SS::Addon
extend ActiveSupport::Concern
included do
embeds_ids :st_categories, class_name: "Category::Node::Base"
permit_params st_category_ids: []
end
set_order 500
end
end
| 18.857143 | 67 | 0.69697 |
ff689a431fb1bf55dc3bc7245dd574dd06eb8760
| 693 |
require "require_all"
require_all "../Shared"
require_relative "spherorobot"
sphero = SpheroRobot.new
keyboard = KeyboardRobot.new
keyboard.add_handler "b", lambda { sphero.change_color :blue }
keyboard.add_handler "r", lambda { sphero.change_color :red }
keyboard.add_handler "y", lambda { sphero.change_color :yellow }
keyboard.add_handler "g", lambda { sphero.change_color :green }
keyboard.add_handler "space", lambda { sphero.stop }
keyboard.add_handler "up", lambda { sphero.roll 180 }
keyboard.add_handler "down", lambda { sphero.roll 0 }
keyboard.add_handler "left", lambda { sphero.roll 90 }
keyboard.add_handler "right", lambda { sphero.roll 270 }
Artoo::Robot.work!([keyboard])
| 34.65 | 64 | 0.759019 |
91ad12335c3ac58adae185b17809ca2c7ec21108
| 2,545 |
# FGDC <<Class>> SpatialReference
# FGDC CSDGM writer output in XML
# History:
# Stan Smith 2018-10-09 refactor mdJson projection object
# Stan Smith 2018-03-20 refactored error and warning messaging
# Stan Smith 2018-01-15 original script
require_relative '../fgdc_writer'
module ADIWG
module Mdtranslator
module Writers
module Fgdc
class LocalSystem
def initialize(xml, hResponseObj, inContext = nil)
@xml = xml
@hResponseObj = hResponseObj
@NameSpace = ADIWG::Mdtranslator::Writers::Fgdc
end
def writeXML(hProjection, inContext = nil)
# localSYSTEM is not the same as localPLANAR in fgdc
# however they use the same 'local' object
# local system sets projectionIdentifier.identifier = 'localSystem'
# local planar sets projectionIdentifier.identifier = 'localPlanar'
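# illustrative input shape, assumed from the reads below (values are placeholders):
#   hProjection = {
#     projectionIdentifier: { identifier: 'localSystem', name: 'Local Coordinate System' },
#     local: { description: 'site grid description', georeference: 'control point info' }
#   }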
hProjectionId = hProjection[:projectionIdentifier]
hLocal = hProjection[:local]
outContext = 'local system'
outContext = inContext + ' ' + outContext unless inContext.nil?
if hLocal.empty?
@NameSpace.issueError(250, outContext)
return
end
if hProjectionId[:identifier] == 'localSystem'
hProjectionId[:name] = nil unless hProjectionId.has_key?(:name)
if hProjectionId[:name].nil?
hProjectionId[:name] = 'Local Coordinate System'
end
end
# local system 4.1.3.1 (localdes) - local coordinate system description (required)
unless hLocal[:description].nil?
@xml.tag!('localdes', hLocal[:description])
end
if hLocal[:description].nil?
@NameSpace.issueError(251, outContext)
end
# local system 4.1.3.2 (localgeo) - local coordinate system georeference information (required)
unless hLocal[:georeference].nil?
@xml.tag!('localgeo', hLocal[:georeference])
end
if hLocal[:georeference].nil?
@NameSpace.issueError(252, outContext)
end
end # writeXML
end # LocalSystem
end
end
end
end
| 35.347222 | 113 | 0.535953 |
6affba2743c4715f3ae53c5843514c9b8be64ca7
| 155 |
module Shareconomy
class Rating < ActiveRecord::Base
belongs_to :listing
validates :title, :content, :value, :listing, presence: true
end
end
| 19.375 | 64 | 0.722581 |
791902e1662341d17c4d10bc7f8dd8f62e0e9ee3
| 5,981 |
=begin
PureCloud Platform API
With the PureCloud Platform API, you can control all aspects of your PureCloud environment. With the APIs you can access the system configuration, manage conversations and more.
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
License: UNLICENSED
https://help.mypurecloud.com/articles/terms-and-conditions/
Terms of Service: https://help.mypurecloud.com/articles/terms-and-conditions/
=end
require 'date'
module PureCloud
class WfmMoveManagementUnitTopicMoveManagementUnitNotification
attr_accessor :business_unit
attr_accessor :status
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'business_unit' => :'businessUnit',
:'status' => :'status'
}
end
# Attribute type mapping.
def self.swagger_types
{
:'business_unit' => :'WfmMoveManagementUnitTopicBusinessUnit',
:'status' => :'String'
}
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
return unless attributes.is_a?(Hash)
# convert string to symbol for hash key
attributes = attributes.each_with_object({}){|(k,v), h| h[k.to_sym] = v}
if attributes.has_key?(:'businessUnit')
self.business_unit = attributes[:'businessUnit']
end
if attributes.has_key?(:'status')
self.status = attributes[:'status']
end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
return invalid_properties
end
# Check to see if all the properties in the model are valid
# @return true if the model is valid
def valid?
allowed_values = ["Processing", "Complete", "Canceled", "Error"]
return false if @status && !allowed_values.include?(@status)
true
end
# Custom attribute writer method checking allowed values (enum).
# @param [Object] status Object to be assigned
def status=(status)
allowed_values = ["Processing", "Complete", "Canceled", "Error"]
if status && !allowed_values.include?(status)
fail ArgumentError, "invalid value for 'status', must be one of #{allowed_values}."
end
@status = status
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
business_unit == o.business_unit &&
status == o.status
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Fixnum] Hash code
def hash
[business_unit, status].hash
end
# build the object from hash
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /^Array<(.*)>/i
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map{ |v| _deserialize($1, v) } )
else
#TODO show warning in debug mode
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
else
# data not found in attributes(hash), not an issue as the data can be optional
end
end
self
end
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :BOOLEAN
if value.to_s =~ /^(true|t|yes|y|1)$/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
_model = Object.const_get("PureCloud").const_get(type).new
_model.build_from_hash(value)
end
end
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
def to_body
to_hash
end
# return the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
next if value.nil?
hash[param] = _to_hash(value)
end
hash
end
# Method to output non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
def _to_hash(value)
if value.is_a?(Array)
value.compact.map{ |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 24.817427 | 177 | 0.593881 |
6a8ff7ef283a61e93bf119cbd38703a6f4872eea
| 4,119 |
# frozen_string_literal: true
module AhlScraper
module Games
class Goal < Resource
def id
@id ||= @raw_data[:game_goal_id]
end
def number
@number ||= @opts[:number]
end
def period
@period ||= @raw_data[:period][:id].to_i
end
def time
@time ||= @raw_data[:time]
end
def period_time_in_seconds
@period_time_in_seconds ||= period_time.to_sec
end
def game_time_elapsed
@game_time_elapsed ||= period_time.to_elapsed
end
def scorer_goal_number
@scorer_goal_number ||= @raw_data[:scorerGoalNumber]
end
def scored_by
@scored_by ||= {
id: @raw_data[:scoredBy][:id],
first_name: @raw_data[:scoredBy][:firstName],
last_name: @raw_data[:scoredBy][:lastName],
number: @raw_data[:scoredBy][:jerseyNumber],
position: @raw_data[:scoredBy][:position],
}
end
def assists
@assists ||= @raw_data[:assists].map do |assist|
{
id: assist[:id],
first_name: assist[:firstName],
last_name: assist[:lastName],
number: assist[:jerseyNumber],
position: assist[:position],
}
end
end
def assist_numbers
@assist_numbers ||= @raw_data[:assistNumbers]
end
def scoring_team
@scoring_team ||= @raw_data[:team]
end
def power_play?
@power_play ||= @raw_data[:properties][:isPowerPlay] == "1"
end
def short_handed?
@short_handed ||= @raw_data[:properties][:isShortHanded] == "1"
end
def empty_net?
@empty_net ||= @raw_data[:properties][:isEmptyNet] == "1"
end
def extra_skater?
@extra_skater ||= set_extra_skater
end
def penalty_shot?
@penalty_shot ||= @raw_data[:properties][:isPenaltyShot] == "1"
end
def insurance_goal?
@insurance_goal ||= @raw_data[:properties][:isInsuranceGoal] == "1"
end
def game_winner?
@game_winner ||= @raw_data[:properties][:isGameWinningGoal] == "1"
end
def plus_players
@plus_players ||= @raw_data[:plus_players].map { |player| OnIceSkater.new(player, { scoring_team: true, goal_id: id }) }
end
def minus_players
@minus_players ||= @raw_data[:minus_players].map { |player| OnIceSkater.new(player, { scoring_team: false, goal_id: id }) }
end
def situation
@situation ||=
if @raw_data[:properties][:isPowerPlay] == "1"
"PP"
elsif @raw_data[:properties][:isShortHanded] == "1"
"SH"
elsif @raw_data[:properties][:isEmptyNet] == "1"
"EN"
elsif extra_skater? == true
"EX"
elsif @raw_data[:properties][:isPenaltyShot] == "1"
"PS"
else
"EV"
end
end
def description
@description ||=
if assists.nil? || assists.empty?
goalscorer_name
elsif assists.length == 1
"#{goalscorer_name} (#{a1_name})"
else
"#{goalscorer_name} (#{a1_name}, #{a2_name})"
end
end
def special_teams?
@special_teams ||= short_handed? || power_play?
end
private
def set_extra_skater
return true if plus_players.length > minus_players.length && !special_teams? && !penalty_shot?
return true if plus_players.length == minus_players.length && short_handed?
return true if plus_players.length == 6
false
end
def goalscorer_name
@goalscorer_name ||= "#{scored_by[:first_name]} #{scored_by[:last_name]}"
end
def a1_name
@a1_name ||= "#{assists.dig(0, :first_name)} #{assists.dig(0, :last_name)}"
end
def a2_name
@a2_name ||= "#{assists.dig(1, :first_name)} #{assists.dig(1, :last_name)}"
end
def period_time
@period_time ||= PeriodTimeHelper.new(time, period)
end
end
end
end
| 25.425926 | 131 | 0.558145 |
bf2e28cf6a3ce64fbc8938f23b44d7885d5b0c26
| 800 |
require './rb_run_length_encoding.rb'
describe 'rb_run_length_encoding' do
it "encode string AAAAABBBZZZZDDDDDDDIII return to 5A3B4Z7D3I" do
expect(encode("AAAAABBBZZZZDDDDDDDIII")).to eq "5A3B4Z7D3I"
end
it "encode string AAAAABBBZZZZDDDDDDDIIIX return to 5A3B4Z7D3IX" do
expect(encode("AAAAABBBZZZZDDDDDDDIIIX")).to eq "5A3B4Z7D3IX"
end
it "encode string when string doesn't repeat ABCDEF return to ABCDEF" do
expect(encode("ABCDEF")).to eq "ABCDEF"
end
it "decode string 5A3B4Z7D3I return to AAAAABBBZZZZDDDDDDDIII" do
expect(decode("5A3B4Z7D3I")).to eq "AAAAABBBZZZZDDDDDDDIII"
end
it "decode string 12AI7B2I return to AAAAAAAAAAAAIBBBBBBBII" do
expect(decode("12AI7B2I")).to eq "AAAAAAAAAAAAIBBBBBBBII"
end
end
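# A minimal sketch of the encode/decode pair this spec exercises (illustrative only;
# the real implementation lives in ./rb_run_length_encoding.rb and may differ):
#
#   def encode(str)
#     str.scan(/((.)\2*)/).map { |run, ch| run.length > 1 ? "#{run.length}#{ch}" : ch }.join
#   end
#
#   def decode(str)
#     str.scan(/(\d*)(\D)/).map { |count, ch| ch * (count.empty? ? 1 : count.to_i) }.join
#   end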
| 42.105263 | 76 | 0.72875 |
d53671b4ac5e790c73e631808a8bd2a52e9dddc0
| 91 |
FactoryBot.define do
factory :friendship do
user { 1 }
friend_id { 1 }
end
end
| 13 | 24 | 0.637363 |
4ada7adef5580f5d5fce5058ab5b5f888dace783
| 219 |
class Request < ActiveRecord::Base
#eval("attr_accessible #{column_names.map { |cn| cn.to_sym }.to_s.gsub(/\[|\]/,"")}")
self.primary_key = "service_request_id"
attr_accessible *column_names
has_many :notes
end
| 31.285714 | 87 | 0.712329 |
91977eee686ac0d13da8e51cf34aa85c36b140f6
| 5,182 |
module Rpush
module Daemon
module Dispatcher
class ApnsTcp < Rpush::Daemon::Dispatcher::Tcp
include Loggable
include Reflectable
SELECT_TIMEOUT = 10
ERROR_TUPLE_BYTES = 6
APNS_ERRORS = {
1 => 'Processing error',
2 => 'Missing device token',
3 => 'Missing topic',
4 => 'Missing payload',
5 => 'Missing token size',
6 => 'Missing topic size',
7 => 'Missing payload size',
8 => 'Invalid token',
10 => 'APNs closed connection (possible maintenance)',
255 => 'None (unknown error)'
}
def initialize(*args)
super
@dispatch_mutex = Mutex.new
@stop_error_receiver = false
@connection.on_connect { start_error_receiver }
end
def dispatch(payload)
@dispatch_mutex.synchronize do
@delivery_class.new(@app, @connection, payload.batch).perform
record_batch(payload.batch)
end
end
def cleanup
if Rpush.config.push
# In push mode only a single batch is sent, followed by immediate shutdown.
# Allow the error receiver time to handle any errors.
@reconnect_disabled = true
sleep 1
end
@stop_error_receiver = true
super
@error_receiver_thread.join if @error_receiver_thread
rescue StandardError => e
log_error(e)
reflect(:error, e)
ensure
@error_receiver_thread = nil
end
private
def start_error_receiver
@error_receiver_thread = Thread.new do
check_for_error until @stop_error_receiver
Rpush::Daemon.store.release_connection
end
end
def delivered_buffer
@delivered_buffer ||= RingBuffer.new(Rpush.config.batch_size * 10)
end
def record_batch(batch)
batch.each_delivered do |notification|
delivered_buffer << notification.id
end
end
def check_for_error
begin
# On Linux, select returns nil from a dropped connection.
# On OS X, Errno::EBADF is raised following a Errno::EADDRNOTAVAIL from the write call.
return unless @connection.select(SELECT_TIMEOUT)
tuple = @connection.read(ERROR_TUPLE_BYTES)
rescue *TcpConnection::TCP_ERRORS
reconnect unless @stop_error_receiver
return
end
@dispatch_mutex.synchronize { handle_error_response(tuple) }
rescue StandardError => e
log_error(e)
end
def handle_error_response(tuple)
if tuple
_, code, notification_id = tuple.unpack('ccN')
handle_error(code, notification_id)
else
handle_disconnect
end
if Rpush.config.push
# Only attempt to handle a single error in Push mode.
@stop_error_receiver = true
return
end
reconnect
ensure
delivered_buffer.clear
end
def reconnect
return if @reconnect_disabled
log_error("Lost connection to #{@connection.host}:#{@connection.port}, reconnecting...")
@connection.reconnect_with_rescue
end
def handle_disconnect
log_error("The APNs disconnected before any notifications could be delivered. This usually indicates you are using an invalid certificate.") if delivered_buffer.size == 0
end
def handle_error(code, notification_id)
notification_id = Rpush::Daemon.store.translate_integer_notification_id(notification_id)
failed_pos = delivered_buffer.index(notification_id)
description = description_for_code(code)
log_error("Notification #{notification_id} failed with error: " + description)
Rpush::Daemon.store.mark_ids_failed([notification_id], code, description, Time.now)
reflect(:notification_id_failed, @app, notification_id, code, description)
if failed_pos
retry_ids = delivered_buffer[(failed_pos + 1)..-1]
retry_notification_ids(retry_ids, notification_id)
elsif delivered_buffer.size > 0
log_error("Delivery sequence unknown for notifications following #{notification_id}.")
end
end
def description_for_code(code)
APNS_ERRORS[code.to_i] ? "#{APNS_ERRORS[code.to_i]} (#{code})" : "Unknown error code #{code.inspect}. Possible Rpush bug?"
end
def retry_notification_ids(ids, notification_id)
return if ids.size == 0
now = Time.now
Rpush::Daemon.store.mark_ids_retryable(ids, now)
notifications_str = 'Notification'
notifications_str += 's' if ids.size > 1
log_warn("#{notifications_str} #{ids.join(', ')} will be retried due to the failure of notification #{notification_id}.")
ids.each { |id| reflect(:notification_id_will_retry, @app, id, now) }
end
end
end
end
end
| 33.869281 | 180 | 0.604979 |
ed362261b38910ea5ead0112f327ad5806c2ddf3
| 1,471 |
# frozen_string_literal: true
require "prometheus/middleware/exporter"
module Yabeda
module Prometheus
# Rack application or middleware that provides metrics exposition endpoint
class Exporter < ::Prometheus::Middleware::Exporter
NOT_FOUND_HANDLER = lambda do |_env|
[404, { "Content-Type" => "text/plain" }, ["Not Found\n"]]
end.freeze
class << self
# Allows to use middleware as standalone rack application
def call(env)
@app ||= new(NOT_FOUND_HANDLER, path: "/")
@app.call(env)
end
def start_metrics_server!
Thread.new do
default_port = ENV.fetch("PORT", 9394)
Rack::Handler::WEBrick.run(
rack_app,
Host: ENV["PROMETHEUS_EXPORTER_BIND"] || "0.0.0.0",
Port: ENV.fetch("PROMETHEUS_EXPORTER_PORT", default_port),
AccessLog: [],
)
end
end
def rack_app(exporter = self, path: "/metrics")
Rack::Builder.new do
use Rack::CommonLogger
use Rack::ShowExceptions
use exporter, path: path
run NOT_FOUND_HANDLER
end
end
end
def initialize(app, options = {})
super(app, options.merge(registry: Yabeda::Prometheus.registry))
end
def call(env)
Yabeda.collectors.each(&:call) if env["PATH_INFO"] == path
super
end
end
end
end
| 27.754717 | 78 | 0.573759 |
08e6ca79f379706801d1bed5f7f74b8d00afa38b
| 3,044 |
require 'spec_helper'
require 'sequel/plugins/enum_guard'
Sequel.extension :migration
describe Sequel::Plugins::EnumGuard do
Migration = Sequel.migration do
up do
extension :pg_enum
create_enum :test_enum, %w(a b c)
create_enum :test_enum2, %w(d e f)
create_table(:enum_test_models) do
String :str_col
String :kind
test_enum :enum_col, null: false
test_enum2 :enum_col2, null: true
end
end
down do
extension :pg_enum
drop_table :enum_test_models
drop_enum :test_enum
drop_enum :test_enum2
end
end
before(:all) do
Migration.apply(DB, :up)
end
after(:all) do
Migration.apply(DB, :down)
end
let(:enums_schema) do
{
enum_col: Set.new(%w'a b c'),
enum_col2: Set.new(['d', 'e', 'f', nil]),
}
end
before do
Sequel::Model.plugin :enum_guard
end
context 'within global Sequel::Model' do
subject(:model) do
Sequel::Model
end
it { should_not respond_to(:enums) }
end
context 'within model with enums' do
subject(:model) do
class EnumTestModel < Sequel::Model
end
EnumTestModel
end
describe '.enum_fields' do
subject(:enums) { model.enum_fields }
it 'contains allowed values for enum' do
expect(enums).to eq(enums_schema)
end
it { should be_frozen }
end
describe 'instance methods' do
subject(:instance) do
model.new
end
describe 'enum setter' do
context 'with invalid enum value' do
it 'raises an ArgumentError' do
expect { instance.enum_col = 'invalid val' }.to raise_error(ArgumentError)
end
it "doesn't accept nil value for NOT NULL field" do
expect { instance.enum_col = nil }.to raise_error(ArgumentError)
end
end
context 'with valid enum value' do
it 'accepts string' do
instance.enum_col = 'b'
expect(instance.enum_col).to eq 'b'
end
it 'accepts symbol' do
instance.enum_col = :c
expect(instance.enum_col).to eq 'c'
end
it 'accepts nil for NULL columns' do
instance.enum_col2 = nil
expect(instance.enum_col2).to be_nil
end
end
end
end
end
context 'in subclass' do
let!(:model) do
class EnumTestModel < Sequel::Model
plugin :single_table_inheritance, :kind
end
EnumTestModel
end
let(:submodel) do
class SubEnumTestModel < EnumTestModel
end
SubEnumTestModel
end
let!(:instance) { submodel.create(enum_col: 'b') }
describe '.enum_fields' do
it 'contains allowed values for enum' do
expect(model.enum_fields).to eq(enums_schema)
expect(submodel.enum_fields).to eq(enums_schema)
end
it 'is a different object' do
expect(model.enum_fields).to_not equal(submodel.enum_fields)
end
end
end
end
| 22.218978 | 86 | 0.605782 |
ed052a7d631a4db446c513b78603f9c6a625e5e2
| 1,792 |
require "project_metric_cycle_time/version"
require 'project_metric_cycle_time/data_generator'
require 'project_metric_base'
require 'faraday'
class ProjectMetricCycleTime
include ProjectMetricBase
add_credentials %I[tracker_project tracker_token]
add_raw_data %w[tracker_stories tracker_cycle_time]
def initialize(credentials, raw_data = nil)
@project = credentials[:tracker_project]
@conn = Faraday.new(url: 'https://www.pivotaltracker.com/services/v5')
@conn.headers['Content-Type'] = 'application/json'
@conn.headers['X-TrackerToken'] = credentials[:tracker_token]
complete_with raw_data
end
def score
# Average cycle time.
accepted_stories.inject(0.0) { |sum, s| sum + cycle_time(s) } / accepted_stories.length.to_f
end
def image
{ chartType: 'story_overall',
data: { accepted_stories: accepted_stories,
all_stories: extend_stories(tracked_stories) }}
end
def obj_id
nil
end
private
def tracker_stories
@tracker_stories = JSON.parse(@conn.get("projects/#{@project}/stories").body)
end
def tracker_cycle_time
@tracker_cycle_time = JSON.parse(@conn.get("projects/#{@project}/stories?fields=cycle_time_details").body)
end
def tracked_stories
@tracker_cycle_time.select { |s| s['cycle_time_details'].key? 'total_cycle_time' }
end
def accepted_stories
extend_stories(tracked_stories).select { |s| s['current_state'].eql? 'accepted' }
end
def extend_stories(stories)
stories.map do |story|
linked_story = @tracker_stories.find { |s| s['id'].eql? story['id']}
linked_story.nil? ? story : story.update(linked_story)
end
end
def cycle_time(s)
s['cycle_time_details']['total_cycle_time'] - s['cycle_time_details']['delivered_time']
end
end
| 28 | 110 | 0.723772 |
18017db943b0c6748e26a898bf2db3061e05d831
| 4,211 |
# Copyright (c) 2017-present, Facebook, Inc. All rights reserved.
#
# You are hereby granted a non-exclusive, worldwide, royalty-free license to use,
# copy, modify, and distribute this software in source code or binary form for use
# in connection with the web services and APIs provided by Facebook.
#
# As with any software that integrates with the Facebook platform, your use of
# this software is subject to the Facebook Platform Policy
# [http://developers.facebook.com/policy/]. This copyright notice shall be
# included in all copies or substantial portions of the software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# FB:AUTOGEN
module FacebookAds
# This class is auto-generated.
# For any issues or feature requests related to this class, please let us know
# on github and we'll fix in our codegen framework. We'll not be able to accept
# pull request for this class.
class CommerceMerchantSettings < AdObject
CTA = [
"CONTACT_MERCHANT",
"OFFSITE_LINK",
]
MERCHANT_STATUS = [
"ENABLED",
"EXTERNALLY_DISABLED",
]
field :braintree_merchant_id, 'string'
field :checkout_message, 'string'
field :contact_email, 'string'
field :cta, 'string'
field :disable_checkout_urls, 'bool'
field :display_name, 'string'
field :facebook_channel, 'object'
field :has_discount_code, 'bool'
field :id, 'string'
field :instagram_channel, 'object'
field :merchant_alert_email, 'string'
field :merchant_page, 'Profile'
field :merchant_status, 'string'
field :onsite_commerce_merchant, 'object'
field :payment_provider, 'string'
field :privacy_url_by_locale, 'hash'
field :review_rejection_messages, { list: 'string' }
field :review_rejection_reasons, { list: 'string' }
field :review_status, 'string'
field :supported_card_types, { list: 'string' }
field :terms, 'string'
field :terms_url_by_locale, 'hash'
field :whatsapp_channel, 'object'
has_no_delete
has_edge :facebook_channel do |edge|
edge.post 'CommerceMerchantSettings' do |api|
api.has_param :pages, { list: 'string' }
end
end
has_edge :instagram_channel do |edge|
edge.delete
edge.post 'CommerceMerchantSettings' do |api|
api.has_param :instagram_business_accounts, { list: 'string' }
api.has_param :instagram_users, { list: 'string' }
end
end
has_edge :order_management_apps do |edge|
edge.get 'Application'
edge.post 'CommerceMerchantSettings'
end
has_edge :product_catalogs do |edge|
edge.get 'ProductCatalog'
end
has_edge :returns do |edge|
edge.get do |api|
api.has_param :end_time_created, 'datetime'
api.has_param :merchant_return_id, 'string'
api.has_param :start_time_created, 'datetime'
api.has_param :statuses, { list: { enum: %w{APPROVED DISAPPROVED MERCHANT_MARKED_COMPLETED REFUNDED REQUESTED }} }
end
end
has_edge :setup_status do |edge|
edge.get 'CommerceMerchantSettingsSetupStatus'
end
has_edge :shipping_profiles do |edge|
edge.get do |api|
api.has_param :reference_id, 'string'
end
edge.post do |api|
api.has_param :handling_time, 'hash'
api.has_param :is_default_shipping_profile, 'bool'
api.has_param :name, 'string'
api.has_param :reference_id, 'string'
api.has_param :shipping_destinations, { list: 'hash' }
end
end
has_edge :tax_settings do |edge|
edge.get
end
has_edge :whatsapp_channel do |edge|
edge.post do |api|
api.has_param :op, { enum: %w{ADD REMOVE }}
api.has_param :whatsapp_business_accounts, { list: 'string' }
end
end
end
end
| 33.15748 | 122 | 0.693185 |
38b782c8b39c74e0ec9810335bb2017ffe24d032
| 4,328 |
# frozen_string_literal: true
require 'sidekiq/api'
class RoomsController < ApplicationController
before_action :load_rooms, only: %i[index show simple]
before_action :load_room, except: %i[new create]
def index; end
def show
@instance = @room.instance
authorize! :read, @room
@message = Message.new
@messages = @room.messages.order(:created_at)
RoomUser.create_or_update!(@room.id, current_or_guest_user.id, @messages&.last&.id)
end
def new
@instance = Instance.find(params[:id])
authorize! :create_room_in, @instance
@room = Room.new
end
def create
@instance = Instance.find(room_params[:instance_id])
authorize! :create_room_in, @instance
@room = Room.new(room_params.merge(owner_id: current_or_guest_user.id))
if @room.save
RoomUser.create_or_update!(@room.id, current_or_guest_user.id, nil)
redirect_to instance_url(@instance, access_token: params[:access_token].presence), notice: 'Thread has been created successfully'
else
flash.now[:alert] = @room.errors.full_messages.join(', ')
render :new
end
end
def edit
authorize! :update, @room
end
def update
authorize! :update, @room
if @room.update_attributes(room_params)
redirect_to rooms_url, notice: 'Thread has been updated successfully'
else
render :edit
end
end
def destroy
authorize! :destroy, @room
instance = @room.instance
@room.destroy
redirect_to instance, notice: 'Thread has been deleted successfully'
end
def simple
authorize! :read, @room
RoomUser.create_or_update!(@room.id, current_or_guest_user.id, @room.messages&.last&.id)
render @rooms
end
def lock
authorize! :update, @room
if @room.update_attributes(locked: true)
redirect_to request.referer, notice: 'Thread has been locked'
else
redirect_to request.referer, notice: 'Something went wrong, try again'
end
end
def unlock
authorize! :update, @room
if @room.update_attributes(locked: false)
redirect_to request.referer, notice: 'Thread has been unlocked'
else
redirect_to request.referer, notice: 'Something went wrong, try again'
end
end
def set_delayed_lock
authorize! :update, @room
if @room.locked
redirect_to request.referer,
notice: 'Can\'t set a delayed lock on a locked thread'
return
end
lock_date = Time.now +
params[:days].to_i.days +
params[:hours].to_i.hours +
params[:minutes].to_i.minutes
@room.update_attributes!(planned_lock: lock_date)
# Cleaning exisiting jobs
queue_jobs = Sidekiq::ScheduledSet.new
queue_jobs.each do |queue_job|
if queue_job.klass == 'DelayedRoomLockWorker' && queue_job.args == [@room.id]
queue_job.delete
end
end
DelayedRoomLockWorker.perform_at(lock_date, @room.id)
redirect_to request.referer, notice: 'Thread will be locked on ' + lock_date.to_formatted_s(:long_ordinal)
end
def cancel_delayed_lock
@room.update_attributes!(planned_lock: nil)
# Cleaning exisiting jobs
queue_jobs = Sidekiq::ScheduledSet.new
queue_jobs.each do |queue_job|
if queue_job.klass == 'DelayedRoomLockWorker' && queue_job.args == [@room.id]
queue_job.delete
end
end
redirect_to request.referer, notice: 'Successfully cancelled the delayed lock'
end
def mute_user
@user = User.find(params[:user_id])
messages_to_remove = @room.messages.where(user: @user)
messages_ids = messages_to_remove.pluck(:id)
messages_to_remove.destroy_all
MutedRoomUser.create!(room: @room, user: @user)
ActionCable.server.broadcast(
"room_#{@room.id}",
action: 'muted_user',
data: {
messages_ids: messages_ids,
muted_user_id: @user.id
}
)
redirect_to request.referer, notice: @user.nickname_in_room(@room) + ' has been muted in this thread'
end
private
def room_params
params.require(:room).permit(:title, :instance_id)
end
def load_rooms
@room = Room.find(params[:id] || params[:room_id])
@rooms = @room.instance.rooms_sorted_by_last_message
end
def load_room
@room = Room.find(params[:id])
end
end
| 25.017341 | 163 | 0.680222 |
ac2bd5a3c39c535daa5c25a1ce159b1f71330f05
| 477 |
module QA
feature 'LDAP user login', :ldap do
scenario 'user logs in using LDAP credentials' do
Runtime::Browser.visit(:gitlab, Page::Main::Login)
Page::Main::Login.act { sign_in_using_ldap_credentials }
# TODO, since `Signed in successfully` message was removed
# this is the only way to tell if user is signed in correctly.
#
Page::Menu::Main.perform do |menu|
expect(menu).to have_personal_area
end
end
end
end
| 29.8125 | 68 | 0.666667 |